id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16,600
|
test_installation_client.py
|
freeipa_freeipa/ipatests/test_integration/test_installation_client.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""
Module provides tests for various options of ipa-client-install.
"""
from __future__ import absolute_import
import os
import pytest
import re
import shlex
import textwrap
from ipaplatform.paths import paths
from ipalib.sysrestore import SYSRESTORE_STATEFILE, SYSRESTORE_INDEXFILE
from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_ipa.integration import tasks
from ipatests.pytest_ipa.integration.firewall import Firewall
class TestInstallClient(IntegrationTest):
    """Tests for various options of ipa-client-install."""
    num_clients = 1

    @classmethod
    def install(cls, mh):
        # One master with integrated DNS; the client is enrolled with
        # default options so the first test can inspect the result.
        tasks.install_master(cls.master, setup_dns=True)
        tasks.install_client(cls.master, cls.clients[0])

    def check_dns_lookup_kdc(self, client):
        """Check that dns_lookup_kdc is never set to false.

        https://pagure.io/freeipa/issue/6523

        Setting dns_lookup_kdc to False would result in a hardcoded
        configuration which is less reliable in the long run.
        For instance, adding a trust to an Active Directory forest
        after clients are enrolled would result in clients not being
        able to authenticate AD users. Recycling FreeIPA servers
        could prove problematic if the original hostnames are not
        reused too.
        """
        result = client.run_command(
            shlex.split("grep dns_lookup_kdc /etc/krb5.conf")
        )
        # the option must be present and explicitly true
        assert 'false' not in result.stdout_text.lower()
        assert 'true' in result.stdout_text.lower()

    def test_dns_lookup_kdc_is_true_with_default_enrollment_options(self):
        # Client was enrolled in install(); verify, then uninstall so the
        # next test can re-enroll with different options.
        self.check_dns_lookup_kdc(self.clients[0])
        tasks.uninstall_client(self.clients[0])

    def test_dns_lookup_kdc_is_true_with_ipa_server_on_cli(self):
        # Even with --server given explicitly, dns_lookup_kdc must stay true.
        tasks.install_client(
            self.master,
            self.clients[0],
            extra_args=["--server", self.master.hostname]
        )
        self.check_dns_lookup_kdc(self.clients[0])
        tasks.uninstall_client(self.clients[0])

    def test_client_install_with_ssh_trust_dns(self):
        """no host key verification if ssh-trust-dns option is used

        There will be no prompt of host key verification during ssh
        to IPA enrolled machines if ssh-trust-dns option is used during
        ipa-client-install. This was broken for FIPS env which got fixed.
        Test checks for non-existence of param HostKeyAlgorithms in
        ssh_config after client-install.

        related: https://pagure.io/freeipa/issue/8082
        """
        tasks.install_client(self.master, self.clients[0],
                             extra_args=['--ssh-trust-dns'])
        result = self.clients[0].run_command(['cat', '/etc/ssh/ssh_config'])
        assert 'HostKeyAlgorithms' not in result.stdout_text

    def test_client_install_with_krb5(self):
        """Test that SSSD_PUBCONF_KRB5_INCLUDE_D_DIR is not added in krb5.conf

        SSSD already provides a config snippet which includes
        SSSD_PUBCONF_KRB5_INCLUDE_D_DIR, and having both breaks Java.
        Test checks that krb5.conf does not include
        SSSD_PUBCONF_KRB5_INCLUDE_D_DIR.

        related: https://pagure.io/freeipa/issue/9267
        """
        krb5_cfg = self.master.get_file_contents(paths.KRB5_CONF)
        # get_file_contents returns bytes, hence the .encode()
        assert 'includedir {dir}'.format(
            dir=paths.SSSD_PUBCONF_KRB5_INCLUDE_D_DIR
        ).encode() not in krb5_cfg
        tasks.uninstall_client(self.clients[0])

    def test_check_ssh_service_is_activated(self):
        """
        This test checks all default services are activated
        in sssd.conf including ssh
        """
        tasks.install_client(self.master, self.clients[0])
        sssd_cfg = self.clients[0].get_file_contents(paths.SSSD_CONF)
        assert 'services = nss, pam, ssh, sudo' in sssd_cfg.decode()
        tasks.uninstall_client(self.clients[0])

    def test_install_with_automount(self):
        """Test that installation with automount is successful"""
        tasks.install_client(self.master, self.clients[0],
                             extra_args=['--automount-location', 'default'])

    def test_uninstall_with_automount(self):
        """Test that uninstall with automount is successful and complete"""
        tasks.uninstall_client(self.clients[0])
        # The sysrestore index/state files must be gone after uninstall;
        # their presence means some system state was not restored.
        index = os.path.join(
            paths.IPA_CLIENT_SYSRESTORE, SYSRESTORE_INDEXFILE
        )
        state = os.path.join(
            paths.IPA_CLIENT_SYSRESTORE, SYSRESTORE_STATEFILE
        )
        for filepath in (index, state):
            try:
                self.clients[0].get_file_contents(filepath)
            except IOError:
                # expected: the file was removed by uninstall
                pass
            else:
                pytest.fail("The client file %s was not removed" % filepath)
class TestClientInstallBind(IntegrationTest):
    """
    The test configures an external bind server on the ipa-server
    (not the IPA-embedded DNS server) that allows unauthenticated nsupdates.

    When the IPA client is registered using ipa-client-install,
    DNS records are added for the client in the bind server using nsupdate.
    The first try is using GSS-TSIG but fails as expected, and the client
    installer then tries with unauthenticated nsupdate.
    """
    num_clients = 1

    @classmethod
    def install(cls, mh):
        cls.client = cls.clients[0]

    @pytest.fixture
    def setup_bindserver(self):
        """Configure a plain BIND server on the master host.

        The zone allows unauthenticated dynamic updates. named.conf is
        backed up before editing and restored on teardown.
        """
        bindserver = self.master
        named_conf_backup = tasks.FileBackup(self.master, paths.NAMED_CONF)
        # create a zone in the BIND server that is identical to the IPA
        add_zone = textwrap.dedent("""
        zone "{domain}" IN {{ type master;
        file "{domain}.db"; allow-query {{ any; }};
        allow-update {{ any; }}; }};
        """).format(domain=bindserver.domain.name)
        namedcfg = bindserver.get_file_contents(
            paths.NAMED_CONF, encoding='utf-8')
        namedcfg += '\n' + add_zone
        bindserver.put_file_contents(paths.NAMED_CONF, namedcfg)

        def update_contents(path, pattern, replace):
            # in-place regex substitution on a remote file
            contents = bindserver.get_file_contents(path, encoding='utf-8')
            namedcfg_query = re.sub(pattern, replace, contents)
            bindserver.put_file_contents(path, namedcfg_query)

        # open up named to queries from any host, on all interfaces
        update_contents(paths.NAMED_CONF, 'localhost;', 'any;')
        update_contents(paths.NAMED_CONF, "listen-on port 53 { 127.0.0.1; };",
                        "#listen-on port 53 { 127.0.0.1; };")
        update_contents(paths.NAMED_CONF, "listen-on-v6 port 53 { ::1; };",
                        "#listen-on-v6 port 53 { ::1; };")
        # minimal zone file with the SRV/TXT/A records IPA clients look up
        add_records = textwrap.dedent("""
        @ IN SOA {fqdn}. root.{domain}. (
        1001 ;Serial
        3H ;Refresh
        15M ;Retry
        1W ;Expire
        1D ;Minimum 1D
        )
        @ IN NS {fqdn}.
        ns1 IN A {bindserverip}
        _kerberos.{domain}. IN TXT {zoneupper}
        {fqdn}. IN A {bindserverip}
        ipa-ca.{domain}. IN A {bindserverip}
        _kerberos-master._tcp.{domain}. IN SRV 0 100 88 {fqdn}.
        _kerberos-master._udp.{domain}. IN SRV 0 100 88 {fqdn}.
        _kerberos._tcp.{domain}. IN SRV 0 100 88 {fqdn}.
        _kerberos._udp.{domain}. IN SRV 0 100 88 {fqdn}.
        _kpasswd._tcp.{domain}. IN SRV 0 100 464 {fqdn}.
        _kpasswd._udp.{domain}. IN SRV 0 100 464 {fqdn}.
        _ldap._tcp.{domain}. IN SRV 0 100 389 {fqdn}.
        """).format(
            fqdn=bindserver.hostname,
            domain=bindserver.domain.name,
            bindserverip=bindserver.ip,
            zoneupper=bindserver.domain.name.upper()
        )
        bindserverdb = "/var/named/{0}.db".format(bindserver.domain.name)
        bindserver.put_file_contents(bindserverdb, add_records)
        bindserver.run_command(['systemctl', 'start', 'named'])
        Firewall(bindserver).enable_services(["dns"])
        yield
        # teardown: restore named.conf and remove the zone file
        named_conf_backup.restore()
        bindserver.run_command(['rm', '-rf', bindserverdb])

    def test_client_nsupdate(self, setup_bindserver):
        """Test secure nsupdate failed, then try unsecure nsupdate..

        Test to verify when bind is configured with dynamic update policy,
        and during client-install 'nsupdate -g' fails then it should run with
        second call using unauthenticated nsupdate.

        Related : https://pagure.io/freeipa/issue/8402
        """
        # with pre-configured bind server, install ipa-server without dns.
        tasks.install_master(self.master, setup_dns=False)
        self.client.resolver.backup()
        # point the client at the external bind server for DNS discovery
        self.client.resolver.setup_resolver(
            self.master.ip, self.master.domain.name)
        try:
            self.client.run_command(['ipa-client-install', '-U',
                                     '--domain', self.client.domain.name,
                                     '--realm', self.client.domain.realm,
                                     '-p', self.client.config.admin_name,
                                     '-w', self.client.config.admin_password,
                                     '--server', self.master.hostname])
            # call unauthenticated nsupdate if GSS-TSIG nsupdate failed.
            str1 = "nsupdate (GSS-TSIG) failed"
            str2 = "'/usr/bin/nsupdate', '/etc/ipa/.dns_update.txt'"
            client_log = self.client.get_file_contents(
                paths.IPACLIENT_INSTALL_LOG, encoding='utf-8'
            )
            assert str1 in client_log and str2 in client_log
            dig_after = self.client.run_command(
                ['dig', '@{0}'.format(self.master.ip), self.client.hostname,
                 '-t', 'SSHFP'])
            # the fallback update must have published SSHFP records
            assert "ANSWER: 0" not in dig_after.stdout_text.strip()
        finally:
            self.client.resolver.restore()
| 9,835
|
Python
|
.py
| 206
| 37.936893
| 78
| 0.62814
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,601
|
test_pkinit_manage.py
|
freeipa_freeipa/ipatests/test_integration/test_pkinit_manage.py
|
#
# Copyright (C) 2018 FreeIPA Contributors see COPYING for license
#
"""
Module provides tests for the ipa-pkinit-manage command.
"""
from __future__ import absolute_import
from ipalib import x509
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_ipa.integration import tasks
SELFSIGNED_CA_HELPER = 'SelfSign'
IPA_CA_HELPER = 'IPA'
PKINIT_STATUS_ENABLED = 'enabled'
PKINIT_STATUS_DISABLED = 'disabled'
def check_pkinit_status(host, status):
    """Assert that ``ipa-pkinit-manage status`` reports *status*."""
    cmd = host.run_command(['ipa-pkinit-manage', 'status'],
                           raiseonerr=False)
    assert cmd.returncode == 0
    expected = 'PKINIT is {}'.format(status)
    assert expected in cmd.stdout_text
def check_pkinit_tracking(host, ca_helper):
    """Assert the PKINIT cert is tracked exactly once, by *ca_helper*."""
    listing = host.run_command(['getcert', 'list', '-f', paths.KDC_CERT],
                               raiseonerr=False)
    assert listing.returncode == 0
    output = listing.stdout_text
    # exactly one certmonger request must exist for the KDC cert
    assert output.count('Request ID') == 1
    # and it must be tracked by the expected CA helper
    assert 'CA: {}'.format(ca_helper) in output
def check_pkinit_cert_issuer(host, issuer):
    """Ensures that the PKINIT cert is signed by the expected issuer

    Also validates the SAN content of the KDC certificate.
    """
    data = host.get_file_contents(paths.KDC_CERT)
    pkinit_cert = x509.load_pem_x509_certificate(data)
    # Make sure that the issuer is the expected one
    assert DN(pkinit_cert.issuer) == DN(issuer)
    # KDC cert must have SAN for KDC hostname
    assert host.hostname in pkinit_cert.san_a_label_dns_names
    # at least three SANs, profile adds UPN and KRB principal name
    assert len(pkinit_cert.san_general_names) >= 3
def check_pkinit(host, enabled=True):
    """Checks that PKINIT is configured as expected

    If enabled:
        ipa-pkinit-manage status must return 'PKINIT is enabled'
        the certificate must be tracked by IPA CA helper
        the certificate must be signed by IPA CA
    If disabled:
        ipa-pkinit-manage status must return 'PKINIT is disabled'
        the certificate must be tracked by SelfSign CA helper
        the certificate must be self-signed
    """
    if enabled:
        # enabled: IPA-signed cert, tracked by the IPA helper
        status = PKINIT_STATUS_ENABLED
        helper = IPA_CA_HELPER
        issuer = 'CN=Certificate Authority,O={}'.format(host.domain.realm)
    else:
        # disabled: self-signed cert, tracked by the SelfSign helper
        status = PKINIT_STATUS_DISABLED
        helper = SELFSIGNED_CA_HELPER
        issuer = 'CN={},O={}'.format(host.hostname, host.domain.realm)
    check_pkinit_status(host, status)
    check_pkinit_tracking(host, helper)
    check_pkinit_cert_issuer(host, issuer)
class TestPkinitManage(IntegrationTest):
    """Tests the ipa-pkinit-manage command.

    ipa-pkinit-manage can be used to enable, disable or check
    the status of PKINIT.
    When pkinit is enabled, the kerberos server is using a certificate
    signed either externally or by IPA CA. In the latter case, certmonger
    is tracking the cert with IPA helper.
    When pkinit is disabled, the kerberos server is using a self-signed
    certificate that is tracked by certmonger with the SelfSigned helper.
    """
    num_replicas = 1

    @classmethod
    def install(cls, mh):
        # Install the master with PKINIT disabled
        tasks.install_master(cls.master, extra_args=['--no-pkinit'])
        check_pkinit(cls.master, enabled=False)

    def test_pkinit_enable(self):
        self.master.run_command(['ipa-pkinit-manage', 'enable'])
        check_pkinit(self.master, enabled=True)

    def test_pkinit_disable(self):
        self.master.run_command(['ipa-pkinit-manage', 'disable'])
        check_pkinit(self.master, enabled=False)

    def test_pkinit_reenable(self):
        # enable -> disable -> enable must round-trip cleanly
        self.master.run_command(['ipa-pkinit-manage', 'enable'])
        check_pkinit(self.master, enabled=True)

    def test_pkinit_on_replica(self):
        """Test pkinit enable on a replica without CA

        Test case for ticket 7795.
        Install a replica with --no-pkinit (without CA)
        then call ipa-pkinit-manage enable. The replica must contact
        a master with a CA instance to get its KDC cert.
        """
        tasks.install_replica(self.master, self.replicas[0], setup_ca=False,
                              extra_args=['--no-pkinit'])
        check_pkinit(self.replicas[0], enabled=False)
        self.replicas[0].run_command(['ipa-pkinit-manage', 'enable'])
        check_pkinit(self.replicas[0], enabled=True)
class TestPkinitInstall(IntegrationTest):
    """Tests that ipa-server-install properly configures pkinit.

    Non-regression test for issue 7795.
    """
    num_replicas = 0

    @classmethod
    def install(cls, mh):
        # Install the master (PKINIT is enabled by default)
        tasks.install_master(cls.master)

    def test_pkinit(self):
        # Ensure that pkinit is properly configured
        check_pkinit(self.master, enabled=True)
| 5,304
|
Python
|
.py
| 119
| 37.957983
| 76
| 0.692919
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,602
|
test_topologies.py
|
freeipa_freeipa/ipatests/test_integration/test_topologies.py
|
# Authors:
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ipatests.pytest_ipa.integration import tasks
def test_topology_star():
    """star: the master links directly to every replica."""
    topo = tasks.get_topo('star')
    assert topo is tasks.star_topo
    expected = [('M', replica) for replica in (1, 2, 3, 4, 5)]
    assert list(topo('M', [1, 2, 3, 4, 5])) == expected
    assert list(topo('M', [])) == []
def test_topology_line():
    """line: a single chain M -> 1 -> 2 -> 3 -> 4 -> 5."""
    topo = tasks.get_topo('line')
    assert topo is tasks.line_topo
    nodes = ['M', 1, 2, 3, 4, 5]
    expected = list(zip(nodes, nodes[1:]))
    assert list(topo('M', [1, 2, 3, 4, 5])) == expected
    assert list(topo('M', [])) == []
def test_topology_tree():
    """tree: binary tree rooted at the master."""
    topo = tasks.get_topo('tree')
    assert topo is tasks.tree_topo
    assert list(topo('M', [1, 2, 3, 4, 5])) == [
        ('M', 1), ('M', 2), (1, 3), (1, 4), (2, 5),
    ]
    assert list(topo('M', list(range(1, 11)))) == [
        ('M', 1), ('M', 2), (1, 3), (1, 4), (2, 5),
        (2, 6), (3, 7), (3, 8), (4, 9), (4, 10),
    ]
    assert list(topo('M', [])) == []
def test_topology_tree2():
    """tree2: master has two children; the second child starts a chain."""
    topo = tasks.get_topo('tree2')
    assert topo is tasks.tree2_topo
    expected = [('M', 1), ('M', 2), (2, 3), (3, 4), (4, 5)]
    assert list(topo('M', [1, 2, 3, 4, 5])) == expected
    assert list(topo('M', [])) == []
def test_topology_complete():
    """complete: every pair of nodes is directly connected."""
    topo = tasks.get_topo('complete')
    assert topo is tasks.complete_topo
    all_pairs = [
        ('M', 1), ('M', 2), ('M', 3),
        (1, 2), (1, 3),
        (2, 3),
    ]
    assert list(topo('M', [1, 2, 3])) == all_pairs
    assert list(topo('M', [])) == []
def test_topology_two_connected():
    """2-connected: every node keeps two distinct paths to the rest."""
    topo = tasks.get_topo('2-connected')
    assert topo is tasks.two_connected_topo
    expected = [
        ('M', 1), ('M', 2), (2, 3), (1, 3),
        ('M', 4), ('M', 5), (4, 6), (5, 6),
        (2, 4), (2, 7), (4, 8), (7, 8),
    ]
    assert list(topo('M', [1, 2, 3, 4, 5, 6, 7, 8])) == expected
    assert list(topo('M', [])) == []
def test_topology_double_circle_topo():
    """Exercise the double-circle topology generator.

    The expected edge list below falls into 15 initial links followed
    by six groups of 8 links (grouping inferred from the literal).
    """
    topo = tasks.get_topo('double-circle')
    assert topo is tasks.double_circle_topo
    assert list(topo('M', list(range(1, 30)))) == [
        # first 15 links
        ('M', 1),
        (1, 6),
        (1, 12),
        (6, 7),
        (7, 12),
        (7, 18),
        (12, 13),
        (13, 18),
        (13, 24),
        (18, 19),
        (19, 24),
        (19, 'M'),
        (24, 25),
        (25, 'M'),
        (25, 6),
        # group around M
        ('M', 2),
        (2, 3),
        (2, 4),
        (2, 5),
        (3, 4),
        (3, 5),
        (4, 5),
        (1, 5),
        # group around 6/7
        (6, 8),
        (8, 9),
        (8, 10),
        (8, 11),
        (9, 10),
        (9, 11),
        (10, 11),
        (7, 11),
        # group around 12/13
        (12, 14),
        (14, 15),
        (14, 16),
        (14, 17),
        (15, 16),
        (15, 17),
        (16, 17),
        (13, 17),
        # group around 18/19
        (18, 20),
        (20, 21),
        (20, 22),
        (20, 23),
        (21, 22),
        (21, 23),
        (22, 23),
        (19, 23),
        # group around 24/25
        (24, 26),
        (26, 27),
        (26, 28),
        (26, 29),
        (27, 28),
        (27, 29),
        (28, 29),
        (25, 29),
    ]
    assert list(topo('M', [])) == []
| 4,064
|
Python
|
.py
| 166
| 17.536145
| 71
| 0.437548
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,603
|
test_external_ca.py
|
freeipa_freeipa/ipatests/test_integration/test_external_ca.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import os
import re
import time
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from ipatests.pytest_ipa.integration import tasks
from ipatests.test_integration.base import IntegrationTest
from ipalib import x509 as ipa_x509
from ipaplatform.paths import paths
from ipapython.dn import DN
from itertools import chain, repeat
from ipatests.create_external_ca import ExternalCA, ISSUER_CN
IPA_CA = 'ipa_ca.crt'
ROOT_CA = 'root_ca.crt'
# string to identify PKI restart in the journal
PKI_START_STR = 'Started pki_tomcatd'
def check_CA_flag(host, nssdb=paths.PKI_TOMCAT_ALIAS_DIR,
                  cn=ISSUER_CN):
    """
    Check if external CA (by default 'example.test' in our test env) has
    CA flag in nssdb.

    Returns the regex match (truthy) when the flag is present, else None.
    """
    listing = host.run_command(['certutil', '-L', '-d', nssdb])
    # match CN in cert nickname and C flag in SSL section of NSS flags table
    pattern = re.compile(r'.*{}.*\s+C'.format(cn))
    return pattern.search(listing.stdout_text)
def match_in_journal(host, string, since='today', services=('certmonger',)):
    """Search the journal of *services* on *host* for a regex.

    :param host: host on which journalctl is executed
    :param string: regex pattern to look for in the journal output
    :param since: value for journalctl --since (default 'today')
    :param services: systemd unit names to restrict the search to
    :return: ``re.Match`` object, or None when the pattern is not found
    """
    # journalctl wants each unit name prefixed with '-u'.
    # (The previous chain/zip/list construction did the same interleaving
    # with redundant list() wrappers.)
    service_args = [arg for svc in services for arg in ('-u', svc)]
    command_args = ['journalctl', '--since={}'.format(since)] + service_args
    result = host.run_command(command_args)
    return re.search(string, result.stdout_text)
def install_server_external_ca_step1(host, extra_args=(), raiseonerr=True):
    """Step 1 to install the ipa server with external ca

    Runs ipa-server-install with --external-ca, which stops after
    generating the CSR for the external CA to sign.
    """
    return tasks.install_master(
        host,
        external_ca=True,
        extra_args=extra_args,
        raiseonerr=raiseonerr,
    )
def install_server_external_ca_step2(host, ipa_ca_cert, root_ca_cert,
                                     extra_args=(),
                                     raiseonerr=True):
    """Step 2 to install the ipa server with external ca

    Resumes the installation with the externally signed IPA CA cert
    and the external root CA cert.
    """
    argv = [
        'ipa-server-install', '-U',
        '-r', host.domain.realm,
        '-a', host.config.admin_password,
        '-p', host.config.dirman_password,
        '--external-cert-file', ipa_ca_cert,
        '--external-cert-file', root_ca_cert,
    ]
    argv += list(extra_args)
    return host.run_command(argv, raiseonerr=raiseonerr)
def service_control_dirsrv(host, function):
    """Run ``systemctl <function>`` on the host's dirsrv instance.

    *function* is the systemctl verb: start, stop, restart, ...
    """
    # dirsrv units are named after the realm with dots replaced by dashes
    instance = host.domain.realm.replace(".", '-')
    unit = "dirsrv@%s.service" % instance
    result = host.run_command(['systemctl', function, unit])
    assert result.returncode == 0
def check_ipaca_issuerDN(host, expected_dn):
    """Assert that ``ipa ca-show ipa`` reports *expected_dn* as issuer."""
    shown = host.run_command(['ipa', 'ca-show', 'ipa'])
    assert "Issuer DN: {}".format(expected_dn) in shown.stdout_text
def check_mscs_extension(ipa_csr, template):
    """Verify the MS certificate-template extension in an IPA CSR.

    :param ipa_csr: PEM-encoded CSR bytes generated by ipa-server-install
    :param template: the MSCSTemplate instance the CSR was created with
    """
    csr = x509.load_pem_x509_csr(ipa_csr, default_backend())
    extensions = [
        ext for ext in csr.extensions
        if ext.oid.dotted_string == template.ext_oid
    ]
    assert extensions
    mscs_ext = extensions[0].value
    # Crypto 41.0.0 supports cryptography.x509.MSCertificateTemplate
    # The extension gets decoded into MSCertificateTemplate which
    # provides additional attributes (template_id, major_version and
    # minor_version)
    # If the test is executed with an older python-cryptography version,
    # the extension is decoded as UnrecognizedExtension instead and
    # provides only the encoded payload
    if isinstance(mscs_ext, x509.UnrecognizedExtension):
        # older cryptography: compare the raw DER payload
        assert mscs_ext.value == template.get_ext_data()
    else:
        # Compare the decoded extension with the values specified in the
        # template with a format name_or_oid:major:minor
        parts = template.unparsed_input.split(':')
        assert mscs_ext.template_id.dotted_string == parts[0]
        if isinstance(template, ipa_x509.MSCSTemplateV2):
            # Also contains OID:major[:minor]
            major = int(parts[1])
            assert major == mscs_ext.major_version
            if len(parts) > 2:
                minor = int(parts[2])
                assert minor == mscs_ext.minor_version
class TestExternalCA(IntegrationTest):
    """
    Test of FreeIPA server installation with external CA
    """
    num_replicas = 1
    num_clients = 1

    def test_external_ca(self):
        # Step 1 of ipa-server-install.
        result = install_server_external_ca_step1(
            self.master, extra_args=['--external-ca-type=ms-cs']
        )
        assert result.returncode == 0

        # check CSR for extension
        ipa_csr = self.master.get_file_contents(paths.ROOT_IPA_CSR)
        check_mscs_extension(ipa_csr, ipa_x509.MSCSTemplateV1(u'SubCA'))

        # Sign CA, transport it to the host and get ipa a root ca paths.
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.ROOT_IPA_CSR, ROOT_CA, IPA_CA)

        # Step 2 of ipa-server-install.
        result = install_server_external_ca_step2(
            self.master, ipa_ca_fname, root_ca_fname)
        assert result.returncode == 0

        # Make sure IPA server is working properly
        tasks.kinit_admin(self.master)
        result = self.master.run_command(['ipa', 'user-show', 'admin'])
        assert 'User login: admin' in result.stdout_text

        # check that we can also install replica
        tasks.install_replica(self.master, self.replicas[0])

        # check that nsds5ReplicaReleaseTimeout option was set
        result = tasks.ldapsearch_dm(
            self.master,
            'cn=mapping tree,cn=config',
            ['(cn=replica)'],
        )
        # case insensitive match
        text = result.stdout_text.lower()
        # see ipaserver.install.replication.REPLICA_FINAL_SETTINGS
        assert 'nsds5ReplicaReleaseTimeout: 60'.lower() in text
        assert 'nsDS5ReplicaBindDnGroupCheckInterval: 60'.lower() in text

    def test_client_installation_with_otp(self):
        # Test for issue 7526: client installation fails with one-time
        # password when the master is installed with an externally signed
        # CA because the whole cert chain is not published in
        # /usr/share/ipa/html/ca.crt

        # Create a random password for the client
        client = self.clients[0]
        client_pwd = 'Secret123'
        args = ['ipa',
                'host-add', client.hostname,
                '--ip-address', client.ip,
                '--no-reverse',
                '--password', client_pwd]
        self.master.run_command(args)

        # Enroll the client with the client_pwd
        client.run_command(
            ['ipa-client-install',
             '--domain', self.master.domain.name,
             '--server', self.master.hostname,
             '-w', client_pwd,
             '-U'])
class TestExternalCAConstraints(IntegrationTest):
    """Test of FreeIPA server installation with external CA and constraints
    """
    num_replicas = 0
    num_clients = 1

    def test_external_ca_constrained(self):
        install_server_external_ca_step1(self.master)

        # name constraints for IPA DNS domain (dot prefix)
        nameconstraint = x509.NameConstraints(
            permitted_subtrees=[
                x509.DNSName("." + self.master.domain.name),
            ],
            excluded_subtrees=None
        )

        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.ROOT_IPA_CSR, ROOT_CA, IPA_CA,
            root_ca_extensions=[nameconstraint],
        )
        install_server_external_ca_step2(
            self.master, ipa_ca_fname, root_ca_fname
        )
        # the server must be functional under the constrained chain
        tasks.kinit_admin(self.master)
        self.master.run_command(['ipa', 'ping'])
def verify_caentry(host, cert):
    """
    Verify the content of cn=DOMAIN IPA CA,cn=certificates,cn=ipa,cn=etc,basedn
    and make sure that ipaConfigString contains the expected values.
    Verify the content of cn=cacert,cn=certificates,cn=ipa,cn=etc,basedn
    and make sure that it contains the expected certificate.

    :param host: host whose LDAP server is queried
    :param cert: certificate object expected in the CACert entry
    """
    # Check the LDAP entry
    ldap = host.ldap_connect()
    # cn=DOMAIN IPA CA must contain ipaConfigString: ipaCa, compatCA
    ca_nick = '{} IPA CA'.format(host.domain.realm)
    entry = ldap.get_entry(DN(('cn', ca_nick), ('cn', 'certificates'),
                              ('cn', 'ipa'), ('cn', 'etc'),
                              host.domain.basedn))
    # compare case-insensitively, order-independently
    ipaconfigstring = [x.lower() for x in entry.get('ipaconfigstring')]
    expected = ['compatca', 'ipaca']
    assert expected == sorted(ipaconfigstring)

    # cn=cacert,cn=certificates,cn=etc,basedn must contain the latest
    # IPA CA
    entry2 = ldap.get_entry(DN(('cn', 'CACert'), ('cn', 'ipa'),
                               ('cn', 'etc'), host.domain.basedn))
    cert_from_ldap = entry2.single_value['cACertificate']
    assert cert == cert_from_ldap
class TestSelfExternalSelf(IntegrationTest):
    """
    Test self-signed > external CA > self-signed test case.
    """
    def test_install_master(self):
        result = tasks.install_master(self.master)
        assert result.returncode == 0

        # Check the content of the ldap entries for the CA
        remote_cacrt = self.master.get_file_contents(paths.IPA_CA_CRT)
        cacrt = ipa_x509.load_pem_x509_certificate(remote_cacrt)
        verify_caentry(self.master, cacrt)

    def test_switch_to_external_ca(self):
        # start the renewal to an externally-signed CA; this produces a CSR
        result = self.master.run_command([paths.IPA_CACERT_MANAGE, 'renew',
                                          '--external-ca'])
        assert result.returncode == 0

        # Sign CA, transport it to the host and get ipa a root ca paths.
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.IPA_CA_CSR, ROOT_CA, IPA_CA)

        # renew CA with externally signed one
        result = self.master.run_command([paths.IPA_CACERT_MANAGE, 'renew',
                                          '--external-cert-file={}'.
                                          format(ipa_ca_fname),
                                          '--external-cert-file={}'.
                                          format(root_ca_fname)])
        assert result.returncode == 0

        # update IPA certificate databases
        result = self.master.run_command([paths.IPA_CERTUPDATE])
        assert result.returncode == 0

        # Check if external CA have "C" flag after the switch
        result = check_CA_flag(self.master)
        assert bool(result), ('External CA does not have "C" flag')

        # Check that ldap entries for the CA have been updated
        remote_cacrt = self.master.get_file_contents(ipa_ca_fname)
        cacrt = ipa_x509.load_pem_x509_certificate(remote_cacrt)
        verify_caentry(self.master, cacrt)

    def test_issuerDN_after_renew_to_external(self):
        """ Check if issuer DN is updated after self-signed > external-ca

        This test checks if issuer DN is updated properly after CA is
        renewed from self-signed to external-ca
        """
        check_ipaca_issuerDN(self.master, "CN={}".format(ISSUER_CN))

    def test_switch_back_to_self_signed(self):
        # for journalctl --since
        switch_time = time.strftime('%Y-%m-%d %H:%M:%S')
        # switch back to self-signed CA
        result = self.master.run_command([paths.IPA_CACERT_MANAGE, 'renew',
                                          '--self-signed'])
        assert result.returncode == 0

        # Confirm there is no traceback in the journal
        result = match_in_journal(self.master, since=switch_time,
                                  string='Traceback')
        assert not bool(result), ('"Traceback" keyword found in the journal.'
                                  'Please check further')

        # Check if pki-tomcatd was started after switching back.
        result = match_in_journal(self.master, since=switch_time,
                                  string=PKI_START_STR)
        assert bool(result), ('pki_tomcatd not started after switching back to'
                              'self-signed CA')

        result = self.master.run_command([paths.IPA_CERTUPDATE])
        assert result.returncode == 0

    def test_issuerDN_after_renew_to_self_signed(self):
        """ Check if issuer DN is updated after external-ca > self-signed

        This test checks if issuer DN is updated properly after CA is
        renewed back from external-ca to self-signed
        """
        issuer_dn = 'CN=Certificate Authority,O={}'.format(
            self.master.domain.realm)
        check_ipaca_issuerDN(self.master, issuer_dn)
class TestExternalCAdirsrvStop(IntegrationTest):
    """When the dirsrv service, which gets started during the first
    ipa-server-install --external-ca phase, is not running when the
    second phase is run with --external-cert-file options, the
    ipa-server-install command fail.

    This test checks if second phase installs successfully when dirsrv
    is stopped.

    related ticket: https://pagure.io/freeipa/issue/6611"""

    def test_external_ca_dirsrv_stop(self):
        # Step 1 of ipa-server-install
        result = install_server_external_ca_step1(self.master)
        assert result.returncode == 0

        # stop dirsrv server.
        service_control_dirsrv(self.master, 'stop')

        # Sign CA, transport it to the host and get ipa and root ca paths.
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.ROOT_IPA_CSR, ROOT_CA, IPA_CA)

        # Step 2 of ipa-server-install.
        result = install_server_external_ca_step2(
            self.master, ipa_ca_fname, root_ca_fname)
        assert result.returncode == 0

        # Make sure IPA server is working properly
        tasks.kinit_admin(self.master)
        result = self.master.run_command(['ipa', 'user-show', 'admin'])
        assert 'User login: admin' in result.stdout_text
class TestExternalCAInvalidCert(IntegrationTest):
    """Manual renew external CA cert with invalid file"""

    def test_external_ca(self):
        # Step 1 of ipa-server-install.
        install_server_external_ca_step1(self.master)

        # Sign CA, transport it to the host and get ipa a root ca paths.
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.ROOT_IPA_CSR, ROOT_CA, IPA_CA)

        # Step 2 of ipa-server-install.
        install_server_external_ca_step2(self.master, ipa_ca_fname,
                                         root_ca_fname)

        self.master.run_command([paths.IPA_CACERT_MANAGE, 'renew',
                                 '--external-ca'])
        # Build an invalid cert file by stripping the PEM delimiter lines
        result = self.master.run_command(['grep', '-v', 'CERTIFICATE',
                                          ipa_ca_fname])
        contents = result.stdout_text
        BAD_CERT = 'bad_ca.crt'
        invalid_cert = os.path.join(self.master.config.test_dir, BAD_CERT)
        self.master.put_file_contents(invalid_cert, contents)

        # Sign CA, transport it to the host and get ipa a root ca paths.
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.IPA_CA_CSR, ROOT_CA, IPA_CA)

        # renew CA with invalid cert; the command must fail
        cmd = [paths.IPA_CACERT_MANAGE, 'renew', '--external-cert-file',
               invalid_cert, '--external-cert-file', root_ca_fname]
        result = self.master.run_command(cmd, raiseonerr=False)
        assert result.returncode == 1

    def test_external_ca_with_too_small_key(self):
        # reuse the existing deployment and renewal CSR
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.IPA_CA_CSR, ROOT_CA, IPA_CA, key_size=1024)

        cmd = [
            paths.IPA_CACERT_MANAGE, 'renew',
            '--external-cert-file', ipa_ca_fname,
            '--external-cert-file', root_ca_fname,
        ]
        # renewal must be rejected for the 1024-bit key
        result = self.master.run_command(cmd, raiseonerr=False)
        assert result.returncode == 1
class TestExternalCAInvalidIntermediate(IntegrationTest):
    """Test case for https://pagure.io/freeipa/issue/7877"""

    def test_invalid_intermediate(self):
        install_server_external_ca_step1(self.master)
        # a root CA with pathlen 0 cannot sign an intermediate (IPA) CA
        root_ca_fname, ipa_ca_fname = tasks.sign_ca_and_transport(
            self.master, paths.ROOT_IPA_CSR, ROOT_CA, IPA_CA,
            root_ca_path_length=0
        )
        result = install_server_external_ca_step2(
            self.master, ipa_ca_fname, root_ca_fname, raiseonerr=False
        )
        assert result.returncode > 0
        # NOTE: 'contraint' spelling matches the installer's actual message
        assert "basic contraint pathlen" in result.stderr_text
class TestExternalCAInstall(IntegrationTest):
    """Install a CA certificate manually with ipa-cacert-manage."""

    def test_install_master(self):
        # plain server install with the default self-signed CA
        tasks.install_master(self.master)

    def test_install_external_ca(self):
        # Build a brand-new external root CA and its certificate
        new_ca = ExternalCA()
        root_cert = new_ca.create_ca()
        # Transport the certificate (string > file) to the master
        dest = os.path.join(self.master.config.test_dir, ROOT_CA)
        self.master.put_file_contents(dest, root_cert)
        # Add the new cert to IPA's trust store
        self.master.run_command(
            [paths.IPA_CACERT_MANAGE, 'install', dest])
class TestMultipleExternalCA(IntegrationTest):
    """Setup externally signed ca1

    install ipa-server with externally signed ca1
    Setup externally signed ca2 and renew ipa-server with
    externally signed ca2 and check the difference in certificate
    """

    def test_master_install_ca1(self):
        """Install with RootCA1 and verify it is the CA signing cert."""
        install_server_external_ca_step1(self.master)
        # Sign CA, transport it to the host and get ipa a root ca paths.
        root_ca_fname1 = tasks.create_temp_file(
            self.master, directory=paths.TMP, suffix="root_ca.crt"
        )
        ipa_ca_fname1 = tasks.create_temp_file(
            self.master, directory=paths.TMP, suffix="ipa_ca.crt"
        )

        ipa_csr = self.master.get_file_contents(paths.ROOT_IPA_CSR)

        external_ca = ExternalCA()
        root_ca = external_ca.create_ca(cn='RootCA1')
        ipa_ca = external_ca.sign_csr(ipa_csr)

        self.master.put_file_contents(root_ca_fname1, root_ca)
        self.master.put_file_contents(ipa_ca_fname1, ipa_ca)
        # Step 2 of ipa-server-install.
        install_server_external_ca_step2(self.master, ipa_ca_fname1,
                                         root_ca_fname1)

        # Verify the installed CA signing certificate issuer in the NSS DB
        cert_nick = "caSigningCert cert-pki-ca"
        result = self.master.run_command([
            'certutil', '-L', '-d', paths.PKI_TOMCAT_ALIAS_DIR,
            '-n', cert_nick])
        assert "CN=RootCA1" in result.stdout_text

    def test_master_install_ca2(self):
        """Renew against RootCA2 and verify the signing cert changed."""
        root_ca_fname2 = tasks.create_temp_file(
            self.master, directory=paths.TMP, suffix="root_ca.crt"
        )
        ipa_ca_fname2 = tasks.create_temp_file(
            self.master, directory=paths.TMP, suffix="ipa_ca.crt"
        )

        # Produce a fresh renewal CSR to be signed by the second CA
        self.master.run_command([
            paths.IPA_CACERT_MANAGE, 'renew', '--external-ca'])

        ipa_csr = self.master.get_file_contents(paths.IPA_CA_CSR)

        external_ca = ExternalCA()
        root_ca = external_ca.create_ca(cn='RootCA2')
        ipa_ca = external_ca.sign_csr(ipa_csr)

        self.master.put_file_contents(root_ca_fname2, root_ca)
        self.master.put_file_contents(ipa_ca_fname2, ipa_ca)
        # Step 2 of ipa-server-install.
        self.master.run_command([paths.IPA_CACERT_MANAGE, 'renew',
                                 '--external-cert-file', ipa_ca_fname2,
                                 '--external-cert-file', root_ca_fname2])

        cert_nick = "caSigningCert cert-pki-ca"
        result = self.master.run_command([
            'certutil', '-L', '-d', paths.PKI_TOMCAT_ALIAS_DIR,
            '-n', cert_nick])
        assert "CN=RootCA2" in result.stdout_text
def _step1_profile(master, s):
    """Run install step 1 with --external-ca-type=ms-cs and profile *s*.

    Returns the command result object; raiseonerr=False so callers can
    assert on the exit code for both valid and invalid profile specifiers.
    """
    return install_server_external_ca_step1(
        master,
        extra_args=['--external-ca-type=ms-cs', f'--external-ca-profile={s}'],
        raiseonerr=False,
    )
def _test_invalid_profile(master, profile):
    """Assert that install step 1 rejects *profile* with a profile error."""
    result = _step1_profile(master, profile)
    assert result.returncode != 0
    # the error message must point at the offending option
    assert '--external-ca-profile' in result.stderr_text
def _test_valid_profile(master, profile_cls, profile):
    """Assert that *profile* is accepted and shows up in the generated CSR.

    *profile_cls* is the MS-CS template class used to build the expected
    extension that check_mscs_extension() looks for in the CSR.
    """
    result = _step1_profile(master, profile)
    assert result.returncode == 0
    ipa_csr = master.get_file_contents(paths.ROOT_IPA_CSR)
    check_mscs_extension(ipa_csr, profile_cls(profile))
class TestExternalCAProfileScenarios(IntegrationTest):
    """
    Test the various --external-ca-profile scenarios.

    This test is broken into sections, with each section first
    testing invalid arguments, then a valid argument, and finally
    uninstalling the half-installed IPA.
    """

    '''
    Tranche 1: version 1 templates.

    Test that --external-ca-profile=Foo gets propagated to the CSR.
    The default template extension when --external-ca-type=ms-cs,
    a V1 extension with value "SubCA", already gets tested by the
    ``TestExternalCA`` class.

    We only need to do Step 1 of installation, then check the CSR.
    '''
    def test_invalid_v1_template(self):
        # a V1 template name must not look like an OID-with-version
        _test_invalid_profile(self.master, 'NotAnOid:1')

    def test_valid_v1_template(self):
        _test_valid_profile(
            self.master, ipa_x509.MSCSTemplateV1, 'TemplateOfAwesome')

    def test_uninstall_1(self):
        # clean up the half-installed server before the next tranche
        tasks.uninstall_master(self.master)

    '''
    Tranche 2: V2 templates without minor version.

    Test that V2 template specifiers without minor version get
    propagated to CSR.  This class also tests all error modes in
    specifying a V2 template, those being:

    - no major version specified
    - too many parts specified (i.e. major, minor, and then some more)
    - major version is not an int
    - major version is negative
    - minor version is not an int
    - minor version is negative

    We only need to do Step 1 of installation, then check the CSR.
    '''
    def test_v2_template_too_few_parts(self):
        _test_invalid_profile(self.master, '1.2.3.4')

    def test_v2_template_too_many_parts(self):
        _test_invalid_profile(self.master, '1.2.3.4:100:200:300')

    def test_v2_template_major_version_not_int(self):
        _test_invalid_profile(self.master, '1.2.3.4:wat:200')

    def test_v2_template_major_version_negative(self):
        _test_invalid_profile(self.master, '1.2.3.4:-1:200')

    def test_v2_template_minor_version_not_int(self):
        _test_invalid_profile(self.master, '1.2.3.4:100:wat')

    def test_v2_template_minor_version_negative(self):
        _test_invalid_profile(self.master, '1.2.3.4:100:-2')

    def test_v2_template_valid_major_only(self):
        _test_valid_profile(
            self.master, ipa_x509.MSCSTemplateV2, '1.2.3.4:100')

    def test_uninstall_2(self):
        # clean up again before the final tranche
        tasks.uninstall_master(self.master)

    '''
    Tranche 3: V2 templates with minor version.

    Test that V2 template specifiers _with_ minor version get
    propagated to CSR.  All error modes of V2 template specifiers
    were tested in ``TestExternalCAProfileV2Major``.

    We only need to do Step 1 of installation, then check the CSR.
    '''
    def test_v2_template_valid_major_minor(self):
        _test_valid_profile(
            self.master, ipa_x509.MSCSTemplateV2, '1.2.3.4:100:200')

    # this is the end; no need to uninstall.
| 24,417
|
Python
|
.py
| 509
| 39.064833
| 79
| 0.64717
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,604
|
test_customized_ds_config_install.py
|
freeipa_freeipa/ipatests/test_integration/test_customized_ds_config_install.py
|
from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_ipa.integration import tasks
DIRSRV_CONFIG_MODS = """
# https://fedorahosted.org/freeipa/ticket/4949
# https://pagure.io/freeipa/issue/8515
dn: cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config
changetype: modify
replace: nsslapd-db-locks
nsslapd-db-locks: 100000
# https://fedorahosted.org/freeipa/ticket/1930
dn: cn=config
changetype: modify
replace: nsslapd-allow-unauthenticated-binds
nsslapd-allow-unauthenticated-binds: off
-
replace: nsslapd-require-secure-binds
nsslapd-require-secure-binds: off
-
replace: nsslapd-allow-anonymous-access
nsslapd-allow-anonymous-access: off
-
replace: nsslapd-minssf
nsslapd-minssf: 0
# https://fedorahosted.org/freeipa/ticket/4048
dn: cn=config
changetype: modify
replace: nssslapd-maxbersize
nssslapd-maxbersize: 209715201
dn: cn=userRoot,cn=ldbm database,cn=plugins,cn=config
changetype: modify
replace: nsslapd-cachememsize
nsslapd-cachememsize: 10485761
dn: cn=config,cn=ldbm database,cn=plugins,cn=config
changetype: modify
replace: nsslapd-import_cachesize
nsslapd-import_cachesize: 20000001
-
replace: nsslapd-dbcachesize
nsslapd-dbcachesize: 10000001
"""
CONFIG_LDIF_PATH = "/root/dirsrv-config-mod.ldif"
class TestCustomDSConfigInstall(IntegrationTest):
    """Install master and replica with custom DS config
    """
    topology = 'star'
    num_replicas = 1

    @classmethod
    def install(cls, mh):
        # just prepare LDIF file on both master and replica
        cls.master.put_file_contents(CONFIG_LDIF_PATH, DIRSRV_CONFIG_MODS)
        cls.replicas[0].put_file_contents(CONFIG_LDIF_PATH,
                                          DIRSRV_CONFIG_MODS)

    def test_customized_ds_install_master(self):
        """Master install must apply the custom 389-DS LDIF."""
        tasks.install_master(self.master, setup_dns=False, extra_args=[
            '--dirsrv-config-file', CONFIG_LDIF_PATH
        ])

    def test_customized_ds_install_replica(self):
        """Replica install must accept the same custom 389-DS LDIF."""
        tasks.install_replica(
            self.master, self.replicas[0], setup_ca=False,
            nameservers=None,
            extra_args=['--dirsrv-config-file', CONFIG_LDIF_PATH])
| 2,131
|
Python
|
.py
| 61
| 30.95082
| 74
| 0.755588
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,605
|
test_dns.py
|
freeipa_freeipa/ipatests/test_integration/test_dns.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""This covers tests for dns related feature"""
from __future__ import absolute_import
from ipatests.pytest_ipa.integration import tasks
from ipatests.test_integration.base import IntegrationTest
class TestDNS(IntegrationTest):
    """Tests for DNS feature.

    This test class covers the tests for DNS feature.
    """
    topology = 'line'
    num_replicas = 0

    def test_fake_mname_param(self):
        """Test that fake_mname param is set using dnsserver-mod option.

        Test for BZ 1488732 which checks that --soa-mname-override option
        from dnsserver-mod sets the fake_mname.
        """
        tasks.kinit_admin(self.master)
        self.master.run_command(['ipa', 'dnsserver-mod', self.master.hostname,
                                 '--soa-mname-override', 'fake'])
        # named must be restarted for the override to take effect
        tasks.restart_named(self.master)
        cmd = self.master.run_command(['dig', '+short', '-t', 'SOA',
                                       self.master.domain.name])
        assert 'fake' in cmd.stdout_text

        # reverting the fake_mname change to check it is reverted correctly
        self.master.run_command(['ipa', 'dnsserver-mod', self.master.hostname,
                                 '--soa-mname-override', ''])
        tasks.restart_named(self.master)
        cmd = self.master.run_command(['dig', '+short', '-t', 'SOA',
                                       self.master.domain.name])
        assert 'fake' not in cmd.stdout_text
| 1,524
|
Python
|
.py
| 32
| 37.625
| 78
| 0.62062
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,606
|
prci_checker.py
|
freeipa_freeipa/ipatests/prci_definitions/prci_checker.py
|
#! /usr/bin/python3
import os
import glob
import sys
import argparse
from argparse import RawTextHelpFormatter
import yaml
# Get default DIR from script location
DEFAULT_DIR = os.path.dirname(os.path.abspath(__file__))
# Default jobs specification file name and path
JOBS_SPEC_YAML = "prci_jobs_spec.yaml"
JOBS_SPEC_PATH = os.path.join(DEFAULT_DIR, JOBS_SPEC_YAML)
# Files to ignore on check
IGNORE_FILES = {JOBS_SPEC_YAML, "temp_commit.yaml"}
def load_yaml(path):
    """Parse the YAML document at *path* and return the resulting object."""
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
def print_error(msg):
    """Print *msg* to stdout with an ``ERROR:`` prefix."""
    print(f"ERROR: {msg}")
def print_warning(msg):
    """Print *msg* to stdout with a ``WARNING:`` prefix."""
    print(f"WARNING: {msg}")
def print_hint(msg):
    """Print *msg* to stdout with a ``HINT:`` prefix."""
    print(f"HINT: {msg}")
def print_field_error(
    jobname, fieldname=None, expected_value=None, custom_msg=None
):
    """Print a formatted ERROR message about a bad field in job *jobname*.

    Callers pass either *custom_msg* or the (*fieldname*,
    *expected_value*) pair; with neither, only a generic per-job line
    is printed.
    """
    msg = f"In job '{jobname}':\n"
    if custom_msg:
        msg += f"  {custom_msg}"
    elif fieldname is not None and expected_value is not None:
        # Fix: compare against None instead of truthiness so that
        # falsy-but-valid expectations (False, 0, empty list) still
        # produce the detailed message rather than the generic one.
        msg += (
            f'  Job field "{fieldname}" should be defined as: '
            f'"{fieldname}: {expected_value}"'
        )
    else:
        # Neither form supplied: fall back to a bare per-job message.
        msg = f"In job '{jobname}'."
    print_error(msg)
def check_jobs(filename, jobs_def, topologies, current_spec, supported_classes):
    """
    Check if given job definition file has all jobs correctly defined according
    to specification file.

    :param filename: file name of the definition file to be checked
    :param jobs_def: definition file jobs as a dict object
    :param topologies: list of dicts of predefined topologies
    :param current_spec: spec entry for this file from the PRCI specification
        file containing the correct definitions
    :param supported_classes: List of supported test-run classes
    :returns: Boolean with the checks result
    """
    correct_fields = True

    try:
        job_prefix = current_spec["job_prefix"]
    except KeyError as e:
        # Fix: interpolate the actual filename -- the f-strings previously
        # contained a literal placeholder and the parameter went unused.
        print_error(
            "Specification file has bad format "
            f"and '{filename}' could not be analyzed.\n"
            f"  KeyError: {e} in '{filename}'"
        )
        return False

    # Every non-build job must require the build job and reference its URL.
    requires = [f"{job_prefix}build"]
    build_url = f"{{{job_prefix}build_url}}"

    # Get template from build job
    build_job_name = job_prefix + "build"
    build_job = jobs_def.get(build_job_name)
    if not build_job:
        print_error(
            "  Build job is not defined or has incorrect name.\n"
            f"  Name should be: '{build_job_name}'"
        )
        return False
    build_args = build_job["job"]["args"]
    template = build_args["template"]
    copr = build_args.get("copr")

    # Spec-driven expectations shared by all jobs in this file.
    copr_defined = current_spec.get("copr_defined", False)
    update_packages = current_spec.get("update_packages", False)
    selinux = current_spec.get("selinux_enforcing", False)
    enable_testing_repo = current_spec.get("enable_testing_repo", False)

    for job_name, params in jobs_def.items():
        # Checks for all kind of jobs
        args = params.get("job").get("args")
        if not job_name.startswith(job_prefix):
            msg = f"Job name should start with prefix '{job_prefix}'"
            print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        if args.get("template") != template:
            print_field_error(job_name, "template", template)
            correct_fields = False
        if "timeout" not in args:
            msg = "'timeout' field should be defined in args section"
            print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        if args.get("topology") not in topologies:
            msg = (
                "'topology' field should be defined with one of the "
                "pre-defined topologies"
            )
            print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        if args.get("enable_testing_repo", False) != enable_testing_repo:
            if enable_testing_repo:
                print_field_error(
                    job_name, "enable_testing_repo", enable_testing_repo
                )
            else:
                msg = (
                    "'enable_testing_repo' field should be set to false or not"
                    " defined"
                )
                print_field_error(job_name, custom_msg=msg)
            correct_fields = False

        # Checks for build job
        if job_name == build_job_name:
            if copr_defined and not copr:
                msg = "'copr' field should be defined for the build job"
                print_field_error(job_name, custom_msg=msg)
                correct_fields = False
            elif not copr_defined and copr:
                msg = "'copr' field should NOT be defined for the build job"
                print_field_error(job_name, custom_msg=msg)
                correct_fields = False
            if params.get("job").get("class") != "Build":
                print_field_error(job_name, "class", "Build")
                correct_fields = False
            # build job is exempt from the non-build checks below
            continue

        # Checks only for non-build jobs
        if params.get("requires") != requires:
            print_field_error(job_name, "requires", requires)
            correct_fields = False
        if params.get("job").get("class") not in supported_classes:
            msg = (
                "'class' field should be defined with one of the "
                f"supported: {supported_classes}"
            )
            print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        if args.get("build_url") != build_url:
            print_field_error(job_name, "build_url", f"'{build_url}'")
            correct_fields = False
        if "test_suite" not in args:
            msg = "'test_suite' field should be defined in args section"
            print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        # Check template field against build target
        if args.get("template") != template:
            print_field_error(job_name, "template", template)
            correct_fields = False
        # If build target has a copr repo, check that the job also defines it
        if args.get("copr") != copr:
            if copr and copr_defined:
                print_field_error(job_name, "copr", copr)
            elif not copr and not copr_defined:
                msg = "'copr' field should not be defined"
                print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        if args.get("update_packages", False) != update_packages:
            if update_packages:
                print_field_error(job_name, "update_packages", update_packages)
            else:
                msg = (
                    "'update_packages' field should be set to false or not"
                    " defined"
                )
                print_field_error(job_name, custom_msg=msg)
            correct_fields = False
        if args.get("selinux_enforcing", False) != selinux:
            if selinux:
                print_field_error(job_name, "selinux_enforcing", selinux)
            else:
                msg = (
                    "'selinux_enforcing' field should be set to false or not"
                    " defined"
                )
                print_field_error(job_name, custom_msg=msg)
            correct_fields = False

    return correct_fields
def process_def_file(file, jobs_spec, supported_classes):
    """Function to process PRCI definition file

    :param file: name of the definition file to be
                 processed (extension included)
    :param jobs_spec: PRCI specification file containing correct definitions
    :param supported_classes: List of supported test-run classes
    :returns: Boolean with the checks result, filename,
              and number of jobs in the definition
              file (-1 when error / warning)
    """
    # File base name without extension
    filename = os.path.splitext(os.path.basename(file))[0]

    try:
        def_suite = load_yaml(file)
    except FileNotFoundError as e:
        print(e)
        print_error(f"File '{file}' was not found.")
        sys.exit(1)
    except yaml.composer.ComposerError as e:
        # A bad alias is the usual cause of a composer error
        print_error(str(e))
        print_hint(
            "You probably defined a wrong alias "
            "in the newly added or modified job."
        )
        sys.exit(1)
    except yaml.YAMLError as e:
        print(e)
        print_error(f"Error loading YAML definition file {file}")
        sys.exit(1)

    # Get spec for file to be analyzed.
    # Fix: the messages below now interpolate the actual filename -- the
    # f-strings previously contained a literal placeholder.
    current_spec = jobs_spec.get(filename)
    if current_spec is None:
        print_warning(
            f"'{filename}' file is not defined in the PRCI "
            "specification file and "
            "could not be analyzed."
        )
        return True, "", -1

    jobs_def = def_suite.get("jobs")
    if jobs_def is None:
        print_error(
            f"'{filename}' file doesn't have a jobs section following "
            "the format."
        )
        return False, "", -1

    # Get list of pre-defined topologies
    topologies_def = def_suite.get("topologies")
    if topologies_def is None:
        print_error(
            f"'{filename}' file doesn't have a topologies section following "
            "the format."
        )
        return False, "", -1
    topologies = list(topologies_def.values())

    # Print file to be analyzed and its number of jobs
    n_jobs = len(jobs_def)
    print("[File] " + filename + " [Jobs] " + str(n_jobs))

    result = check_jobs(
        filename, jobs_def, topologies, current_spec, supported_classes
    )
    return result, filename, n_jobs
def process_spec_file(filepath):
    """Load the jobs specification file and extract its sections.

    :param filepath: Filepath for spec file
    :returns: Definition specification dict, supported classes and
              list of files that should contain the same number of jobs
    """
    try:
        spec_root = load_yaml(filepath)
    except FileNotFoundError as e:
        print(e)
        print_error(f"Jobs specification file '{filepath}' not found.")
        sys.exit(1)
    except yaml.YAMLError as e:
        print(e)
        print_error(f"Error loading YAML specification file '{filepath}'")
        sys.exit(1)

    # Both mandatory sections must exist; abort with a precise message
    # naming the missing key otherwise.
    jobs_spec = spec_root.get("prci_job_spec")
    if not jobs_spec:
        print_error(
            f"Specification definition not found in spec file '{filepath}'\n"
            "  Key 'prci_job_spec' is not present."
        )
        sys.exit(1)

    supported_classes = spec_root.get("classes")
    if not supported_classes:
        print_error(
            f"Supported classes not defined in spec file '{filepath}'\n"
            "  Key 'classes' is not present."
        )
        sys.exit(1)

    # 'fixed_n_jobs' is optional: None disables the job-count check.
    return jobs_spec, supported_classes, spec_root.get("fixed_n_jobs")
def check_n_jobs(defs_n_jobs):
    """Return True when all tracked definition files have equal job counts.

    :param defs_n_jobs: Dict of definition filenames as keys and number
                        of jobs as values
    :returns: Boolean, if definitions have the same number of jobs
    """
    if not defs_n_jobs:
        # Spec does not constrain job counts -- nothing to verify.
        return True
    if len(set(defs_n_jobs.values())) == 1:
        # Every tracked file declares the same number of jobs.
        return True
    print_error(
        "Following PRCI definitions should have the same number of jobs:"
        f" {list(defs_n_jobs.keys())}"
    )
    return False
def parse_arguments(description):
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description=description, formatter_class=RawTextHelpFormatter
    )

    # -f and -d are mutually exclusive: analyze one file or a directory.
    target = parser.add_mutually_exclusive_group()
    target.add_argument(
        "-f", "--file", help="Specify YAML definition file to be analyzed"
    )
    target.add_argument(
        "-d",
        "--defs",
        default=DEFAULT_DIR,
        help="Specify directory for definition files to be analyzed",
    )

    parser.add_argument(
        "-s",
        "--spec",
        default=JOBS_SPEC_PATH,
        help="Specify path for specification file",
    )

    return parser.parse_args()
def main():
    """
    Checker script for prci definition files.\n
    This script checks whether jobs in a prci definition file have the correct
    naming format, requirements, and arguments, which are defined in the
    specification file.

    If no defition file, definition directory or spec file is specified,
    script will look for them in its own dir location.

    Examples of the usage for the tool:\n
    # Check all yaml definition files in default dir\n
    python3 prci_checker.py\n
    # Check only specified file\n
    python3 prci_checker.py -f gating.yaml\n
    # Check with custom path for spec file\n
    python3 prci_checker.py -s ../../alternative_spec.yaml
    # Check with custom path for spec file\n
    python3 prci_checker.py -d ./definitions

    Find more examples of how to use the tool and spec file
    at https://freeipa.readthedocs.io/en/latest/designs/index.html
    """
    # NOTE: this docstring doubles as the argparse description (--help
    # output), so its wording -- including the "defition" typo -- is
    # user-visible text; change it only deliberately.
    args = parse_arguments(main.__doc__)

    print("BEGINNING PRCI JOB DEFINITIONS CHECKS")

    # Get data from jobs specification file
    jobs_spec, supported_classes, f_fixed_jobs = process_spec_file(args.spec)

    if args.file:
        # Single-file mode: only the boolean check result matters.
        result = process_def_file(args.file, jobs_spec, supported_classes)[0]
    else:
        # Get all yaml files in default dir, except those in IGNORE_FILES
        def_files_dir = os.path.join(args.defs, "*.y*ml")
        defs_files = glob.glob(def_files_dir)
        ignore_files_paths = {
            os.path.join(args.defs, ignore_file) for ignore_file in IGNORE_FILES
        }
        defs_files = set(defs_files) - ignore_files_paths
        if not defs_files:
            print_warning(
                "No yaml job definition files found to analyze "
                "in specified directory."
            )
            return
        result = True
        # Collect job counts only for files the spec says must match.
        defs_n_jobs = {}
        for def_file in defs_files:
            result_file, filename, n_jobs = process_def_file(
                def_file, jobs_spec, supported_classes
            )
            if not result_file:
                result = False
                continue
            if n_jobs > -1 and f_fixed_jobs and filename in f_fixed_jobs:
                defs_n_jobs[filename] = n_jobs
        # All tracked files must agree on the number of jobs.
        result = result and check_n_jobs(defs_n_jobs)

    if not result:
        print("CHECKS FINISHED WITH ERRORS")
        sys.exit(1)

    print("CHECKS FINISHED SUCCESSFULLY")
| 14,824
|
Python
|
.py
| 370
| 31.048649
| 80
| 0.611215
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,607
|
test_changeconf.py
|
freeipa_freeipa/ipatests/test_install/test_changeconf.py
|
# Copyright (C) 2018 FreeIPA Contributors see COPYING for license
from __future__ import absolute_import
import pytest
from ipapython.ipachangeconf import IPAChangeConf
@pytest.fixture(scope='function')
def config_filename(tmpdir):
    """Return a temp config file seeded with one user-defined setting.

    The pre-existing 'SOME_CONF' line lets tests exercise the
    do-not-overwrite behavior of the installer's config editing.
    """
    filename = tmpdir.mkdir('data').join('config_file.conf')
    filename.write('SOME_CONF /some/user/defined/path\n')
    return filename
def test_addifnotset_action(config_filename):
    """Test if addifnotset action adds a comment about the modified conf.

    IPA doesn't want to break existing configuration, if a value already exists
    it adds a comment to the modified setting and a note about that on the line
    above.

    New settings will be added without any note.
    """
    ipa_conf = IPAChangeConf('IPA Installer Test')
    # options are written as "NAME VALUE" (space-assigned)
    ipa_conf.setOptionAssignment(' ')

    opts = [
        {
            # SOME_CONF already exists in the fixture file: must be kept,
            # with IPA's value added only as a comment
            'action': 'addifnotset',
            'name': 'SOME_CONF',
            'type': 'option',
            'value': '/path/defined/by/ipa',
        },
        {
            # NEW_CONF does not exist yet: must be added verbatim
            'action': 'addifnotset',
            'name': 'NEW_CONF',
            'type': 'option',
            'value': '/path/to/somewhere',
        },
    ]

    ipa_conf.changeConf(str(config_filename), opts)

    assert config_filename.readlines() == [
        '# SOME_CONF modified by IPA\n',
        '#SOME_CONF /path/defined/by/ipa\n',
        'SOME_CONF /some/user/defined/path\n',
        'NEW_CONF /path/to/somewhere\n',
    ]
| 1,444
|
Python
|
.py
| 39
| 30
| 79
| 0.638451
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,608
|
test_install_checks.py
|
freeipa_freeipa/ipatests/test_install/test_install_checks.py
|
# Copyright (C) 2018 FreeIPA Contributors see COPYING for license
from __future__ import absolute_import
import tempfile
import pytest
from ipaclient.install.client import check_ldap_conf
from ipapython.admintool import ScriptError
@pytest.mark.parametrize("lines,expected", [
    (["PORT 389"], "PORT"),
    (["HOST example.org"], "HOST"),
    (["HOST example.org", "# PORT 389"], "HOST"),
    (["\tHOST example.org", "# PORT 389"], "HOST"),
    (["HOST example.org", "PORT 389"], "HOST, PORT"),
    (["# HOST example.org", "# PORT 389"], None),
    (["URI PORT"], None),
    ([], None),
])
def test_check_ldap(lines, expected):
    """check_ldap_conf must flag active HOST/PORT directives in ldap.conf.

    *expected* is None when the config is acceptable, otherwise the
    comma-joined directive names the raised ScriptError message ends with.
    Commented-out directives and keywords in other positions don't count.
    """
    with tempfile.NamedTemporaryFile('w+') as f:
        for line in lines:
            f.write(line)
            f.write('\n')
        f.write('\n')
        f.flush()
        if expected is None:
            assert check_ldap_conf(f.name) is True
        else:
            with pytest.raises(ScriptError) as e:
                check_ldap_conf(f.name)
            msg = e.value.msg
            assert msg.endswith(expected)
| 1,062
|
Python
|
.py
| 30
| 28.7
| 66
| 0.602927
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,609
|
__init__.py
|
freeipa_freeipa/ipatests/test_install/__init__.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Package containing LDAP updates unit tests.
"""
import ipatests.util
ipatests.util.check_ipaclient_unittests()
| 896
|
Python
|
.py
| 23
| 37.826087
| 71
| 0.77931
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,610
|
test_updates.py
|
freeipa_freeipa/ipatests/test_install/test_updates.py
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipaserver/install/ldapupdate.py` module.
"""
from __future__ import absolute_import
import os
import pytest
from ipalib import api
from ipalib import errors
from ipalib.constants import FQDN
from ipaserver.install.ldapupdate import LDAPUpdate, BadSyntax
from ipapython import ipaldap
from ipaplatform.constants import constants as platformconstants
from ipapython.dn import DN
"""
The updater works through files only so this is just a thin-wrapper controlling
which file we test at any given point.
IMPORTANT NOTE: It is easy for these tests to get out of sync. Any changes
made to the update files may require changes to the test cases as well.
Some cases pull records from LDAP and do comparisons to ensure that updates
have occurred as expected.
The DM password needs to be set in ~/.ipa/.dmpw
"""
@pytest.mark.tier0
@pytest.mark.needs_ipaapi
class TestUpdate:
    """
    Test the LDAP updater.
    """
    @pytest.fixture(autouse=True)
    def update_setup(self, request):
        # The Directory Manager password must be available in ~/.ipa/.dmpw;
        # otherwise the whole class is skipped.
        pwfile = api.env.dot_ipa + os.sep + ".dmpw"
        if os.path.isfile(pwfile):
            with open(pwfile, "r") as fp:
                self.dm_password = fp.read().rstrip()
        else:
            pytest.skip("No directory manager password")
        self.updater = LDAPUpdate()
        self.ld = ipaldap.LDAPClient.from_hostname_secure(FQDN)
        self.ld.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
                            bind_password=self.dm_password)
        self.testdir = os.path.abspath(os.path.dirname(__file__))
        if not os.path.isfile(os.path.join(self.testdir,
                                           "0_reset.update")):
            pytest.skip("Unable to find test update files")

        # DNs of the test container and test user the .update files operate on
        self.container_dn = DN(self.updater._template_str('cn=test, cn=accounts, $SUFFIX'))
        self.user_dn = DN(self.updater._template_str('uid=tuser, cn=test, cn=accounts, $SUFFIX'))

        def fin():
            # always drop the LDAP connection, even on test failure
            if self.ld:
                self.ld.unbind()
        request.addfinalizer(fin)

    def test_0_reset(self):
        """
        Reset the updater test data to a known initial state (test_0_reset)
        """
        try:
            modified = self.updater.update([os.path.join(self.testdir,
                                                         "0_reset.update")])
        except errors.NotFound:
            # Just means the entry doesn't exist yet
            modified = True

        assert modified

        # after the reset neither the container nor the user may exist
        with pytest.raises(errors.NotFound):
            self.ld.get_entries(
                self.container_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])

        with pytest.raises(errors.NotFound):
            self.ld.get_entries(
                self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])

    def test_1_add(self):
        """
        Test the updater with an add directive (test_1_add)
        """
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "1_add.update")])
        assert modified

        entries = self.ld.get_entries(
            self.container_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]

        objectclasses = entry.get('objectclass')
        for item in ('top', 'nsContainer'):
            assert item in objectclasses

        assert entry.single_value['cn'] == 'test'

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]

        objectclasses = entry.get('objectclass')
        for item in ('top', 'person', 'posixaccount', 'krbprincipalaux', 'inetuser'):
            assert item in objectclasses

        # the login shell comes from the platform default, not the update file
        actual = entry.single_value['loginshell']
        assert actual == platformconstants.DEFAULT_ADMIN_SHELL

        assert entry.single_value['sn'] == 'User'
        assert entry.single_value['uid'] == 'tuser'
        assert entry.single_value['cn'] == 'Test User'

    def test_2_update(self):
        """
        Test the updater when adding an attribute to an existing entry (test_2_update)
        """
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "2_update.update")])
        assert modified

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]
        assert entry.single_value['gecos'] == 'Test User'

    def test_3_update(self):
        """
        Test the updater forcing an attribute to a given value (test_3_update)
        """
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "3_update.update")])
        assert modified

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]
        assert entry.single_value['gecos'] == 'Test User New'

    def test_4_update(self):
        """
        Test the updater adding a new value to a single-valued attribute (test_4_update)
        """
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "4_update.update")])
        assert modified

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]
        assert entry.single_value['gecos'] == 'Test User New2'

    def test_5_update(self):
        """
        Test the updater adding a new value to a multi-valued attribute (test_5_update)
        """
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "5_update.update")])
        assert modified

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]
        # order is not guaranteed by LDAP; compare sorted
        actual = sorted(entry.get('cn'))
        expected = sorted(['Test User', 'Test User New'])
        assert actual == expected

    def test_6_update(self):
        """
        Test the updater removing a value from a multi-valued attribute (test_6_update)
        """
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "6_update.update")])
        assert modified

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]
        assert sorted(entry.get('cn')) == sorted(['Test User'])

    def test_6_update_1(self):
        """
        Test the updater removing a non-existent value from a multi-valued attribute (test_6_update_1)
        """
        # re-running the same removal must be a no-op
        modified = self.updater.update([os.path.join(self.testdir,
                                                     "6_update.update")])
        assert not modified

        entries = self.ld.get_entries(
            self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
        assert len(entries) == 1
        entry = entries[0]
        assert sorted(entry.get('cn')) == sorted(['Test User'])

    def test_7_cleanup(self):
        """
        Reset the test data to a known initial state (test_7_cleanup)
        """
        try:
            modified = self.updater.update([os.path.join(self.testdir,
                                                         "0_reset.update")])
        except errors.NotFound:
            # Just means the entry doesn't exist yet
            modified = True

        assert modified

        with pytest.raises(errors.NotFound):
            self.ld.get_entries(
                self.container_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])

        with pytest.raises(errors.NotFound):
            self.ld.get_entries(
                self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])

    def test_8_badsyntax(self):
        """
        Test the updater with an unknown keyword (test_8_badsyntax)
        """
        with pytest.raises(BadSyntax):
            self.updater.update(
                [os.path.join(self.testdir, "8_badsyntax.update")])

    def test_9_badsyntax(self):
        """
        Test the updater with an incomplete line (test_9_badsyntax)
        """
        with pytest.raises(BadSyntax):
            self.updater.update(
                [os.path.join(self.testdir, "9_badsyntax.update")])
| 9,375
|
Python
|
.py
| 217
| 32.930876
| 102
| 0.591114
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,611
|
version.py.in
|
freeipa_freeipa/ipapython/version.py.in
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pkg_resources import parse_version

# NOTE: every @TOKEN@ below is a build-time placeholder; the build system
# substitutes real values when version.py is generated from this template.

# The full version including strings
VERSION = "@VERSION@"

# A fuller version including the vendor tag (e.g. 3.3.3-34.fc20)
VENDOR_VERSION = "@VERSION@@VENDOR_SUFFIX@"

# Just the numeric portion of the version so one can do direct numeric
# comparisons to see if the API is compatible.
#
# How NUM_VERSION was generated changed over time:
# Before IPA 3.1.3, it was simply concatenated decimal numbers:
#   IPA 2.2.2: NUM_VERSION=222
#   IPA 2.2.99: NUM_VERSION=2299 (development version)
#   IPA 3.1.0: NUM_VERSION=310
#   IPA 3.1.3: NUM_VERSION=313
# In IPA 3.1.4 and 3.2.0, the version was taken as an octal number due to a bug
# (https://fedorahosted.org/freeipa/ticket/3622):
#   IPA 3.1.4: NUM_VERSION=12356 (octal 030104)
#   IPA 3.2.0: NUM_VERSION=12416 (octal 030200)
# After IPA 3.2.0, it is decimal number where each part has two digits:
#   IPA 3.2.1: NUM_VERSION=30201
#   IPA 3.2.99: NUM_VERSION=30299 (development version)
#   IPA 3.3.0: NUM_VERSION=30300
NUM_VERSION = @NUM_VERSION@

# The version of the API.
API_VERSION = "@API_VERSION@"

# Names of the plugins loaded by default; one name per line in the
# substituted text.
DEFAULT_PLUGINS = frozenset(l.strip() for l in """
@DEFAULT_PLUGINS@
""".strip().splitlines())

# Version of the Kerberos library IPA was built against.
KRB5_BUILD_VERSION = parse_version("@KRB5_BUILD_VERSION@")
| 2,038
|
Python
|
.py
| 47
| 42.148936
| 79
| 0.745078
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,612
|
ipachangeconf.py
|
freeipa_freeipa/ipapython/ipachangeconf.py
|
#
# ipachangeconf - configuration file manipulation classes and functions
# partially based on authconfig code
# Copyright (c) 1999-2007 Red Hat, Inc.
# Author: Simo Sorce <ssorce@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import fcntl
import logging
import os
import shutil
import six
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
def openLocked(filename, perms):
    """Open *filename* read-write (creating it with mode *perms* if
    necessary), take an exclusive fcntl lock on it and return it wrapped
    as a file object.

    Raises IOError when the file cannot be opened or locked.
    """
    fd = -1
    try:
        fd = os.open(filename, os.O_RDWR | os.O_CREAT, perms)
        fcntl.lockf(fd, fcntl.LOCK_EX)
    except OSError as exc:
        # Don't leak the descriptor when open() succeeded but the
        # locking step failed.
        if fd != -1:
            try:
                os.close(fd)
            except OSError:
                pass
        raise IOError(exc.errno, exc.strerror)
    return os.fdopen(fd, "r+")
# TODO: add subsection as a concept
# (ex. REALM.NAME = { foo = x bar = y } )
# TODO: put section delimiters as separating element of the list
# so that we can process multiple sections in one go
# TODO: add a comment all but provided options as a section option
# TODO: add subsection as a concept
#       (ex. REALM.NAME = { foo = x bar = y } )
# TODO: put section delimiters as separating element of the list
#       so that we can process multiple sections in one go
# TODO: add a comment all but provided options as a section option
class IPAChangeConf:
    """Generic configuration file reader/merger/writer.

    A configuration file is modelled as a tree of dictionaries, each of
    the form::

        {'name': ..., 'type': 'option|section|subsection|comment|empty',
         'value': ..., 'action': 'set|comment|remove|addifnotset'}

    The assignment delimiters, comment markers, indentation and section
    delimiters are all configurable so the same class can manage files
    with different syntaxes (krb5.conf, sssd.conf, ...).
    """

    def __init__(self, name):
        self.progname = name
        self.indent = ("", "", "")
        self.assign = (" = ", "=")
        self.dassign = self.assign[0]      # default assignment delimiter
        self.comment = ("#",)
        self.dcomment = self.comment[0]    # default comment marker
        self.eol = ("\n",)
        self.deol = self.eol[0]            # default end-of-line
        self.sectnamdel = ("[", "]")       # section name delimiters
        self.subsectdel = ("{", "}")       # subsection delimiters
        self.case_insensitive_sections = True

    def setProgName(self, name):
        """Set the program name used by this instance."""
        self.progname = name

    def setIndent(self, indent):
        """Set per-nesting-level indent strings (tuple) or one string."""
        if type(indent) is tuple:
            self.indent = indent
        elif type(indent) is str:
            self.indent = (indent, )
        else:
            raise ValueError('Indent must be a list of strings')

    def setOptionAssignment(self, assign):
        """Set accepted assignment delimiters; the first is the default."""
        if type(assign) is tuple:
            self.assign = assign
        else:
            self.assign = (assign, )
        self.dassign = self.assign[0]

    def setCommentPrefix(self, comment):
        """Set accepted comment prefixes; the first is the default."""
        if type(comment) is tuple:
            self.comment = comment
        else:
            self.comment = (comment, )
        self.dcomment = self.comment[0]

    def setEndLine(self, eol):
        """Set accepted line terminators; the first is the default."""
        if type(eol) is tuple:
            self.eol = eol
        else:
            self.eol = (eol, )
        self.deol = self.eol[0]

    def setSectionNameDelimiters(self, delims):
        """Set the (open, close) delimiter pair for section names."""
        self.sectnamdel = delims

    def setSubSectionDelimiters(self, delims):
        """Set the (open, close) delimiter pair for subsections."""
        self.subsectdel = delims

    def matchComment(self, line):
        """Return the comment text (without prefix) or False."""
        for v in self.comment:
            if line.lstrip().startswith(v):
                return line.lstrip()[len(v):]
        return False

    def matchEmpty(self, line):
        """Return True when the line contains only whitespace."""
        if line.strip() == "":
            return True
        return False

    def matchSection(self, line):
        """Return the section name when the line is a section header,
        otherwise False.  Whitespace inside the header is ignored."""
        cl = "".join(line.strip().split())
        cl = cl.lower() if self.case_insensitive_sections else cl

        if len(self.sectnamdel) != 2:
            return False
        if not cl.startswith(self.sectnamdel[0]):
            return False
        if not cl.endswith(self.sectnamdel[1]):
            return False
        return cl[len(self.sectnamdel[0]):-len(self.sectnamdel[1])]

    def matchSubSection(self, line):
        """Return the subsection name when the line opens a subsection
        (e.g. ``name = {``), otherwise False."""
        if self.matchComment(line):
            return False

        parts = line.split(self.dassign, 1)
        if len(parts) < 2:
            return False

        if parts[1].strip() == self.subsectdel[0]:
            return parts[0].strip()

        return False

    def matchSubSectionEnd(self, line):
        """Return True when the line closes a subsection."""
        if self.matchComment(line):
            return False

        if line.strip() == self.subsectdel[1]:
            return True

        return False

    def getSectionLine(self, section):
        """Return the formatted header line for *section*."""
        if len(self.sectnamdel) != 2:
            return section
        return self._dump_line(self.sectnamdel[0],
                               section,
                               self.sectnamdel[1],
                               self.deol)

    def _dump_line(self, *args):
        # Concatenate all arguments as text.
        return u"".join(unicode(x) for x in args)

    def dump(self, options, level=0):
        """Render the options tree to a single string."""
        output = []
        if level >= len(self.indent):
            level = len(self.indent) - 1

        for o in options:
            if o['type'] == "section":
                output.append(self._dump_line(self.sectnamdel[0],
                                              o['name'],
                                              self.sectnamdel[1]))
                output.append(self.dump(o['value'], (level + 1)))
                continue
            if o['type'] == "subsection":
                output.append(self._dump_line(self.indent[level],
                                              o['name'],
                                              self.dassign,
                                              self.subsectdel[0]))
                output.append(self.dump(o['value'], (level + 1)))
                output.append(self._dump_line(self.indent[level],
                                              self.subsectdel[1]))
                continue
            if o['type'] == "option":
                delim = o.get('delim', self.dassign)
                if delim not in self.assign:
                    raise ValueError(
                        'Unknown delim "%s" must be one of "%s"' %
                        (delim, " ".join(list(self.assign)))
                    )
                output.append(self._dump_line(self.indent[level],
                                              o['name'],
                                              delim,
                                              o['value']))
                continue
            if o['type'] == "comment":
                output.append(self._dump_line(self.dcomment, o['value']))
                continue
            if o['type'] == "empty":
                output.append('')
                continue
            raise SyntaxError('Unknown type: [%s]' % o['type'])

        # append an empty string to the output so that we add eol to the end
        # of the file contents in a single join()
        output.append('')
        return self.deol.join(output)

    def parseLine(self, line):
        """Parse one line into an option/comment/empty dictionary.

        Raises SyntaxError when no known assignment delimiter matches.
        """
        if self.matchEmpty(line):
            return {'name': 'empty', 'type': 'empty'}

        value = self.matchComment(line)
        if value:
            return {'name': 'comment',
                    'type': 'comment',
                    'value': value.rstrip()}

        o = dict()
        parts = line.split(self.dassign, 1)
        if len(parts) < 2:
            # The default assign didn't match, try the non-default
            for d in self.assign[1:]:
                parts = line.split(d, 1)
                if len(parts) >= 2:
                    o['delim'] = d
                    break

            if 'delim' not in o:
                raise SyntaxError('Syntax Error: Unknown line format')

        o.update({'name': parts[0].strip(), 'type': 'option',
                  'value': parts[1].rstrip()})
        return o

    def findOpts(self, opts, type, name, exclude_sections=False):
        """Return (index, match-or-None) for the first entry of *type*
        named *name*; stop early at (sub)sections when requested."""
        num = 0
        for o in opts:
            if o['type'] == type and o['name'] == name:
                return (num, o)
            if exclude_sections and (o['type'] == "section" or
                                     o['type'] == "subsection"):
                return (num, None)
            num += 1
        return (num, None)

    def commentOpts(self, inopts, level=0):
        """Return a copy of *inopts* with every entry turned into a
        comment (recursing into (sub)sections)."""
        opts = []
        if level >= len(self.indent):
            level = len(self.indent) - 1

        for o in inopts:
            if o['type'] == 'section':
                no = self.commentOpts(o['value'], (level + 1))
                val = self._dump_line(self.dcomment,
                                      self.sectnamdel[0],
                                      o['name'],
                                      self.sectnamdel[1])
                opts.append({'name': 'comment',
                             'type': 'comment',
                             'value': val})
                for n in no:
                    opts.append(n)
                continue
            if o['type'] == 'subsection':
                no = self.commentOpts(o['value'], (level + 1))
                val = self._dump_line(self.indent[level],
                                      o['name'],
                                      self.dassign,
                                      self.subsectdel[0])
                opts.append({'name': 'comment',
                             'type': 'comment',
                             'value': val})
                opts.extend(no)
                val = self._dump_line(self.indent[level], self.subsectdel[1])
                opts.append({'name': 'comment',
                             'type': 'comment',
                             'value': val})
                continue
            if o['type'] == 'option':
                delim = o.get('delim', self.dassign)
                if delim not in self.assign:
                    # BUGFIX: an unknown delimiter used to cause the
                    # option to be silently dropped from the commented
                    # output; fall back to the default delimiter instead
                    # and always emit the commented-out option.
                    delim = self.dassign
                val = self._dump_line(self.indent[level],
                                      o['name'],
                                      delim,
                                      o['value'])
                opts.append({'name': 'comment', 'type': 'comment',
                             'value': val})
                continue
            if o['type'] == 'comment':
                opts.append(o)
                continue
            if o['type'] == 'empty':
                opts.append({'name': 'comment',
                             'type': 'comment',
                             'value': ''})
                continue
            raise SyntaxError('Unknown type: [%s]' % o['type'])

        return opts

    def mergeOld(self, oldopts, newopts):
        """First merge pass: walk *oldopts* applying the set / comment /
        remove / addifnotset actions found in *newopts*; return the new
        tree."""
        opts = []

        for o in oldopts:
            if o['type'] == "section" or o['type'] == "subsection":
                _num, no = self.findOpts(newopts, o['type'], o['name'])
                if not no:
                    opts.append(o)
                    continue
                if no['action'] == "set":
                    mo = self.mergeOld(o['value'], no['value'])
                    opts.append({'name': o['name'],
                                 'type': o['type'],
                                 'value': mo})
                    continue
                if no['action'] == "comment":
                    co = self.commentOpts(o['value'])
                    for c in co:
                        opts.append(c)
                    continue
                if no['action'] == "remove":
                    continue
                raise SyntaxError('Unknown action: [%s]' % no['action'])

            if o['type'] == "comment" or o['type'] == "empty":
                opts.append(o)
                continue

            if o['type'] == "option":
                _num, no = self.findOpts(newopts, 'option', o['name'], True)
                if not no:
                    opts.append(o)
                    continue
                if no['action'] == 'comment' or no['action'] == 'remove':
                    # NOTE(review): identity comparison ('is not') on the
                    # values looks intentional in the original but only
                    # matches interned strings -- confirm before changing
                    # to '!='.
                    if (no['value'] is not None and
                            o['value'] is not no['value']):
                        opts.append(o)
                        continue
                    if no['action'] == 'comment':
                        value = self._dump_line(self.dcomment,
                                                o['name'],
                                                self.dassign,
                                                o['value'])
                        opts.append({'name': 'comment',
                                     'type': 'comment',
                                     'value': value})
                    continue
                if no['action'] == 'set':
                    opts.append(no)
                    continue
                if no['action'] == 'addifnotset':
                    # Keep the existing value but record what IPA would
                    # have set, as comments above the original option.
                    opts.append({
                        'name': 'comment',
                        'type': 'comment',
                        'value': self._dump_line(
                            ' ', no['name'], ' modified by IPA'
                        ),
                    })
                    opts.append({'name': 'comment', 'type': 'comment',
                                 'value': self._dump_line(no['name'],
                                                          self.dassign,
                                                          no['value'],
                                                          )})
                    opts.append(o)
                    continue
                raise SyntaxError('Unknown action: [%s]' % no['action'])

            raise SyntaxError('Unknown type: [%s]' % o['type'])

        return opts

    def mergeNew(self, opts, newopts):
        """Second merge pass: insert entries from *newopts* that were not
        present in the old tree (modifies *opts* in place)."""
        cline = 0

        for no in newopts:
            if no['type'] == "section" or no['type'] == "subsection":
                (num, o) = self.findOpts(opts, no['type'], no['name'])
                if not o:
                    if no['action'] == 'set':
                        opts.append(no)
                    continue
                if no['action'] == "set":
                    self.mergeNew(o['value'], no['value'])
                    continue
                cline = num + 1
                continue

            if no['type'] == "option":
                (num, o) = self.findOpts(opts, no['type'], no['name'], True)
                if not o:
                    if no['action'] == 'set' or no['action'] == 'addifnotset':
                        opts.append(no)
                    continue
                cline = num + 1
                continue

            if no['type'] == "comment" or no['type'] == "empty":
                opts.insert(cline, no)
                cline += 1
                continue

            raise SyntaxError('Unknown type: [%s]' % no['type'])

    def merge(self, oldopts, newopts):
        """
        Uses a two pass strategy:
        First we create a new opts tree from oldopts removing/commenting
        the options as indicated by the contents of newopts
        Second we fill in the new opts tree with options as indicated
        in the newopts tree (this is because entire (sub)sections may
        exist in the newopts that do not exist in oldopts)
        """
        opts = self.mergeOld(oldopts, newopts)
        self.mergeNew(opts, newopts)
        return opts

    # TODO: Make parse() recursive?
    def parse(self, f):
        """Parse an open file object into an options tree."""
        opts = []
        sectopts = []
        section = None
        subsectopts = []
        subsection = None
        curopts = opts
        fatheropts = opts

        # Read in the old file.
        for line in f:
            # It's a section start.
            value = self.matchSection(line)
            if value:
                if section is not None:
                    opts.append({'name': section,
                                 'type': 'section',
                                 'value': sectopts})
                sectopts = []
                curopts = sectopts
                fatheropts = sectopts
                section = value
                continue

            # It's a subsection start.
            value = self.matchSubSection(line)
            if value:
                if subsection is not None:
                    raise SyntaxError('nested subsections are not '
                                      'supported yet')
                subsectopts = []
                curopts = subsectopts
                subsection = value
                continue

            value = self.matchSubSectionEnd(line)
            if value:
                if subsection is None:
                    raise SyntaxError('Unmatched end subsection terminator '
                                      'found')
                fatheropts.append({'name': subsection,
                                   'type': 'subsection',
                                   'value': subsectopts})
                subsection = None
                curopts = fatheropts
                continue

            # Copy anything else as is.
            try:
                curopts.append(self.parseLine(line))
            except SyntaxError as e:
                raise SyntaxError('{error} in file {fname}: [{line}]'.format(
                    error=e, fname=f.name, line=line.rstrip()))

        # Add last section if any
        if len(sectopts) != 0:
            opts.append({'name': section,
                         'type': 'section',
                         'value': sectopts})

        return opts

    def changeConf(self, file, newopts):
        """
        Write settings to an existing configuration file, backing it up
        first as ``file + ".ipabkp"``.

        :param file: path to the file
        :param newopts: set of dictionaries in the form:
             {'name': 'foo', 'value': 'bar', 'action': 'set/comment'}
        :return: True on success
        """
        output = ""
        f = None
        try:
            # Do not catch a nonexistent file error;
            # we want to fail in that case
            shutil.copy2(file, (file + ".ipabkp"))

            f = openLocked(file, 0o644)

            oldopts = self.parse(f)

            options = self.merge(oldopts, newopts)

            output = self.dump(options)

            # Write it out and close it.
            f.seek(0)
            f.truncate(0)
            f.write(output)
        finally:
            try:
                if f:
                    f.close()
            except IOError:
                pass

        logger.debug("Updating configuration file %s", file)
        logger.debug(output)
        return True

    def newConf(self, file, options, file_perms=0o644):
        """
        Write settings to a new file, backup the old

        :param file: path to the file
        :param options: a set of dictionaries in the form:
             {'name': 'foo', 'value': 'bar', 'action': 'set/comment'}
        :param file_perms: number defining the new file's permissions
        :return: True on success
        """
        output = ""
        f = None
        try:
            try:
                shutil.copy2(file, (file + ".ipabkp"))
            except IOError as err:
                if err.errno == 2:
                    # The origin file did not exist (ENOENT)
                    pass

            f = openLocked(file, file_perms)

            # Truncate
            f.seek(0)
            f.truncate(0)

            output = self.dump(options)
            f.write(output)
        finally:
            try:
                if f:
                    f.close()
            except IOError:
                pass

        logger.debug("Writing configuration file %s", file)
        logger.debug(output)
        return True

    @staticmethod
    def setOption(name, value):
        """Build a 'set this option' directive."""
        return {'name': name,
                'type': 'option',
                'action': 'set',
                'value': value}

    @staticmethod
    def rmOption(name):
        """Build a 'remove this option' directive."""
        return {'name': name,
                'type': 'option',
                'action': 'remove',
                'value': None}

    @staticmethod
    def setSection(name, options):
        """Build a 'set this section' directive."""
        return {'name': name,
                'type': 'section',
                'action': 'set',
                'value': options}

    @staticmethod
    def emptyLine():
        """Build an empty-line entry."""
        return {'name': 'empty',
                'type': 'empty'}
| 20,086
|
Python
|
.py
| 505
| 24.522772
| 78
| 0.458043
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,613
|
errors.py
|
freeipa_freeipa/ipapython/errors.py
|
# Authors: Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
class SetseboolError(Exception):
    """Raised when setting a SELinux boolean fails

    :param failed: Dictionary mapping the names of booleans that could
        not be set to their intended values
    :param command: Command the user can run to set the booleans

    The initializer arguments are copied to attributes of the same name.
    """
    def __init__(self, failed, command):
        # Summarize all failed booleans as "name=value" pairs.
        message = "Could not set SELinux booleans: %s" % ' '.join(
            '%s=%s' % (name, value) for name, value in failed.items())
        super(SetseboolError, self).__init__(message)
        self.failed = failed
        self.command = command

    def format_service_warning(self, service_name):
        """Format warning for display when this is raised from service install

        :param service_name: human-readable service name interpolated
            into the warning text
        :return: multi-line warning string
        """
        return '\n'.join([
            'WARNING: %(err)s',
            '',
            'The %(service)s may not function correctly until ',
            'the booleans are successfully changed with the command:',
            '    %(cmd)s',
            'Try updating the policycoreutils and selinux-policy packages.'
        ]) % {'err': self, 'service': service_name, 'cmd': self.command}
| 1,984
|
Python
|
.py
| 42
| 41.47619
| 79
| 0.683531
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,614
|
dn.py
|
freeipa_freeipa/ipapython/dn.py
|
# Authors:
# John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r'''
Goal
----
To allow a Python programmer the ability to operate on DN's
(Distinguished Names) in a simple intuitive manner supporting all the
Pythonic mechanisms for manipulating objects such that the simple
majority case remains simple with simple code, yet the corner cases
are fully supported. With the result both simple and complex cases are
100% correct.
This is achieved with a fair amount of syntax sugar which is best
described as "Do What I Mean" (i.e. DWIM). The class implementations
take simple expressions and internally convert them to their more
complex full definitions hiding much of the complexity from the
programmer.
Anatomy of a DN
---------------
Some definitions:
AVA
An AVA is an Attribute Value Assertion. In more simple terms it's
an attribute value pair typically expressed as attr=value
(e.g. cn=Bob). Both the attr and value in an AVA when expressed in
a string representation are subject to encoding rules.
RDN
A RDN is a Relative Distinguished Name. A RDN is a non-empty set of
AVA's. In the common case a RDN is single valued consisting of 1
AVA (e.g. cn=Bob). But a RDN may be multi-valued consisting of
more than one AVA. Because the RDN is a set of AVA's the AVA's are
unordered when they appear in a multi-valued RDN. In the string
representation of a RDN AVA's are separated by the plus sign (+).
DN
A DN is an ordered sequence of 1 or more RDN's. In the string
representation of a DN each RDN is separated by a comma (,)
Thus a DN is:
Sequence of set of <encoded attr, encoded value> pairs
The following are valid DN's
# 1 RDN with 1 AVA (e.g. cn=Bob)
RDN(AVA)
# 2 RDN's each with 1 AVA (e.g. cn=Bob,dc=redhat.com)
RDN(AVA),RDN(AVA)
# 2 RDN's the first RDN is multi-valued with 2 AVA's
# the second RDN is singled valued with 1 AVA
# (e.g. cn=Bob+ou=people,dc=redhat.com
RDN({AVA,AVA}),RDN(AVA)
Common programming mistakes
---------------------------
DN's present a pernicious problem for programmers. They appear to have
a very simple string format in the majority case, a sequence of
attr=value pairs separated by commas. For example:
dn='cn=Bob,ou=people,dc=redhat,dc=com'
As such there is a tendency to believe you can form DN's by simple
string manipulations such as:
dn='%s=%s' % ('cn','Bob') + ',ou=people,dc=redhat,dc=com'
Or to extract a attr & value by searching the string, for example:
attr=dn[0 : dn.find('=')]
value=dn[dn.find('=')+1 : dn.find(',')]
Or compare a value returned by an LDAP query to a known value:
if value == 'Bob'
All of these simple coding assumptions are WRONG and will FAIL when a
DN is not one of the simple DN's (simple DN's are probably the 95% of
all DN's). This is what makes DN handling pernicious. What works in
95% of the cases and is simple, fails for the 5% of DN's which are not
simple.
Examples of where the simple assumptions fail are:
* A RDN may be multi-valued
* A multi-valued RDN has no ordering on it's components
* Attr's and values must be UTF-8 encoded
* String representations of AVA's, RDN's and DN's must be completely UTF-8
* An attr or value may have reserved characters which must be escaped.
* Whitespace needs special handling
To complicate matters a bit more the RFC for the string representation
of DN's (RFC 4514) permits a variety of different syntax's each of
which can evaluate to exactly the same DN but have different string
representations. For example, the attr "r,w" which contains a reserved
character (the comma) can be encoded as a string in these different
ways:
'r\,w' # backslash escape
'r\2cw' # hexadecimal ascii escape
'#722C77' # binary encoded
It should be clear a DN string may NOT be a simple string, rather a DN
string is ENCODED. For simple strings the encoding of the DN is
identical to the simple string value (this common case leads to
erroneous assumptions and bugs because it does not account for
encodings).
The openldap library we use at the client level uses the backslash
escape form. The LDAP server we use uses the hexadecimal ascii escape
form. Thus 'r,w' appears as 'r\,w' when sent from the client to the
LDAP server as part of a DN. But when it's returned as a DN from the
server in an LDAP search it's returned as 'r\2cw'. Any attempt to
compare 'r\,w' to 'r\2cw' for equality will fail despite the fact they
are indeed equal once decoded. Such a test fails because you're
comparing two different encodings of the same value. In MIME you
wouldn't expect the base64 encoding of a string to be equal to the
same string encoded as quoted-printable would you?
When you are comparing attrs or values which are part of a DN and
other string you MUST:
* Know if either of the strings have been encoded and make sure you're
comparing only decoded components component-wise.
* Extract the component from the DN and decode it. You CANNOT decode
the entire DN as a string and operate on it. Why? Consider a value
with a comma embedded in it. For example:
cn=r\2cw,cn=privilege
Is a DN with 2 RDN components: cn=r,w followed by "cn=privilege"
But if you decode the entire DN string as a whole you would get:
cn=r,w,cn=privilege
Which is a malformed DN with 3 RDN's, the 2nd RDN is invalid.
* Determine if a RDN is multi-valued, if so you must account
for the fact each AVA component in the multi-valued RDN can appear
in any order and still be equivalent. For example the following two
RDN's are equal:
cn=Bob+ou=people
ou=people+cn=Bob
In addition each AVA (cn=Bob & ou=people) needs to be
INDEPENDENTLY decoded prior to comparing the unordered set of AVA's
in the multi-valued RDN.
If you are trying to form a new DN or RDN from a raw string you cannot
simply do string concatenation or string formatting unless you ESCAPE
the components independently prior to concatenation, for example:
base = 'dc=redhat,dc=com'
value = 'r,w'
dn = 'cn=%s,%s' % (value, base)
Will result in the malformed DN 'cn=r,w,dc=redhat,dc=com'
Syntax Sugar
------------
The majority of DN's have a simple string form:
attr=value,attr=value
We want the programmer to be able to create DN's, compare them, and
operate on their components as simply and concisely as possible so
the classes are implemented to provide a lot of syntax sugar.
The classes automatically handle UTF-8 <-> Unicode conversions. Every
attr and value which is returned from a class will be Unicode. Every
attr and value assigned into an object will be promoted to
Unicode. All string representations in RFC 4514 format will be UTF-8
and properly escaped. Thus at the "user" or "API" level every string
is Unicode with the single exception that the str() method returns RFC
compliant escaped UTF-8.
RDN's are assumed to be single-valued. If you need a multi-valued RDN
(an exception) you must explicitly create a multi-valued RDN.
Thus DN's are assumed to be a sequence of attr, value pairs, which is
equivalent to a sequence of RDN's. The attr and value in the pair MUST
be strings.
The DN and RDN constructors take a sequence, the constructor parses
the sequence to find items it knows about.
The DN constructor will accept in its sequence:
* tuple of 2 strings, converting it to an RDN
* list of 2 strings, converting it to an RDN
* a RDN object
* a DN syntax string (e.g. 'cn=Bob,dc=redhat.com')
Note DN syntax strings should be avoided if possible when passing to a
constructor because they run afoul of the problems outlined above
which the DN, RDN & AVA classes are meant to overcome. But sometimes a
DN syntax string is all you have to work with. DN strings which come
from a LDAP library or server will be properly formed and it's safe to
use those. However DN strings provided via user input should be
treated suspiciously as they may be improperly formed. You can test
for this by passing the string to the DN constructor and see if it
throws an exception.
The sequence passed to the DN constructor takes each item in order,
produces one or more RDN's from it and appends those RDN in order to
its internal RDN sequence.
For example:
DN(('cn', 'Bob'), ('dc', 'redhat.com'))
This is equivalent to the DN string:
cn=Bob,dc=redhat.com
And is exactly equal to:
DN(RDN(AVA('cn','Bob')),RDN(AVA('dc','redhat.com')))
The following are alternative syntax's which are all exactly
equivalent to the above example.
DN(['cn', 'Bob'], ['dc', 'redhat.com'])
DN(RDN('cn', 'Bob'), RDN('dc', 'redhat.com'))
You can provide a properly escaped string representation.
DN('cn=Bob,dc=redhat.com')
You can mix and match any of the forms in the constructor parameter
list.
DN(('cn', 'Bob'), 'dc=redhat.com')
DN(('cn', 'Bob'), RDN('dc', 'redhat.com'))
AVA's have an attr and value property, thus if you have an AVA
# Get the attr and value
ava.attr -> u'cn'
ava.value -> u'Bob'
# Set the attr and value
ava.attr = 'cn'
ava.value = 'Bob'
Since RDN's are assumed to be single valued, exactly the same
behavior applies to an RDN. If the RDN is multi-valued then the attr
property returns the attr of the first AVA, likewise for the value.
# Get the attr and value
rdn.attr -> u'cn'
rdn.value -> u'Bob'
# Set the attr and value
rdn.attr = 'cn'
rdn.value = 'Bob'
Also RDN's can be indexed by name or position (see the RDN class doc
for details).
rdn['cn'] -> u'Bob'
rdn[0] -> AVA('cn', 'Bob')
A DN is a sequence of RDN's, as such any of Python's container
operators can be applied to a DN in a intuitive way.
# How many RDN's in a DN?
len(dn)
# WARNING, this a count of RDN's not how characters there are in the
# string representation the dn, instead that would be:
len(str(dn))
# Iterate over each RDN in a DN
for rdn in dn:
# Get the first RDN in a DN
dn[0] -> RDN('cn', 'Bob')
# Get the value of the first RDN in a DN
dn[0].value -> u'Bob'
# Get the value of the first RDN by indexing by attr name
dn['cn'] -> u'Bob'
# WARNING, when a string is used as an index key the FIRST RDN's value
# in the sequence whose attr matches the key is returned. Thus if you
# have a DN like this "cn=foo,cn=bar" then dn['cn'] will always return
# 'foo' even though there is another attr with the name 'cn'. This is
# almost always what the programmer wants. See the class doc for how
# you can override this default behavior and get a list of every value
# whose attr matches the key.
# Set the first RDN in the DN (all are equivalent)
dn[0] = ('cn', 'Bob')
dn[0] = ['cn', 'Bob']
dn[0] = RDN('cn', 'Bob')
dn[0].attr = 'cn'
dn[0].value = 'Bob'
# Get the first two RDN's using slices
dn[0:2]
# Get the last two RDN's using slices
dn[-2:]
# Get a list of all RDN's using slices
dn[:]
# Set the 2nd and 3rd RDN using slices (all are equivalent)
dn[1:3] = ('cn', 'Bob), ('dc', 'redhat.com')
dn[1:3] = RDN('cn', 'Bob), RDN('dc', 'redhat.com')
String representations and escapes:
# To get an RFC compliant string representation of a DN, RDN or AVA
# simply call str() on it or evaluate it in a string context.
str(dn) -> 'cn=Bob,dc=redhat.com'
# When working with attr's and values you do not have to worry about
# escapes, simply use the raw unescaped string in a natural fashion.
rdn = RDN('cn', 'r,w')
# Thus:
rdn.value == 'r,w' -> True
# But:
str(rdn) == 'cn=r,w' -> False
# Because:
str(rdn) -> 'cn=r\2cw' or 'cn='r\,w' # depending on the underlying LDAP library
Equality and Comparing:
# All DN's, RDN's and AVA's support equality testing in an intuitive
# manner.
dn1 = DN(('cn', 'Bob'))
dn2 = DN(RDN('cn', 'Bob'))
dn1 == dn2 -> True
dn1[0] == dn2[0] -> True
dn1[0].value = 'Bobby'
dn1 == dn2 -> False
DN objects implement startswith(), endswith() and the "in" membership
operator. You may pass a DN or RDN object to these. Examples:
if dn.endswith(base_dn):
if dn.startswith(rdn1):
if container_dn in dn:
# See the class doc for how DN's, RDN's and AVA's compare
# (e.g. cmp()). The general rule is for objects supporting multiple
# values first their lengths are compared, then if the lengths match
# the respective components of each are pair-wise compared until one
# is discovered to be non-equal. The comparison is case insensitive.
Concatenation, In-Place Addition, Insertion:
# DN's and RDN's can be concatenated.
# Return a new DN by appending the RDN's of dn2 to dn1
dn3 = dn1 + dn2
# Append a RDN to DN's RDN sequence (all are equivalent)
dn += ('cn', 'Bob')
dn += RDN('cn', 'Bob')
# Append a DN to an existing DN
dn1 += dn2
# Prepend a RDN to an existing DN
dn1.insert(0, RDN('cn', 'Bob'))
Finally see the unittest for a more complete set of ways you can
manipulate these objects.
Immutability
------------
All the class types are immutable.
As with other immutable types (such as str and int), you must not rely on
the object identity operator ("is") for comparisons.
It is possible to "copy" an object by passing an object of the same type
to the constructor. The result may share underlying structure.
'''
from __future__ import print_function
import sys
import functools
import cryptography.x509
import six
try:
from ldap import DECODING_ERROR
except ImportError:
from ipapython.dn_ctypes import str2dn, dn2str, DECODING_ERROR
else:
from ldap.dn import str2dn, dn2str
if six.PY3:
unicode = str
__all__ = 'AVA', 'RDN', 'DN'
def _adjust_indices(start, end, length):
'helper to fixup start/end slice values'
if end > length:
end = length
elif end < 0:
end += length
if end < 0:
end = 0
if start < 0:
start += length
if start < 0:
start = 0
return start, end
def _normalize_ava_input(val):
    """Coerce an AVA attr or value into the native string type used
    internally (str on Py3, UTF-8 bytes on Py2); reject raw bytes on
    Py3."""
    if six.PY3 and isinstance(val, bytes):
        raise TypeError('expected str, got bytes: %r' % val)
    if not isinstance(val, str):
        # Not a native string: stringify, then encode for storage.
        return val_encode(str(val))
    if six.PY2 and isinstance(val, unicode):
        return val.encode('utf-8')
    return val
def str2rdn(value):
    """Parse exactly one RDN from *value* (RFC 4514 string syntax).

    :returns: the single RDN in openldap list-of-triples form
    :raises ValueError: when the string cannot be decoded, or when it
        encodes more than one RDN
    """
    try:
        parsed = str2dn(value.encode('utf-8'))
    except DECODING_ERROR:
        raise ValueError("malformed AVA string = \"%s\"" % value)
    if len(parsed) == 1:
        return parsed[0]
    raise ValueError("multiple RDN's specified by \"%s\"" % (value))
def get_ava(*args):
    """
    Get AVA from args in open ldap format(raw). Optimized for construction
    from openldap format.

    Allowed formats of argument list:

    1) three args - open ldap format (attr and value have to be utf-8 encoded):
       a) ['attr', 'value', 0]
    2) two args:
       a) ['attr', 'value']
    3) one arg:
       a) [('attr', 'value')]
       b) [['attr', 'value']]
       c) [AVA(..)]
       d) ['attr=value']
    """
    argc = len(args)
    if argc == 3:
        # raw openldap triple constructed from an RDN: pass through untouched
        return args
    if argc == 2:
        # caller-supplied attr/value pair
        return [_normalize_ava_input(args[0]), _normalize_ava_input(args[1]), 0]
    if argc != 1:
        raise TypeError("invalid number of arguments. 1-3 allowed")

    # slow path: a single tuple/list, AVA or DN-syntax string
    arg = args[0]
    if isinstance(arg, AVA):
        return arg.to_openldap()
    if isinstance(arg, (tuple, list)):
        if len(arg) != 2:
            raise ValueError("tuple or list must be 2-valued, not \"%s\"" % (arg))
        return [_normalize_ava_input(arg[0]), _normalize_ava_input(arg[1]), 0]
    if isinstance(arg, str):
        rdn = str2rdn(arg)
        if len(rdn) > 1:
            raise TypeError("multiple AVA's specified by \"%s\"" % (arg))
        return list(rdn[0])
    raise TypeError("with 1 argument, argument must be str, unicode, tuple or list, got %s instead" %
                    arg.__class__.__name__)
def sort_avas(rdn):
    """Canonically order the AVA's of a multi-valued RDN in place."""
    if len(rdn) > 1:
        rdn.sort(key=ava_key)
def ava_key(ava):
    """Case-insensitive sort/comparison key for an openldap AVA triple."""
    attr, value = ava[0], ava[1]
    return attr.lower(), value.lower()
def cmp_rdns(a, b):
    """Three-way compare of two openldap-format RDN's; returns -1, 0 or 1."""
    key_a = rdn_key(a)
    key_b = rdn_key(b)
    # classic cmp() idiom: bool subtraction yields exactly -1, 0 or 1
    return (key_a > key_b) - (key_a < key_b)
def rdn_key(rdn):
    """Sort key for an RDN: AVA count first, then the case-folded AVA keys."""
    key = [len(rdn)]
    key.extend(ava_key(ava) for ava in rdn)
    return tuple(key)
# val_encode/val_decode translate between the public text type and the
# internal storage type used for AVA attrs and values.
if six.PY2:
    # Python 2: Input/output is unicode; we store UTF-8 bytes
    def val_encode(s):
        return s.encode('utf-8')

    def val_decode(s):
        return s.decode('utf-8')
else:
    # Python 3: Everything is unicode (str); storage equals the public type,
    # so both functions are identity apart from the bytes guard below.
    def val_encode(s):
        # reject bytes early so they cannot leak into AVA/RDN/DN internals
        if isinstance(s, bytes):
            raise TypeError('expected str, got bytes: %s' % s)
        return s

    def val_decode(s):
        return s
@functools.total_ordering
class AVA:
    '''
    AVA(arg0, ...)

    An AVA is an LDAP Attribute Value Assertion. It is convenient to think of
    AVA's as a <attr,value> pair. AVA's are members of RDN's (Relative
    Distinguished Name).

    The AVA constructor is passed a sequence of args and a set of
    keyword parameters used for configuration.

    The arg sequence may be:

    1) With 2 arguments, the first argument will be the attr, the 2nd
    the value. Each argument must be scalar convertible to unicode.

    2) With a single list or tuple argument containing exactly 2 items.
    Each item must be scalar convertible to unicode.

    3) With a single string (or unicode) argument, in this case the string will
    be interpreted using the DN syntax described in RFC 4514 to yield a AVA
    <attr,value> pair. The parsing recognizes the DN syntax escaping rules.

    For example:

    ava = AVA('cn', 'Bob')    # case 1: two strings
    ava = AVA(('cn', 'Bob'))  # case 2: 2-valued tuple
    ava = AVA(['cn', 'Bob'])  # case 2: 2-valued list
    ava = AVA('cn=Bob')       # case 3: DN syntax

    AVA objects have two properties for accessing their data:

    attr:  the attribute name, cn in our example
    value: the attribute's value, Bob in our example

    When attr and value are returned they will always be unicode.  Both
    properties are read-only; AVA objects are immutable.

    AVA objects support indexing by name, e.g.

    ava['cn']

    returns the value (Bob in our example). If the index key does not match
    the attr then a KeyError will be raised.

    AVA objects support equality testing and comparison (e.g. cmp()). When they
    are compared the attr is compared first, if the 2 attr's are equal then the
    values are compared. The comparison is case insensitive (because attr's map
    to numeric OID's and their values derive from the 'name' attribute type
    (OID 2.5.4.41) whose EQUALITY MATCH RULE is caseIgnoreMatch).

    The str method of an AVA returns the string representation in RFC 4514 DN
    syntax with proper escaping.
    '''

    def __init__(self, *args):
        # internal form: openldap triple [attr, value, flags] (see get_ava)
        self._ava = get_ava(*args)

    def _get_attr(self):
        return val_decode(self._ava[0])

    def _set_attr(self, new_attr):
        # NOTE(review): not wired into the ``attr`` property below -- the
        # class is documented immutable, so this setter appears to be dead
        # code retained from an earlier mutable design; confirm before use.
        try:
            self._ava[0] = _normalize_ava_input(new_attr)
        except Exception as e:
            raise ValueError('unable to convert attr "%s": %s' % (new_attr, e))

    # read-only on purpose: AVA's are immutable
    attr = property(_get_attr)

    def _get_value(self):
        return val_decode(self._ava[1])

    def _set_value(self, new_value):
        # NOTE(review): same as _set_attr -- not exposed via the property.
        try:
            self._ava[1] = _normalize_ava_input(new_value)
        except Exception as e:
            raise ValueError('unable to convert value "%s": %s' % (new_value, e))

    # read-only on purpose: AVA's are immutable
    value = property(_get_value)

    def to_openldap(self):
        # return a copy of the internal triple so callers cannot mutate us
        return list(self._ava)

    def __str__(self):
        # RFC 4514 text form with proper escaping
        return dn2str([[self.to_openldap()]])

    def __repr__(self):
        return "%s.%s('%s')" % (self.__module__, self.__class__.__name__, self.__str__())

    def __getitem__(self, key):
        # supports positional (0=attr, 1=value) and by-attr-name indexing
        if key == 0:
            return self.attr
        elif key == 1:
            return self.value
        elif key == self.attr:
            return self.value
        else:
            raise KeyError("\"%s\" not found in %s" % (key, self.__str__()))

    def __hash__(self):
        # Hash is computed from AVA's string representation.
        #
        # Because attrs & values are comparison case-insensitive the
        # hash value between two objects which compare as equal but
        # differ in case must yield the same hash value.
        return hash(str(self).lower())

    def __eq__(self, other):
        '''
        The attr comparison is case insensitive because attr is
        really an LDAP attribute type which means it's specified with
        an OID (dotted number) and not a string. Since OID's are
        numeric the human readable name which maps to the OID is not
        significant in case.

        The value comparison is also case insensitive because the all
        attribute types used in a DN are derived from the 'name'
        attribute type (OID 2.5.4.41) whose EQUALITY MATCH RULE is
        caseIgnoreMatch.
        '''
        # Try coercing string to AVA, if successful compare to coerced object
        if isinstance(other, str):
            try:
                other_ava = AVA(other)
                return self.__eq__(other_ava)
            except Exception:
                return False

        # If it's not an AVA it can't be equal
        if not isinstance(other, AVA):
            return False

        # Perform comparison between objects of same type
        return ava_key(self._ava) == ava_key(other._ava)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # remaining rich comparisons are supplied by functools.total_ordering
        'comparison is case insensitive, see __eq__ doc for explanation'
        if not isinstance(other, AVA):
            raise TypeError("expected AVA but got %s" % (other.__class__.__name__))

        return ava_key(self._ava) < ava_key(other._ava)
@functools.total_ordering
class RDN:
    '''
    RDN(arg0, ...)

    An RDN is a LDAP Relative Distinguished Name. RDN's are members of DN's
    (Distinguished Name). An RDN contains 1 or more AVA's. If the RDN contains
    more than one AVA it is said to be a multi-valued RDN. When an RDN is
    multi-valued the AVA's are unordered comprising a set. However this
    implementation orders the AVA's according to the AVA comparison function to
    make equality and comparison testing easier. Think of this a canonical
    normalization (however LDAP does not impose any ordering on multiple AVA's
    within an RDN). Single valued RDN's are the norm and thus the RDN
    constructor has simple syntax for them.

    The RDN constructor is passed a sequence of args and a set of
    keyword parameters used for configuration.

    The constructor iterates through the sequence and adds AVA's to the RDN.

    The arg sequence may be:

    * A 2-valued tuple or list denotes the <attr,value> pair of an AVA. The
    first member is the attr and the second member is the value, both members
    must be strings (or unicode). The tuple or list is passed to the AVA
    constructor and the resulting AVA is added to the RDN. Multiple tuples or
    lists may appear in the argument list, each adds one additional AVA to the
    RDN.

    * A single string (or unicode) argument, in this case the string will
    be interpreted using the DN syntax described in RFC 4514 to yield one or
    more AVA <attr,value> pairs. The parsing recognizes the DN syntax escaping
    rules.

    * A AVA object, the AVA will be copied into the new RDN respecting
    the constructors keyword configuration parameters.

    * A RDN object, the AVA's in the RDN are copied into the new RDN
    respecting the constructors keyword configuration parameters.

    Single AVA Examples:

    RDN(('cn', 'Bob'))      # tuple yields 1 AVA
    RDN('cn=Bob')           # DN syntax with 1 AVA
    RDN(AVA('cn', 'Bob'))   # AVA object adds 1 AVA

    Multiple AVA Examples:

    RDN(('cn', 'Bob'),('ou', 'people'))       # 2 tuples yields 2 AVA's
    RDN('cn=Bob+ou=people')                   # DN syntax with 2 AVA's
    RDN(AVA('cn', 'Bob'),AVA('ou', 'people')) # 2 AVA objects adds 2 AVA's
    RDN(('cn', 'Bob'), 'ou=people')           # 2 args, 1st tuple forms 1 AVA,
                                              # 2nd DN syntax string adds 1 AVA,
                                              # 2 AVA's in total

    Note: The RHS of a slice assignment is interpreted exactly in the
    same manner as the constructor argument list (see above examples).

    RDN objects support iteration over their AVA members. You can iterate all
    AVA members via any Python iteration syntax. RDN objects support full Python
    indexing using bracket [] notation. Examples:

    len(rdn)        # return the number of AVA's
    rdn[0]          # indexing the first AVA
    rdn['cn']       # index by AVA attr, returns AVA value
    for ava in rdn: # iterate over each AVA
    rdn[:]          # a slice, in this case a copy of each AVA

    WARNING: When indexing by attr (e.g. rdn['cn']) there is a possibility more
    than one AVA has the same attr name as the index key. The default behavior
    is to return the value of the first AVA whose attr matches the index
    key.

    RDN objects support the AVA attr and value properties as another programmer
    convenience because the vast majority of RDN's are single valued. The attr
    and value properties return the attr and value properties of the first AVA
    in the RDN, for example:

    rdn = RDN(('cn', 'Bob')) # rdn has 1 AVA whose attr == 'cn' and value == 'Bob'
    len(rdn) -> 1
    rdn.attr -> u'cn'        # exactly equivalent to rdn[0].attr
    rdn.value -> u'Bob'      # exactly equivalent to rdn[0].value

    When attr and value are returned they will always be unicode.

    If an RDN is multi-valued the attr and value properties still return only
    the first AVA's properties, programmer beware! Recall the AVA's in the RDN
    are sorted according the to AVA collating semantics.

    RDN objects support equality testing and comparison. See AVA for the
    definition of the comparison method.

    RDN objects support concatenation and addition with other RDN's or AVA's

    rdn1 + rdn2 # yields a new RDN object with the contents of each RDN.
    rdn1 + ava1 # yields a new RDN object with the contents of rdn1 and ava1

    RDN objects can add AVA's objects via in-place addition.

    rdn1 += rdn2 # rdn1 now contains the sum of rdn1 and rdn2
    rdn1 += ava1 # rdn1 has ava1 added to it.

    The str method of an RDN returns the string representation in RFC 4514 DN
    syntax with proper escaping.
    '''

    # AVA class used when materializing member AVA's; DN relies on this hook
    AVA_type = AVA

    def __init__(self, *args, **kwds):
        # raw=True is an internal fast path: args are trusted openldap triples
        self._avas = self._avas_from_sequence(args, kwds.get('raw', False))

    def _avas_from_sequence(self, args, raw=False):
        # Build the internal list of openldap AVA triples from the
        # constructor arguments; multi-valued RDN's are canonically sorted.
        avas = []
        sort = 0
        ava_count = len(args)

        if raw:  # fast raw mode
            # NOTE(review): in raw mode ``args`` (a tuple) is stored directly
            # rather than copied into a list -- callers must not mutate it.
            avas = args
        elif ava_count == 1 and isinstance(args[0], str):
            avas = str2rdn(args[0])
            sort = 1
        elif ava_count == 1 and isinstance(args[0], RDN):
            # copying an RDN: already sorted, no re-sort needed
            avas = args[0].to_openldap()
        elif ava_count > 0:
            sort = 1
            for arg in args:
                avas.append(get_ava(arg))

        if sort:
            sort_avas(avas)
        return avas

    def to_openldap(self):
        # deep-enough copy: fresh list of fresh triples
        return [list(a) for a in self._avas]

    def __str__(self):
        # RFC 4514 text form with proper escaping
        return dn2str([self.to_openldap()])

    def __repr__(self):
        return "%s.%s('%s')" % (self.__module__, self.__class__.__name__, self.__str__())

    def _get_ava(self, ava):
        # wrap an internal triple in an AVA object on demand
        return self.AVA_type(*ava)

    def _next(self):
        for ava in self._avas:
            yield self._get_ava(ava)

    def __iter__(self):
        return self._next()

    def __len__(self):
        return len(self._avas)

    def __getitem__(self, key):
        # int -> AVA object; slice -> list of AVA's; str -> first matching value
        if isinstance(key, int):
            return self._get_ava(self._avas[key])
        if isinstance(key, slice):
            return [self._get_ava(ava) for ava in self._avas[key]]
        elif isinstance(key, str):
            for ava in self._avas:
                if key == val_decode(ava[0]):
                    return val_decode(ava[1])
            raise KeyError("\"%s\" not found in %s" % (key, self.__str__()))
        else:
            raise TypeError("unsupported type for RDN indexing, must be int, basestring or slice; not %s" % \
                            (key.__class__.__name__))

    def _get_attr(self):
        # attr of the first (canonically sorted) AVA
        if len(self._avas) == 0:
            raise IndexError("No AVA's in this RDN")
        return val_decode(self._avas[0][0])

    def _set_attr(self, new_attr):
        # NOTE(review): not wired into the ``attr`` property -- the classes
        # are documented immutable; this setter appears to be dead code.
        if len(self._avas) == 0:
            raise IndexError("No AVA's in this RDN")

        self._avas[0][0] = val_encode(str(new_attr))

    # read-only on purpose: RDN's are immutable
    attr = property(_get_attr)

    def _get_value(self):
        # value of the first (canonically sorted) AVA
        if len(self._avas) == 0:
            raise IndexError("No AVA's in this RDN")
        return val_decode(self._avas[0][1])

    def _set_value(self, new_value):
        # NOTE(review): not wired into the ``value`` property -- see _set_attr.
        if len(self._avas) == 0:
            raise IndexError("No AVA's in this RDN")

        self._avas[0][1] = val_encode(str(new_value))

    # read-only on purpose: RDN's are immutable
    value = property(_get_value)

    def __hash__(self):
        # Hash is computed from RDN's string representation.
        #
        # Because attrs & values are comparison case-insensitive the
        # hash value between two objects which compare as equal but
        # differ in case must yield the same hash value.
        return hash(str(self).lower())

    def __eq__(self, other):
        # Try coercing string to RDN, if successful compare to coerced object
        if isinstance(other, str):
            try:
                other_rdn = RDN(other)
                return self.__eq__(other_rdn)
            except Exception:
                return False

        # If it's not an RDN it can't be equal
        if not isinstance(other, RDN):
            return False

        # Perform comparison between objects of same type
        return rdn_key(self._avas) == rdn_key(other._avas)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # remaining rich comparisons are supplied by functools.total_ordering
        if not isinstance(other, RDN):
            raise TypeError("expected RDN but got %s" % (other.__class__.__name__))

        return rdn_key(self._avas) < rdn_key(other._avas)

    def __add__(self, other):
        # Return a new RDN containing our AVA's plus the other's; result is
        # re-sorted into canonical order.
        result = self.__class__(self)
        if isinstance(other, RDN):
            for ava in other._avas:
                result._avas.append((ava[0], ava[1], ava[2]))
        elif isinstance(other, AVA):
            result._avas.append(other.to_openldap())
        elif isinstance(other, str):
            rdn = self.__class__(other)
            for ava in rdn._avas:
                result._avas.append((ava[0], ava[1], ava[2]))
        else:
            raise TypeError("expected RDN, AVA or basestring but got %s" % (other.__class__.__name__))

        sort_avas(result._avas)
        return result
@functools.total_ordering
class DN:
    '''
    DN(arg0, ...)

    A DN is a LDAP Distinguished Name. A DN is an ordered sequence of RDN's.

    The DN constructor is passed a sequence of args and a set of
    keyword parameters used for configuration.

    The constructor iterates through the sequence and adds the RDN's
    it finds in order to the DN object. Each item in the sequence may
    be:

    * A 2-valued tuple or list. The first member is the attr and the
      second member is the value of an RDN, both members must be
      strings (or unicode). The tuple or list is passed to the RDN
      constructor and the resulting RDN is appended to the
      DN. Multiple tuples or lists may appear in the argument list,
      each adds one additional RDN to the DN.

    * A single string (or unicode) argument, in this case the string
      will be interpreted using the DN syntax described in RFC 4514
      to yield one or more RDN's which will be appended in order to
      the DN. The parsing recognizes the DN syntax escaping rules.

    * A single ``cryptography.x509.name.Name`` object.

    * A RDN object, the RDN will copied respecting the constructors
      keyword configuration parameters and appended in order.

    * A DN object, the RDN's in the DN are copied respecting the
      constructors keyword configuration parameters and appended in
      order.

    Single DN Examples:

    DN(('cn', 'Bob'))                    # tuple yields 1 RDN
    DN(['cn', 'Bob'])                    # list yields 1 RDN
    DN('cn=Bob')                         # DN syntax with 1 RDN
    DN(RDN('cn', 'Bob'))                 # RDN object adds 1 RDN

    Multiple RDN Examples:

    DN(('cn', 'Bob'),('ou', 'people'))   # 2 tuples yields 2 RDN's
                                         # 2 RDN's total
    DN('cn=Bob,ou=people')               # DN syntax with 2 RDN's
                                         # 2 RDN's total
    DN(RDN('cn', 'Bob'),RDN('ou', 'people')) # 2 RDN objects
                                         # 2 RDN's total
    DN(('cn', 'Bob'), 'ou=people')       # 1st tuple adds 1 RDN
                                         # 2nd DN syntax string adds 1 RDN
                                         # 2 RDN's total

    base_dn = DN('dc=redhat,dc=com')
    container_dn = DN('cn=sudorules,cn=sudo')
    DN(('cn', 'Bob'), container_dn, base_dn)
                                         # 1st arg adds 1 RDN, cn=Bob
                                         # 2nd arg adds 2 RDN's, cn=sudorules,cn=sudo
                                         # 3rd arg adds 2 RDN's, dc=redhat,dc=com
                                         # 5 RDN's total

    Note: The RHS of a slice assignment is interpreted exactly in the
    same manner as the constructor argument list (see above examples).

    DN objects support iteration over their RDN members. You can iterate all
    RDN members via any Python iteration syntax. DN objects support full Python
    indexing using bracket [] notation. Examples:

    len(dn)         # return the number of RDN's
    dn[0]           # indexing the first RDN
    dn['cn']        # index by RDN attr, returns RDN value
    for rdn in dn:  # iterate over each RDN
    dn[:]           # a slice, in this case a copy of each RDN

    WARNING: When indexing by attr (e.g. dn['cn']) there is a
    possibility more than one RDN has the same attr name as the index
    key. The default behavior is to return the value of the first RDN
    whose attr matches the index key. If it's important the attr
    belong to a specific RDN (e.g. the first) then this is the
    suggested construct:

    try:
        cn = dn[0]['cn']
    except (IndexError, KeyError):
        raise ValueError("dn '%s' missing expected cn as first attribute" % dn)

    The IndexError catches a DN which does not have the expected
    number of RDN's and the KeyError catches the case where the
    indexed RDN does not have the expected attr.

    DN objects support slices.

    # Get the first two RDN's using slices
    dn[0:2]

    # Get the last two RDN's using slices
    dn[-2:]

    # Get a list of all RDN's using slices
    dn[:]

    DN objects support equality testing and comparison. See RDN for the
    definition of the comparison method.

    DN objects implement startswith(), endswith() and the "in" membership
    operator. You may pass a DN or RDN object to these. Examples:

    # Test if dn ends with the contents of base_dn
    if dn.endswith(base_dn):
    # Test if dn starts with a rdn
    if dn.startswith(rdn1):
    # Test if a container is present in a dn
    if container_dn in dn:

    DN objects support concatenation and addition with other DN's or RDN's
    or strings (interpreted as RFC 4514 DN syntax).

    # yields a new DN object with the RDN's of dn2 appended to the RDN's of dn1
    dn1 + dn2

    # yields a new DN object with the rdn1 appended to the RDN's of dn1
    dn1 + rdn1

    The str method of an DN returns the string representation in RFC 4514 DN
    syntax with proper escaping.
    '''

    # class hooks: subclasses may substitute their own AVA/RDN types
    AVA_type = AVA
    RDN_type = RDN

    def __init__(self, *args, **kwds):
        # NOTE(review): **kwds is accepted but ignored; presumably kept for
        # signature compatibility with older callers -- confirm before removal.
        self.rdns = self._rdns_from_sequence(args)

    def _copy_rdns(self, rdns=None):
        # return a fresh nested-list copy of the internal RDN structure
        if not rdns:
            rdns = self.rdns
        return [[list(a) for a in rdn] for rdn in rdns]

    def _rdns_from_value(self, value):
        # Convert one constructor argument into a list of openldap RDN's.
        if isinstance(value, str):
            try:
                # NOTE(review): the inner isinstance check is redundant --
                # this branch is only reached when value is already a str.
                if isinstance(value, str):
                    value = val_encode(value)
                rdns = str2dn(value)
            except DECODING_ERROR:
                raise ValueError("malformed RDN string = \"%s\"" % value)
            for rdn in rdns:
                sort_avas(rdn)
        elif isinstance(value, DN):
            rdns = value._copy_rdns()
        elif isinstance(value, (tuple, list, AVA)):
            # a bare <attr,value> pair or AVA becomes a single-AVA RDN
            ava = get_ava(value)
            rdns = [[ava]]
        elif isinstance(value, RDN):
            rdns = [value.to_openldap()]
        elif isinstance(value, cryptography.x509.name.Name):
            # x509 Names are stored in reverse (X.500) order relative to the
            # LDAP ordering used here, hence the reversed()
            rdns = list(reversed([
                [get_ava(
                    ATTR_NAME_BY_OID.get(ava.oid, ava.oid.dotted_string),
                    ava.value) for ava in rdn]
                for rdn in value.rdns
            ]))
            for rdn in rdns:
                sort_avas(rdn)
        else:
            raise TypeError(
                "must be str, unicode, tuple, Name, RDN or DN, got %s instead"
                % type(value))
        return rdns

    def _rdns_from_sequence(self, seq):
        # flatten the constructor argument sequence into one RDN list
        rdns = []
        for item in seq:
            rdn = self._rdns_from_value(item)
            rdns.extend(rdn)
        return rdns

    def __deepcopy__(self, memo):
        # DN's are immutable, so a deep copy may safely be the object itself
        return self

    def _get_rdn(self, rdn):
        # wrap an internal RDN structure in an RDN object (raw = no re-copy)
        return self.RDN_type(*rdn, **{'raw': True})

    def ldap_text(self):
        # LDAP (RFC 4514) ordering: most-specific RDN first
        return dn2str(self.rdns)

    def x500_text(self):
        # X.500 ordering: least-specific RDN first
        return dn2str(reversed(self.rdns))

    def __str__(self):
        return self.ldap_text()

    def __repr__(self):
        return "%s.%s('%s')" % (self.__module__, self.__class__.__name__, self.__str__())

    def _next(self):
        for rdn in self.rdns:
            yield self._get_rdn(rdn)

    def __iter__(self):
        return self._next()

    def __len__(self):
        return len(self.rdns)

    def __getitem__(self, key):
        # int -> RDN object; slice -> new DN; str -> first matching AVA value
        if isinstance(key, int):
            return self._get_rdn(self.rdns[key])
        if isinstance(key, slice):
            cls = self.__class__
            new_dn = cls.__new__(cls)
            new_dn.rdns = self.rdns[key]
            return new_dn
        elif isinstance(key, str):
            for rdn in self.rdns:
                for ava in rdn:
                    if key == val_decode(ava[0]):
                        return val_decode(ava[1])
            raise KeyError("\"%s\" not found in %s" % (key, self.__str__()))
        else:
            raise TypeError("unsupported type for DN indexing, must be int, basestring or slice; not %s" % \
                            (key.__class__.__name__))

    def __hash__(self):
        # Hash is computed from DN's string representation.
        #
        # Because attrs & values are comparison case-insensitive the
        # hash value between two objects which compare as equal but
        # differ in case must yield the same hash value.
        str_dn = ';,'.join([
            '++'.join([
                '=='.join((atype, avalue or ''))
                for atype, avalue, _dummy in rdn
            ]) for rdn in self.rdns
        ])
        return hash(str_dn.lower())

    def __eq__(self, other):
        # Try coercing to DN, if successful compare to coerced object
        if isinstance(other, (str, RDN, AVA)):
            try:
                other_dn = DN(other)
                return self.__eq__(other_dn)
            except Exception:
                return False

        # If it's not an DN it can't be equal
        if not isinstance(other, DN):
            return False

        if len(self) != len(other):
            return False

        # Perform comparison between objects of same type
        return self._cmp_sequence(other, 0, len(self)) == 0

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # shorter DN's sort first; equal lengths compare RDN by RDN
        if not isinstance(other, DN):
            raise TypeError("expected DN but got %s" % (other.__class__.__name__))

        if len(self) != len(other):
            return len(self) < len(other)

        return self._cmp_sequence(other, 0, len(self)) < 0

    def _cmp_sequence(self, pattern, self_start, pat_len):
        # three-way compare of pat_len RDN's of self (from self_start)
        # against the first pat_len RDN's of pattern
        self_idx = self_start
        pat_idx = 0
        while pat_idx < pat_len:
            r = cmp_rdns(self.rdns[self_idx], pattern.rdns[pat_idx])
            if r != 0:
                return r
            self_idx += 1
            pat_idx += 1
        return 0

    def __add__(self, other):
        return self.__class__(self, other)

    # The implementation of startswith, endswith, tailmatch, adjust_indices
    # was based on the Python's stringobject.c implementation

    def startswith(self, prefix, start=0, end=sys.maxsize):
        '''
        Return True if the dn starts with the specified prefix (either a DN or
        RDN object), False otherwise. With optional start, test dn beginning at
        that position. With optional end, stop comparing dn at that position.
        prefix can also be a tuple of dn's or rdn's to try.
        '''
        if isinstance(prefix, tuple):
            for pat in prefix:
                if self._tailmatch(pat, start, end, -1):
                    return True
            return False

        return self._tailmatch(prefix, start, end, -1)

    def endswith(self, suffix, start=0, end=sys.maxsize):
        '''
        Return True if dn ends with the specified suffix (either a DN or RDN
        object), False otherwise. With optional start, test dn beginning at
        that position. With optional end, stop comparing dn at that position.
        suffix can also be a tuple of dn's or rdn's to try.
        '''
        if isinstance(suffix, tuple):
            for pat in suffix:
                if self._tailmatch(pat, start, end, +1):
                    return True
            return False

        return self._tailmatch(suffix, start, end, +1)

    def _tailmatch(self, pattern, start, end, direction):
        '''
        Matches the end (direction >= 0) or start (direction < 0) of self
        against pattern (either a DN or RDN), using the start and end
        arguments. Returns 0 if not found and 1 if found.
        '''
        if isinstance(pattern, RDN):
            # promote a single RDN to a one-RDN DN for uniform handling
            pattern = DN(pattern)
        if isinstance(pattern, DN):
            pat_len = len(pattern)
        else:
            raise TypeError("expected DN or RDN but got %s" % (pattern.__class__.__name__))

        self_len = len(self)

        start, end = _adjust_indices(start, end, self_len)

        if direction < 0:  # startswith
            if start + pat_len > self_len:
                return 0
        else:  # endswith
            if end - start < pat_len or start > self_len:
                return 0
            if end - pat_len >= start:
                # anchor the comparison window at the tail
                start = end - pat_len

        if end - start >= pat_len:
            return not self._cmp_sequence(pattern, start, pat_len)
        return 0

    def __contains__(self, other):
        """Return the outcome of the test other in self.

        Note the reversed operands.
        """
        # naive sliding-window scan for the other DN's RDN sequence
        if isinstance(other, RDN):
            other = DN(other)
        if isinstance(other, DN):
            other_len = len(other)
            end = len(self) - other_len
            i = 0
            while i <= end:
                result = self._cmp_sequence(other, i, other_len)
                if result == 0:
                    return True
                i += 1
            return False
        raise TypeError(
            "expected DN or RDN but got %s" % other.__class__.__name__
        )

    def find(self, pattern, start=None, end=None):
        '''
        Return the lowest index in the DN where pattern DN is found,
        such that pattern is contained in the range [start, end]. Optional
        arguments start and end are interpreted as in slice notation. Return
        -1 if pattern is not found.
        '''
        if isinstance(pattern, DN):
            pat_len = len(pattern)
        else:
            raise TypeError("expected DN but got %s" % (pattern.__class__.__name__))

        self_len = len(self)

        if start is None:
            start = 0
        if end is None:
            end = self_len

        start, end = _adjust_indices(start, end, self_len)

        i = start
        stop = max(start, end - pat_len)
        while i <= stop:
            result = self._cmp_sequence(pattern, i, pat_len)
            if result == 0:
                return i
            i += 1

        return -1

    def index(self, pattern, start=None, end=None):
        '''
        Like find() but raise ValueError when the pattern is not found.
        '''
        i = self.find(pattern, start, end)
        if i == -1:
            raise ValueError("pattern not found")
        return i

    def rfind(self, pattern, start=None, end=None):
        '''
        Return the highest index in the DN where pattern DN is found,
        such that pattern is contained in the range [start, end]. Optional
        arguments start and end are interpreted as in slice notation. Return
        -1 if pattern is not found.
        '''
        if isinstance(pattern, DN):
            pat_len = len(pattern)
        else:
            raise TypeError("expected DN but got %s" % (pattern.__class__.__name__))

        self_len = len(self)

        if start is None:
            start = 0
        if end is None:
            end = self_len

        start, end = _adjust_indices(start, end, self_len)

        # scan backwards from the last possible match position
        i = max(start, min(end, self_len - pat_len))
        stop = start
        while i >= stop:
            result = self._cmp_sequence(pattern, i, pat_len)
            if result == 0:
                return i
            i -= 1

        return -1

    def rindex(self, pattern, start=None, end=None):
        '''
        Like rfind() but raise ValueError when the pattern is not found.
        '''
        i = self.rfind(pattern, start, end)
        if i == -1:
            raise ValueError("pattern not found")
        return i
# Map of x509 OIDs to the short attribute names used when converting a
# cryptography.x509.name.Name into a DN (see DN._rdns_from_value); unmapped
# OIDs fall back to their dotted-string form there.
ATTR_NAME_BY_OID = {
    cryptography.x509.oid.NameOID.COMMON_NAME: 'CN',
    cryptography.x509.oid.NameOID.COUNTRY_NAME: 'C',
    cryptography.x509.oid.NameOID.LOCALITY_NAME: 'L',
    cryptography.x509.oid.NameOID.STATE_OR_PROVINCE_NAME: 'ST',
    cryptography.x509.oid.NameOID.ORGANIZATION_NAME: 'O',
    cryptography.x509.oid.NameOID.ORGANIZATIONAL_UNIT_NAME: 'OU',
    cryptography.x509.oid.NameOID.SERIAL_NUMBER: 'serialNumber',
    cryptography.x509.oid.NameOID.SURNAME: 'SN',
    cryptography.x509.oid.NameOID.GIVEN_NAME: 'givenName',
    cryptography.x509.oid.NameOID.TITLE: 'title',
    cryptography.x509.oid.NameOID.GENERATION_QUALIFIER: 'generationQualifier',
    cryptography.x509.oid.NameOID.DN_QUALIFIER: 'dnQualifier',
    cryptography.x509.oid.NameOID.PSEUDONYM: 'pseudonym',
    cryptography.x509.oid.NameOID.DOMAIN_COMPONENT: 'DC',
    cryptography.x509.oid.NameOID.EMAIL_ADDRESS: 'E',
    cryptography.x509.oid.NameOID.JURISDICTION_COUNTRY_NAME:
        'incorporationCountry',
    cryptography.x509.oid.NameOID.JURISDICTION_LOCALITY_NAME:
        'incorporationLocality',
    cryptography.x509.oid.NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME:
        'incorporationState',
    cryptography.x509.oid.NameOID.BUSINESS_CATEGORY: 'businessCategory',
    # OIDs without a NameOID constant in cryptography
    cryptography.x509.ObjectIdentifier('2.5.4.9'): 'STREET',
    cryptography.x509.ObjectIdentifier('2.5.4.17'): 'postalCode',
    cryptography.x509.ObjectIdentifier('0.9.2342.19200300.100.1.1'): 'UID',
}
| 49,450
|
Python
|
.py
| 1,122
| 37.072193
| 109
| 0.640929
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,615
|
setup.py
|
freeipa_freeipa/ipapython/setup.py
|
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""FreeIPA python support library
FreeIPA is a server for identity, policy, and audit.
"""
from os.path import abspath, dirname
import sys
# Entry point: delegate packaging metadata to the shared ipasetup() helper
# that lives one directory up in the source tree.
if __name__ == '__main__':
    # include ../ for ipasetup.py
    sys.path.append(dirname(dirname(abspath(__file__))))
    from ipasetup import ipasetup  # noqa: E402
    ipasetup(
        name="ipapython",
        doc=__doc__,
        package_dir={'ipapython': ''},
        packages=[
            "ipapython",
            "ipapython.install"
        ],
        install_requires=[
            "cffi",
            "cryptography",
            "dnspython",
            "gssapi",
            # "ipalib",  # circular dependency
            "ipaplatform",
            "netaddr",
            "six",
        ],
        extras_require={
            # optional features, installed on demand
            "ldap": ["python-ldap"],  # ipapython.ipaldap
            # CheckedIPAddress.get_matching_interface
            "ifaddr": ["ifaddr"],
        },
    )
| 1,655
|
Python
|
.py
| 49
| 27.632653
| 71
| 0.635456
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,616
|
kerberos.py
|
freeipa_freeipa/ipapython/kerberos.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
classes/utils for Kerberos principal name validation/manipulation
"""
import re
import six
from ipapython.ipautil import escape_seq, unescape_seq
if six.PY3:
    unicode = str

# Split principal strings on '@' / '/' only when the separator is not
# preceded by a backslash (i.e. not escaped per Kerberos quoting rules).
REALM_SPLIT_RE = re.compile(r'(?<!\\)@')
COMPONENT_SPLIT_RE = re.compile(r'(?<!\\)/')
def parse_princ_name_and_realm(principal, realm=None):
    """
    split principal to the <principal_name>, <realm> components

    :param principal: unicode representation of principal
    :param realm: fallback realm, used only when `principal` itself does not
        contain a realm part; a realm present in `principal` always wins
    :returns: tuple containing the principal name and realm
        realm will be `None` if no realm was found in the input string and
        no fallback `realm` was supplied
    :raises ValueError: if `principal` contains more than one unescaped '@'
    """
    realm_and_name = REALM_SPLIT_RE.split(principal)
    if len(realm_and_name) > 2:
        raise ValueError(
            "Principal is not in <name>@<realm> format")

    principal_name = realm_and_name[0]

    try:
        parsed_realm = realm_and_name[1]
    except IndexError:
        # no realm in the string: fall back to the caller-supplied realm
        # (simplified from the redundant ``None if realm is None else realm``)
        parsed_realm = realm

    return principal_name, parsed_realm
def split_principal_name(principal_name):
    """
    Split principal name (without realm) into the components

    NOTE: operates on the following RFC 1510 types:
        * NT-PRINCIPAL
        * NT-SRV-INST
        * NT-SRV-HST

    Enterprise principals (NT-ENTERPRISE, see RFC 6806) are also handled

    :param principal_name: unicode representation of principal name
    :returns: tuple of individual components (i.e. primary name for
        NT-PRINCIPAL and NT-ENTERPRISE, primary name and instance for others)
    """
    # split only on unescaped '/' separators (see COMPONENT_SPLIT_RE)
    return tuple(COMPONENT_SPLIT_RE.split(principal_name))
@six.python_2_unicode_compatible
class Principal:
"""
Container for the principal name and realm according to RFC 1510
"""
    def __init__(self, components, realm=None):
        """Build a Principal from text, another Principal, or components.

        :param components: a text principal ("name@REALM" syntax), an
            existing Principal (copied), or an iterable of name components
        :param realm: realm to use; for text input it is a fallback/override
            handled by _parse_from_text, for a Principal input it overrides
            the copied realm when not None
        :raises TypeError: when bytes are passed instead of text
        """
        if isinstance(components, bytes):
            raise TypeError(
                "Cannot create a principal object from bytes: {!r}".format(
                    components)
            )
        elif isinstance(components, str):
            # parse principal components from realm
            self.components, self.realm = self._parse_from_text(
                components, realm)
        elif isinstance(components, Principal):
            self.components = components.components
            self.realm = components.realm if realm is None else realm
        else:
            self.components = tuple(components)
            self.realm = realm
def __eq__(self, other):
if not isinstance(other, Principal):
return False
return (self.components == other.components and
self.realm == other.realm)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return unicode(self) < unicode(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
def __hash__(self):
return hash(self.components + (self.realm,))
def _parse_from_text(self, principal, realm=None):
r"""
parse individual principal name components from the string
representation of the principal. This is done in three steps:
1.) split the string at the unescaped '@'
2.) unescape any leftover '\@' sequences
3.) split the primary at the unescaped '/'
4.) unescape leftover '\/'
:param principal: unicode representation of the principal name
:param realm: if not None, this realm name will be used instead of the
one parsed from `principal`
:returns: tuple containing the principal name components and realm
"""
principal_name, parsed_realm = parse_princ_name_and_realm(
principal, realm=realm)
(principal_name,) = unescape_seq(u'@', principal_name)
if parsed_realm is not None:
(parsed_realm,) = unescape_seq(u'@', parsed_realm)
name_components = split_principal_name(principal_name)
name_components = unescape_seq(u'/', *name_components)
return name_components, parsed_realm
@property
def is_user(self):
return len(self.components) == 1
@property
def is_enterprise(self):
return self.is_user and u'@' in self.components[0]
@property
def is_service(self):
return len(self.components) > 1
@property
def is_host(self):
return (self.is_service and len(self.components) == 2 and
self.components[0] == u'host')
@property
def username(self):
if self.is_user:
return self.components[0]
else:
raise ValueError(
"User name is defined only for user and enterprise principals")
@property
def upn_suffix(self):
if not self.is_enterprise:
raise ValueError("Only enterprise principals have UPN suffix")
return self.components[0].split(u'@')[1]
@property
def hostname(self):
if not (self.is_host or self.is_service):
raise ValueError(
"hostname is defined for host and service principals")
return self.components[-1]
@property
def service_name(self):
if not self.is_service:
raise ValueError(
"Only service principals have meaningful service name")
return u'/'.join(c for c in escape_seq('/', *self.components[:-1]))
def __str__(self):
"""
return the unicode representation of principal
works in reverse of the `from_text` class method
"""
name_components = escape_seq(u'/', *self.components)
name_components = escape_seq(u'@', *name_components)
principal_string = u'/'.join(name_components)
if self.realm is not None:
(realm,) = escape_seq(u'@', self.realm)
principal_string = u'@'.join([principal_string, realm])
return principal_string
def __repr__(self):
return "{0.__module__}.{0.__name__}('{1}')".format(
self.__class__, self)
| 6,300
|
Python
|
.py
| 155
| 32.412903
| 79
| 0.632382
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,617
|
config.py
|
freeipa_freeipa/ipapython/config.py
|
# Authors: Karl MacMillan <kmacmill@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
# pylint: disable=deprecated-module
from optparse import (
Option, Values, OptionParser, IndentedHelpFormatter, OptionValueError)
# pylint: enable=deprecated-module
from copy import copy
from configparser import ConfigParser as SafeConfigParser
from urllib.parse import urlsplit
import functools
from dns.exception import DNSException
import dns.name
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython.dnsutil import query_srv
from ipapython.ipautil import CheckedIPAddress, CheckedIPAddressLoopback
class IPAConfigError(Exception):
    """Raised when a required IPA default (realm/server/domain) is missing."""

    def __init__(self, msg=''):
        # keep the message both on the standard Exception args and as a
        # dedicated attribute used by __repr__/__str__
        super(IPAConfigError, self).__init__(msg)
        self.msg = msg

    def __repr__(self):
        return self.msg

    __str__ = __repr__
class IPAFormatter(IndentedHelpFormatter):
    """Our own optparse formatter that indents multiple lined usage string."""

    def format_usage(self, usage):
        """Render *usage*, aligning continuation lines under "Usage:"."""
        label = "Usage:"
        pad = " " * len(label)
        first, *rest = usage.split("\n")
        pieces = ["%s %s\n" % (label, first)]
        pieces.extend("%s %s\n" % (pad, line) for line in rest)
        return "".join(pieces)
def check_ip_option(option, opt, value, allow_loopback=False):
    """optparse type checker that converts *value* to a checked IP address.

    :param allow_loopback: also accept loopback addresses when True
    :raises OptionValueError: if the value is not a usable IP address
    """
    address_cls = CheckedIPAddressLoopback if allow_loopback else CheckedIPAddress
    try:
        return address_cls(value)
    except Exception as exc:
        raise OptionValueError("option {}: invalid IP address {}: {}"
                               .format(opt, value, exc))
def check_dn_option(option, opt, value):
    """optparse type checker that converts *value* to a DN instance.

    :raises OptionValueError: if the value cannot be parsed as a DN
    """
    try:
        dn = DN(value)
    except Exception as exc:
        raise OptionValueError("option %s: invalid DN: %s" % (opt, exc))
    return dn
def check_constructor(option, opt, value):
    """optparse type checker that delegates conversion to option.constructor.

    The IPAOption 'constructor' attribute holds a callable (typically a
    class) used to convert the raw command-line string.

    :raises OptionValueError: if the constructor rejects the value
    """
    constructor = option.constructor
    assert constructor is not None, \
        "Oops! Developer forgot to set 'constructor' kwarg"
    try:
        return constructor(value)
    except Exception as exc:
        raise OptionValueError("option {} invalid: {}".format(opt, exc))
class IPAOption(Option):
    """
    optparse.Option subclass with support of options labeled as
    security-sensitive such as passwords.

    Extra attributes:
      * ``sensitive`` -- marks the option so IPAOptionParser.get_safe_opts()
        filters its value out (e.g. before logging)
      * ``constructor`` -- callable used by the 'constructor' type to
        convert the raw string value (see check_constructor)
    """
    ATTRS = Option.ATTRS + ["sensitive", "constructor"]
    TYPES = Option.TYPES + ("ip", "dn", "constructor", "ip_with_loopback")
    # copy so we do not mutate optparse's shared checker table
    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
    TYPE_CHECKER["ip"] = check_ip_option
    TYPE_CHECKER["ip_with_loopback"] = functools.partial(check_ip_option,
                                                         allow_loopback=True)
    TYPE_CHECKER["dn"] = check_dn_option
    TYPE_CHECKER["constructor"] = check_constructor
class IPAOptionParser(OptionParser):
    """
    optparse.OptionParser subclass that uses IPAOption by default
    for storing options.
    """
    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=IPAOption,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None):
        # forward everything unchanged; only the default option_class
        # differs from stock OptionParser
        super(IPAOptionParser, self).__init__(
            usage, option_list, option_class, version, conflict_handler,
            description, formatter, add_help_option, prog)

    def get_safe_opts(self, opts):
        """
        Returns all options except those with sensitive=True in the same
        fashion as parse_args would
        """
        # map each option's dest to its sensitivity flag
        sensitivity = {
            o.dest: o.sensitive
            for o in self._get_all_options()
            if hasattr(o, 'sensitive')
        }
        safe = {
            dest: value
            for dest, value in opts.__dict__.items()
            if not sensitivity[dest]
        }
        return Values(safe)
def verify_args(parser, args, needed_args=None):
    """Verify that we have all positional arguments we need, if not, exit.

    :param parser: parser whose .error() is invoked on mismatch
    :param args: positional arguments actually supplied
    :param needed_args: space-separated names of required arguments
    """
    needed_list = needed_args.split(" ") if needed_args else []
    if len(args) > len(needed_list):
        parser.error("too many arguments")
    elif len(args) < len(needed_list):
        # report the first missing argument by name
        parser.error("no %s specified" % needed_list[len(args)])
class IPAConfig:
    """Holds the default realm, server list and domain for this host.

    Populated by init_config(); the get_* accessors raise IPAConfigError
    when the corresponding value was never resolved.
    """

    def __init__(self):
        self.default_realm = None
        self.default_server = []
        self.default_domain = None

    def get_realm(self):
        if not self.default_realm:
            raise IPAConfigError("no default realm")
        return self.default_realm

    def get_server(self):
        if not len(self.default_server):
            raise IPAConfigError("no default server")
        return self.default_server

    def get_domain(self):
        if not self.default_domain:
            raise IPAConfigError("no default domain")
        return self.default_domain
# Global library config
# Module-level singleton filled by __parse_config()/__discover_config()
# via init_config(); read by callers through the get_* accessors.
config = IPAConfig()
def __parse_config(discover_server=True):
    """Fill missing defaults on the global ``config`` from the IPA config file.

    Values already set on ``config`` are kept.  Every lookup is
    best-effort: a missing file, section or option is silently ignored.

    :param discover_server: when True, also derive a server from the
        ``xmlrpc_uri`` option
    """
    parser = SafeConfigParser()
    parser.read(paths.IPA_DEFAULT_CONF)

    try:
        if not config.default_realm:
            config.default_realm = parser.get("global", "realm")
    except Exception:
        pass

    if discover_server:
        try:
            uri = parser.get("global", "xmlrpc_uri")
            # netloc of the XML-RPC URI is the server host
            config.default_server.append(urlsplit(uri).netloc)
        except Exception:
            pass

    try:
        if not config.default_domain:
            config.default_domain = parser.get("global", "domain")
    except Exception:
        pass
def __discover_config(discover_server = True):
    """Best-effort DNS SRV discovery of the IPA domain and servers.

    Fills in config.default_domain (realm lowercased, else walking up the
    local FQDN's parent domains) and optionally appends discovered LDAP
    servers to config.default_server.

    Returns False when no domain could be derived from the local FQDN,
    None otherwise; any other failure is deliberately swallowed so
    discovery never breaks the caller.
    """
    servers = []
    try:
        if not config.default_domain:
            # try once with REALM -> domain
            domain = str(config.default_realm).lower()
            name = "_ldap._tcp." + domain

            try:
                servers = query_srv(name)
            except DNSException:
                # try cycling on domain components of FQDN
                # pylint: disable=ipa-forbidden-import
                from ipalib.constants import FQDN
                # pylint: enable=ipa-forbidden-import
                try:
                    domain = dns.name.from_text(FQDN)
                except DNSException:
                    return False

                while True:
                    domain = domain.parent()

                    # reached the DNS root without finding a SRV record
                    if str(domain) == '.':
                        return False
                    name = "_ldap._tcp.%s" % domain
                    try:
                        servers = query_srv(name)
                        break
                    except DNSException:
                        pass

            config.default_domain = str(domain).rstrip(".")

        if discover_server:
            # the realm-based query above may already have found servers
            if not servers:
                name = "_ldap._tcp.%s." % config.default_domain
                try:
                    servers = query_srv(name)
                except DNSException:
                    pass

            for server in servers:
                hostname = str(server.target).rstrip(".")
                config.default_server.append(hostname)

    except Exception:
        # discovery is strictly best-effort
        pass
    return None
def add_standard_options(parser):
    """Attach the standard --realm/--server/--domain overrides to *parser*."""
    for flag, dest, help_text in (
            ("--realm", "realm", "Override default IPA realm"),
            ("--server", "server",
             "Override default FQDN of IPA server"),
            ("--domain", "domain", "Override default IPA DNS domain")):
        parser.add_option(flag, dest=dest, help=help_text)
def init_config(options=None):
    """Populate the global ``config`` from CLI options, the IPA config
    file and DNS discovery (in that order of precedence).

    :param options: optparse Values carrying optional ``realm``,
        ``domain`` and ``server`` attributes (as produced by a parser set
        up with add_standard_options)
    :raises IPAConfigError: if the realm, server or domain cannot be
        determined from any source
    """
    if options:
        config.default_realm = options.realm
        config.default_domain = options.domain
        if options.server:
            config.default_server.extend(options.server.split(","))

    # skip SRV discovery when servers were supplied explicitly
    discover_server = not config.default_server

    __parse_config(discover_server)
    __discover_config(discover_server)

    # make sure the server list only contains unique items, preserving
    # the original order (dict keeps insertion order; replaces the old
    # quadratic membership-test loop)
    config.default_server = list(dict.fromkeys(config.default_server))

    if not config.default_realm:
        raise IPAConfigError("IPA realm not found in DNS, in the config file (/etc/ipa/default.conf) or on the command line.")
    if not config.default_server:
        raise IPAConfigError("IPA server not found in DNS, in the config file (/etc/ipa/default.conf) or on the command line.")
    if not config.default_domain:
        raise IPAConfigError("IPA domain not found in the config file (/etc/ipa/default.conf) or on the command line.")
| 9,406
|
Python
|
.py
| 241
| 30.020747
| 127
| 0.61686
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,618
|
ssh.py
|
freeipa_freeipa/ipapython/ssh.py
|
# Authors:
# Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
SSH utilities.
"""
import base64
import re
import struct
from hashlib import sha1
from hashlib import sha256
import six
if six.PY3:
unicode = str
__all__ = ['SSHPublicKey']
# An authorized_keys line without options: "<keytype> <base64-key> [comment]".
# NUL/CR/LF are rejected everywhere; the trailing comment is optional.
OPENSSH_BASE_REGEX = re.compile(r'^[\t ]*(?P<keytype>[^\x00\n\r]+?) [\t ]*(?P<key>[^\x00\n\r]+?)(?:[\t ]+(?P<comment>[^\x00\n\r]*?)[\t ]*)?$')
# A single leading option: bare `name` or `name="value"` where the quoted
# value may contain escaped quotes (\").
OPENSSH_OPTIONS_REGEX = re.compile(r'(?P<name>[-0-9A-Za-z]+)(?:="(?P<value>(?:\\"|[^\x00\n\r"])*)")?')
class SSHPublicKey:
    """
    SSH public key object.

    Accepts a key in three forms: a raw RFC 4253 key blob (bytes), the
    base64 encoding of such a blob, or a full OpenSSH authorized_keys
    line (with or without a leading options list).
    """

    __slots__ = ('_key', '_keytype', '_comment', '_options')

    def __init__(self, key, comment=None, options=None, encoding='utf-8'):
        # copy constructor
        if isinstance(key, SSHPublicKey):
            self._key = key._key
            self._keytype = key._keytype
            self._comment = key._comment
            self._options = key._options
            return

        if not isinstance(key, (bytes, unicode)):
            raise TypeError("argument must be bytes or unicode, got %s" % type(key).__name__)

        # All valid public key blobs start with 3 null bytes (see RFC 4253
        # section 6.6, RFC 4251 section 5 and RFC 4250 section 4.6)
        if isinstance(key, bytes) and key[:3] != b'\0\0\0':
            # not a raw blob -- decode and try the textual formats below
            key = key.decode(encoding)

        valid = self._parse_raw(key) or self._parse_base64(key) or self._parse_openssh(key)
        if not valid:
            raise ValueError("not a valid SSH public key")

        # explicit arguments override whatever parsing extracted
        if comment is not None:
            self._comment = comment
        if options is not None:
            self._options = options

    def _parse_raw(self, key):
        """Try *key* as a raw RFC 4253 blob; return True on success."""
        if not isinstance(key, bytes):
            return False

        # blob layout: 4-byte big-endian length, then the key type string
        try:
            (ktlen,) = struct.unpack('>I', key[:4])
        except struct.error:
            return False

        if ktlen < 1 or ktlen > len(key) - 4:
            return False

        try:
            keytype = key[4:ktlen+4].decode('ascii')
        except UnicodeDecodeError:
            return False

        self._key = key
        self._keytype = keytype
        self._options = {}
        self._comment = None

        return True

    def _parse_base64(self, key):
        """Try *key* as a base64-encoded raw blob; return True on success."""
        if not isinstance(key, unicode):
            return False

        try:
            key = base64.b64decode(key)
        except (TypeError, ValueError):
            return False

        return self._parse_raw(key)

    def _parse_openssh_without_options(self, key):
        """Parse an authorized_keys line that carries no options list."""
        match = OPENSSH_BASE_REGEX.match(key)
        if not match:
            return False

        if not self._parse_base64(match.group('key')):
            return False

        # the declared type must match the one embedded in the blob
        if self._keytype != match.group('keytype'):
            return False

        self._comment = match.group('comment')

        return True

    def _parse_openssh_with_options(self, key):
        """Parse an authorized_keys line with a leading options list."""
        key = key.lstrip('\t ')

        # Options that allow multiple entries
        multiple_allowed = ('permitopen', 'permitlisten')

        options = {}
        while True:
            match = OPENSSH_OPTIONS_REGEX.match(key)
            if not match:
                return False

            name = match.group('name').lower()
            value = match.group('value')
            if value:
                # un-escape \" inside the quoted value
                value = value.replace('\\"', '"')

            # flag options keep value None; repeatable options collect
            # their values into a list
            if name in multiple_allowed:
                if name in options:
                    options[name].append(value)
                else:
                    options[name] = [value]
            else:
                options[name] = value

            # consume the matched option plus the following separator;
            # anything but ',' ends the options list
            key = key[len(match.group(0)):]
            key0, key = key[:1], key[1:]
            if key0 != ',':
                break

        # the remainder must be a plain "<keytype> <key> [comment]" part
        if not self._parse_openssh_without_options(key):
            return False

        self._options = options

        return True

    def _parse_openssh(self, key):
        """Try *key* as an authorized_keys line, options or not."""
        if not isinstance(key, unicode):
            return False

        if self._parse_openssh_without_options(key):
            return True
        else:
            return self._parse_openssh_with_options(key)

    def keytype(self):
        # e.g. 'ssh-rsa', 'ssh-ed25519'
        return self._keytype

    def comment(self):
        # trailing authorized_keys comment, or None
        return self._comment

    def has_options(self):
        return bool(self._options)

    def openssh(self):
        """Render the key back into authorized_keys line format."""
        key = base64.b64encode(self._key).decode('ascii')
        out = u'%s %s' % (self._keytype, key)

        if self._options:
            options = []
            for name in sorted(self._options):
                value = self._options[name]
                if value is None:
                    # flag option without a value
                    options.append(name)
                elif type(value) is list:
                    # repeatable option: one name="value" per entry
                    for v in value:
                        v = v.replace('"', '\\"')
                        options.append(u'%s="%s"' % (name, v))
                else:
                    value = value.replace('"', '\\"')
                    options.append(u'%s="%s"' % (name, value))
            options = u','.join(options)

            out = u'%s %s' % (options, out)

        if self._comment:
            out = u'%s %s' % (out, self._comment)

        return out

    def fingerprint_hex_sha256(self):
        # OpenSSH trims the trailing '=' of base64 sha256 FP representation
        fp = base64.b64encode(sha256(self._key).digest()).rstrip(b'=')
        return u'SHA256:{fp}'.format(fp=fp.decode('utf-8'))

    def _fingerprint_dns(self, fpfunc, fptype):
        """Return an SSHFP-style record string, or None for unknown types."""
        # SSHFP algorithm numbers; vendor types (containing '@') excluded
        if self._keytype == 'ssh-rsa':
            keytype = 1
        elif self._keytype == 'ssh-dss':
            keytype = 2
        elif self._keytype.startswith('ecdsa-sha2-') and '@' not in self._keytype:
            keytype = 3
        elif self._keytype == 'ssh-ed25519':
            keytype = 4
        else:
            return None
        fp = fpfunc(self._key).hexdigest().upper()
        return u'%d %d %s' % (keytype, fptype, fp)

    def fingerprint_dns_sha1(self):
        return self._fingerprint_dns(sha1, 1)

    def fingerprint_dns_sha256(self):
        return self._fingerprint_dns(sha256, 2)
| 6,713
|
Python
|
.py
| 177
| 28.468927
| 142
| 0.564862
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,619
|
session_storage.py
|
freeipa_freeipa/ipapython/session_storage.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
import ctypes
import sys
# krb5 error code: the ccache type does not support the operation
# (e.g. FILE ccaches cannot remove entries)
KRB5_CC_NOSUPP = -1765328137

# pick the platform-specific libkrb5 shared object name
if sys.platform == 'darwin':
    LIBKRB5_FILENAME = 'libkrb5.dylib'
else:
    LIBKRB5_FILENAME = 'libkrb5.so.3'

try:
    LIBKRB5 = ctypes.CDLL(LIBKRB5_FILENAME)
except OSError as e:  # pragma: no cover
    # surface an unloadable libkrb5 as ImportError so callers can treat
    # this module like any other optional import
    raise ImportError(str(e))

# ctypes aliases mirroring the scalar typedefs from krb5/krb5.h
krb5_int32 = ctypes.c_int32
krb5_error_code = krb5_int32
krb5_magic = krb5_error_code
krb5_enctype = krb5_int32
krb5_octet = ctypes.c_uint8
krb5_timestamp = krb5_int32
# ctypes mirrors of the C structs from krb5/krb5.h. Structures with an
# empty _fields_ are opaque handles that are only ever passed around by
# pointer, never dereferenced from Python.

class _krb5_context(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_context"""
    _fields_ = []


class _krb5_ccache(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_ccache"""
    _fields_ = []


class _krb5_data(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_data"""
    _fields_ = [
        ("magic", krb5_magic),
        ("length", ctypes.c_uint),
        ("data", ctypes.c_char_p),
    ]


class krb5_principal_data(ctypes.Structure):  # noqa
    """krb5/krb5.h struct krb5_principal_data"""
    _fields_ = []


class _krb5_keyblock(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_keyblock"""
    _fields_ = [
        ("magic", krb5_magic),
        ("enctype", krb5_enctype),
        ("length", ctypes.c_uint),
        ("contents", ctypes.POINTER(krb5_octet))
    ]


class _krb5_ticket_times(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_ticket_times"""
    _fields_ = [
        ("authtime", krb5_timestamp),
        ("starttime", krb5_timestamp),
        ("endtime", krb5_timestamp),
        ("renew_till", krb5_timestamp),
    ]


class _krb5_address(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_address"""
    _fields_ = []


class _krb5_authdata(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_authdata"""
    _fields_ = []


# convenience aliases matching the krb5.h typedef names
krb5_principal = ctypes.POINTER(krb5_principal_data)
krb5_keyblock = _krb5_keyblock
krb5_ticket_times = _krb5_ticket_times
krb5_boolean = ctypes.c_uint
krb5_flags = krb5_int32
krb5_data = _krb5_data
krb5_address_p = ctypes.POINTER(_krb5_address)
krb5_authdata_p = ctypes.POINTER(_krb5_authdata)
class _krb5_creds(ctypes.Structure):  # noqa
    """krb5/krb5.h struct _krb5_creds

    Field layout must match the C struct exactly; get_data() reads the
    session cookie out of the ``ticket`` member of matching entries.
    """
    _fields_ = [
        ("magic", krb5_magic),
        ("client", krb5_principal),
        ("server", krb5_principal),
        ("keyblock", krb5_keyblock),
        ("times", krb5_ticket_times),
        ("is_skey", krb5_boolean),
        ("ticket_flags", krb5_flags),
        ("addresses", ctypes.POINTER(krb5_address_p)),
        ("ticket", krb5_data),
        ("second_ticket", krb5_data),
        ("authdata", ctypes.POINTER(krb5_authdata_p))
    ]
class KRB5Error(Exception):
    """Raised when a libkrb5 call returns a non-zero error code.

    args are (error_code, function_name, arguments) as set by
    krb5_errcheck().
    """
    pass
def krb5_errcheck(result, func, arguments):
    """ctypes errcheck hook raising KRB5Error on non-zero return codes.

    :param result: the krb5_error_code returned by the C function
    :param func: the ctypes function object that was called
    :param arguments: the argument tuple the function was called with
    """
    if result == 0:
        return None
    raise KRB5Error(result, func.__name__, arguments)
# pointer/type aliases used in the prototypes below
krb5_context = ctypes.POINTER(_krb5_context)
krb5_ccache = ctypes.POINTER(_krb5_ccache)
krb5_data_p = ctypes.POINTER(_krb5_data)
krb5_creds = _krb5_creds
krb5_pointer = ctypes.c_void_p
krb5_cc_cursor = krb5_pointer

# Prototype declarations for the libkrb5 entry points we call.
# Functions returning krb5_error_code get krb5_errcheck attached so any
# failure raises KRB5Error instead of being silently ignored.

krb5_init_context = LIBKRB5.krb5_init_context
krb5_init_context.argtypes = (ctypes.POINTER(krb5_context), )
krb5_init_context.restype = krb5_error_code
krb5_init_context.errcheck = krb5_errcheck

krb5_free_context = LIBKRB5.krb5_free_context
krb5_free_context.argtypes = (krb5_context, )
krb5_free_context.restype = None

krb5_free_principal = LIBKRB5.krb5_free_principal
krb5_free_principal.argtypes = (krb5_context, krb5_principal)
krb5_free_principal.restype = None

krb5_free_data_contents = LIBKRB5.krb5_free_data_contents
krb5_free_data_contents.argtypes = (krb5_context, krb5_data_p)
krb5_free_data_contents.restype = None

krb5_cc_default = LIBKRB5.krb5_cc_default
krb5_cc_default.argtypes = (krb5_context, ctypes.POINTER(krb5_ccache), )
krb5_cc_default.restype = krb5_error_code
krb5_cc_default.errcheck = krb5_errcheck

krb5_cc_close = LIBKRB5.krb5_cc_close
krb5_cc_close.argtypes = (krb5_context, krb5_ccache, )
krb5_cc_close.restype = krb5_error_code
krb5_cc_close.errcheck = krb5_errcheck

krb5_parse_name = LIBKRB5.krb5_parse_name
krb5_parse_name.argtypes = (krb5_context, ctypes.c_char_p,
                            ctypes.POINTER(krb5_principal), )
krb5_parse_name.restype = krb5_error_code
krb5_parse_name.errcheck = krb5_errcheck

krb5_cc_set_config = LIBKRB5.krb5_cc_set_config
krb5_cc_set_config.argtypes = (krb5_context, krb5_ccache, krb5_principal,
                               ctypes.c_char_p, krb5_data_p, )
krb5_cc_set_config.restype = krb5_error_code
krb5_cc_set_config.errcheck = krb5_errcheck

krb5_cc_get_principal = LIBKRB5.krb5_cc_get_principal
krb5_cc_get_principal.argtypes = (krb5_context, krb5_ccache,
                                  ctypes.POINTER(krb5_principal), )
krb5_cc_get_principal.restype = krb5_error_code
krb5_cc_get_principal.errcheck = krb5_errcheck

# krb5_build_principal is a variadic function but that can't be expressed
# in a ctypes argtypes definition, so I explicitly listed the number of
# arguments we actually use through the code for type checking purposes
krb5_build_principal = LIBKRB5.krb5_build_principal
krb5_build_principal.argtypes = (krb5_context, ctypes.POINTER(krb5_principal),
                                 ctypes.c_uint, ctypes.c_char_p,
                                 ctypes.c_char_p, ctypes.c_char_p,
                                 ctypes.c_char_p, ctypes.c_char_p, )
krb5_build_principal.restype = krb5_error_code
krb5_build_principal.errcheck = krb5_errcheck

# ccache iteration: start_seq_get / next_cred / end_seq_get
krb5_cc_start_seq_get = LIBKRB5.krb5_cc_start_seq_get
krb5_cc_start_seq_get.argtypes = (krb5_context, krb5_ccache,
                                  ctypes.POINTER(krb5_cc_cursor), )
krb5_cc_start_seq_get.restype = krb5_error_code
krb5_cc_start_seq_get.errcheck = krb5_errcheck

krb5_cc_next_cred = LIBKRB5.krb5_cc_next_cred
krb5_cc_next_cred.argtypes = (krb5_context, krb5_ccache,
                              ctypes.POINTER(krb5_cc_cursor),
                              ctypes.POINTER(krb5_creds), )
krb5_cc_next_cred.restype = krb5_error_code
krb5_cc_next_cred.errcheck = krb5_errcheck

krb5_cc_end_seq_get = LIBKRB5.krb5_cc_end_seq_get
krb5_cc_end_seq_get.argtypes = (krb5_context, krb5_ccache,
                                ctypes.POINTER(krb5_cc_cursor), )
krb5_cc_end_seq_get.restype = krb5_error_code
krb5_cc_end_seq_get.errcheck = krb5_errcheck

krb5_free_cred_contents = LIBKRB5.krb5_free_cred_contents
krb5_free_cred_contents.argtypes = (krb5_context, ctypes.POINTER(krb5_creds))
krb5_free_cred_contents.restype = None

krb5_principal_compare = LIBKRB5.krb5_principal_compare
krb5_principal_compare.argtypes = (krb5_context, krb5_principal,
                                   krb5_principal, )
krb5_principal_compare.restype = krb5_boolean

krb5_unparse_name = LIBKRB5.krb5_unparse_name
krb5_unparse_name.argtypes = (krb5_context, krb5_principal,
                              ctypes.POINTER(ctypes.c_char_p), )
krb5_unparse_name.restype = krb5_error_code
krb5_unparse_name.errcheck = krb5_errcheck

krb5_free_unparsed_name = LIBKRB5.krb5_free_unparsed_name
krb5_free_unparsed_name.argtypes = (krb5_context, ctypes.c_char_p, )
krb5_free_unparsed_name.restype = None

# pseudo-realm/name under which krb5 stores ccache configuration entries
CONF_REALM = b"X-CACHECONF:"
CONF_NAME = b"krb5_ccache_conf_data"
def store_data(princ_name, key, value):
    """
    Stores the session cookie in a hidden ccache entry.

    :param princ_name: principal the entry belongs to (str or bytes)
    :param key: configuration key name (str or bytes)
    :param value: value to store (str or bytes)
    :raises KRB5Error: if any underlying libkrb5 call fails
    """
    if not isinstance(princ_name, bytes):
        princ_name = princ_name.encode('utf-8')
    if not isinstance(key, bytes):
        # NOTE(review): store encodes the key as ascii while get_data()
        # uses utf-8 -- inconsistent but equivalent for ASCII keys
        key = key.encode('ascii')
    if not isinstance(value, bytes):
        value = value.encode('utf-8')

    # FILE ccaches grow every time an entry is stored, so we need
    # to avoid storing the same entry multiple times.
    oldvalue = get_data(princ_name, key)
    if oldvalue == value:
        return

    context = krb5_context()
    principal = krb5_principal()
    ccache = krb5_ccache()

    try:
        krb5_init_context(ctypes.byref(context))

        krb5_parse_name(context, ctypes.c_char_p(princ_name),
                        ctypes.byref(principal))

        krb5_cc_default(context, ctypes.byref(ccache))

        buf = ctypes.create_string_buffer(value)
        data = _krb5_data()
        data.data = buf.value
        # NOTE(review): len(buf) includes the trailing NUL added by
        # create_string_buffer, so length is len(value) + 1 -- confirm
        # this is intended
        data.length = len(buf)
        krb5_cc_set_config(context, ccache, principal, key,
                           ctypes.byref(data))
    finally:
        # free handles in reverse order of acquisition; ctypes pointers
        # are falsy when never filled in
        if principal:
            krb5_free_principal(context, principal)
        if ccache:
            krb5_cc_close(context, ccache)
        if context:
            krb5_free_context(context)
def get_data(princ_name, key):
    """
    Gets the session cookie in a hidden ccache entry.

    :param princ_name: principal the entry belongs to (str or bytes)
    :param key: configuration key name (str or bytes)
    :returns: the stored value as bytes, or None if no entry matches
    :raises KRB5Error: if an underlying libkrb5 call fails
    """
    if not isinstance(princ_name, bytes):
        princ_name = princ_name.encode('utf-8')
    if not isinstance(key, bytes):
        key = key.encode('utf-8')

    context = krb5_context()
    principal = krb5_principal()
    srv_princ = krb5_principal()
    ccache = krb5_ccache()
    pname_princ = krb5_principal()
    pname = ctypes.c_char_p()

    try:
        krb5_init_context(ctypes.byref(context))

        krb5_cc_default(context, ctypes.byref(ccache))
        krb5_cc_get_principal(context, ccache, ctypes.byref(principal))

        # We need to parse and then unparse the name in case the pric_name
        # passed in comes w/o a realm attached
        krb5_parse_name(context, ctypes.c_char_p(princ_name),
                        ctypes.byref(pname_princ))
        krb5_unparse_name(context, pname_princ, ctypes.byref(pname))

        # build the synthetic config principal:
        # krb5_ccache_conf_data/<key>/<principal>@X-CACHECONF:
        krb5_build_principal(context, ctypes.byref(srv_princ),
                             len(CONF_REALM), ctypes.c_char_p(CONF_REALM),
                             ctypes.c_char_p(CONF_NAME), ctypes.c_char_p(key),
                             pname, ctypes.c_char_p(None))

        # Unfortunately we can't just use krb5_cc_get_config()
        # because of bugs in some ccache handling code in krb5
        # libraries that would always return the first entry
        # stored and not the last one, which is the one we want.
        cursor = krb5_cc_cursor()
        creds = krb5_creds()
        got_creds = False
        krb5_cc_start_seq_get(context, ccache, ctypes.byref(cursor))
        try:
            while True:
                checkcreds = krb5_creds()
                # the next function will throw an error and break out of the
                # while loop when we try to access past the last cred
                try:
                    krb5_cc_next_cred(context, ccache, ctypes.byref(cursor),
                                      ctypes.byref(checkcreds))
                except KRB5Error:
                    break

                if (krb5_principal_compare(context, principal,
                                           checkcreds.client) == 1 and
                    krb5_principal_compare(context, srv_princ,
                                           checkcreds.server) == 1):
                    if got_creds:
                        # drop the previously-matched entry; keep the newer
                        krb5_free_cred_contents(context, ctypes.byref(creds))
                    creds = checkcreds
                    got_creds = True
                    # We do not stop here, as we want the LAST entry
                    # in the ccache for those ccaches that cannot delete
                    # but only always append, like FILE
                else:
                    krb5_free_cred_contents(context,
                                            ctypes.byref(checkcreds))
        finally:
            krb5_cc_end_seq_get(context, ccache, ctypes.byref(cursor))

        if got_creds:
            # copy the payload into Python bytes before freeing the cred
            data = creds.ticket.data
            krb5_free_cred_contents(context, ctypes.byref(creds))
            return data
    finally:
        # free all krb5 objects that were actually populated
        if principal:
            krb5_free_principal(context, principal)
        if srv_princ:
            krb5_free_principal(context, srv_princ)
        if pname_princ:
            krb5_free_principal(context, pname_princ)
        if pname:
            krb5_free_unparsed_name(context, pname)
        if ccache:
            krb5_cc_close(context, ccache)
        if context:
            krb5_free_context(context)

    return None
def remove_data(princ_name, key):
    """
    Removes the hidden ccache entry with the session cookie.

    Removal is best-effort: ccache types that cannot delete entries
    (KRB5_CC_NOSUPP, e.g. FILE) are silently ignored.

    :param princ_name: principal the entry belongs to (str or bytes)
    :param key: configuration key name (str or bytes)
    :raises KRB5Error: for libkrb5 failures other than KRB5_CC_NOSUPP
    """
    if not isinstance(princ_name, bytes):
        princ_name = princ_name.encode('utf-8')
    if not isinstance(key, bytes):
        key = key.encode('utf-8')

    context = krb5_context()
    principal = krb5_principal()
    ccache = krb5_ccache()

    try:
        krb5_init_context(ctypes.byref(context))

        krb5_parse_name(context, ctypes.c_char_p(princ_name),
                        ctypes.byref(principal))

        krb5_cc_default(context, ctypes.byref(ccache))

        try:
            # passing a NULL data pointer deletes the config entry
            krb5_cc_set_config(context, ccache, principal, key, None)
        except KRB5Error as e:
            if e.args[0] == KRB5_CC_NOSUPP:
                # removal not supported with this CC type, just pass
                pass
    finally:
        if principal:
            krb5_free_principal(context, principal)
        if ccache:
            krb5_cc_close(context, ccache)
        if context:
            krb5_free_context(context)
| 13,247
|
Python
|
.py
| 316
| 33.708861
| 78
| 0.640423
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,620
|
fqdn.py
|
freeipa_freeipa/ipapython/fqdn.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""Get host's FQDN
"""
import socket
def gethostfqdn():
    """Get the fully qualified domain name of current host from glibc

    This function may return an FQDN with up to MAXHOSTFQDNLEN characters
    (253). The effective hostname is still limited to MAXHOSTNAMELEN (64).

    :return: FQDN as str
    """
    name = socket.gethostname()
    # getaddrinfo can only fail here with a misconfigured nsswitch.conf
    # lacking the nss-myhostname provider (which maps gethostname() to
    # the local interfaces).
    addrinfo = socket.getaddrinfo(
        name,
        None,  # service/port is irrelevant
        family=socket.AF_UNSPEC,  # IPv4 or IPv6
        type=socket.SOCK_DGRAM,  # optimization, TCP/RAW gives same result
        # AI_CANONNAME: include canonical name in first addrinfo struct
        # AI_ADDRCONFIG: only use an address family when at least one
        # non-local interface is configured with it
        flags=socket.AI_CANONNAME | socket.AI_ADDRCONFIG
    )
    # getaddrinfo either raises or returns at least one entry; the fourth
    # field of the first entry is the canonical name
    first_entry = addrinfo[0]
    return first_entry[3]
| 1,220
|
Python
|
.py
| 29
| 36.793103
| 76
| 0.719461
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,621
|
dogtag.py
|
freeipa_freeipa/ipapython/dogtag.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import gzip
import io
import json
import logging
from urllib.parse import urlencode
import xml.dom.minidom
import zlib
import six
# pylint: disable=ipa-forbidden-import
from ipalib import api, errors
from ipalib.util import create_https_connection
from ipalib.errors import NetworkError
from ipalib.text import _
# pylint: enable=ipa-forbidden-import
from ipapython import ipautil
# Python 3 rename. The package is available in "six.moves.http_client", but
# pylint cannot handle classes from that alias
try:
import httplib
except ImportError:
import http.client as httplib
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)

# (profile_id, description, store_issued) describing a Dogtag certificate
# profile shipped with IPA; store_issued controls whether issued certs are
# recorded in the IPA database
Profile = collections.namedtuple('Profile', ['profile_id', 'description', 'store_issued'])

INCLUDED_PROFILES = {
    Profile(u'caIPAserviceCert', u'Standard profile for network services', True),
    Profile(u'IECUserRoles', u'User profile that includes IECUserRoles extension from request', True),
    Profile(u'KDCs_PKINIT_Certs',
            u'Profile for PKINIT support by KDCs',
            False),
    Profile(u'acmeIPAServerCert',
            u'ACME IPA service certificate profile',
            False),
}

DEFAULT_PROFILE = u'caIPAserviceCert'
KDC_PROFILE = u'KDCs_PKINIT_Certs'
# Python 2/3 compatibility shim for one-shot gzip decompression
if six.PY3:
    gzip_decompress = gzip.decompress
else:
    # note: gzip.decompress available in Python >= 3.2
    def gzip_decompress(data):
        # wrap the compressed bytes in a file object for GzipFile
        with gzip.GzipFile(fileobj=io.BytesIO(data)) as f:
            return f.read()
def error_from_xml(doc, message_template):
    """Build a RemoteRetrieveError from a Dogtag XML error response.

    :param doc: parsed xml.dom.minidom document
    :param message_template: '%s' template used when *doc* carries no
        <Error> element; the parsing exception is interpolated into it
    :returns: errors.RemoteRetrieveError (never raises)
    """
    try:
        reason = doc.getElementsByTagName("Error")[0].childNodes[0].data
        return errors.RemoteRetrieveError(reason=reason)
    except Exception as exc:
        return errors.RemoteRetrieveError(reason=message_template % exc)
def get_ca_certchain(ca_host=None):
    """
    Retrieve the CA Certificate chain from the configured Dogtag server.

    :param ca_host: CA hostname; defaults to api.env.ca_host
    :returns: the base64 chain string from the getCertChain response
    :raises errors.RemoteRetrieveError: on non-200 status or when the
        response carries no chain
    """
    if ca_host is None:
        ca_host = api.env.ca_host
    chain = None
    conn = httplib.HTTPConnection(
        ca_host,
        api.env.ca_install_port or 8080)
    conn.request("GET", "/ca/ee/ca/getCertChain")
    res = conn.getresponse()
    doc = None
    if res.status == 200:
        data = res.read()
        conn.close()
        try:
            # newer Dogtag answers with JSON ...
            doc = json.loads(data)
            chain = doc['Response']['ChainBase64']
        except (json.JSONDecodeError, KeyError):
            # ... older versions with XML
            logger.debug("Response is not valid JSON, try XML")
            doc = xml.dom.minidom.parseString(data)
            try:
                item_node = doc.getElementsByTagName("ChainBase64")
                chain = item_node[0].childNodes[0].data
            except IndexError:
                raise error_from_xml(
                    doc, _("Retrieving CA cert chain failed: %s"))
            finally:
                # minidom documents must be unlinked to break ref cycles
                if doc:
                    doc.unlink()
    else:
        raise errors.RemoteRetrieveError(
            reason=_("request failed with HTTP status %d") % res.status)

    return chain
def _parse_ca_status(body):
try:
doc = json.loads(body)
return doc['Response']['Status']
except (json.JSONDecodeError, KeyError):
logger.debug("Response is not valid JSON, try XML")
doc = xml.dom.minidom.parseString(body)
try:
item_node = doc.getElementsByTagName("XMLResponse")[0]
item_node = item_node.getElementsByTagName("Status")[0]
return item_node.childNodes[0].data
except IndexError:
raise error_from_xml(doc, _("Retrieving CA status failed: %s"))
def ca_status(ca_host=None):
    """Return the status of the CA, and the httpd proxy in front of it

    :param ca_host: CA hostname; defaults to ``api.env.ca_host``
    :raises errors.RemoteRetrieveError: on HTTP statuses other than
        200 and 503

    The returned status can be:
    - running
    - starting
    - Service Temporarily Unavailable
    """
    if ca_host is None:
        ca_host = api.env.ca_host
    status, _headers, body = http_request(
        ca_host, 8080, '/ca/admin/ca/getStatus',
        # timeout: CA sometimes forgot to answer, we have to try again
        timeout=api.env.http_timeout)
    if status == 503:
        # Service temporarily unavailable
        # note: the numeric status code (503) is returned in this case
        return status
    elif status != 200:
        raise errors.RemoteRetrieveError(
            reason=_("Retrieving CA status failed with status %d") % status)
    return _parse_ca_status(body)
def acme_status(ca_host=None):
    """Return the status of ACME

    Returns a boolean.

    If the proxy is not working or the CA is not running then this could
    return a false negative.

    :param ca_host: hostname to query; defaults to ``api.env.ca_host``
    """
    if ca_host is None:
        ca_host = api.env.ca_host
    # Probe the ACME directory endpoint over TLS (no client cert needed).
    status, _headers, _body = https_request(
        ca_host, 443,
        url='/acme/directory',
        cafile=api.env.tls_ca_cert,
        client_certfile=None,
        client_keyfile=None,
        method='GET',
        timeout=api.env.http_timeout)
    if status == 200:
        return True
    elif status == 503:
        # This is what it should return when disabled
        return False
    else:
        # Unexpected status code, log and return False
        logger.error('ACME status request returned %d', status)
        return False
def https_request(
        host, port, url, cafile, client_certfile, client_keyfile,
        method='POST', headers=None, body=None, **kw):
    """
    Perform a client authenticated HTTPS request.

    :param method: HTTP request method (default: 'POST')
    :param url: The path (not complete URL!) to post to.
    :param body: The request body (encodes kw if None)
    :param kw: Keyword arguments to encode into POST body.
    :return: (http_status, http_headers, http_body)
            as (integer, dict, str)
    """
    def connection_factory(host, port):
        # TLS connection pinned to the CA file and client cert/key above,
        # honoring the configured TLS version bounds
        return create_https_connection(
            host, port,
            cafile=cafile,
            client_certfile=client_certfile,
            client_keyfile=client_keyfile,
            tls_version_min=api.env.tls_version_min,
            tls_version_max=api.env.tls_version_max)

    if body is None:
        body = urlencode(kw)
    return _httplib_request(
        'https', host, port, url, connection_factory, body,
        method=method, headers=headers)
def http_request(host, port, url, timeout=None, **kw):
    """
    Perform an HTTP request.

    :param url: The path (not complete URL!) to post to.
    :param timeout: Timeout in seconds for waiting for reply.
    :param kw: Keyword arguments to encode into POST body.
    :return: (http_status, http_headers, http_body)
            as (integer, dict, str)
    """
    encoded_body = urlencode(kw)
    conn_opt = {} if timeout is None else {"timeout": timeout}
    return _httplib_request(
        'http', host, port, url, httplib.HTTPConnection, encoded_body,
        connection_options=conn_opt)
def _httplib_request(
        protocol, host, port, path, connection_factory, request_body,
        method='POST', headers=None, connection_options=None):
    """
    Perform a HTTP(S) request.

    :param request_body: Request body
    :param connection_factory: Connection class to use. Will be called
        with the host and port arguments.
    :param method: HTTP request method (default: 'POST')
    :param connection_options: a dictionary that will be passed to
        connection_factory as keyword arguments.
    :return: (http_status, http_headers, http_body) tuple
    :raises NetworkError: when the request itself fails
    """
    if connection_options is None:
        connection_options = {}

    uri = u'%s://%s%s' % (protocol, ipautil.format_netloc(host, port), path)
    logger.debug('request %s %s', method, uri)
    logger.debug('request body %r', request_body)

    headers = headers or {}
    # default POST bodies to form encoding unless a content-type was given
    # (case-insensitive header comparison)
    if (
        method == 'POST'
        and 'content-type' not in (str(k).lower() for k in headers)
    ):
        headers['content-type'] = 'application/x-www-form-urlencoded'

    try:
        conn = connection_factory(host, port, **connection_options)
        conn.request(method, path, body=request_body, headers=headers)
        res = conn.getresponse()

        http_status = res.status
        http_headers = res.msg
        http_body = res.read()
        conn.close()
    except Exception as e:
        logger.debug("httplib request failed:", exc_info=True)
        raise NetworkError(uri=uri, error=str(e))

    # transparently decompress compressed response bodies
    encoding = res.getheader('Content-Encoding')
    if encoding == 'gzip':
        http_body = gzip_decompress(http_body)
    elif encoding == 'deflate':
        http_body = zlib.decompress(http_body)

    logger.debug('response status %d', http_status)
    logger.debug('response headers %s', http_headers)
    logger.debug('response body (decoded): %r', http_body)

    return http_status, http_headers, http_body
| 9,410
|
Python
|
.py
| 248
| 31.334677
| 102
| 0.661549
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,622
|
ipavalidate.py
|
freeipa_freeipa/ipapython/ipavalidate.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
def Email(mail, notEmpty=True):
    """Do some basic validation of an e-mail address.

    Return True if ok
    Return False if not

    If notEmpty is True then this will return False if the field
    is "" or None.
    """
    usernameRE = re.compile(r"^[^ \t\n\r@<>()]+$", re.I)
    domainRE = re.compile(r"^[a-z0-9][a-z0-9\.\-_]*\.[a-z]+$", re.I)

    if not mail:
        # ``not mail`` already covers None as well as "" — the previous
        # extra ``or mail is None`` clause was redundant.
        return not (notEmpty is True)

    mail = mail.strip()
    try:
        # split on the first '@'; everything after it is the domain part
        username, domain = mail.split('@', 1)
    except ValueError:
        # no '@' present at all
        return False
    if not usernameRE.search(username):
        return False
    if not domainRE.search(domain):
        return False
    return True
def Plain(text, notEmpty=False, allowSpaces=True):
    """Do some basic validation of a plain text field

    Return True if ok
    Return False if not

    If notEmpty is True then this will return False if the field
    is "" or None.
    """
    if text is None or not text.strip():
        # empty input: acceptable only when emptiness is allowed
        return notEmpty is not True
    pattern = (r"^[a-zA-Z_\-0-9\'\ ]*$" if allowSpaces
               else r"^[a-zA-Z_\-0-9\']*$")
    return re.search(pattern, text) is not None
def String(text, notEmpty=False):
    """A string type. This is much looser in what it allows than plain"""
    if text is None or not text.strip():
        # only emptiness is ever rejected, and only when notEmpty is set
        return notEmpty is not True
    return True
def Path(text, notEmpty=False):
    """Do some basic validation of a path

    Return True if ok
    Return False if not

    If notEmpty is True then this will return False if the field
    is "" or None.
    """
    pathRE = re.compile(r"^[a-zA-Z_\-0-9\\ \.\/\\:]*$")
    # empty value ("" or None) fails only when emptiness is disallowed
    if notEmpty is True and not text:
        return False
    if text is None:
        return True
    return pathRE.search(text) is not None
def GoodName(text, notEmpty=False):
    """From shadow-utils:

    User/group names must match gnu e-regex:
        [a-zA-Z0-9_.][a-zA-Z0-9_.-]{0,30}[a-zA-Z0-9_.$-]?

    as a non-POSIX, extension, allow "$" as the last char for
    sake of Samba 3.x "add machine script"

    Return True if ok
    Return False if not
    """
    nameRE = re.compile(r"^[a-zA-Z0-9_.][a-zA-Z0-9_.-]{0,30}[a-zA-Z0-9_.$-]?$")
    if notEmpty is True and not text:
        return False
    if text is None:
        return True
    # fullmatch is equivalent to match() plus comparing group(0) to the
    # whole string (it also rejects a trailing newline)
    return nameRE.fullmatch(text) is not None
| 3,633
|
Python
|
.py
| 110
| 26.672727
| 79
| 0.622998
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,623
|
ipautil.py
|
freeipa_freeipa/ipapython/ipautil.py
|
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007-2016 Red Hat, Inc.
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import codecs
import logging
import string
import tempfile
import subprocess
import random
import math
import os
import sys
import errno
import copy
import shutil
import socket
import re
import datetime
import netaddr
import time
import textwrap
import io
from contextlib import contextmanager
from configparser import RawConfigParser, ParsingError
import locale
import collections
import urllib
import six
from six.moves import input
try:
import ifaddr
except ImportError:
ifaddr = None
from ipapython.dn import DN
from ipaplatform.paths import paths
from ipaplatform.constants import User, Group
logger = logging.getLogger(__name__)

# only for OTP password that is manually retyped by user
TMP_PWD_ENTROPY_BITS = 128

# map socket type constants to protocol names (as used in e.g. SRV records)
PROTOCOL_NAMES = {
    socket.SOCK_STREAM: 'tcp',
    socket.SOCK_DGRAM: 'udp'
}

# result type of CheckedIPAddress.get_matching_interface()
InterfaceDetails = collections.namedtuple(
    'InterfaceDetails', [
        'name',  # interface name
        'ifnet'  # network details of interface
    ])
class UnsafeIPAddress(netaddr.IPAddress):
    """Any valid IP address with or without netmask."""

    # Use inet_pton() rather than inet_aton() for IP address parsing. We
    # will use the same function in IPv4/IPv6 conversions + be stricter
    # and don't allow IP addresses such as '1.1.1' in the same time
    netaddr_ip_flags = netaddr.INET_PTON

    def __init__(self, addr):
        # _net carries the netmask (an IPNetwork) when one was supplied;
        # None means "no netmask information".
        if isinstance(addr, UnsafeIPAddress):
            self._net = addr._net
            super(UnsafeIPAddress, self).__init__(addr,
                                                  flags=self.netaddr_ip_flags)
            return

        elif isinstance(addr, netaddr.IPAddress):
            self._net = None  # no information about netmask
            super(UnsafeIPAddress, self).__init__(addr,
                                                  flags=self.netaddr_ip_flags)
            return

        elif isinstance(addr, netaddr.IPNetwork):
            self._net = addr
            super(UnsafeIPAddress, self).__init__(self._net.ip,
                                                  flags=self.netaddr_ip_flags)
            return

        # option of last resort: parse it as string
        self._net = None
        addr = str(addr)
        try:
            try:
                addr = netaddr.IPAddress(addr, flags=self.netaddr_ip_flags)
            except netaddr.AddrFormatError:
                # netaddr.IPAddress doesn't handle zone indices in textual
                # IPv6 addresses. Try removing zone index and parse the
                # address again.
                addr, sep, _foo = addr.partition('%')
                if sep != '%':
                    raise
                addr = netaddr.IPAddress(addr, flags=self.netaddr_ip_flags)
                if addr.version != 6:
                    raise
        except ValueError:
            # not a plain address: try to parse address/netmask syntax
            self._net = netaddr.IPNetwork(addr, flags=0)
            addr = self._net.ip
        super(UnsafeIPAddress, self).__init__(addr,
                                              flags=self.netaddr_ip_flags)

    def __getstate__(self):
        # include _net so pickling round-trips the netmask information
        state = {
            '_net': self._net,
            'super_state': super(UnsafeIPAddress, self).__getstate__(),
        }
        return state

    def __setstate__(self, state):
        super(UnsafeIPAddress, self).__setstate__(state['super_state'])
        self._net = state['_net']
class CheckedIPAddress(UnsafeIPAddress):
    """IPv4 or IPv6 address with additional constraints.

    Reserved or link-local addresses are never accepted.
    """

    def __init__(self, addr, parse_netmask=True,
                 allow_loopback=False, allow_multicast=False):
        """
        :param addr: the address (string, int or netaddr object) to parse
        :param parse_netmask: when False, reject an addr with a netmask
        :param allow_loopback: accept loopback addresses
        :param allow_multicast: accept multicast addresses
        :raises ValueError: when the address is malformed or violates one
            of the constraints above
        """
        try:
            super(CheckedIPAddress, self).__init__(addr)
        except netaddr.core.AddrFormatError as e:
            raise ValueError(e)

        if isinstance(addr, CheckedIPAddress):
            # already validated: just copy the cached prefix length
            self.prefixlen = addr.prefixlen
            return

        if not parse_netmask and self._net:
            raise ValueError(
                "netmask and prefix length not allowed here: {}".format(addr))

        if self.version not in (4, 6):
            raise ValueError("unsupported IP version {}".format(self.version))

        if not allow_loopback and self.is_loopback():
            raise ValueError("cannot use loopback IP address {}".format(addr))
        if (not self.is_loopback() and self.is_reserved()) \
                or self in netaddr.ip.IPV4_6TO4:
            raise ValueError(
                "cannot use IANA reserved IP address {}".format(addr))

        if self.is_link_local():
            raise ValueError(
                "cannot use link-local IP address {}".format(addr))

        if not allow_multicast and self.is_multicast():
            raise ValueError("cannot use multicast IP address {}".format(addr))

        if self._net is None:
            # no explicit netmask given: derive one (classful inference via
            # cidr_abbrev_to_verbose for IPv4, /64 for IPv6)
            if self.version == 4:
                self._net = netaddr.IPNetwork(
                    netaddr.cidr_abbrev_to_verbose(str(self)))
            elif self.version == 6:
                self._net = netaddr.IPNetwork(str(self) + '/64')

        self.prefixlen = self._net.prefixlen

    def __getstate__(self):
        state = {
            'prefixlen': self.prefixlen,
            'super_state': super(CheckedIPAddress, self).__getstate__(),
        }
        return state

    def __setstate__(self, state):
        super(CheckedIPAddress, self).__setstate__(state['super_state'])
        self.prefixlen = state['prefixlen']

    def is_network_addr(self):
        # True when this address equals the network address of its net
        return self == self._net.network

    def is_broadcast_addr(self):
        # broadcast addresses exist only for IPv4
        return self.version == 4 and self == self._net.broadcast

    def get_matching_interface(self):
        """Find matching local interface for address

        :return: InterfaceDetails named tuple or None if no interface has
            this address
        :raises ImportError: when the optional ifaddr package is missing
        """
        if ifaddr is None:
            raise ImportError("ifaddr")
        logger.debug("Searching for an interface of IP address: %s", self)
        if self.version == 4:
            # ifaddr exposes IPv4 addresses as plain strings ...
            family_ips = (
                (ip.ip, ip.network_prefix, ip.nice_name)
                for ips in [a.ips for a in ifaddr.get_adapters()]
                for ip in ips if not isinstance(ip.ip, tuple)
            )
        elif self.version == 6:
            # ... and IPv6 addresses as (address, flowinfo, scope_id) tuples
            family_ips = (
                (ip.ip[0], ip.network_prefix, ip.nice_name)
                for ips in [a.ips for a in ifaddr.get_adapters()]
                for ip in ips if isinstance(ip.ip, tuple)
            )
        else:
            raise ValueError(
                "Unsupported address family ({})".format(self.version)
            )

        for ip, prefix, ifname in family_ips:
            ifaddrmask = "{ip}/{prefix}".format(ip=ip, prefix=prefix)
            logger.debug(
                "Testing local IP address: %s (interface: %s)",
                ifaddrmask, ifname)
            ifnet = netaddr.IPNetwork(ifaddrmask)
            if ifnet.ip == self:
                return InterfaceDetails(ifname, ifnet)
        return None

    def set_ip_net(self, ifnet):
        """Set IP Network details for this address. IPNetwork is valid only
        locally, so this should be set only for local IP addresses

        :param ifnet: netaddr.IPNetwork object with information about IP
            network where particula address belongs locally
        """
        assert isinstance(ifnet, netaddr.IPNetwork)
        self._net = ifnet
class CheckedIPAddressLoopback(CheckedIPAddress):
    """IPv4 or IPv6 address with additional constraints with
    possibility to use a loopback IP.

    Reserved or link-local addresses are never accepted.
    """

    def __init__(self, addr, parse_netmask=True, allow_multicast=False):
        # identical to CheckedIPAddress except loopback is permitted
        super(CheckedIPAddressLoopback, self).__init__(
            addr, parse_netmask=parse_netmask,
            allow_multicast=allow_multicast,
            allow_loopback=True)

        if self.is_loopback():
            # print is being used instead of a logger, because at this
            # moment, in execution process, there is no logger configured
            print("WARNING: You are using a loopback IP: {}".format(addr),
                  file=sys.stderr)
def valid_ip(addr):
    """Return True if *addr* is a valid IPv4 or IPv6 address string."""
    if netaddr.valid_ipv4(addr):
        return True
    return netaddr.valid_ipv6(addr)
def format_netloc(host, port=None):
    """
    Format network location (host:port).

    If the host part is a literal IPv6 address, it must be enclosed in square
    brackets (RFC 2732).
    """
    netloc = str(host)
    try:
        socket.inet_pton(socket.AF_INET6, netloc)
    except socket.error:
        # not a literal IPv6 address: leave the host untouched
        pass
    else:
        netloc = '[%s]' % netloc
    if port is None:
        return netloc
    return '%s:%s' % (netloc, str(port))
def realm_to_suffix(realm_name):
    'Convert a kerberos realm to a IPA suffix.'
    labels = realm_name.split(".")
    return DN(*[('dc', label.lower()) for label in labels])
def suffix_to_realm(suffix_dn):
    'Convert a IPA suffix to a kerberos realm.'
    assert isinstance(suffix_dn, DN)
    return '.'.join(component.value for component in suffix_dn)
def template_str(txt, vars):
    """Substitute $-style variables in *txt* and evaluate eval() islands.

    SECURITY NOTE: the eval() hook executes arbitrary Python expressions
    embedded in the template, so templates must come from trusted sources.
    """
    substituted = string.Template(txt).substitute(vars)

    # eval() is a special string one can insert into a template to have the
    # Python interpreter evaluate the string. This is intended to allow
    # math to be performed in templates.
    eval_pattern = re.compile(r'(eval\s*\(([^()]*)\))')
    return eval_pattern.sub(lambda m: str(eval(m.group(2))), substituted)
def template_file(infilename, vars):
    """Read a file and perform template substitutions"""
    with open(infilename) as template:
        return template_str(template.read(), vars)
def copy_template_file(infilename, outfilename, vars):
    """Copy a file, performing template substitutions"""
    rendered = template_file(infilename, vars)
    with open(outfilename, 'w') as outfile:
        outfile.write(rendered)
def write_tmp_file(txt):
    """Write *txt* into a NamedTemporaryFile and return the open handle.

    The file is deleted when the returned handle is closed.
    """
    tmpfile = tempfile.NamedTemporaryFile('w+')
    tmpfile.write(txt)
    tmpfile.flush()
    return tmpfile
def flush_sync(f):
    """Flush and fsync file to disk

    :param f: a file object with fileno and name
    """
    # push Python-level buffers down to the file descriptor
    f.flush()
    # push kernel buffers for the file itself to stable storage
    os.fsync(f.fileno())
    # finally sync the metadata of the containing directory
    parent = os.path.dirname(os.path.abspath(f.name))
    dir_fd = os.open(parent, os.O_RDONLY | os.O_DIRECTORY)
    try:
        os.fsync(dir_fd)
    finally:
        os.close(dir_fd)
def shell_quote(string):
    """Quote *string* (str or bytes) for safe use on a shell command line."""
    if isinstance(string, str):
        quote, escaped = "'", "'\\''"
    else:
        quote, escaped = b"'", b"'\\''"
    return quote + string.replace(quote, escaped) + quote
# Base triple returned by ipautil.run(); run() attaches additional
# attributes (raw_output, raw_error_output, output_log, error_log).
class _RunResult(collections.namedtuple('_RunResult',
                                        'output error_output returncode')):
    """Result of ipautil.run"""
class CalledProcessError(subprocess.CalledProcessError):
    """CalledProcessError with stderr

    Hold stderr of failed call and print it in repr() to simplify debugging.
    """
    def __init__(self, returncode, cmd, output=None, stderr=None):
        super(CalledProcessError, self).__init__(returncode, cmd, output)
        self.stderr = stderr

    def __str__(self):
        text = '{}({}{}'.format(
            self.__class__.__name__,
            'Command {!s} '.format(self.cmd),
            'returned non-zero exit status {!r}'.format(self.returncode),
        )
        if self.stderr is not None:
            text += ': {!r}'.format(self.stderr)
        return text + ')'

    __repr__ = __str__
def run(args, stdin=None, raiseonerr=True, nolog=(), env=None,
        capture_output=False, skip_output=False, cwd=None,
        runas=None, suplementary_groups=(),
        capture_error=False, encoding=None, redirect_output=False,
        umask=None, nolog_output=False, nolog_error=False):
    """
    Execute an external command.

    :param args: List of arguments for the command
    :param stdin: Optional input to the command
    :param raiseonerr: If True, raises an exception if the return code is
        not zero
    :param nolog: Tuple of strings that shouldn't be logged, like passwords.
        Each tuple consists of a string to be replaced by XXXXXXXX.

        Example:
        We have a command
            ['/usr/bin/setpasswd', '--password', 'Secret123', 'someuser']
        and we don't want to log the password so nolog would be set to:
            ('Secret123',)
        The resulting log output would be:

        /usr/bin/setpasswd --password XXXXXXXX someuser

        If a value isn't found in the list it is silently ignored.
    :param env: Dictionary of environment variables passed to the command.
        When None, current environment is copied
    :param capture_output: Capture stdout
    :param skip_output: Redirect the output to /dev/null and do not log it
    :param cwd: Current working directory
    :param runas: Name or User object of a user that the command should be
        run as. The spawned process will have both real and effective UID and
        GID set.
    :param suplementary_groups: List of group names or Group object that will
        be used as suplementary groups for subporcess. The option runas must
        be specified together with this option.
    :param capture_error: Capture stderr
    :param nolog_output: do not log stdout even if it is being captured
    :param nolog_error: do not log stderr even if it is being captured
    :param encoding: For Python 3, the encoding to use for output,
        error_output, and (if it's not bytes) stdin.
        If None, the current encoding according to locale is used.
    :param redirect_output: Redirect (error) output to standard (error) output.
    :param umask: Set file-creation mask before running the command.

    :return: An object with these attributes:

        `returncode`: The process' exit status

        `output` and `error_output`: captured output, as strings. Under
        Python 3, these are encoded with the given `encoding`.
        None unless `capture_output` or `capture_error`, respectively, are
        given

        `raw_output`, `raw_error_output`: captured output, as bytes.

        `output_log` and `error_log`: The captured output, as strings, with any
        unencodable characters discarded. These should only be used
        for logging or error messages.

    If skip_output is given, all output-related attributes on the result
    (that is, all except `returncode`) are None.

    For backwards compatibility, the return value can also be used as a
    (output, error_output, returncode) triple.
    """
    assert isinstance(suplementary_groups, (tuple, list))
    p_in = None
    p_out = None
    p_err = None

    if isinstance(nolog, str):
        # We expect a tuple (or list, or other iterable) of nolog strings.
        # Passing just a single string is bad: strings are iterable, so this
        # would result in every individual character of that string being
        # replaced by XXXXXXXX.
        # This is a sanity check to prevent that.
        raise ValueError('nolog must be a tuple of strings.')

    # the three output-disposition options are mutually exclusive
    if skip_output and (capture_output or capture_error):
        raise ValueError('skip_output is incompatible with '
                         'capture_output or capture_error')

    if redirect_output and (capture_output or capture_error):
        raise ValueError('redirect_output is incompatible with '
                         'capture_output or capture_error')

    if skip_output and redirect_output:
        raise ValueError('skip_output is incompatible with redirect_output')

    if env is None:
        # copy default env
        env = copy.deepcopy(os.environ)
        env["PATH"] = "/bin:/sbin:/usr/kerberos/bin:/usr/kerberos/sbin:/usr/bin:/usr/sbin"
    if stdin:
        p_in = subprocess.PIPE
    if skip_output:
        p_out = p_err = open(os.devnull, 'w')
    elif redirect_output:
        p_out = sys.stdout
        p_err = sys.stderr
    else:
        p_out = subprocess.PIPE
        p_err = subprocess.PIPE

    if encoding is None:
        encoding = locale.getpreferredencoding()

    if six.PY3 and isinstance(stdin, str):
        stdin = stdin.encode(encoding)

    # mask secrets before anything is logged
    arg_string = nolog_replace(repr(args), nolog)
    logger.debug('Starting external process')
    logger.debug('args=%s', arg_string)

    if runas is not None:
        runas = User(runas)
        suplementary_groups = [Group(group) for group in suplementary_groups]
        suplementary_gids = [group.gid for group in suplementary_groups]
        logger.debug(
            'runas=%s (UID %d, GID %s)', runas, runas.uid, runas.pgid
        )
        if suplementary_groups:
            for group in suplementary_groups:
                logger.debug(
                    'supplementary_group=%s (GID %d)', group, group.gid
                )

    if runas is not None or umask is not None:
        # preexec function is not supported in WSGI environment
        def preexec_fn():
            # runs in the child between fork() and exec()
            if runas is not None:
                os.setgroups(suplementary_gids)
                os.setregid(runas.pgid, runas.pgid)
                os.setreuid(runas.uid, runas.uid)

            if umask is not None:
                os.umask(umask)
    else:
        preexec_fn = None

    try:
        # pylint: disable=subprocess-popen-preexec-fn
        p = subprocess.Popen(args, stdin=p_in, stdout=p_out, stderr=p_err,
                             close_fds=True, env=env, cwd=cwd,
                             preexec_fn=preexec_fn)
        stdout, stderr = p.communicate(stdin)
    except KeyboardInterrupt:
        logger.debug('Process interrupted')
        p.wait()
        raise
    except BaseException:
        logger.debug('Process execution failed')
        raise
    finally:
        if skip_output:
            # close the /dev/null handle opened above
            p_out.close()

    logger.debug('Process finished, return code=%s', p.returncode)

    # The command and its output may include passwords that we don't want
    # to log. Replace those.
    if skip_output or redirect_output:
        output_log = None
        error_log = None
    else:
        if six.PY3:
            output_log = stdout.decode(locale.getpreferredencoding(),
                                       errors='replace')
        else:
            output_log = stdout
        if six.PY3:
            error_log = stderr.decode(locale.getpreferredencoding(),
                                      errors='replace')
        else:
            error_log = stderr
        output_log = nolog_replace(output_log, nolog)
        if nolog_output:
            logger.debug('stdout=<REDACTED>')
        else:
            logger.debug('stdout=%s', output_log)
        error_log = nolog_replace(error_log, nolog)
        if nolog_error:
            logger.debug('stderr=<REDACTED>')
        else:
            logger.debug('stderr=%s', error_log)

    if capture_output:
        if six.PY2:
            output = stdout
        else:
            output = stdout.decode(encoding)
    else:
        output = None

    if capture_error:
        if six.PY2:
            error_output = stderr
        else:
            error_output = stderr.decode(encoding)
    else:
        error_output = None

    if p.returncode != 0 and raiseonerr:
        raise CalledProcessError(
            p.returncode, arg_string, output_log, error_log
        )

    result = _RunResult(output, error_output, p.returncode)
    result.raw_output = stdout
    result.raw_error_output = stderr
    result.output_log = output_log
    result.error_log = error_log
    return result
def nolog_replace(string, nolog):
    """Replace occurrences of strings given in `nolog` with XXXXXXXX"""
    for secret in nolog:
        if not secret or not isinstance(secret, str):
            continue
        # also mask shell-quoted and URL-quoted renderings of the secret
        for variant in (shell_quote(secret), secret,
                        urllib.parse.quote(secret)):
            string = string.replace(variant, 'XXXXXXXX')
    return string
def install_file(fname, dest):
    """Move *fname* into place at *dest*, keeping a ".orig" backup.

    SELinux: use copy to keep the right context
    """
    if os.path.isfile(dest):
        os.rename(dest, dest + ".orig")
    shutil.copy(fname, dest)
    os.remove(fname)
def backup_file(fname):
    """Rename *fname* to "<fname>.orig" when it exists as a regular file."""
    if os.path.isfile(fname):
        os.rename(fname, fname + ".orig")
class CIDict(dict):
    """
    Case-insensitive but case-respecting dictionary.

    This code is derived from python-ldap's cidict.py module,
    written by stroeder: http://python-ldap.sourceforge.net/

    This version extends 'dict' so it works properly with TurboGears.
    If you extend UserDict, isinstance(foo, dict) returns false.
    """

    def __init__(self, default=None, **kwargs):
        super(CIDict, self).__init__()
        self._keys = {}  # mapping of lowercased keys to proper case
        if default:
            self.update(default)
        if kwargs:
            self.update(kwargs)

    def __getitem__(self, key):
        # storage always uses the lowercased key
        return super(CIDict, self).__getitem__(key.lower())

    def __setitem__(self, key, value, seen_keys=None):
        """cidict[key] = value

        The ``seen_keys`` argument is used by ``update()`` to keep track of
        duplicate keys. It should be an initially empty set that is
        passed to all calls to __setitem__ that should not set duplicate keys.
        """
        lower_key = key.lower()
        if seen_keys is not None:
            if lower_key in seen_keys:
                raise ValueError('Duplicate key in update: %s' % key)
            seen_keys.add(lower_key)
        # remember the caller's original casing for iteration/keys()
        self._keys[lower_key] = key
        return super(CIDict, self).__setitem__(lower_key, value)

    def __delitem__(self, key):
        lower_key = key.lower()
        del self._keys[lower_key]
        return super(CIDict, self).__delitem__(lower_key)

    def update(self, new=None, **kwargs):
        """Update self from dict/iterable new and kwargs

        Functions like ``dict.update()``.

        Neither ``new`` nor ``kwargs`` may contain two keys that only differ in
        case, as this situation would result in loss of data.
        """
        seen = set()
        if new:
            try:
                keys = new.keys
            except AttributeError:
                # not a mapping: treat as an iterable of (key, value) pairs
                self.update(dict(new))
            else:
                for key in keys():
                    # pylint: disable=unnecessary-dunder-call
                    self.__setitem__(key, new[key], seen)
        seen = set()
        for key, value in kwargs.items():
            # pylint: disable=unnecessary-dunder-call
            self.__setitem__(key, value, seen)

    def __contains__(self, key):
        return super(CIDict, self).__contains__(key.lower())

    if six.PY2:
        def has_key(self, key):
            # pylint: disable=no-member
            return super(CIDict, self).has_key(key.lower())  # noqa
            # pylint: enable=no-member

    def get(self, key, failobj=None):
        try:
            return self[key]
        except KeyError:
            return failobj

    def __iter__(self):
        # iterate over the original (case-preserved) keys
        return six.itervalues(self._keys)

    def keys(self):
        if six.PY2:
            return list(self.iterkeys())
        else:
            return self.iterkeys()

    def items(self):
        if six.PY2:
            return list(self.iteritems())
        else:
            return self.iteritems()

    def values(self):
        if six.PY2:
            return list(self.itervalues())
        else:
            return self.itervalues()

    def copy(self):
        """Returns a shallow copy of this CIDict"""
        return CIDict(list(self.items()))

    def iteritems(self):
        return ((k, self[k]) for k in six.itervalues(self._keys))

    def iterkeys(self):
        return six.itervalues(self._keys)

    def itervalues(self):
        return (v for k, v in six.iteritems(self))

    def setdefault(self, key, value=None):
        try:
            return self[key]
        except KeyError:
            self[key] = value
            return value

    def pop(self, key, *args):
        try:
            value = self[key]
            del self[key]
            return value
        except KeyError:
            if len(args) == 1:
                return args[0]
            raise

    def popitem(self):
        (lower_key, value) = super(CIDict, self).popitem()
        key = self._keys[lower_key]
        del self._keys[lower_key]
        return (key, value)

    def clear(self):
        self._keys.clear()
        return super(CIDict, self).clear()

    def viewitems(self):
        raise NotImplementedError('CIDict.viewitems is not implemented')

    def viewkeys(self):
        raise NotImplementedError('CIDict.viewkeys is not implemented')

    def viewvvalues(self):
        # NOTE(review): name looks like a typo of "viewvalues"; kept as-is
        # because renaming would change the public interface.
        raise NotImplementedError('CIDict.viewvvalues is not implemented')
class GeneralizedTimeZone(datetime.tzinfo):
    """This class is a basic timezone wrapper for the offset specified
    in a Generalized Time.  It is dst-ignorant."""

    def __init__(self, offsetstr="Z"):
        """Parse a Generalized Time offset: "Z", "+HH", "+HHMM",
        "-HH" or "-HHMM".

        :raises ValueError: if *offsetstr* is not a valid offset.
        """
        super(GeneralizedTimeZone, self).__init__()

        self.name = offsetstr
        self.houroffset = 0
        self.minoffset = 0

        if offsetstr != "Z":
            if (len(offsetstr) >= 3) and re.match(r'[-+]\d\d', offsetstr):
                self.houroffset = int(offsetstr[0:3])
                offsetstr = offsetstr[3:]
            if (len(offsetstr) >= 2) and re.match(r'\d\d', offsetstr):
                self.minoffset = int(offsetstr[0:2])
                offsetstr = offsetstr[2:]
            if len(offsetstr) > 0:
                raise ValueError()
        if self.houroffset < 0:
            # the minute part shares the sign of the hour part
            self.minoffset *= -1

    def utcoffset(self, dt):
        return datetime.timedelta(hours=self.houroffset,
                                  minutes=self.minoffset)

    def dst(self, dt=None):
        # BUG FIX: tzinfo.dst() must accept a datetime argument; without
        # it, datetime.dst()/astimezone() on an aware datetime raise
        # TypeError. Default of None keeps old no-arg calls working.
        return datetime.timedelta(0)

    def tzname(self, dt=None):
        # BUG FIX: tzinfo.tzname() must accept a datetime argument
        # (same interface requirement as dst() above).
        return self.name
def parse_generalized_time(timestr):
    """Parse a Generalized Time string (as specified in X.680),
    returning a datetime object, or None if the string cannot be parsed.
    Generalized Times are stored inside the krbPasswordExpiration
    attribute in LDAP.

    This method doesn't attempt to be perfect wrt timezones.  If python
    can't be bothered to implement them, how can we...
    """
    if len(timestr) < 8:
        return None
    try:
        date = timestr[:8]
        rest = timestr[8:]
        year = int(date[:4])
        month = int(date[4:6])
        day = int(date[6:8])
        # locals renamed from ``min``/``time`` to stop shadowing builtins
        hour = minute = sec = usec = 0
        tzone = None

        if len(rest) >= 2 and re.match(r'\d', rest[0]):
            hour = int(rest[:2])
            rest = rest[2:]
            if len(rest) >= 2 and rest[0] in (",", "."):
                # fractional hours: convert to minutes and seconds
                hour_fraction = "."
                rest = rest[1:]
                while rest and re.match(r'\d', rest[0]):
                    hour_fraction += rest[0]
                    rest = rest[1:]
                total_secs = int(float(hour_fraction) * 3600)
                minute, sec = divmod(total_secs, 60)

        if len(rest) >= 2 and re.match(r'\d', rest[0]):
            minute = int(rest[:2])
            rest = rest[2:]
            if len(rest) >= 2 and rest[0] in (",", "."):
                # fractional minutes: convert to seconds
                min_fraction = "."
                rest = rest[1:]
                while rest and re.match(r'\d', rest[0]):
                    min_fraction += rest[0]
                    rest = rest[1:]
                sec = int(float(min_fraction) * 60)

        if len(rest) >= 2 and re.match(r'\d', rest[0]):
            sec = int(rest[:2])
            rest = rest[2:]
            if len(rest) >= 2 and rest[0] in (",", "."):
                # fractional seconds: convert to microseconds
                sec_fraction = "."
                rest = rest[1:]
                while rest and re.match(r'\d', rest[0]):
                    sec_fraction += rest[0]
                    rest = rest[1:]
                usec = int(float(sec_fraction) * 1000000)

        if rest:
            # whatever remains must be a timezone designator
            tzone = GeneralizedTimeZone(rest)

        return datetime.datetime(year, month, day, hour, minute, sec,
                                 usec, tzone)
    except ValueError:
        return None
def ipa_generate_password(entropy_bits=256, uppercase=1, lowercase=1, digits=1,
                          special=1, min_len=0):
    """
    Generate a random token with at least `entropy_bits` bits of entropy
    that satisfies the given character-class constraints.

    :param entropy_bits:
        Minimal number of entropy bits an attacker has to guess:
        128 bits is secure; 256 bits is secure enough even against
        quantum computers.
    :param uppercase,lowercase,digits,special:
        Integer values give the minimal number of characters from the
        class; None forbids the class entirely.
    :param min_len: minimal total length of the token.

    Example: uppercase=3, lowercase=3, digits=0, special=None yields at
    least 3 upper and 3 lower case ASCII chars, may contain digits, and
    no special chars.
    """
    special_chars = '!$%&()*+,-./:;<>?@[]^_{|}~'
    class_chars = {
        'uppercase': string.ascii_uppercase,
        'lowercase': string.ascii_lowercase,
        'digits': string.digits,
        'special': special_chars,
    }
    requested = {
        'uppercase': uppercase,
        'lowercase': lowercase,
        'digits': digits,
        'special': special,
    }
    # padding pool: characters from every class that is not forbidden
    pool = ''.join(
        chars for name, chars in class_chars.items()
        if requested[name] is not None
    )
    pool_bits = math.log(len(pool), 2)

    rng = random.SystemRandom()
    remaining_bits = entropy_bits
    token = []

    # Generate the required characters per class.  The class order is
    # fixed to comply with the check in NSS function sftk_newPinCheck()
    # in nss/lib/softoken/fipstokn.c.
    for name in ('digits', 'uppercase', 'lowercase', 'special'):
        count = requested[name]
        if count is None:
            continue
        chars = class_chars[name]
        bits_per_char = math.log(len(chars), 2)
        for _ in range(count):
            token.append(rng.choice(chars))
            remaining_bits -= bits_per_char

    # Pad from the full pool while the required classes did not provide
    # enough entropy or the minimal length is not yet reached.
    while remaining_bits > 0 or len(token) < min_len:
        token.append(rng.choice(pool))
        remaining_bits -= pool_bits

    return u''.join(token)
def user_input(prompt, default=None, allow_empty=True):
    """Prompt interactively for a value, with optional default.

    The type of *default* selects the dialog: None (free text), str,
    bool (yes/no) or int.  EOF on stdin yields the default (or raises
    RuntimeError when no default exists and empty input is forbidden).
    Returns None for unsupported default types.
    """
    if default is None:
        while True:
            try:
                answer = input("%s: " % prompt).strip()
            except EOFError:
                if allow_empty:
                    return ''
                raise RuntimeError("Failed to get user input")
            if allow_empty or answer:
                return answer

    if isinstance(default, str):
        while True:
            try:
                answer = input("%s [%s]: " % (prompt, default))
            except EOFError:
                return default
            # empty answer accepts the default unless both empty answers
            # and an empty default are forbidden
            if not answer and (allow_empty or default):
                return default
            if answer.strip():
                return answer.strip()

    # bool must be tested before int: isinstance(True, int) is True
    if isinstance(default, bool):
        default_label = "yes" if default else "no"
        while True:
            try:
                answer = input("%s [%s]: " % (prompt, default_label))
            except EOFError:
                return default
            answer = answer.strip()
            if not answer:
                return default
            if answer.lower()[0] == "y":
                return True
            if answer.lower()[0] == "n":
                return False

    if isinstance(default, int):
        while True:
            try:
                answer = input("%s [%s]: " % (prompt, default)).strip()
            except EOFError:
                return default
            if not answer:
                return default
            try:
                return int(answer)
            except ValueError:
                # not a number; ask again
                continue

    return None
def host_port_open(host, port, socket_type=socket.SOCK_STREAM,
                   socket_timeout=None, log_errors=False,
                   log_level=logging.DEBUG):
    """
    Check whether *port* is reachable on *host*.

    host: either hostname or IP address; if a hostname is provided, the
    port MUST be open on ALL resolved IPs.

    Returns True if the port is open, False otherwise.
    """
    all_open = True
    # every resolved address must accept the connection
    for family, socktype, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, socket.AF_UNSPEC, socket_type):
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            if socket_timeout is not None:
                sock.settimeout(socket_timeout)
            sock.connect(sockaddr)
            if socket_type == socket.SOCK_DGRAM:
                # UDP connect() alone does not probe the peer; force a
                # round trip so a closed port raises
                sock.send(b'')
                sock.recv(512)
        except socket.error:
            all_open = False
            if log_errors:
                msg = ('Failed to connect to port %(port)s %(proto)s on '
                       '%(addr)s' % dict(port=port,
                                         proto=PROTOCOL_NAMES[socket_type],
                                         addr=sockaddr[0]))
                logger.log(log_level, msg)
        finally:
            if sock is not None:
                sock.close()
    return all_open
def check_port_bindable(port, socket_type=socket.SOCK_STREAM):
    """Check if a port is free and not bound by any other application.

    :param port: port number
    :param socket_type: type (SOCK_STREAM for TCP, SOCK_DGRAM for UDP)

    Returns True if the port is free, False otherwise.
    """
    try:
        proto = {socket.SOCK_STREAM: 'TCP',
                 socket.SOCK_DGRAM: 'UDP'}[socket_type]
    except KeyError:
        raise ValueError(socket_type)

    # Detect dual stack or IPv4 single stack
    try:
        sock = socket.socket(socket.AF_INET6, socket_type)
        anyaddr = '::'
        logger.debug(
            "check_port_bindable: Checking IPv4/IPv6 dual stack and %s",
            proto
        )
    except socket.error:
        sock = socket.socket(socket.AF_INET, socket_type)
        anyaddr = ''
        logger.debug("check_port_bindable: Checking IPv4 only and %s", proto)

    # Attempt to bind
    try:
        if socket_type == socket.SOCK_STREAM:
            # reuse TCP sockets in TIME_WAIT state
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        sock.bind((anyaddr, port))
    except socket.error as e:
        logger.debug(
            "check_port_bindable: failed to bind to port %i/%s: %s",
            port, proto, e
        )
        return False
    else:
        logger.debug(
            "check_port_bindable: bind success: %i/%s", port, proto
        )
        return True
    finally:
        sock.close()
def config_replace_variables(filepath, replacevars=dict(), appendvars=dict(),
                             removevars=None):
    """
    Take a key=value based configuration file, and write new version
    with certain values replaced, appended, or removed.

    All (key,value) pairs from replacevars and appendvars that were not found
    in the configuration file, will be added there.
    All entries in set removevars are removed.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    It is responsibility of a caller to back up file.

    returns dictionary of affected keys and their previous values

    One have to run restore_context(filepath) afterwards or
    security context of the file will not be correct after modification
    """
    # Matches one "KEY = VALUE  # trailing comment" line; named groups
    # 'option' and 'value' carry the pieces we rewrite.
    pattern = re.compile(r'''
(^
\s*
(?P<option> [^\#;]+?)
(\s*=\s*)
(?P<value> .+?)?
(\s*((\#|;).*)?)?
$)''', re.VERBOSE)
    # preserve owner/mode of the original file on the replacement
    orig_stat = os.stat(filepath)
    old_values = dict()
    temp_filename = None
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as new_config:
        temp_filename = new_config.name
        with open(filepath, 'r') as f:
            for line in f:
                new_line = line
                m = pattern.match(line)
                if m:
                    option, value = m.group('option', 'value')
                    if option is not None:
                        if replacevars and option in replacevars:
                            # replace value completely
                            new_line = u"%s=%s\n" % (option, replacevars[option])
                            old_values[option] = value
                        if appendvars and option in appendvars:
                            # append new value unless it is already existing in the original one
                            if not value:
                                new_line = u"%s=%s\n" % (option, appendvars[option])
                            elif value.find(appendvars[option]) == -1:
                                new_line = u"%s=%s %s\n" % (option, value, appendvars[option])
                            old_values[option] = value
                        if removevars and option in removevars:
                            # record and drop the line entirely
                            old_values[option] = value
                            new_line = None
                if new_line is not None:
                    new_config.write(new_line)
        # Now add all options from replacevars and appendvars that were not found in the file
        new_vars = replacevars.copy()
        new_vars.update(appendvars)
        newvars_view = set(new_vars.keys()) - set(old_values.keys())
        append_view = (set(appendvars.keys()) - newvars_view)
        for item in newvars_view:
            new_config.write("%s=%s\n" % (item,new_vars[item]))
        for item in append_view:
            new_config.write("%s=%s\n" % (item,appendvars[item]))
        new_config.flush()
        # Make sure the resulting file is readable by others before installing it
        os.fchmod(new_config.fileno(), orig_stat.st_mode)
        os.fchown(new_config.fileno(), orig_stat.st_uid, orig_stat.st_gid)
    # At this point new_config is closed but not removed due to 'delete=False' above
    # Now, install the temporary file as configuration and ensure old version is available as .orig
    # While .orig file is not used during uninstall, it is left there for administrator.
    install_file(temp_filename, filepath)
    return old_values
def inifile_replace_variables(filepath, section, replacevars=dict(), appendvars=dict()):
    """
    Take a section-structured key=value based configuration file, and write new version
    with certain values replaced or appended within the section

    All (key,value) pairs from replacevars and appendvars that were not found
    in the configuration file, will be added there.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    It is responsibility of a caller to back up file.

    returns dictionary of affected keys and their previous values

    One have to run restore_context(filepath) afterwards or
    security context of the file will not be correct after modification
    """
    # Matches either a "[section]" header (group 'section') or a
    # "KEY = VALUE  # comment" line (groups 'option' and 'value').
    pattern = re.compile(r'''
(^
\[
(?P<section> .+) \]
(\s+((\#|;).*)?)?
$)|(^
\s*
(?P<option> [^\#;]+?)
(\s*=\s*)
(?P<value> .+?)?
(\s*((\#|;).*)?)?
$)''', re.VERBOSE)
    def add_options(config, replacevars, appendvars, oldvars):
        # add all options from replacevars and appendvars that were not found in the file
        new_vars = replacevars.copy()
        new_vars.update(appendvars)
        newvars_view = set(new_vars.keys()) - set(oldvars.keys())
        append_view = (set(appendvars.keys()) - newvars_view)
        for item in newvars_view:
            config.write("%s=%s\n" % (item,new_vars[item]))
        for item in append_view:
            config.write("%s=%s\n" % (item,appendvars[item]))
    # preserve owner/mode of the original file on the replacement
    orig_stat = os.stat(filepath)
    old_values = dict()
    temp_filename = None
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as new_config:
        temp_filename = new_config.name
        with open(filepath, 'r') as f:
            in_section = False   # currently inside the target section
            finished = False     # remaining options already flushed
            line_idx = 1
            for line in f:
                line_idx = line_idx + 1
                new_line = line
                m = pattern.match(line)
                if m:
                    sect, option, value = m.group('section', 'option', 'value')
                    if in_section and sect is not None:
                        # End of the searched section, add remaining options
                        add_options(new_config, replacevars, appendvars, old_values)
                        finished = True
                    if sect is not None:
                        # New section is found, check whether it is the one we are looking for
                        in_section = (str(sect).lower() == str(section).lower())
                    if option is not None and in_section:
                        # Great, this is an option from the section we are loking for
                        if replacevars and option in replacevars:
                            # replace value completely
                            new_line = u"%s=%s\n" % (option, replacevars[option])
                            old_values[option] = value
                        if appendvars and option in appendvars:
                            # append a new value unless it is already existing in the original one
                            if not value:
                                new_line = u"%s=%s\n" % (option, appendvars[option])
                            elif value.find(appendvars[option]) == -1:
                                new_line = u"%s=%s %s\n" % (option, value, appendvars[option])
                            old_values[option] = value
                new_config.write(new_line)
        # We have finished parsing the original file.
        # There are two remaining cases:
        # 1. Section we were looking for was not found, we need to add it.
        if not (in_section or finished):
            new_config.write("[%s]\n" % (section))
        # 2. The section is the last one but some options were not found, add them.
        if in_section or not finished:
            add_options(new_config, replacevars, appendvars, old_values)
        new_config.flush()
        # Make sure the resulting file is readable by others before installing it
        os.fchmod(new_config.fileno(), orig_stat.st_mode)
        os.fchown(new_config.fileno(), orig_stat.st_uid, orig_stat.st_gid)
    # At this point new_config is closed but not removed due to 'delete=False' above
    # Now, install the temporary file as configuration and ensure old version is available as .orig
    # While .orig file is not used during uninstall, it is left there for administrator.
    install_file(temp_filename, filepath)
    return old_values
def backup_config_and_replace_variables(
        fstore, filepath, replacevars=dict(), appendvars=dict()):
    """
    Back up a key=value configuration file via *fstore*, then rewrite it
    with certain values replaced or appended.

    All (key,value) pairs from replacevars and appendvars that were not
    found in the configuration file will be added there.  The file must
    exist before this function is called.  It is the caller's
    responsibility to ensure replacevars and appendvars do not overlap.

    returns dictionary of affected keys and their previous values

    One has to run restore_context(filepath) afterwards or the security
    context of the file will not be correct after modification.
    """
    # keep a pristine copy so uninstall can restore it
    fstore.backup_file(filepath)
    return config_replace_variables(filepath, replacevars, appendvars)
def wait_for_open_ports(host, ports, timeout=0):
    """
    Wait until the specified port(s) on the remote host are open.

    A timeout in seconds may be given to limit the wait; when exceeded,
    socket.timeout is raised.  ``timeout=0`` waits forever.
    """
    timeout = float(timeout)
    if not isinstance(ports, (tuple, list)):
        ports = [ports]
    logger.debug('wait_for_open_ports: %s %s timeout %d', host, ports, timeout)
    deadline = time.time() + timeout
    for port in ports:
        logger.debug('waiting for port: %s', port)
        # log only the first failure per port so the log stays readable
        log_first_error = True
        while not host_port_open(host, port, log_errors=log_first_error):
            log_first_error = False
            if timeout and time.time() > deadline:
                raise socket.timeout("Timeout exceeded")
            time.sleep(1)
        logger.debug('SUCCESS: port: %s', port)
def wait_for_open_socket(socket_name, timeout=0):
    """
    Wait until the specified UNIX socket on the local host accepts
    connections.  A timeout in seconds may be given to limit the wait;
    ``timeout=0`` waits forever.

    :param socket_name: filesystem path of the UNIX socket
    :raises socket.error: when the deadline is exceeded, or immediately
        for any error other than "not there yet"
    """
    timeout = float(timeout)
    deadline = time.time() + timeout
    while True:
        s = socket.socket(socket.AF_UNIX)
        try:
            s.connect(socket_name)
            return
        except socket.error as e:
            # ENOENT: socket file not created yet;
            # ECONNREFUSED: file exists but nobody is listening yet.
            # (Previously hard-coded as the magic numbers 2 and 111.)
            if e.errno in (errno.ENOENT, errno.ECONNREFUSED):
                if timeout and time.time() > deadline:
                    raise
                time.sleep(1)
            else:
                raise
        finally:
            # close the socket on every path; the old code leaked the
            # descriptor whenever connect() failed
            s.close()
def dn_attribute_property(private_name):
    '''
    Create a property for a DN attribute which guarantees the attribute
    is either None or a DN instance.  The setter converts non-None
    values to DN; the getter asserts the invariant.

    The private_name parameter is the class-internal attribute the
    property shadows.  For example:

        base_dn = dn_attribute_property('_base_dn')

    gives the class a base_dn attribute that can only ever be None or a
    DN instance, with the actual value stored in _base_dn.
    '''
    def _set(self, value):
        setattr(self, private_name, DN(value) if value is not None else None)

    def _get(self):
        value = getattr(self, private_name)
        # invariant enforced by the setter
        assert value is None or isinstance(value, DN)
        return value

    return property(_get, _set)
def posixify(string):
    """
    Convert a string to a stricter alpha-numeric representation.

    - Alpha-numeric, underscore, dot and dash characters are accepted
    - Space is converted to underscore
    - Other characters are omitted
    - Leading dashes are stripped

    Note: This mapping is not one-to-one and may map different input to
    the same result.  When using posixify, make sure you do not map two
    different entities to one unintentionally.
    """
    extra_allowed = ('_', '.', '-')
    cleaned = ''.join(
        ch for ch in string.replace(' ', '_')
        if ch.isalnum() or ch in extra_allowed
    )
    # a leading dash is not allowed
    return cleaned.lstrip('-')
@contextmanager
def private_ccache(path=None):
    """Run the body with KRB5CCNAME pointing at a private ccache.

    When *path* is omitted a temporary directory is created for the
    ccache and cleaned up afterwards.  The previous KRB5CCNAME value is
    restored on exit and the ccache file, if created, is removed.
    """
    tmpdir = None
    if path is None:
        tmpdir = tempfile.mkdtemp(prefix='krbcc')
        path = os.path.join(tmpdir, 'ccache')

    saved = os.environ.get('KRB5CCNAME', None)
    os.environ['KRB5CCNAME'] = path
    try:
        yield path
    finally:
        # restore the previous environment exactly
        if saved is None:
            os.environ.pop('KRB5CCNAME', None)
        else:
            os.environ['KRB5CCNAME'] = saved
        if os.path.exists(path):
            os.remove(path)
        if tmpdir is not None:
            try:
                os.rmdir(tmpdir)
            except OSError:
                # best effort; directory may be non-empty
                pass
@contextmanager
def private_krb5_config(realm, server, dir="/run/ipa"):
    """Generate override krb5 config file for a trusted domain DC access

    Provide a context where environment variable KRB5_CONFIG is set
    with the overlay on top of paths.KRB5_CONF. Overlay's file path
    is passed to the context in case it is needed for something else

    :param realm: realm of the trusted AD domain
    :param server: server to override KDC to
    :param dir: path where to create a temporary krb5.conf overlay
    """
    cfg = paths.KRB5_CONF
    tcfg = None
    if server:
        # overlay pins the KDC for this realm to the chosen server
        content = textwrap.dedent(u"""
[realms]
%s = {
kdc = %s
}
""") % (
            realm.upper(),
            server,
        )
        (fd, tcfg) = tempfile.mkstemp(dir=dir, prefix="krb5conf", text=True)
        with io.open(fd, mode='w', encoding='utf-8') as o:
            o.write(content)
        # krb5 accepts a colon-separated profile path; overlay comes first
        cfg = ":".join([tcfg, cfg])
    original_value = os.environ.get('KRB5_CONFIG', None)
    os.environ['KRB5_CONFIG'] = cfg
    try:
        yield tcfg
    except GeneratorExit:
        # context abandoned early; cleanup still runs in finally
        pass
    finally:
        # restore the previous environment and remove the overlay file
        if original_value is not None:
            os.environ['KRB5_CONFIG'] = original_value
        else:
            os.environ.pop('KRB5_CONFIG', None)
        if tcfg is not None and os.path.exists(tcfg):
            os.remove(tcfg)
# Python 2 lacks os.fsdecode; provide a minimal equivalent there and use
# the stdlib implementation on Python 3.
if six.PY2:
    def fsdecode(value):
        """
        Decode argument using the file system encoding, as returned by
        `sys.getfilesystemencoding()`.
        """
        if isinstance(value, bytes):
            return value.decode(sys.getfilesystemencoding())
        elif isinstance(value, str):
            return value
        else:
            raise TypeError("expect {0} or {1}, not {2}".format(
                bytes.__name__,
                str.__name__,
                type(value).__name__))
else:
    fsdecode = os.fsdecode
def unescape_seq(seq, *args):
    """
    Unescape (remove the leading '\\' from) all occurrences of *seq* in
    the input strings.

    :param seq: sequence to unescape
    :param args: input strings to process
    :returns: tuple of strings with unescaped sequences
    """
    pattern = re.compile(r'\\{}'.format(seq))
    return tuple(pattern.sub(seq, text) for text in args)
def escape_seq(seq, *args):
    """
    Escape (prepend '\\' to) all occurrences of *seq* in the input
    strings.

    :param seq: sequence to escape
    :param args: input strings to process
    :returns: tuple of strings with escaped sequences
    """
    escaped = u'\\{}'.format(seq)
    return tuple(text.replace(seq, escaped) for text in args)
def decode_json(data):
    """Decode JSON bytes to str, sniffing the encoding per RFC 4627.

    Exists only to support Python 3.5; Python 3.6+ json.load accepts
    bytes directly, so this can be dropped once 3.5 support ends.
    Detection logic taken from:
    https://bugs.python.org/file43513/json_detect_encoding_3.patch

    :param data: JSON as bytes (or already-decoded str)
    :return: JSON string
    """
    def sniff(raw):
        # explicit BOM wins
        if raw.startswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
            return 'utf-32'
        if raw.startswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
            return 'utf-16'
        if raw.startswith(codecs.BOM_UTF8):
            return 'utf-8-sig'
        # otherwise infer from the position of NUL bytes in the first
        # (ASCII) character of the document
        if len(raw) >= 4:
            if not raw[0]:
                # 00 00 -- -- : utf-32-be;  00 XX -- -- : utf-16-be
                return 'utf-16-be' if raw[1] else 'utf-32-be'
            if not raw[1]:
                # XX 00 00 00 : utf-32-le;  XX 00 XX XX : utf-16-le
                return 'utf-16-le' if raw[2] or raw[3] else 'utf-32-le'
        elif len(raw) == 2:
            if not raw[0]:
                # 00 XX : utf-16-be
                return 'utf-16-be'
            if not raw[1]:
                # XX 00 : utf-16-le
                return 'utf-16-le'
        return 'utf-8'

    if isinstance(data, str):
        return data
    return data.decode(sniff(data), 'surrogatepass')
class APIVersion(tuple):
    """API version parser and handler

    Parses ipapython.version.API_VERSION and plugin versions of the form
    "MAJOR" or "MAJOR.MINOR" into a comparable (major, minor) tuple.
    """
    __slots__ = ()

    def __new__(cls, version):
        head, sep, tail = version.partition(u'.')
        # a bare "MAJOR" implies minor 0
        parts = (int(head), int(tail) if sep else 0)
        return tuple.__new__(cls, parts)

    def __str__(self):
        return '{}.{}'.format(*self)

    def __repr__(self):
        return "<APIVersion('{}.{}')>".format(*self)

    def __getnewargs__(self):
        # makes copy/pickle round-trip through __new__
        return (str(self),)

    @property
    def major(self):
        return self[0]

    @property
    def minor(self):
        return self[1]
def remove_keytab(keytab_path):
    """
    Remove a Kerberos keytab, warning (not raising) if removal fails.

    :param keytab_path: path to the keytab file
    """
    try:
        logger.debug("Removing service keytab: %s", keytab_path)
        os.remove(keytab_path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # already gone -- nothing to do
            return
        logger.warning("Failed to remove Kerberos keytab '%s': %s",
                       keytab_path, e)
        logger.warning("You may have to remove it manually")
def remove_ccache(ccache_path=None, run_as=None):
    """
    Remove a Kerberos credential cache; essentially a wrapper around
    kdestroy.  Failures are logged as warnings, not raised.

    :param ccache_path: path to the ccache file (default cache if None)
    :param run_as: run kdestroy as this user
    """
    logger.debug("Removing service credentials cache")
    cmd = [paths.KDESTROY]
    if ccache_path is not None:
        logger.debug("Ccache path: '%s'", ccache_path)
        cmd += ['-c', ccache_path]
    try:
        run(cmd, runas=run_as, env={})
    except CalledProcessError as e:
        logger.warning(
            "Failed to clear Kerberos credentials cache: %s", e)
def remove_file(filename, only_if_empty=False):
    """Remove a file and log any exceptions raised.

    :param only_if_empty: only remove the file if empty. Default False.
    """
    if only_if_empty and os.path.exists(filename):
        if os.stat(filename).st_size > 0:
            logger.debug('%s is not empty.', filename)
            return
    try:
        os.unlink(filename)
    except Exception as e:
        # a missing file is fine; log everything else
        if getattr(e, 'errno', None) != errno.ENOENT:
            logger.error('Error removing %s: %s', filename, str(e))
def remove_directory(dir):
    """Remove an empty directory, logging (not raising) unexpected failures."""
    try:
        os.rmdir(dir)
    except OSError as e:
        # missing or non-empty directories are tolerated silently
        if e.errno in (errno.ENOENT, errno.ENOTEMPTY):
            return
        logger.error("Failed to remove directory %s", dir)
def rmtree(path):
    """
    Recursively delete *path* if it exists, logging (not raising) any
    failure.
    """
    try:
        if os.path.exists(path):
            shutil.rmtree(path)
    except Exception as e:
        logger.error('Error removing %s: %s', path, str(e))
def datetime_from_utctimestamp(t, units=1):
    """
    Convert a timestamp or a time.struct_time to a UTC-timezone
    datetime.datetime with second resolution.

    The conversion is safe wrt the year-2038 problem.

    :param t: int or float timestamp in (milli)seconds since UNIX epoch,
        or time.struct_time
    :param units: normalizing factor for the timestamp
        (1 for seconds, 1000 for milliseconds); defaults to 1
    :return: datetime.datetime object in UTC timezone
    """
    if isinstance(t, time.struct_time):
        # NOTE(review): time.mktime interprets struct_time as *local*
        # time -- confirm callers pass localtime tuples here, not gmtime
        seconds = int(time.mktime(t))
    elif isinstance(t, (float, int)):
        seconds = int(t)
    else:
        raise TypeError(t)
    base = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
    return base + datetime.timedelta(seconds=seconds // units)
class Sleeper:
    """Helper for a time.sleep() loop with a deadline.

    Calling a sleeper sleeps *sleep* seconds, shortened near the
    *timeout* deadline so it never oversleeps it.  Before the deadline a
    sleeper is truthy and calls return True.  After the deadline a call
    raises *raises* if given, otherwise returns False.

        sleep = Sleeper(sleep=1, timeout=60, raises=TimeoutError)
        while True:
            do_something()
            sleep()

        sleep = Sleeper(sleep=0.5, timeout=60)
        while True:
            do_something()
            if not sleep():
                log.info("timeout")
                break
    """
    multiplier = 2

    def __init__(self, *, sleep, timeout, raises=None):
        if timeout <= 0:
            raise ValueError(f"invalid timeout {timeout}")
        if sleep < 0.01:
            raise ValueError(f"sleep duration {sleep} is too short.")
        self.timeout = timeout
        self.sleep = sleep
        self.raises = raises
        self.deadline = time.monotonic() + self.timeout

    def __bool__(self):
        return time.monotonic() < self.deadline

    def __call__(self):
        remaining = self.deadline - time.monotonic()
        if remaining <= 0:
            if self.raises is None:
                return False
            raise self.raises
        # never sleep past the deadline
        time.sleep(min(remaining, self.sleep))
        return True
def get_config_debug(context):
    """Return whether the system IPA config enables debug for *context*.

    A simplified version of ipalib/config used to decide if the API
    should be bootstrapped in debug mode.  Daemons set up logging before
    bootstrapping the API, a chicken-and-egg problem, so this plucks
    just the 'debug' value out of /etc/ipa/<context>.conf (in_tree is
    not supported for the same reason).

    Not generalized further because (1) only 'debug' is needed and
    (2) config values would all be strings anyway.
    """
    section = 'global'  # duplicated from ipalib.constants
    config_file = os.path.join('/', 'etc', 'ipa', '%s.conf' % context)
    parser = RawConfigParser()
    try:
        parser.read(config_file)
    except ParsingError:
        # unreadable config: fall back to non-debug
        return False
    if not parser.has_section(section):
        return False
    if not parser.has_option(section, 'debug'):
        return False
    return parser.get(section, 'debug').lower() == 'true'
| 60,554
|
Python
|
.py
| 1,485
| 31.182492
| 99
| 0.599731
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,624
|
directivesetter.py
|
freeipa_freeipa/ipapython/directivesetter.py
|
#
# Copyright (C) 2018 FreeIPA Contributors see COPYING for license
#
import io
import os
import re
import stat
import tempfile
from ipapython.ipautil import unescape_seq, escape_seq
# Unique sentinel so DirectiveSetter.set() can distinguish "argument
# omitted" from an explicitly passed None/False value.
_SENTINEL = object()
class DirectiveSetter:
    """Safe directive setter

    Context manager that reads a configuration file on entry, lets the
    caller modify directives in memory, and atomically replaces the file
    on a clean exit (preserving mode and ownership).

    with DirectiveSetter('/path/to/conf') as ds:
        ds.set(key, value)
    """
    def __init__(self, filename, quotes=True, separator=' ', comment='#'):
        self.filename = os.path.abspath(filename)
        self.quotes = quotes
        self.separator = separator
        self.comment = comment
        self.lines = None
        self.stat = None

    def __enter__(self):
        # snapshot content and ownership/mode for the later rewrite
        with io.open(self.filename) as f:
            self.stat = os.fstat(f.fileno())
            self.lines = list(f)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # something went wrong, reset
            self.lines = None
            self.stat = None
            return

        directory, prefix = os.path.split(self.filename)
        # use tempfile in same directory to have atomic rename
        fd, name = tempfile.mkstemp(prefix=prefix, dir=directory, text=True)
        with io.open(fd, mode='w', closefd=True) as f:
            for line in self.lines:
                if not isinstance(line, str):
                    line = line.decode('utf-8')
                f.write(line)
            self.lines = None
            # carry over the original file's mode and ownership
            os.fchmod(f.fileno(), stat.S_IMODE(self.stat.st_mode))
            os.fchown(f.fileno(), self.stat.st_uid, self.stat.st_gid)
            self.stat = None
            # flush and sync tempfile inode
            f.flush()
            os.fsync(f.fileno())
        # rename file and sync directory inode
        os.rename(name, self.filename)
        dirfd = os.open(directory, os.O_RDONLY | os.O_DIRECTORY)
        try:
            os.fsync(dirfd)
        finally:
            os.close(dirfd)

    def set(self, directive, value, quotes=_SENTINEL, separator=_SENTINEL,
            comment=_SENTINEL):
        """Set a single directive
        """
        # _SENTINEL defaults fall back to the instance-wide settings
        if quotes is _SENTINEL:
            quotes = self.quotes
        if separator is _SENTINEL:
            separator = self.separator
        if comment is _SENTINEL:
            comment = self.comment
        # materialize lines
        # set_directive_lines() modify item, shrink or enlage line count
        self.lines = list(set_directive_lines(
            quotes, separator, directive, value, self.lines, comment
        ))

    def setitems(self, items):
        """Set multiple directives from a dict or list with key/value pairs
        """
        if isinstance(items, dict):
            # dict-like, use sorted for stable order
            items = sorted(items.items())
        for k, v in items:
            self.set(k, v)
def set_directive(filename, directive, value, quotes=True, separator=' ',
                  comment='#'):
    """Set a name/value pair directive in a configuration file.

    A value of None means to drop the directive.  Does not tolerate (or
    put) spaces around the separator.

    :param filename: input filename
    :param directive: directive name
    :param value: value of the directive
    :param quotes: whether to quote `value` in double quotes. If true, then
        any existing double quotes are first escaped to avoid
        unparseable directives.
    :param separator: character serving as separator between directive and
        value. Correct value required even when dropping a directive.
    :param comment: comment character for the file to keep new values near
        their commented-out counterpart
    """
    file_stat = os.stat(filename)
    with open(filename, 'r') as src:
        original = list(src)  # read the whole file
    updated = list(set_directive_lines(
        quotes, separator, directive, value, original, comment
    ))
    with open(filename, 'w') as dst:
        dst.writelines(updated)
    # reset ownership
    os.chown(filename, file_stat.st_uid, file_stat.st_gid)
def set_directive_lines(quotes, separator, k, v, lines, comment):
    """Set a name/value pair in a configuration (iterable of lines).

    Replaces the value of the key if found, otherwise appends it at the
    end; a commented-out occurrence pulls the new value right after it.
    If value is ``None``, the key is removed instead.

    Takes an iterable of lines (with trailing newline).
    Yields lines (with trailing newline).
    """
    replacement = ""
    if v is not None:
        rendered = quote_directive_value(v, '"') if quotes else v
        replacement = ''.join([k, separator, rendered, '\n'])

    # Special case: consider space as "whitespace" so tabs are allowed
    if separator == ' ':
        separator = '[ \t]+'

    directive_re = re.compile(r'\s*{}\s*{}'.format(re.escape(k), separator))
    commented_re = re.compile(r'\s*{}\s*{}\s*{}'.format(comment,
                                                       re.escape(k),
                                                       separator))
    matched = False
    insert_next = False  # saw a commented-out counterpart on the previous line
    for line in lines:
        if directive_re.match(line):
            matched = True
            insert_next = False
            if v is not None:
                yield replacement
            # drop the original line in every case
            continue
        if insert_next:
            matched = True
            insert_next = False
            yield replacement
            yield line
            continue
        if commented_re.match(line):
            insert_next = True
        yield line

    if not matched and v is not None:
        yield replacement
def get_directive(filename, directive, separator=' '):
    """
    A rather inefficient way to get a configuration directive.

    :param filename: input filename
    :param directive: directive name
    :param separator: separator between directive and value
    :returns: The (unquoted) value if the directive was found, None otherwise
    """
    # Special case: consider space as "whitespace" so tabs are allowed
    if separator == ' ':
        separator = '[ \t]+'
    if directive is None:
        return None
    value_re = re.compile(r'{}\s*{}\s*(.*)'.format(directive, separator))
    with open(filename, "r") as fd:
        for line in fd:
            if not line.lstrip().startswith(directive):
                continue
            match = value_re.match(line.strip())
            if not match:
                continue
            result = unquote_directive_value(match.group(1).strip(), '"')
            return result.strip(' ')
    return None
def quote_directive_value(value, quote_char):
    """Quote a directive value

    :param value: string to quote
    :param quote_char: character used for quoting. All prior occurrences
        are escaped before quoting to avoid an unparseable value.
    :returns: processed value
    """
    if value.startswith(quote_char) and value.endswith(quote_char):
        # already quoted; leave untouched
        return value
    escaped = "".join(escape_seq(quote_char, value))
    return "{0}{1}{0}".format(quote_char, escaped)
def unquote_directive_value(value, quote_char):
    """Unquote a directive value

    :param value: string to unquote
    :param quote_char: character to strip. All escaped occurrences of
        `quote_char` are unescaped during processing.
    :returns: processed value
    """
    unescaped = "".join(unescape_seq(quote_char, value))
    if unescaped.startswith(quote_char) and unescaped.endswith(quote_char):
        return unescaped[1:-1]
    return unescaped
| 7,673
|
Python
|
.py
| 196
| 30.204082
| 77
| 0.609304
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,625
|
certdb.py
|
freeipa_freeipa/ipapython/certdb.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import collections
import datetime
import logging
import os
import io
import pwd
import grp
import re
import shutil
import stat
import tempfile
from ctypes.util import find_library
from tempfile import NamedTemporaryFile
import cryptography.x509
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipapython.dn import DN
from ipapython.kerberos import Principal
from ipapython import ipautil
from ipalib import x509 # pylint: disable=ipa-forbidden-import
logger = logging.getLogger(__name__)
CA_NICKNAME_FMT = "%s IPA CA"
NSS_DBM_FILES = ("cert8.db", "key3.db", "secmod.db")
NSS_SQL_FILES = ("cert9.db", "key4.db", "pkcs11.txt")
NSS_FILES = NSS_DBM_FILES + NSS_SQL_FILES + ("pwdfile.txt",)
TrustFlags = collections.namedtuple('TrustFlags', 'has_key trusted ca usages')
EMPTY_TRUST_FLAGS = TrustFlags(False, None, None, None)
IPA_CA_TRUST_FLAGS = TrustFlags(
False, True, True, frozenset({
x509.EKU_SERVER_AUTH,
x509.EKU_CLIENT_AUTH,
x509.EKU_CODE_SIGNING,
x509.EKU_EMAIL_PROTECTION,
x509.EKU_PKINIT_CLIENT_AUTH,
x509.EKU_PKINIT_KDC,
}),
)
EXTERNAL_CA_TRUST_FLAGS = TrustFlags(
False, True, True, frozenset({x509.EKU_SERVER_AUTH}),
)
TRUSTED_PEER_TRUST_FLAGS = TrustFlags(
False, True, False, frozenset({x509.EKU_SERVER_AUTH}),
)
def nss_supports_dbm():
    """Return True when the legacy DBM backend library (nssdbm3) is found."""
    lib = find_library("nssdbm3")
    return lib is not None and lib != ""
def get_ca_nickname(realm, format=CA_NICKNAME_FMT):
    """Return the NSS nickname used for the IPA CA certificate of *realm*."""
    nickname = format % realm
    return nickname
def find_cert_from_txt(cert, start=0):
    """Extract the FIRST PEM certificate found in a blob of text.

    The blob may contain arbitrary leading/trailing text around the PEM
    payload.

    :param cert: text (str) possibly containing a PEM certificate
    :param start: offset at which to begin searching
    :returns: a tuple (IPACertificate, last position in cert)
    :raises RuntimeError: when no certificate markers are found
    """
    begin_marker = '-----BEGIN CERTIFICATE-----'
    end_marker = '-----END CERTIFICATE-----'
    begin = cert.find(begin_marker, start)
    end = cert.find(end_marker, begin)
    if begin < 0 or end < 0:
        raise RuntimeError("Unable to find certificate")
    # include the END marker itself in the extracted span
    end += len(end_marker)
    parsed = x509.load_pem_x509_certificate(cert[begin:end].encode('utf-8'))
    return (parsed, end)
def parse_trust_flags(trust_flags):
    """
    Convert certutil trust flags to TrustFlags object.

    :param trust_flags: certutil trust string with three comma-separated
        columns (SSL, e-mail, code signing), e.g. "CT,C,C"
    :returns: TrustFlags namedtuple
    :raises ValueError: for contradictory flag combinations
    """
    has_key = 'u' in trust_flags
    if 'p' in trust_flags:
        if 'C' in trust_flags or 'P' in trust_flags or 'T' in trust_flags:
            raise ValueError("cannot be both trusted and not trusted")
        # Fix: return a TrustFlags tuple like every other branch; the
        # previous bare 3-tuple (False, None, None) dropped has_key and
        # broke callers that unpack the four TrustFlags fields.
        return TrustFlags(has_key, False, None, frozenset())
    elif 'C' in trust_flags or 'T' in trust_flags:
        if 'P' in trust_flags:
            raise ValueError("cannot be both CA and not CA")
        ca = True
    elif 'P' in trust_flags:
        ca = False
    else:
        # no explicit trust recorded
        return TrustFlags(has_key, None, None, frozenset())
    trust_flags = trust_flags.split(',')
    ext_key_usage = set()
    # column order is fixed by certutil: SSL, e-mail, code signing
    for i, kp in enumerate((x509.EKU_SERVER_AUTH,
                            x509.EKU_EMAIL_PROTECTION,
                            x509.EKU_CODE_SIGNING)):
        if 'C' in trust_flags[i] or 'P' in trust_flags[i]:
            ext_key_usage.add(kp)
    # 'T' in the SSL column means trusted to issue client certs
    if 'T' in trust_flags[0]:
        ext_key_usage.add(x509.EKU_CLIENT_AUTH)
    return TrustFlags(has_key, True, ca, frozenset(ext_key_usage))
def unparse_trust_flags(trust_flags):
    """
    Convert TrustFlags object to certutil trust flags.

    Inverse of parse_trust_flags(): produces the three comma-separated
    columns (SSL, e-mail, code signing) understood by ``certutil -t``.
    """
    has_key, trusted, ca, ext_key_usage = trust_flags
    # 'u' marks the presence of a private key in every column
    suffix = 'u' if has_key else ''
    if trusted is False:
        # explicitly distrusted peer
        column = 'p' + suffix
        return ','.join((column, column, column))
    if trusted is None or ca is None:
        # no explicit trust recorded
        return ','.join((suffix, suffix, suffix))
    if ext_key_usage is None:
        # trusted for every usage
        if ca:
            return ','.join(('CT' + suffix, 'C' + suffix, 'C' + suffix))
        return ','.join(('P' + suffix,) * 3)
    # trusted only for the explicitly listed extended key usages
    columns = ['', '', '']
    usage_order = (
        x509.EKU_SERVER_AUTH,
        x509.EKU_EMAIL_PROTECTION,
        x509.EKU_CODE_SIGNING,
    )
    for idx, usage in enumerate(usage_order):
        if usage in ext_key_usage:
            columns[idx] += 'C' if ca else 'P'
    if ca and x509.EKU_CLIENT_AUTH in ext_key_usage:
        columns[0] += 'T'
    if has_key:
        columns = [col + 'u' for col in columns]
    return ','.join(columns)
def verify_kdc_cert_validity(kdc_cert, ca_certs, realm):
    """
    Verifies the validity of a kdc_cert, ensuring it is trusted by
    the ca_certs chain, has a PKINIT_KDC extended key usage support,
    and verify it applies to the given realm.

    :param kdc_cert: the KDC certificate to verify
    :param ca_certs: list of CA certificates forming the trust chain
    :param realm: Kerberos realm the KDC cert must be issued for
    :raises ValueError: if any of the three checks fails
    """
    with NamedTemporaryFile() as kdc_file, NamedTemporaryFile() as ca_file:
        kdc_file.write(kdc_cert.public_bytes(x509.Encoding.PEM))
        kdc_file.flush()
        x509.write_certificate_list(ca_certs, ca_file.name)
        ca_file.flush()
        # 1) chain validation delegated to `openssl verify`
        try:
            ipautil.run(
                [paths.OPENSSL, 'verify', '-CAfile', ca_file.name,
                 kdc_file.name],
                capture_output=True)
        except ipautil.CalledProcessError as e:
            raise ValueError(e.output)
        # 2) the EKU extension must include the PKINIT KDC OID
        try:
            eku = kdc_cert.extensions.get_extension_for_class(
                cryptography.x509.ExtendedKeyUsage)
            list(eku.value).index(
                cryptography.x509.ObjectIdentifier(x509.EKU_PKINIT_KDC))
        except (cryptography.x509.ExtensionNotFound,
                ValueError):
            raise ValueError("invalid for a KDC")
        # 3) the SAN must carry the krbtgt/<realm>@<realm> principal
        principal = str(Principal(['krbtgt', realm], realm))
        gns = x509.process_othernames(kdc_cert.san_general_names)
        for gn in gns:
            if isinstance(gn, x509.KRB5PrincipalName) and gn.name == principal:
                break
        else:
            raise ValueError("invalid for realm %s" % realm)
CERT_RE = re.compile(
r'^(?P<nick>.+?)\s+(?P<flags>\w*,\w*,\w*)\s*$'
)
KEY_RE = re.compile(
r'^<\s*(?P<slot>\d+)>'
r'\s+(?P<algo>\w+)'
r'\s+(?P<keyid>[0-9a-z]+)'
r'\s+(?P<nick>.*?)\s*$'
)
class Pkcs12ImportIncorrectPasswordError(RuntimeError):
    """ Raised when import_pkcs12 fails because of a wrong password
    (pk12util exit code 17 or 18).
    """
class Pkcs12ImportOpenError(RuntimeError):
    """ Raised when import_pkcs12 fails trying to open the file
    (pk12util exit code 10).
    """
class Pkcs12ImportUnknownError(RuntimeError):
    """ Raised when import_pkcs12 fails because of an unknown error
    (any other non-zero pk12util exit code).
    """
class NSSDatabase:
"""A general-purpose wrapper around a NSS cert database
For permanent NSS databases, pass the cert DB directory to __init__
For temporary databases, do not pass nssdir, and call close() when done
to remove the DB. Alternatively, a NSSDatabase can be used as a
context manager that calls close() automatically.
"""
# Traditionally, we used CertDB for our NSS DB operations, but that class
# got too tied to IPA server details, killing reusability.
# BaseCertDB is a class that knows nothing about IPA.
# Generic NSS DB code should be moved here.
    def __init__(self, nssdir=None, dbtype='auto', token=None, pwd_file=None):
        """Initialize the NSS DB wrapper.

        :param nssdir: directory of a permanent NSS DB; when None a
            temporary directory is created and removed by close()
        :param dbtype: 'dbm', 'sql', or 'auto' (detect from files on disk)
        :param token: optional PKCS#11 token name passed to certutil -h
        :param pwd_file: path to the DB password file; defaults to
            pwdfile.txt inside the DB directory
        :raises ValueError: when an existing DBM-format DB is opened but
            NSS was built without the legacy nssdbm3 backend
        """
        if nssdir is not None:
            self.secdir = nssdir
            self._is_temporary = False
            if dbtype == "auto":
                dbtype = self._detect_dbtype()
            # refuse to touch a DBM DB that this NSS build cannot read
            if dbtype == "dbm" and not nss_supports_dbm():
                raise ValueError(
                    f"NSS is built without support of the legacy database(DBM) "
                    f"directory '{nssdir}'",
                )
        if nssdir is None:
            self.secdir = tempfile.mkdtemp()
            self._is_temporary = True
        if pwd_file is None:
            self.pwd_file = os.path.join(self.secdir, 'pwdfile.txt')
        else:
            self.pwd_file = pwd_file
        self.dbtype = None
        self.certdb = self.keydb = self.secmod = None
        self.token = token
        # files in actual db
        self.filenames = ()
        # all files that are handled by create_db(backup=True)
        self.backup_filenames = ()
        self._set_filenames(dbtype)
def _detect_dbtype(self):
if os.path.isfile(os.path.join(self.secdir, "cert9.db")):
return 'sql'
elif os.path.isfile(os.path.join(self.secdir, "cert8.db")):
return 'dbm'
else:
return 'auto'
def _set_filenames(self, dbtype):
self.dbtype = dbtype
dbmfiles = (
os.path.join(self.secdir, "cert8.db"),
os.path.join(self.secdir, "key3.db"),
os.path.join(self.secdir, "secmod.db")
)
sqlfiles = (
os.path.join(self.secdir, "cert9.db"),
os.path.join(self.secdir, "key4.db"),
os.path.join(self.secdir, "pkcs11.txt")
)
if dbtype == 'dbm':
self.certdb, self.keydb, self.secmod = dbmfiles
self.filenames = dbmfiles + (self.pwd_file,)
elif dbtype == 'sql':
self.certdb, self.keydb, self.secmod = sqlfiles
self.filenames = sqlfiles + (self.pwd_file,)
elif dbtype == 'auto':
self.certdb = self.keydb = self.secmod = None
self.filenames = None
else:
raise ValueError(dbtype)
self.backup_filenames = (
self.pwd_file,
) + sqlfiles + dbmfiles
    def close(self):
        """Remove the DB directory if it was created as a temporary one."""
        if self._is_temporary:
            shutil.rmtree(self.secdir)
    def __enter__(self):
        """Context-manager entry; returns the database object itself."""
        return self
    def __exit__(self, type, value, tb):
        """Context-manager exit; removes a temporary database via close()."""
        self.close()
def _check_db(self):
if self.filenames is None:
raise RuntimeError(
"NSSDB '{}' not initialized.".format(self.secdir)
)
    def run_certutil(self, args, stdin=None, **kwargs):
        """Run certutil against this database and return the result.

        Prepends -d (DB type and location), appends -h (token, when set)
        and -f (password file); remaining kwargs go to ipautil.run.

        :raises RuntimeError: if the DB was never initialized
        """
        self._check_db()
        new_args = [
            paths.CERTUTIL,
            "-d", '{}:{}'.format(self.dbtype, self.secdir)
        ]
        new_args.extend(args)
        if self.token:
            new_args.extend(["-h", self.token])
        new_args.extend(['-f', self.pwd_file])
        # When certutil makes a request it creates a file in the cwd, make
        # sure we are in a unique place when this happens.
        return ipautil.run(new_args, stdin, cwd=self.secdir, **kwargs)
    def run_pk12util(self, args, stdin=None, **kwargs):
        """Run pk12util against this database and return the result.

        Only the -d (DB type and location) option is added automatically;
        callers supply everything else in *args*.
        """
        self._check_db()
        new_args = [
            paths.PK12UTIL,
            "-d", '{}:{}'.format(self.dbtype, self.secdir)
        ]
        new_args.extend(args)
        return ipautil.run(new_args, stdin, **kwargs)
def exists(self):
"""Check DB exists (all files are present)
"""
if self.filenames is None:
return False
return all(os.path.isfile(filename) for filename in self.filenames)
    def create_db(self, user=None, group=None, mode=None, backup=False):
        """Create cert DB

        :param user: User owning the secdir
        :param group: Group owning the secdir
        :param mode: Mode of the secdir
        :param backup: Backup the secdir files
        :raises ValueError: when the DB format cannot be detected after
            creation
        """
        if mode is not None:
            dirmode = mode
            filemode = mode & 0o666
            pwdfilemode = mode & 0o660
        else:
            # conservative defaults: group-readable, nothing for others
            dirmode = 0o750
            filemode = 0o640
            pwdfilemode = 0o640
        uid = -1
        gid = -1
        if user is not None:
            uid = pwd.getpwnam(user).pw_uid
        if group is not None:
            gid = grp.getgrnam(group).gr_gid
        if backup:
            for filename in self.backup_filenames:
                ipautil.backup_file(filename)
        if not os.path.exists(self.secdir):
            os.makedirs(self.secdir, dirmode)
        if not os.path.exists(self.pwd_file):
            # Create the password file for this db
            with io.open(os.open(self.pwd_file,
                                 os.O_CREAT | os.O_WRONLY,
                                 pwdfilemode), 'w', closefd=True) as f:
                f.write(ipautil.ipa_generate_password())
                # flush and sync tempfile inode
                f.flush()
                os.fsync(f.fileno())
        # In case dbtype is auto, let certutil decide which type of DB
        # to create.
        if self.dbtype == 'auto':
            dbdir = self.secdir
        else:
            dbdir = '{}:{}'.format(self.dbtype, self.secdir)
        args = [
            paths.CERTUTIL,
            '-d', dbdir,
            '-N',
            '-f', self.pwd_file,
            # -@ in case it's an old db and it must be migrated
            '-@', self.pwd_file,
        ]
        ipautil.run(args, stdin=None, cwd=self.secdir)
        self._set_filenames(self._detect_dbtype())
        if self.filenames is None:
            # something went wrong...
            raise ValueError(
                "Failed to create NSSDB at '{}'".format(self.secdir)
            )
        # Finally fix up perms
        os.chown(self.secdir, uid, gid)
        os.chmod(self.secdir, dirmode)
        tasks.restore_context(self.secdir, force=True)
        for filename in self.filenames:
            if os.path.exists(filename):
                os.chown(filename, uid, gid)
                if filename == self.pwd_file:
                    new_mode = pwdfilemode
                else:
                    new_mode = filemode
                os.chmod(filename, new_mode)
                tasks.restore_context(filename, force=True)
    def convert_db(self, rename_old=True):
        """Convert DBM database format to SQL database format

        :param rename_old: when True, the DBM files are renamed with a
            .migrated suffix after a successful conversion

        **WARNING** **WARNING** **WARNING** **WARNING** **WARNING**
        The caller must ensure that no other process or service is
        accessing the NSSDB during migration. The DBM format does not support
        multiple processes. If more than one process opens a DBM NSSDB for
        writing, the database will become **irreparably corrupted**.
        **WARNING** **WARNING** **WARNING** **WARNING** **WARNING**

        :raises ValueError: when the DB is already in SQL format
        """
        if (self.dbtype == 'sql' or
                os.path.isfile(os.path.join(self.secdir, "cert9.db"))):
            raise ValueError(
                'NSS DB {} has been migrated already.'.format(self.secdir)
            )
        # use certutil to migrate db to new format
        # see https://bugzilla.mozilla.org/show_bug.cgi?id=1415912
        # https://fedoraproject.org/wiki/Changes/NSSDefaultFileFormatSql
        args = [
            paths.CERTUTIL,
            '-d', 'sql:{}'.format(self.secdir), '-N',
            '-f', self.pwd_file, '-@', self.pwd_file
        ]
        ipautil.run(args, stdin=None, cwd=self.secdir)
        # retain file ownership and permission, backup old files
        migration = (
            ('cert8.db', 'cert9.db'),
            ('key3.db', 'key4.db'),
            ('secmod.db', 'pkcs11.txt'),
        )
        for oldname, newname in migration:
            oldname = os.path.join(self.secdir, oldname)
            newname = os.path.join(self.secdir, newname)
            oldstat = os.stat(oldname)
            os.chmod(newname, stat.S_IMODE(oldstat.st_mode))
            os.chown(newname, oldstat.st_uid, oldstat.st_gid)
            tasks.restore_context(newname, force=True)
        self._set_filenames('sql')
        self.list_certs()  # self-test
        if rename_old:
            for oldname, _ in migration:  # pylint: disable=unused-variable
                oldname = os.path.join(self.secdir, oldname)
                os.rename(oldname, oldname + '.migrated')
    def restore(self):
        """Restore files saved by create_db(backup=True).

        For each managed file, the current version (if any) is moved
        aside to *.ipasave and the *.orig backup (if any) is moved back
        in place.  OSErrors are logged and ignored so a partial restore
        continues with the remaining files.
        """
        for filename in self.backup_filenames:
            backup_path = filename + '.orig'
            save_path = filename + '.ipasave'
            try:
                if os.path.exists(filename):
                    os.rename(filename, save_path)
                if os.path.exists(backup_path):
                    os.rename(backup_path, filename)
            except OSError as e:
                logger.debug('%s', e)
    def list_certs(self):
        """Return nicknames and cert flags for all certs in the database

        :return: tuple of (nickname, TrustFlags) pairs parsed from
            ``certutil -L`` output
        """
        args = ["-L"]
        result = self.run_certutil(args, capture_output=True)
        certs = result.output.splitlines()
        # FIXME, this relies on NSS never changing the formatting of certutil
        certlist = []
        for cert in certs:
            match = CERT_RE.match(cert)
            if match:
                nickname = match.group('nick')
                trust_flags = parse_trust_flags(match.group('flags'))
                certlist.append((nickname, trust_flags))
        return tuple(certlist)
    def list_keys(self):
        """Return (slot, algo, keyid, nickname) tuples for all keys.

        ``certutil -K`` exits with code 255 when the database contains
        no keys; that case is mapped to an empty tuple.
        """
        result = self.run_certutil(
            ["-K"], raiseonerr=False, capture_output=True
        )
        if result.returncode == 255:
            return ()
        keylist = []
        for line in result.output.splitlines():
            mo = KEY_RE.match(line)
            if mo is not None:
                keylist.append((
                    int(mo.group('slot')),
                    mo.group('algo'),
                    mo.group('keyid'),
                    mo.group('nick'),
                ))
        return tuple(keylist)
def find_server_certs(self):
"""Return nicknames and cert flags for server certs in the database
Server certs have an "u" character in the trust flags.
:return: List of (name, trust_flags) tuples
"""
server_certs = []
for name, flags in self.list_certs():
if flags.has_key:
server_certs.append((name, flags))
return server_certs
def get_trust_chain(self, nickname):
"""Return names of certs in a given cert's trust chain
The list starts with root ca, then first intermediate CA, second
intermediate, and so on.
:param nickname: Name of the cert
:return: List of certificate names
"""
root_nicknames = []
result = self.run_certutil(
["-O", "--simple-self-signed", "-n", nickname],
capture_output=True)
chain = result.output.splitlines()
for c in chain:
m = re.match(r'\s*"(.*)" \[.*', c)
if m:
root_nicknames.append(m.groups()[0])
return root_nicknames
    def export_pkcs12(self, nickname, pkcs12_filename, pkcs12_passwd=None):
        """Export the cert/key pair *nickname* into a PKCS#12 file.

        :param nickname: nickname of the entry to export
        :param pkcs12_filename: output file path
        :param pkcs12_passwd: optional password protecting the output
        :raises RuntimeError: on pk12util failure (exit code 17 is a
            wrong password, 10 a file-open failure)
        """
        args = [
            "-o", pkcs12_filename,
            "-n", nickname,
            "-k", self.pwd_file
        ]
        pkcs12_password_file = None
        if pkcs12_passwd is not None:
            pkcs12_password_file = ipautil.write_tmp_file(pkcs12_passwd + '\n')
            args.extend(["-w", pkcs12_password_file.name])
        try:
            self.run_pk12util(args)
        except ipautil.CalledProcessError as e:
            if e.returncode == 17:
                raise RuntimeError("incorrect password for pkcs#12 file %s" %
                                   pkcs12_filename)
            elif e.returncode == 10:
                raise RuntimeError("Failed to open %s" % pkcs12_filename)
            else:
                raise RuntimeError("unknown error exporting pkcs#12 file %s" %
                                   pkcs12_filename)
        finally:
            if pkcs12_password_file is not None:
                pkcs12_password_file.close()
    def import_pkcs12(self, pkcs12_filename, pkcs12_passwd=None):
        """Import certs and keys from a PKCS#12 file into this DB.

        :param pkcs12_filename: input file path
        :param pkcs12_passwd: optional password protecting the input
        :raises Pkcs12ImportIncorrectPasswordError: wrong password
            (pk12util exit code 17 or 18)
        :raises Pkcs12ImportOpenError: file could not be opened (10)
        :raises Pkcs12ImportUnknownError: any other pk12util failure
        """
        args = [
            "-i", pkcs12_filename,
            "-k", self.pwd_file,
            "-v"
        ]
        pkcs12_password_file = None
        if pkcs12_passwd is not None:
            pkcs12_password_file = ipautil.write_tmp_file(pkcs12_passwd + '\n')
            args.extend(["-w", pkcs12_password_file.name])
        try:
            self.run_pk12util(args)
        except ipautil.CalledProcessError as e:
            if e.returncode in (17, 18):
                raise Pkcs12ImportIncorrectPasswordError(
                    "incorrect password for pkcs#12 file %s" % pkcs12_filename)
            elif e.returncode == 10:
                raise Pkcs12ImportOpenError(
                    "Failed to open %s" % pkcs12_filename)
            else:
                raise Pkcs12ImportUnknownError(
                    "unknown error import pkcs#12 file %s" %
                    pkcs12_filename)
        finally:
            if pkcs12_password_file is not None:
                pkcs12_password_file.close()
    def import_files(self, files, import_keys=False, key_password=None,
                     key_nickname=None):
        """
        Import certificates and a single private key from multiple files

        The files may be in PEM and DER certificate, PKCS#7 certificate chain,
        PKCS#8 and raw private key and PKCS#12 formats.

        :param files: Names of files to import
        :param import_keys: Whether to import private keys
        :param key_password: Password to decrypt private keys
        :param key_nickname: Nickname of the private key to import from PKCS#12
            files
        :raises RuntimeError: when a file cannot be parsed, keys are
            found in more than one file, or no key is found while
            import_keys is requested
        """
        key_file = None
        extracted_key = None
        extracted_certs = []
        for filename in files:
            try:
                with open(filename, 'rb') as f:
                    data = f.read()
            except IOError as e:
                raise RuntimeError(
                    "Failed to open %s: %s" % (filename, e.strerror))
            # Try to parse the file as PEM file
            matches = list(
                re.finditer(
                    br'-----BEGIN (.+?)-----(.*?)-----END \1-----',
                    data, re.DOTALL
                )
            )
            if matches:
                loaded = False
                for match in matches:
                    body = match.group()
                    label = match.group(1)
                    # 1-based line number of the PEM block, for log messages
                    line = len(data[:match.start() + 1].splitlines())
                    if label in (b'CERTIFICATE', b'X509 CERTIFICATE',
                                 b'X.509 CERTIFICATE'):
                        try:
                            cert = x509.load_pem_x509_certificate(body)
                        except ValueError as e:
                            if label != b'CERTIFICATE':
                                logger.warning(
                                    "Skipping certificate in %s at line %s: "
                                    "%s",
                                    filename, line, e)
                                continue
                            # a plain CERTIFICATE label falls through and
                            # is retried as PKCS#7 below
                            logger.error('Failed to load certificate in %s '
                                         'at line %s: %s',
                                         filename, line, e)
                        else:
                            extracted_certs.append(cert)
                            loaded = True
                            continue
                    if label in (b'PKCS7', b'PKCS #7 SIGNED DATA',
                                 b'CERTIFICATE'):
                        try:
                            certs = x509.pkcs7_to_certs(body)
                        except ipautil.CalledProcessError as e:
                            if label == b'CERTIFICATE':
                                logger.warning(
                                    "Skipping certificate in %s at line %s: "
                                    "%s",
                                    filename, line, e)
                            else:
                                logger.warning(
                                    "Skipping PKCS#7 in %s at line %s: %s",
                                    filename, line, e)
                            continue
                        else:
                            extracted_certs.extend(certs)
                            loaded = True
                            continue
                    if label in (b'PRIVATE KEY', b'ENCRYPTED PRIVATE KEY',
                                 b'RSA PRIVATE KEY', b'DSA PRIVATE KEY',
                                 b'EC PRIVATE KEY'):
                        if not import_keys:
                            continue
                        if key_file:
                            raise RuntimeError(
                                "Can't load private key from both %s and %s" %
                                (key_file, filename))
                        # the args -v2 aes256 -v2prf hmacWithSHA256 are needed
                        # on OpenSSL 1.0.2 (fips mode). As soon as FreeIPA
                        # requires OpenSSL 1.1.0 we'll be able to drop them
                        args = [
                            paths.OPENSSL, 'pkcs8',
                            '-topk8',
                            '-v2', 'aes256', '-v2prf', 'hmacWithSHA256',
                            '-passout', 'file:' + self.pwd_file,
                        ]
                        if ((label != b'PRIVATE KEY' and key_password) or
                                label == b'ENCRYPTED PRIVATE KEY'):
                            key_pwdfile = ipautil.write_tmp_file(key_password)
                            args += [
                                '-passin', 'file:' + key_pwdfile.name,
                            ]
                        try:
                            result = ipautil.run(
                                args, stdin=body, capture_output=True)
                        except ipautil.CalledProcessError as e:
                            logger.warning(
                                "Skipping private key in %s at line %s: %s",
                                filename, line, e)
                            continue
                        else:
                            extracted_key = result.raw_output
                            key_file = filename
                            loaded = True
                            continue
                if loaded:
                    continue
                raise RuntimeError("Failed to load %s" % filename)
            # Try to load the file as DER certificate
            try:
                cert = x509.load_der_x509_certificate(data)
            except ValueError:
                pass
            else:
                extracted_certs.append(cert)
                continue
            # Try to import the file as PKCS#12 file
            if import_keys:
                try:
                    self.import_pkcs12(filename, key_password)
                except Pkcs12ImportUnknownError:
                    # the file may not be a PKCS#12 file,
                    # go to the generic error about unrecognized format
                    pass
                except RuntimeError as e:
                    raise RuntimeError("Failed to load %s: %s" %
                                       (filename, str(e)))
                else:
                    if key_file:
                        raise RuntimeError(
                            "Can't load private key from both %s and %s" %
                            (key_file, filename))
                    key_file = filename
                    server_certs = self.find_server_certs()
                    if key_nickname:
                        for nickname, _trust_flags in server_certs:
                            if nickname == key_nickname:
                                break
                        else:
                            raise RuntimeError(
                                "Server certificate \"%s\" not found in %s" %
                                (key_nickname, filename))
                    else:
                        if len(server_certs) > 1:
                            raise RuntimeError(
                                "%s server certificates found in %s, "
                                "expecting only one" %
                                (len(server_certs), filename))
                    continue
            # Supported formats were tried but none succeeded
            raise RuntimeError("Failed to load %s: unrecognized format" %
                               filename)
        if import_keys and not key_file:
            raise RuntimeError(
                "No server certificates found in %s" % (', '.join(files)))
        # import the loose certs under their subject DN with empty trust
        for cert in extracted_certs:
            nickname = str(DN(cert.subject))
            self.add_cert(cert, nickname, EMPTY_TRUST_FLAGS)
        if extracted_key:
            # bundle extracted certs + key into a temporary PKCS#12 and
            # import that, so NSS pairs the key with its certificate
            with tempfile.NamedTemporaryFile() as in_file, \
                    tempfile.NamedTemporaryFile() as out_file:
                for cert in extracted_certs:
                    in_file.write(cert.public_bytes(x509.Encoding.PEM))
                in_file.write(extracted_key)
                in_file.flush()
                out_password = ipautil.ipa_generate_password()
                out_pwdfile = ipautil.write_tmp_file(out_password)
                args = [
                    paths.OPENSSL, 'pkcs12',
                    '-export',
                    '-in', in_file.name,
                    '-out', out_file.name,
                    '-passin', 'file:' + self.pwd_file,
                    '-passout', 'file:' + out_pwdfile.name,
                    '-certpbe', 'aes-128-cbc',
                    '-keypbe', 'aes-128-cbc',
                ]
                try:
                    ipautil.run(args)
                except ipautil.CalledProcessError as e:
                    raise RuntimeError(
                        "No matching certificate found for private key from "
                        "%s" % key_file)
                self.import_pkcs12(out_file.name, out_password)
def trust_root_cert(self, root_nickname, trust_flags):
if root_nickname[:7] == "Builtin":
logger.debug(
"No need to add trust for built-in root CAs, skipping %s",
root_nickname)
else:
trust_flags = unparse_trust_flags(trust_flags)
try:
self.run_certutil(["-M", "-n", root_nickname,
"-t", trust_flags])
except ipautil.CalledProcessError:
raise RuntimeError(
"Setting trust on %s failed" % root_nickname)
    def get_cert(self, nickname):
        """Return the first certificate stored under *nickname*.

        :param nickname: nickname of the certificate in the NSS database
        :returns: certificate object as produced by find_cert_from_txt
            (the stale "string/bytes" note was wrong; certutil's PEM
            output is parsed before returning)
        :raises RuntimeError: when certutil fails, e.g. unknown nickname
        """
        args = ['-L', '-n', nickname, '-a']
        try:
            result = self.run_certutil(args, capture_output=True)
        except ipautil.CalledProcessError:
            raise RuntimeError("Failed to get %s" % nickname)
        cert, _start = find_cert_from_txt(result.output, start=0)
        return cert
def has_nickname(self, nickname):
try:
self.get_cert(nickname)
except RuntimeError:
# This might be error other than "nickname not found". Beware.
return False
else:
return True
    def export_pem_cert(self, nickname, location):
        """Export the given cert to PEM file in the given location

        :param nickname: nickname of the certificate to export
        :param location: output file path; made world-readable (0444)
        """
        cert = self.get_cert(nickname)
        with open(location, "wb") as fd:
            fd.write(cert.public_bytes(x509.Encoding.PEM))
        os.chmod(location, 0o444)
    def import_pem_cert(self, nickname, flags, location):
        """Import a cert from the given PEM file.

        The file must contain exactly one certificate.

        :raises RuntimeError: when the file cannot be opened or contains
            no certificate
        :raises ValueError: when the file contains more than one
            certificate
        """
        try:
            with open(location) as fd:
                certs = fd.read()
        except IOError as e:
            raise RuntimeError(
                "Failed to open %s: %s" % (location, e.strerror)
            )
        cert, st = find_cert_from_txt(certs)
        self.add_cert(cert, nickname, flags)
        # finding a second cert after the first one is an error
        try:
            find_cert_from_txt(certs, st)
        except RuntimeError:
            pass
        else:
            raise ValueError('%s contains more than one certificate' %
                             location)
    def add_cert(self, cert, nick, flags):
        """Add *cert* to the DB under *nick* with TrustFlags *flags*."""
        flags = unparse_trust_flags(flags)
        args = ["-A", "-n", nick, "-t", flags, '-a']
        self.run_certutil(args, stdin=cert.public_bytes(x509.Encoding.PEM))
    def delete_cert(self, nick):
        """Delete the certificate named *nick* from the DB."""
        self.run_certutil(["-D", "-n", nick])
def delete_key_only(self, nick):
"""Delete the key with provided nick
This commands removes the key but leaves the cert in the DB.
"""
keys = self.list_keys()
# keys is a list of tuple(slot, algo, keyid, nickname)
for (_slot, _algo, keyid, nickname) in keys:
if nickname == nick:
# Key is present in the DB, delete the key
self.run_certutil(["-F", "-k", keyid])
break
    def delete_key_and_cert(self, nick):
        """Delete a cert and its key from the DB"""
        try:
            self.run_certutil(["-F", "-n", nick])
        except ipautil.CalledProcessError:
            # Using -F -k instead of -F -n because the latter fails if
            # the DB contains only the key
            self.delete_key_only(nick)
        # Check that cert was deleted
        for (certname, _flags) in self.list_certs():
            if certname == nick:
                self.delete_cert(nick)
def _verify_cert_validity(self, cert):
"""Common checks for cert validity
"""
utcnow = datetime.datetime.now(tz=datetime.timezone.utc)
if cert.not_valid_before_utc > utcnow:
raise ValueError(
f"not valid before {cert.not_valid_before_utc} UTC is in "
"the future."
)
if cert.not_valid_after_utc < utcnow:
raise ValueError(
f"has expired {cert.not_valid_after_utc} UTC"
)
# make sure the cert does not expire during installation
if cert.not_valid_after_utc + datetime.timedelta(hours=1) < utcnow:
raise ValueError(
f"expires in less than one hour ({cert.not_valid_after_utc} "
"UTC)"
)
    def verify_server_cert_validity(self, nickname, hostname):
        """Verify a certificate is valid for a SSL server with given hostname

        Raises a ValueError if the certificate is invalid.

        :param nickname: nickname of the server cert in this database
        :param hostname: DNS name the certificate must match
        """
        cert = self.get_cert(nickname)
        self._verify_cert_validity(cert)
        try:
            self.run_certutil(
                [
                    '-V',  # check validity of cert and attrs
                    '-n', nickname,
                    '-u', 'V',  # usage; 'V' means "SSL server"
                    '-e',  # check signature(s); this checks
                           # key sizes, sig algorithm, etc.
                ],
                capture_output=True)
        except ipautil.CalledProcessError as e:
            # certutil output in case of error is
            # 'certutil: certificate is invalid: <ERROR_STRING>\n'
            raise ValueError(e.output)
        try:
            cert.match_hostname(hostname)
        except ValueError:
            raise ValueError('invalid for server %s' % hostname)
    def verify_ca_cert_validity(self, nickname, minpathlen=None):
        """Verify that cert *nickname* is a usable CA certificate.

        Checks validity dates, a non-empty subject, CA basic constraints
        (with an optional minimum pathlen), a non-empty subject key
        identifier, and certutil's own "SSL CA" usage validation.

        :param nickname: nickname of the CA cert in this database
        :param minpathlen: minimum required basic-constraints pathlen,
            or None to skip that check
        :raises ValueError: when any check fails
        """
        cert = self.get_cert(nickname)
        self._verify_cert_validity(cert)
        if not cert.subject:
            raise ValueError("has empty subject")
        try:
            bc = cert.extensions.get_extension_for_class(
                cryptography.x509.BasicConstraints)
        except cryptography.x509.ExtensionNotFound:
            raise ValueError("missing basic constraints")
        if not bc.value.ca:
            raise ValueError("not a CA certificate")
        if minpathlen is not None:
            # path_length is None means no limitation
            pl = bc.value.path_length
            if pl is not None and pl < minpathlen:
                raise ValueError(
                    "basic contraint pathlen {}, must be at least {}".format(
                        pl, minpathlen
                    )
                )
        try:
            ski = cert.extensions.get_extension_for_class(
                cryptography.x509.SubjectKeyIdentifier)
        except cryptography.x509.ExtensionNotFound:
            raise ValueError("missing subject key identifier extension")
        else:
            if len(ski.value.digest) == 0:
                raise ValueError("subject key identifier must not be empty")
        try:
            self.run_certutil(
                [
                    '-V',  # check validity of cert and attrs
                    '-n', nickname,
                    '-u', 'L',  # usage; 'L' means "SSL CA"
                    '-e',  # check signature(s); this checks
                           # key sizes, sig algorithm, etc.
                ],
                capture_output=True)
        except ipautil.CalledProcessError as e:
            # certutil output in case of error is
            # 'certutil: certificate is invalid: <ERROR_STRING>\n'
            raise ValueError(e.output)
    def verify_kdc_cert_validity(self, nickname, realm):
        """Verify that cert *nickname* is valid as a KDC cert for *realm*.

        Builds the trust chain from this database and delegates to the
        module-level verify_kdc_cert_validity() with the leaf cert and
        its issuing chain.
        """
        nicknames = self.get_trust_chain(nickname)
        certs = [self.get_cert(nickname) for nickname in nicknames]
        verify_kdc_cert_validity(certs[-1], certs[:-1], realm)
| 38,021
|
Python
|
.py
| 906
| 28.891832
| 79
| 0.533995
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,626
|
graph.py
|
freeipa_freeipa/ipapython/graph.py
|
#
# Copyright (C) 2015-2017 FreeIPA Contributors see COPYING for license
#
from collections import deque
class Graph:
    """
    Simple oriented graph structure

    G = (V, E) where G is graph, V set of vertices and E list of edges.
    E = (tail, head) where tail and head are vertices
    """
    def __init__(self):
        self.vertices = set()
        self.edges = []
        self._adj = dict()

    def add_vertex(self, vertex):
        """Add *vertex* to the graph (re-adding resets its adjacency)."""
        self.vertices.add(vertex)
        self._adj[vertex] = []

    def add_edge(self, tail, head):
        """Add the oriented edge (tail, head).

        :raises ValueError: when either endpoint is not a vertex
        """
        if tail not in self.vertices:
            raise ValueError("tail is not a vertex")
        if head not in self.vertices:
            raise ValueError("head is not a vertex")
        self.edges.append((tail, head))
        self._adj[tail].append(head)

    def remove_edge(self, tail, head):
        """Remove the oriented edge (tail, head).

        :raises ValueError: when the edge is not in the graph
        """
        try:
            self.edges.remove((tail, head))
        except ValueError:
            # Fix: list.remove raises ValueError, not KeyError; the
            # previous `except KeyError` never fired so the friendly
            # message below was unreachable.
            raise ValueError(
                "graph does not contain edge: ({0}, {1})".format(tail, head)
            )
        self._adj[tail].remove(head)

    def remove_vertex(self, vertex):
        """Remove *vertex* together with all incident edges.

        :raises ValueError: when the vertex is not in the graph
        """
        try:
            self.vertices.remove(vertex)
        except KeyError:
            raise ValueError(
                "graph does not contain vertex: {0}".format(vertex)
            )
        # delete adjacencies
        del self._adj[vertex]
        for adj in self._adj.values():
            adj[:] = [v for v in adj if v != vertex]
        # delete edges
        self.edges = [
            e for e in self.edges if vertex not in (e[0], e[1])
        ]

    def get_tails(self, head):
        """
        Get list of vertices where a vertex is on the right side of an edge
        """
        return [e[0] for e in self.edges if e[1] == head]

    def get_heads(self, tail):
        """
        Get list of vertices where a vertex is on the left side of an edge
        """
        return [e[1] for e in self.edges if e[0] == tail]

    def bfs(self, start=None):
        """
        Breadth-first search traversal of the graph from `start` vertex.
        Return a set of all visited vertices

        Fix: test against None instead of truthiness so that falsy
        vertices (e.g. 0 or "") can be used as an explicit start point.
        """
        if start is None:
            start = next(iter(self.vertices))
        visited = set()
        queue = deque([start])
        while queue:
            vertex = queue.popleft()
            if vertex not in visited:
                visited.add(vertex)
                queue.extend(set(self._adj.get(vertex, [])) - visited)
        return visited
| 2,490
|
Python
|
.py
| 72
| 25.472222
| 76
| 0.560366
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,627
|
admintool.py
|
freeipa_freeipa/ipapython/admintool.py
|
# Authors:
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A common framework for command-line admin tools, e.g. install scripts
Handles common operations like option parsing and logging
"""
import logging
import sys
import os
import traceback
from optparse import OptionGroup # pylint: disable=deprecated-module
from ipaplatform.osinfo import osinfo
from ipapython import version
from ipapython import config
from ipapython.ipa_log_manager import standard_logging_setup
SUCCESS = 0
SERVER_INSTALL_ERROR = 1
SERVER_NOT_CONFIGURED = 2
logger = logging.getLogger(__name__)
class ScriptError(Exception):
    """An exception that records an error message and a return value

    :param msg: human-readable error text; None is normalized to ''
    :param rval: process exit code to report, defaults to 1
    """
    def __init__(self, msg='', rval=1):
        text = '' if msg is None else msg
        super(ScriptError, self).__init__(text)
        self.rval = rval

    @property
    def msg(self):
        """The error message; mirrors str(self)."""
        return str(self)
class AdminTool:
    """Base class for command-line admin tools

    To run the tool, call the main() classmethod with a list of command-line
    arguments.
    Alternatively, call run_cli() to run with command-line arguments in
    sys.argv, and call sys.exit() with the return value.

    Some commands actually represent multiple related tools, e.g.
    ``ipa-server-install`` and ``ipa-server-install --uninstall`` would be
    represented by separate classes. Only their options are the same.

    To handle this, AdminTool provides classmethods for option parsing
    and selecting the appropriate command class.

    A class-wide option parser is made by calling add_options.
    The options are then parsed into options and arguments, and
    get_command_class is called with those to retrieve the class.
    That class is then instantiated and run.

    Running consists of a few steps:
    - validating options or the environment (validate_options)
    - setting up logging (setup_logging)
    - running the actual command (run)

    Any unhandled exceptions are handled in handle_error.
    And at the end, either log_success or log_failure is called.

    Class attributes to define in subclasses:
    command_name - shown in logs
    log_file_name - if None, logging is to stderr only
    usage - text shown in help
    description - text shown in help

    See the setup_logging method for more info on logging.
    """
    command_name = None
    log_file_name = None
    usage = None
    description = None

    # Keyed on the concrete class. cls.option_parser alone cannot tell
    # whether a parser was built for *this* class, because the attribute
    # may be inherited from a superclass.
    _option_parsers = dict()

    @classmethod
    def make_parser(cls):
        """Create an option parser shared across all instances of this class"""
        parser = config.IPAOptionParser(version=version.VERSION,
            usage=cls.usage, formatter=config.IPAFormatter(),
            description=cls.description)
        cls.option_parser = parser
        cls.add_options(parser)

    @classmethod
    def add_options(cls, parser, debug_option=False):
        """Add command-specific options to the option parser

        :param parser: The parser to add options to
        :param debug_option: Add a --debug option as an alias to --verbose
        """
        group = OptionGroup(parser, "Logging and output options")
        group.add_option("-v", "--verbose", dest="verbose", default=False,
            action="store_true", help="print debugging information")
        if debug_option:
            # --debug writes into the same dest as --verbose on purpose.
            group.add_option("-d", "--debug", dest="verbose", default=False,
                action="store_true", help="alias for --verbose (deprecated)")
        group.add_option("-q", "--quiet", dest="quiet", default=False,
            action="store_true", help="output only errors")
        group.add_option("--log-file", dest="log_file", default=None,
            metavar="FILE", help="log to the given file")
        parser.add_option_group(group)

    @classmethod
    def run_cli(cls):
        """Run this command with sys.argv, exit process with the return value
        """
        sys.exit(cls.main(sys.argv))

    @classmethod
    def main(cls, argv):
        """The main entry point

        Parses command-line arguments, selects the actual command class to use
        based on them, and runs that command.

        :param argv: Command-line arguments.
        :return: Command exit code
        """
        if cls not in cls._option_parsers:
            # We use cls._option_parsers, a dictionary keyed on class, to check
            # if we need to create a parser. This is because cls.option_parser
            # can refer to the parser of a superclass.
            cls.make_parser()
            cls._option_parsers[cls] = cls.option_parser
        options, args = cls.option_parser.parse_args(argv[1:])
        command_class = cls.get_command_class(options, args)
        command = command_class(options, args)
        return command.execute()

    @classmethod
    def get_command_class(cls, options, args):
        # Hook: subclasses may dispatch to a different class depending on
        # the parsed options (e.g. an --uninstall variant).
        return cls

    def __init__(self, options, args):
        self.options = options
        self.args = args
        # True once setup_logging() attached a file handler; log_failure()
        # uses it to decide whether to point the user at the log file.
        self.log_file_initialized = False
        # Copy of the options with sensitive values masked, safe to log.
        self.safe_options = self.option_parser.get_safe_opts(options)

    def execute(self):
        """Do everything needed after options are parsed

        This includes validating options, setting up logging, doing the
        actual work, and handling the result.

        :return: process exit code (0 on success)
        """
        # Console-only logging until options are validated and the log
        # file location is known.
        self._setup_logging(no_file=True)
        return_value = 1
        try:
            self.validate_options()
            self.ask_for_options()
            self.setup_logging()
            return_value = self.run()
        except BaseException as exception:
            if isinstance(exception, ScriptError):
                if exception.rval and exception.rval > return_value:
                    return_value = exception.rval
            # NOTE: local name deliberately shadows the traceback module,
            # but only within this method.
            traceback = sys.exc_info()[2]
            error_message, return_value = self.handle_error(exception)
            if return_value:
                self.log_failure(error_message, return_value, exception,
                    traceback)
            return return_value
        self.log_success()
        return return_value

    def validate_options(self, needs_root=False):
        """Validate self.options

        It's also possible to compute and store information that will be
        useful later, but no changes to the system should be made here.

        :param needs_root: when True, require an effective GID of 0
        :raises ScriptError: on invalid option combinations
        """
        if needs_root and os.getegid() != 0:
            raise ScriptError('Must be root to run %s' % self.command_name, 1)
        if self.options.verbose and self.options.quiet:
            raise ScriptError(
                'The --quiet and --verbose options are mutually exclusive')

    def ask_for_options(self):
        """Ask for missing options interactively

        Similar to validate_options. This is separate method because we want
        any validation errors to abort the script before bothering the user
        with prompts.
        Any options that might be asked for should also be validated here.
        """

    def setup_logging(self, log_file_mode='w'):
        """Set up logging

        :param log_file_mode: mode for opening the log file
            ('w' truncates, 'a' appends)

        If the --log-file option was given or if a filename is in
        self.log_file_name, the tool will log to that file. In this case,
        all messages are logged.

        What is logged to the console depends on command-line options:
        the default is INFO; --quiet sets ERROR; --verbose sets DEBUG.

        Rules of thumb for logging levels:
        - CRITICAL for fatal errors
        - ERROR for critical things that the admin must see, even with --quiet
        - WARNING for things that need to stand out in the log
        - INFO to display normal messages
        - DEBUG to spam about everything the program does
        - a plain print for things that should not be logged (for example,
          interactive prompting)

        To log, use a module-level logger.

        Logging to file is only set up after option validation and prompting;
        before that, all output will go to the console only.
        """
        # Drop the stderr console handler installed by the early
        # _setup_logging(no_file=True) call so it is not duplicated.
        root_logger = logging.getLogger()
        for handler in root_logger.handlers:
            if (isinstance(handler, logging.StreamHandler) and
                    handler.stream is sys.stderr):
                root_logger.removeHandler(handler)
                break
        self._setup_logging(log_file_mode=log_file_mode)
        if self.log_file_name:
            self.log_file_initialized = True

    def _setup_logging(self, log_file_mode='w', no_file=False):
        # Internal helper: configures the root logger. With no_file=True
        # only console output is configured (used before validation).
        if no_file:
            log_file_name = None
        elif self.options.log_file:
            log_file_name = self.options.log_file
            self.log_file_name = log_file_name
        else:
            log_file_name = self.log_file_name
        if self.options.verbose:
            # Verbose mode shows logger names and levels on the console.
            console_format = '%(name)s: %(levelname)s: %(message)s'
            verbose = True
            debug = True
        else:
            console_format = '%(message)s'
            debug = False
            if self.options.quiet:
                verbose = False
            else:
                verbose = True
        standard_logging_setup(
            log_file_name, console_format=console_format,
            filemode=log_file_mode, debug=debug, verbose=verbose)
        if log_file_name:
            logger.debug('Logging to %s', log_file_name)
        elif not no_file:
            logger.debug('Not logging to a file')

    def handle_error(self, exception):
        """Given an exception, return a message (or None) and process exit code
        """
        if isinstance(exception, ScriptError):
            return exception.msg, exception.rval
        elif isinstance(exception, SystemExit):
            if isinstance(exception.code, int):
                return None, exception.code
            return str(exception.code), 1
        return str(exception), 1

    def run(self):
        """Actual running of the command

        This is where the hard work is done. The base implementation logs
        the invocation of the command.

        If this method returns (i.e. doesn't raise an exception), the tool is
        assumed to have run successfully, and the return value is used as the
        SystemExit code.
        """
        logger.debug('%s was invoked with arguments %s and options: %s',
            self.command_name, self.args, self.safe_options)
        logger.debug('IPA version %s', version.VENDOR_VERSION)
        logger.debug('IPA platform %s', osinfo.platform)
        logger.debug('IPA os-release %s %s', osinfo.name, osinfo.version)

    def log_failure(self, error_message, return_value, exception, backtrace):
        # Full backtrace goes to the debug log only; the console gets the
        # short error message.
        logger.debug('%s', ''.join(traceback.format_tb(backtrace)))
        logger.debug('The %s command failed, exception: %s: %s',
            self.command_name, type(exception).__name__, exception)
        if error_message:
            logger.error('%s', error_message)
        if return_value == 0:
            # A script may raise an exception but still want quit gracefully,
            # like the case of ipa-client-install called from
            # ipa-server-install.
            return
        message = "The %s command failed." % self.command_name
        if self.log_file_initialized and return_value != SERVER_NOT_CONFIGURED:
            message += " See %s for more information" % self.log_file_name
        logger.error('%s', message)

    def log_success(self):
        logger.info('The %s command was successful', self.command_name)
| 12,258
|
Python
|
.py
| 271
| 36.667897
| 79
| 0.652921
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,628
|
ipa_log_manager.py
|
freeipa_freeipa/ipapython/ipa_log_manager.py
|
# Authors: John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import re
import time
# Module exports
__all__ = ['standard_logging_setup',
           'ISO8601_UTC_DATETIME_FMT',
           'LOGGING_FORMAT_STDERR', 'LOGGING_FORMAT_STDOUT', 'LOGGING_FORMAT_FILE']

# Format string for time.strftime() to produce a ISO 8601 date time
# formatted string in the UTC time zone.
ISO8601_UTC_DATETIME_FMT = '%Y-%m-%dT%H:%M:%SZ'

# Logging format string for use with logging stderr handlers
LOGGING_FORMAT_STDERR = 'ipa: %(levelname)s: %(message)s'

# Logging format string for use with logging stdout handlers
LOGGING_FORMAT_STDOUT = '[%(asctime)s %(name)s] <%(levelname)s>: %(message)s'

# Logging format string for use with logging file handlers.
# Tab-separated fields so log files are easy to post-process.
LOGGING_FORMAT_FILE = '\t'.join([
    '%(asctime)s',
    '%(process)d',
    '%(threadName)s',
    '%(name)s',
    '%(levelname)s',
    '%(message)s',
])

# Used by standard_logging_setup() for console message
LOGGING_FORMAT_STANDARD_CONSOLE = '%(name)-12s: %(levelname)-8s %(message)s'

# Used by standard_logging_setup() for file message
LOGGING_FORMAT_STANDARD_FILE = '%(asctime)s %(levelname)s %(message)s'
class Filter:
    """Logging filter that suppresses records from matching loggers.

    Records whose logger name matches *regexp* are dropped unless they
    are at or above *level*; records from other loggers always pass.
    """

    def __init__(self, regexp, level):
        self.regexp = re.compile(regexp)
        self.level = level

    def filter(self, record):
        # Pass the record through when its logger is not matched, or
        # when it is important enough regardless of the match.
        matched = self.regexp.match(record.name)
        return (not matched) or record.levelno >= self.level
class Formatter(logging.Formatter):
    """logging.Formatter that renders %(asctime)s in UTC (ISO 8601)."""

    def __init__(
            self, fmt=LOGGING_FORMAT_STDOUT, datefmt=ISO8601_UTC_DATETIME_FMT):
        logging.Formatter.__init__(self, fmt, datefmt)
        # Use gmtime so timestamps are UTC instead of local time.
        self.converter = time.gmtime
def standard_logging_setup(filename=None, verbose=False, debug=False,
                           filemode='w', console_format=None):
    """Attach console (and optionally file) handlers to the root logger.

    :param filename: when given, also log everything (DEBUG) to this file
    :param verbose: console shows INFO and above
    :param debug: console shows DEBUG and above (overrides verbose)
    :param filemode: open mode for the log file ('w' or 'a')
    :param console_format: console format string; default is
        LOGGING_FORMAT_STANDARD_CONSOLE
    """
    if console_format is None:
        console_format = LOGGING_FORMAT_STANDARD_CONSOLE
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    # File output is always logged at debug level
    if filename is not None:
        # Create the log file with restrictive permissions (0600).
        old_umask = os.umask(0o177)
        try:
            file_handler = logging.FileHandler(filename, mode=filemode)
        finally:
            os.umask(old_umask)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(Formatter(LOGGING_FORMAT_STANDARD_FILE))
        root.addHandler(file_handler)
    # Console verbosity: debug wins over verbose; default is errors only.
    if debug:
        console_level = logging.DEBUG
    elif verbose:
        console_level = logging.INFO
    else:
        console_level = logging.ERROR
    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_level)
    console_handler.setFormatter(Formatter(console_format))
    root.addHandler(console_handler)
def convert_log_level(value):
    """Translate *value* into a numeric logging level.

    Accepts an int, a numeric string, or a level name such as 'debug'
    or 'WARNING' (case-insensitive).

    :raises ValueError: when the value is not numeric and not a known
        level name
    """
    _levels_by_name = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warn': logging.WARNING,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
    }
    try:
        return int(value)
    except ValueError:
        # Not numeric: fall through to the name lookup.
        pass
    try:
        return _levels_by_name[value.lower()]
    except KeyError:
        raise ValueError('unknown log level (%s)' % value)
| 3,881
|
Python
|
.py
| 98
| 33.642857
| 83
| 0.678894
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,629
|
kernel_keyring.py
|
freeipa_freeipa/ipapython/kernel_keyring.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import os
from ipapython.ipautil import run
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
# NOTE: Absolute path not required for keyctl since we reset the environment
# in ipautil.run.

# Use the session keyring so the same user can have a different principal
# in different shells. This was explicitly chosen over @us because then
# it is not possible to use KRB5CCNAME to have a different user principal.
# The same session would always be used and the first principal would
# always win.
KEYRING = '@s'
# All keys are stored with the kernel "user" key type.
KEYTYPE = 'user'
def dump_keys():
    """
    Return the textual listing of every key in the session keyring.
    """
    cmd = [paths.KEYCTL, 'list', KEYRING]
    result = run(cmd, raiseonerr=False, capture_output=True)
    return result.output
def get_real_key(key):
    """
    Resolve the kernel key ID for the given description.

    A key cannot be requested directly by the description it was
    created with, so search the keyring for the matching entry.

    :raises ValueError: when no key with this description exists
    """
    assert isinstance(key, str)
    result = run([paths.KEYCTL, 'search', KEYRING, KEYTYPE, key],
                 raiseonerr=False, capture_output=True)
    if result.returncode != 0:
        raise ValueError('key %s not found' % key)
    return result.raw_output.rstrip()
def get_persistent_key(key):
    """
    Fetch the value of a persistent key from storage, with any trailing
    whitespace trimmed.

    Asserts that *key* is a string.

    :raises ValueError: when the persistent key cannot be found
    """
    assert isinstance(key, str)
    result = run([paths.KEYCTL, 'get_persistent', KEYRING, key],
                 raiseonerr=False, capture_output=True)
    if result.returncode != 0:
        raise ValueError('persistent key %s not found' % key)
    return result.raw_output.rstrip()
def is_persistent_keyring_supported(check_container=True):
    """Return True if the kernel persistent keyring is supported.

    If check_container is True and a containerized environment is
    detected, return False: there is no support for keyring namespace
    isolation yet.
    """
    if check_container and tasks.detect_container() is not None:
        return False
    # Probe by looking up the persistent key of the current user.
    try:
        get_persistent_key(str(os.geteuid()))
    except ValueError:
        return False
    return True
def has_key(key):
    """
    Return True when *key* exists in the keyring, False otherwise.
    """
    assert isinstance(key, str)
    try:
        get_real_key(key)
    except ValueError:
        return False
    return True
def read_key(key):
    """
    Return the raw value stored in the keyring for *key*.

    Uses 'keyctl pipe' rather than 'print' so the raw bytes come back
    unmodified.

    :raises ValueError: when the key is missing or the read fails
    """
    assert isinstance(key, str)
    key_id = get_real_key(key)
    result = run([paths.KEYCTL, 'pipe', key_id],
                 raiseonerr=False, capture_output=True)
    if result.returncode != 0:
        raise ValueError('keyctl pipe failed: %s' % result.error_log)
    return result.raw_output
def update_key(key, value):
    """
    Store *value* under *key*, creating the key when it does not exist.

    :raises ValueError: when the kernel update fails
    """
    assert isinstance(key, str)
    assert isinstance(value, bytes)
    if not has_key(key):
        # No existing entry: fall back to creation.
        add_key(key, value)
        return
    key_id = get_real_key(key)
    result = run([paths.KEYCTL, 'pupdate', key_id], stdin=value,
                 raiseonerr=False)
    if result.returncode:
        raise ValueError('keyctl pupdate failed: %s' % result.error_log)
def add_key(key, value):
    """
    Create a new key in the kernel keyring holding *value*.

    :raises ValueError: when the key already exists or creation fails
    """
    assert isinstance(key, str)
    assert isinstance(value, bytes)
    if has_key(key):
        raise ValueError('key %s already exists' % key)
    result = run([paths.KEYCTL, 'padd', KEYTYPE, key, KEYRING],
                 stdin=value, raiseonerr=False)
    if result.returncode != 0:
        raise ValueError('keyctl padd failed: %s' % result.error_log)
def del_key(key):
    """
    Remove *key* from the keyring.

    :raises ValueError: when the key is missing or unlinking fails
    """
    assert isinstance(key, str)
    key_id = get_real_key(key)
    result = run([paths.KEYCTL, 'unlink', key_id, KEYRING],
                 raiseonerr=False)
    if result.returncode != 0:
        raise ValueError('keyctl unlink failed: %s' % result.error_log)
| 4,903
|
Python
|
.py
| 133
| 31.631579
| 76
| 0.689372
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,630
|
cookie.py
|
freeipa_freeipa/ipapython/cookie.py
|
# Authors:
# John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import email.utils
from calendar import timegm
from urllib.parse import urlparse
from ipapython.ipautil import datetime_from_utctimestamp
'''
Core Python has two cookie libraries, Cookie.py targeted to server
side and cookielib.py targeted to client side. So why this module and
not use the standard libraries?
Cookie.py has some serious bugs, it cannot correctly parse the
HttpOnly, Secure, and Expires cookie attributes (more of a client side
need and not what it was designed for). Since we utilize those
attributes that makes Cookie.py a non-starter. Plus its API is awkward
and limited (we would have to build more on top of it).
The Cookie.py bug reports are:
http://bugs.python.org/issue3073
http://bugs.python.org/issue16611
cookielib.py has a lot of good features, a nice API and covers all
the relevant RFC's as well as actual practice in the field. However
cookielib.py is tightly integrated with urllib2 and it's not possible
to use most of the features of cookielib without simultaneously using
urllib2. Unfortunately we only use httplib because of our dependency
on xmlrpc.client. Without urllib2 cookielib is a non-starter.
This module is a minimal implementation of Netscape cookies which
works equally well on either the client or server side. Its API is
easy to use, with cookie attributes as class properties which can be
read or set easily. The Cookie object automatically converts Expires
and Max-Age attributes into datetime objects for easy time
comparison. Cookies in strings can easily be parsed, including
multiple cookies in the HTTP_COOKIE environment variable.
The cookie RFC is silent on any escaping requirements for cookie
contents; as such this module does not provide any automated support
for escaping and unescaping.
'''
#-------------------------------------------------------------------------------
class Cookie:
    '''
    A Cookie object has the following attributes:

        key
            The name of the cookie
        value
            The value of the cookie

    A Cookie also supports these predefined optional attributes. If an
    optional attribute is not set on the cookie its value is None.

        domain
            Restrict cookie usage to this domain
        path
            Restrict cookie usage to this path or below
        expires
            Cookie is invalid after this UTC timestamp
        max_age
            Cookie is invalid this many seconds in the future.
            Has precedence over the expires attribute.
        secure
            Cookie should only be returned on secure (i.e. SSL/TLS)
            connections.
        httponly
            Cookie is intended only for HTTP communication, it can
            never be utilized in any other context (e.g. browser
            Javascript).

    See the documentation of get_expiration() for an explanation of
    how the expires and max-age attributes interact as well as the
    role of the timestamp attribute. Expiration values are stored as
    datetime objects for easy manipulation and comparison.

    There are two ways to instantiate a Cookie object. Either directly
    via the constructor or by calling the class function parse() which
    returns a list of Cookie objects found in a string.

    To create a cookie to send to a client:

    Example:

        cookie = Cookie('session', session_id,
                        domain=my_domain, path=mypath,
                        httponly=True, secure=True, expires=expiration)
        headers.append(('Set-Cookie', str(cookie)))

    To receive cookies from a request:

    Example:

        cookies = Cookie.parse(response.getheader('Set-Cookie'), request_url)
    '''

    class Expired(ValueError):
        # Raised by http_return_ok() when the cookie's expiration has passed.
        pass

    class URLMismatch(ValueError):
        # Raised by http_return_ok() when the cookie does not belong to
        # the request URL (domain or path mismatch).
        pass

    # regexp to split fields at a semi-colon
    field_re = re.compile(r';\s*')

    # regexp to locate a key/value pair; the key charset is the RFC 6265
    # cookie-name "token" alphabet
    kv_pair_re = re.compile(r'^\s*([a-zA-Z0-9\!\#\$\%\&\'\*\+\-\.\^\_\`\|\~]+)\s*=\s*(.*?)\s*$', re.IGNORECASE)

    # Reserved attribute names, maps from lower case protocol name to
    # object attribute name
    attrs = {'domain'   : 'domain',
             'path'     : 'path',
             'max-age'  : 'max_age',
             'expires'  : 'expires',
             'secure'   : 'secure',
             'httponly' : 'httponly'}
    @classmethod
    def datetime_to_time(cls, dt):
        '''
        Convert a naive UTC datetime into a UNIX timestamp (seconds
        since the epoch, UTC).

        Timestamps (timestamp & expires) are stored as datetime
        objects in UTC. It's non-obvious how to convert a naive UTC
        datetime into a unix time value; that functionality is oddly
        missing from the datetime and time modules. This utility
        provides that missing functionality.
        '''
        # timegm interprets the struct_time as UTC (unlike time.mktime,
        # which assumes local time).
        return timegm(dt.utctimetuple())
    @classmethod
    def datetime_to_string(cls, dt=None):
        '''
        Given a datetime object in UTC generate RFC 1123 date string.

        :raises ValueError: if *dt* carries a non-zero UTC offset.
        '''
        # Try to verify dt is specified as UTC. If utcoffset is not
        # available we'll just have to assume the caller is using the
        # correct timezone.
        utcoffset = dt.utcoffset()
        if utcoffset is not None and utcoffset.total_seconds() != 0.0:
            raise ValueError("timezone is not UTC")
        # Do not use strftime because it respects the locale, instead
        # use the RFC 1123 formatting function which uses only English
        return email.utils.formatdate(cls.datetime_to_time(dt), usegmt=True)
    @classmethod
    def parse_datetime(cls, s):
        '''
        Parse a RFC 822 / RFC 1123 date string and return a datetime object.

        :raises ValueError: if the string cannot be parsed.
        '''
        s = s.strip()
        # Do not use strptime because it respects the locale, instead
        # use the RFC 1123 parsing function which uses only English
        try:
            dt = email.utils.parsedate_to_datetime(s)
        except Exception as e:
            raise ValueError("unable to parse expires datetime '%s': %s" % (s, e))
        return dt
@classmethod
def normalize_url_path(cls, url_path):
'''
Given a URL path, possibly empty, return a path consisting
only of directory components. The URL path must end with a
trailing slash for the last path element to be considered a
directory. Also the URL path must begin with a slash. Empty
input returns '/'.
Examples:
'' -> '/'
'/' -> '/'
'foo' -> '/'
'foo/' -> '/'
'/foo -> '/'
'/foo/' -> '/foo'
'/foo/bar' -> '/foo'
'/foo/bar/' -> '/foo/bar'
'''
url_path = url_path.lower()
if not url_path:
return '/'
if not url_path.startswith('/'):
return '/'
if url_path.count('/') <= 1:
return '/'
return url_path[:url_path.rindex('/')]
    @classmethod
    def parse(cls, cookie_string, request_url=None):
        '''
        Given a string containing one or more cookies (the
        HTTP_COOKIES environment variable typically contains multiple
        cookies) parse the string and return a list of Cookie objects
        found in the string.

        :param cookie_string: raw cookie text, possibly holding several
            concatenated cookies separated by semi-colons
        :param request_url: when given, each parsed cookie is
            normalize()'d against this URL (fills in default
            domain/path and collapses Max-Age into Expires)
        :raises ValueError: on an unterminated quoted value or an
            unknown bare token
        '''
        # Our list of returned cookies
        cookies = []
        # Split the input string at semi-colon boundaries, we call this a
        # field. A field may either be a single keyword or a key=value
        # pair.
        fields = Cookie.field_re.split(cookie_string)
        # The input string may have multiple cookies inside it. This is
        # common when the string comes from a HTTP_COOKIE environment
        # variable. All the cookies will be concatenated, separated by a
        # semi-colon. Semi-colons are also the separator between
        # attributes in a cookie.
        #
        # To distinguish between two adjacent cookies in a string we
        # have to locate the key=value pair at the start of a
        # cookie. Unfortunately cookies have attributes that also look
        # like key/value pairs, the only way to distinguish a cookie
        # attribute from a cookie is the fact the attribute names are
        # reserved. A cookie attribute may either be a key/value pair
        # or a single key (e.g. HttpOnly). As we scan the cookie we
        # first identify the key=value (cookie name, cookie
        # value). Then we continue scanning, if a bare key or
        # key/value pair follows and is a known reserved keyword than
        # that's an attribute belonging to the current cookie. As soon
        # as we see a key/value pair whose key is not reserved we know
        # we've found a new cookie. Bare keys (no value) can never
        # start a new cookie.

        # Iterate over all the fields and emit a new cookie whenever the
        # next field is not a known attribute.
        cookie = None
        for field in fields:
            match = Cookie.kv_pair_re.search(field)
            if match:
                key = match.group(1)
                value = match.group(2)
                # Double quoted value?
                if value and value[0] == '"':
                    if value[-1] == '"':
                        value = value[1:-1]
                    else:
                        raise ValueError("unterminated quote in '%s'" % value)
                kv_pair = True
            else:
                key = field
                value = True    # True because bare keys are boolean flags
                kv_pair = False
            is_attribute = key.lower() in Cookie.attrs
            # First cookie found, create new cookie object
            if cookie is None and kv_pair and not is_attribute:
                cookie = Cookie(key, value)
            # If start of new cookie then flush previous cookie and create
            # a new one (it's a new cookie because it's a key/value pair
            # whose key is not a reserved keyword).
            elif cookie and kv_pair and not is_attribute:
                if request_url is not None:
                    cookie.normalize(request_url)
                cookies.append(cookie)
                cookie = Cookie(key, value)
            # If it's a reserved keyword add that as an attribute to the
            # current cookie being scanned.
            elif cookie and is_attribute:
                # Name-mangled call to the private attribute setter.
                cookie.__set_attr(key, value)
            # If we've found a non-empty single token that's not a
            # reserved keyword it's an error. An empty token can occur
            # when there are two adjacent semi-colons (i.e. "; ;").
            # We don't consider empty tokens an error.
            elif key:
                raise ValueError("unknown cookie token '%s'" % key)
        # Flush out final cookie
        if cookie:
            if request_url is not None:
                cookie.normalize(request_url)
            cookies.append(cookie)
        return cookies
    @classmethod
    def get_named_cookie_from_string(cls, cookie_string, cookie_name,
                                     request_url=None, timestamp=None):
        '''
        A cookie string may contain multiple cookies, parse the cookie
        string and return the last cookie in the string matching the
        cookie name or None if not found.

        This is basically a utility wrapper around the parse() class
        method which iterates over what parse() returns looking for
        the specific cookie.

        When cookie_name appears more than once the last instance is
        returned rather than the first because the ordering sequence
        makes the last instance the current value.

        :param timestamp: when given, overrides the matched cookie's
            arrival timestamp before normalization
        :param request_url: when given, the matched cookie is
            normalize()'d against this URL
        '''
        target_cookie = None
        cookies = cls.parse(cookie_string)
        # Later matches overwrite earlier ones: last instance wins.
        for cookie in cookies:
            if cookie.key == cookie_name:
                target_cookie = cookie
        if timestamp is not None:
            target_cookie.timestamp = timestamp
        if request_url is not None:
            target_cookie.normalize(request_url)
        return target_cookie
    def __init__(self, key, value, domain=None, path=None, max_age=None, expires=None,
                 secure=None, httponly=None, timestamp=None):
        # key/value/domain/path/secure/httponly are plain attributes;
        # max_age, expires and timestamp are properties whose setters
        # validate and convert the assigned value (e.g. strings and
        # numbers become datetime objects).
        self.key = key
        self.value = value
        self.domain = domain
        self.path = path
        self.max_age = max_age
        self.expires = expires
        self.secure = secure
        self.httponly = httponly
        self.timestamp = timestamp
    @property
    def timestamp(self):
        '''
        The UTC moment at which cookie was received for purposes of
        computing the expiration given a Max-Age offset. The
        expiration will be timestamp + max_age. The timestamp value
        will always be a datetime object (or None).

        By default the timestamp will be the moment the Cookie object
        is created as this often corresponds to the moment the cookie
        is received (the intent of the Max-Age attribute). But because
        it's sometimes desirable to force a specific moment for
        purposes of computing the expiration from the Max-Age the
        Cookie timestamp can be updated.

        Setting a value of None causes the timestamp to be None. You
        may also assign a numeric UNIX timestamp (seconds since the
        epoch UTC) or a formatted time string; in all cases the value
        will be converted to a datetime object.
        '''
        return self._timestamp

    @timestamp.setter
    def timestamp(self, value):
        # Normalize the many accepted input types to datetime (or None).
        if value is None:
            self._timestamp = None
        elif isinstance(value, datetime.datetime):
            self._timestamp = value
        elif isinstance(value, (int, float)):
            # Seconds since the epoch, UTC.
            self._timestamp = datetime_from_utctimestamp(value, units=1)
        elif isinstance(value, str):
            self._timestamp = Cookie.parse_datetime(value)
        else:
            raise TypeError('value must be datetime, int, long, float, basestring or None, not %s' % \
                            value.__class__.__name__)
@property
def expires(self):
'''
The expiration timestamp (in UTC) as a datetime object for the
cookie, or None if not set.
You may assign a value of None, a datetime object, a numeric
UNIX timestamp (seconds since the epoch UTC) or formatted time
string (the latter two will be converted to a datetime object.
'''
return self._expires
@expires.setter
def expires(self, value):
if value is None:
self._expires = None
elif isinstance(value, datetime.datetime):
self._expires = value
if self._expires.tzinfo is None:
self._expires.replace(tzinfo=datetime.timezone.utc)
elif isinstance(value, (int, float)):
self._expires = datetime_from_utctimestamp(value, units=1)
elif isinstance(value, str):
self._expires = Cookie.parse_datetime(value)
else:
raise TypeError('value must be datetime, int, long, float, basestring or None, not %s' % \
value.__class__.__name__)
    @property
    def max_age(self):
        '''
        The lifetime duration of the cookie, in seconds. Computed as an
        offset from the cookie's timestamp.
        '''
        return self._max_age

    @max_age.setter
    def max_age(self, value):
        # Accept None or anything int() can convert; anything else is
        # reported as a ValueError.
        if value is None:
            self._max_age = None
        else:
            try:
                self._max_age = int(value)
            except Exception:
                raise ValueError("Max-Age value '%s' not convertable to integer" % value)
    def __set_attr(self, name, value):  # pylint: disable=unused-private-member
        '''
        Sets one of the predefined cookie attributes.

        Called from parse() (through name mangling) with the raw,
        case-insensitive attribute name from the cookie string.

        :raises ValueError: for names not in Cookie.attrs
        '''
        attr_name = Cookie.attrs.get(name.lower(), None)
        if attr_name is None:
            raise ValueError("unknown cookie attribute '%s'" % name)
        # Assignment may go through a property setter (expires, max_age)
        # which validates/converts the value.
        setattr(self, attr_name, value)
def __str__(self):
components = []
components.append("%s=%s" % (self.key, self.value))
if self.domain is not None:
components.append("Domain=%s" % self.domain)
if self.path is not None:
components.append("Path=%s" % self.path)
if self.max_age is not None:
components.append("Max-Age=%s" % self.max_age)
if self.expires is not None:
components.append("Expires=%s" % Cookie.datetime_to_string(self.expires))
if self.secure:
components.append("Secure")
if self.httponly:
components.append("HttpOnly")
return '; '.join(components)
    def get_expiration(self):
        '''
        Return the effective expiration of the cookie as a datetime
        object or None if no expiration is defined. Expiration may be
        defined either by the "Expires" timestamp attribute or the
        "Max-Age" duration attribute. If both are set "Max-Age" takes
        precedence. If neither is set the cookie has no expiration and
        None will be returned.

        "Max-Age" specifies the number of seconds in the future from when the
        cookie is received until it expires. Effectively it means
        adding "Max-Age" seconds to a timestamp to arrive at an
        expiration. By default the timestamp used to mark the arrival
        of the cookie is set to the moment the cookie object is
        created. However sometimes it is desirable to adjust the
        received timestamp to something other than the moment of
        object creation, therefore you can explicitly set the arrival
        timestamp used in the "Max-Age" calculation.

        "Expires" specifies an explicit timestamp.

        If "Max-Age" is set a datetime object is returned which is the
        sum of the arrival timestamp and "Max-Age".

        If "Expires" is set a datetime object is returned matching the
        timestamp specified as the "Expires" value.

        If neither is set None is returned.
        '''
        # Max-Age wins over Expires when both are present.
        if self.max_age is not None:
            return self.timestamp + datetime.timedelta(seconds=self.max_age)
        if self.expires is not None:
            return self.expires
        return None
    def normalize_expiration(self):
        '''
        An expiration may be specified either with an explicit
        timestamp in the "Expires" attribute or via an offset
        specified with the "Max-Age" attribute. The "Max-Age"
        attribute has precedence over "Expires" if both are
        specified.

        This method normalizes the expiration of the cookie such that
        only an "Expires" attribute remains after consideration of the
        "Max-Age" attribute. This is useful when storing the cookie
        for future reference.

        :return: the resulting expiration datetime (or None)
        '''
        self.expires = self.get_expiration()
        self.max_age = None
        return self.expires
def set_defaults_from_url(self, url):
'''
If cookie domain and path attributes are not specified then
they assume defaults from the request url the cookie was
received from.
'''
_scheme, domain, path, _params, _query, _fragment = urlparse(url)
if self.domain is None:
self.domain = domain.lower()
if self.path is None:
self.path = self.normalize_url_path(path)
    def normalize(self, url):
        '''
        Missing cookie attributes will receive default values derived
        from the request URL. The expiration value is normalized
        (Max-Age is collapsed into Expires).
        '''
        self.set_defaults_from_url(url)
        self.normalize_expiration()
def http_cookie(self):
    '''
    Render the cookie as a bare "key=value;" pair with no attributes,
    the form appropriate for an HTTP Cookie request header.
    '''
    pair = '%s=%s;' % (self.key, self.value)
    return pair
def http_return_ok(self, url):
    '''
    Tests to see if a cookie should be returned when a request is
    sent to a specific URL.

    * The request url's host must match the cookie's doman
      otherwise raises Cookie.URLMismatch.
    * The path in the request url must contain the cookie's path
      otherwise raises Cookie.URLMismatch.
    * If the cookie defines an expiration date then the current
      time must be less or equal to the cookie's expiration
      timestamp. Will raise Cookie.Expired if a defined expiration
      is not valid.

    If the test fails Cookie.Expired or Cookie.URLMismatch will be raised,
    otherwise True is returned.
    '''
    def domain_valid(url_domain, cookie_domain):
        '''
        Compute domain component and perform test per
        RFC 6265, Section 5.1.3. "Domain Matching"
        '''
        # FIXME: At the moment we can't import from ipalib at the
        # module level because of a dependency loop (cycle) in the
        # import. Our module layout needs to be refactored.
        # pylint: disable=ipa-forbidden-import
        from ipalib.util import validate_domain_name
        # pylint: enable=ipa-forbidden-import
        try:
            validate_domain_name(url_domain)
        except Exception:
            # Syntactically invalid URL domains never match.
            return False
        # A cookie without a Domain attribute matches any valid domain.
        if cookie_domain is None:
            return True
        url_domain = url_domain.lower()
        cookie_domain = cookie_domain.lower()
        if url_domain == cookie_domain:
            return True
        # A suffix match only counts when the cookie domain was given
        # with a leading dot (i.e. it covers subdomains).
        if url_domain.endswith(cookie_domain):
            if cookie_domain.startswith('.'):
                return True
        return False
    def path_valid(url_path, cookie_path):
        '''
        Compute path component and perform test per
        RFC 6265, Section 5.1.4. "Paths and Path-Match"
        '''
        # A cookie without a Path attribute matches any request path.
        if cookie_path is None:
            return True
        cookie_path = cookie_path.lower()
        request_path = self.normalize_url_path(url_path)
        if cookie_path == request_path:
            return True
        if cookie_path and request_path.startswith(cookie_path):
            # Prefix match counts when the cookie path ends in '/' or
            # the request path continues with '/' right after the prefix.
            if cookie_path.endswith('/'):
                return True
            tail = request_path[len(cookie_path):]
            if tail.startswith('/'):
                return True
        return False
    cookie_name = self.key
    (
        url_scheme, url_domain, url_path,
        _url_params, _url_query, _url_fragment
    ) = urlparse(url)
    # Expiration check uses an aware UTC "now" so it compares cleanly
    # against the cookie's (aware) expiration timestamp.
    cookie_expiration = self.get_expiration()
    if cookie_expiration is not None:
        now = datetime.datetime.now(tz=datetime.timezone.utc)
        if cookie_expiration < now:
            raise Cookie.Expired("cookie named '%s'; expired at %s'" % \
                                     (cookie_name,
                                      self.datetime_to_string(cookie_expiration)))
    if not domain_valid(url_domain, self.domain):
        raise Cookie.URLMismatch("cookie named '%s'; it's domain '%s' does not match URL domain '%s'" % \
                                     (cookie_name, self.domain, url_domain))
    if not path_valid(url_path, self.path):
        raise Cookie.URLMismatch("cookie named '%s'; it's path '%s' does not contain the URL path '%s'" % \
                                     (cookie_name, self.path, url_path))
    url_scheme = url_scheme.lower()
    # HttpOnly cookies may only travel over HTTP(S); Secure cookies
    # only over HTTPS.
    if self.httponly:
        if url_scheme not in ('http', 'https'):
            raise Cookie.URLMismatch("cookie named '%s'; is restricted to HTTP but it's URL scheme is '%s'" % \
                                         (cookie_name, url_scheme))
    if self.secure:
        if url_scheme not in ('https',):
            raise Cookie.URLMismatch("cookie named '%s'; is restricted to secure transport but it's URL scheme is '%s'" % \
                                         (cookie_name, url_scheme))
    return True
| 24,920
|
Python
|
.py
| 545
| 35.743119
| 127
| 0.622906
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,631
|
dnsutil.py
|
freeipa_freeipa/ipapython/dnsutil.py
|
# Authors: Martin Basti <mbasti@redhat.com>
#
# Copyright (C) 2007-2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import copy
import logging
import operator
import random
import dns.name
import dns.exception
import dns.resolver
import dns.rdataclass
import dns.rdatatype
import dns.reversename
import six
from ipapython.ipautil import UnsafeIPAddress
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
ipa_resolver = None
def get_ipa_resolver():
    """Return the shared module-level DNSResolver, creating it lazily."""
    global ipa_resolver
    resolver = ipa_resolver
    if resolver is None:
        resolver = DNSResolver()
        ipa_resolver = resolver
    return resolver


def resolve(*args, **kwargs):
    """Perform a DNS query using the shared IPA resolver."""
    resolver = get_ipa_resolver()
    return resolver.resolve(*args, **kwargs)


def resolve_address(*args, **kwargs):
    """Perform a reverse (PTR) DNS query using the shared IPA resolver."""
    resolver = get_ipa_resolver()
    return resolver.resolve_address(*args, **kwargs)


def zone_for_name(*args, **kwargs):
    """Find the zone containing a name.

    Defaults to the shared IPA resolver unless the caller supplies an
    explicit ``resolver`` keyword argument.
    """
    if "resolver" not in kwargs:
        kwargs["resolver"] = get_ipa_resolver()
    return dns.resolver.zone_for_name(*args, **kwargs)


def reset_default_resolver():
    """Discard the cached resolver and build a fresh one, picking up
    the current system resolver configuration."""
    global ipa_resolver
    ipa_resolver = DNSResolver()
class DNSResolver(dns.resolver.Resolver):
    """DNS stub resolver compatible with both dnspython < 2.0.0
    and dnspython >= 2.0.0.

    Set `use_search_by_default` attribute to `True`, which
    determines the default for whether the search list configured
    in the system's resolver configuration is used for relative
    names, and whether the resolver's domain may be added to relative
    names.

    Increase the default lifetime which determines the number of seconds
    to spend trying to get an answer to the question. dnspython 2.0.0
    changes this to 5sec, while the previous one was 30sec.
    """
    def __init__(self, *args, **kwargs):
        # Backing attribute for the `nameservers` property below; must
        # exist before the parent __init__ runs, because the parent
        # assigns to `nameservers` (hitting our setter).
        self._nameservers = None
        super().__init__(*args, **kwargs)
        self.reset_ipa_defaults()
        # dnspython < 2.0.0 has no resolve()/resolve_address(); alias
        # the modern names to query() / our private PTR helper so
        # callers can always use the dnspython 2 API.
        self.resolve = getattr(super(), "resolve", self.query)
        self.resolve_address = getattr(
            super(),
            "resolve_address",
            self._resolve_address
        )

    def reset_ipa_defaults(self):
        """
        BIND's default timeout for resolver is 10sec.
        If that changes then it causes Timeout (instead of SERVFAIL)
        exception for dnspython if BIND under high load. So, let's make
        it the same + operation time.
        dnspython default is 2sec
        """
        self.timeout = 10 + 2
        # dnspython default is 5sec; scale the total query budget with
        # the number of nameservers, capped at 45sec.
        self.lifetime = min(self.timeout * len(self.nameservers) * 2, 45)
        self.use_search_by_default = True

    def reset(self):
        # Re-apply IPA defaults after dnspython resets its own state.
        super().reset()
        self.reset_ipa_defaults()

    def _resolve_address(self, ip_address, *args, **kwargs):
        """Query nameservers for PTR records.

        :param ip_address: IPv4 or IPv6 address
        :type ip_address: str
        """
        return self.resolve(
            dns.reversename.from_address(ip_address),
            rdtype=dns.rdatatype.PTR,
            *args,
            **kwargs,
        )

    def read_resolv_conf(self, *args, **kwargs):
        """
        dnspython tries nameservers sequentially(not parallel).
        IPA controlled BIND always listen on IPv6 if available,
        so no need to send requests to both IPv4 and IPv6 endpoints
        of the same NS(though BIND handles this).
        """
        super().read_resolv_conf(*args, **kwargs)
        # deduplicate while preserving order (dict preserves insertion
        # order on Python 3.7+)
        nameservers = list(dict.fromkeys(self.nameservers))
        ipv6_loopback = "::1"
        ipv4_loopback = "127.0.0.1"
        # Prefer the IPv6 loopback when both loopbacks are configured.
        if ipv6_loopback in nameservers and ipv4_loopback in nameservers:
            nameservers.remove(ipv4_loopback)
        self.nameservers = nameservers

    @property
    def nameservers(self):
        return self._nameservers

    @nameservers.setter
    def nameservers(self, nameservers):
        """
        *nameservers*, a ``list`` of nameservers with optional ports:
        "SERVER_IP port PORT_NUMBER".

        Overloads dns.resolver.Resolver.nameservers setter to split off ports
        into nameserver_ports after setting nameservers successfully with the
        setter in dns.resolver.Resolver.
        """
        # Get nameserver_ports if it is already set
        if hasattr(self, "nameserver_ports"):
            nameserver_ports = self.nameserver_ports
        else:
            nameserver_ports = {}
        # Check nameserver items in list and split out converted port number
        # into nameserver_ports: { nameserver: port }
        if isinstance(nameservers, list):
            _nameservers = []
            for nameserver in nameservers:
                splits = nameserver.split()
                if len(splits) == 3 and splits[1] == "port":
                    nameserver = splits[0]
                    try:
                        port = int(splits[2])
                        if port < 0 or port > 65535:
                            raise ValueError()
                    except ValueError:
                        raise ValueError(
                            "invalid nameserver: %s is not a valid port" %
                            splits[2])
                    nameserver_ports[nameserver] = port
                _nameservers.append(nameserver)
            nameservers = _nameservers
        # Call dns.resolver.Resolver.nameservers setter
        if hasattr(dns.resolver.Resolver, "nameservers"):
            dns.resolver.Resolver.nameservers.__set__(self, nameservers)
        else:
            # old dnspython (<2) doesn't have 'nameservers' property
            self._nameservers = nameservers
        # Set nameserver_ports after successfull call to setter
        self.nameserver_ports = nameserver_ports
class DNSZoneAlreadyExists(dns.exception.DNSException):
    # Raised when a DNS zone about to be created is already delegated
    # in DNS. `supp_kwargs` names the keyword arguments accepted by the
    # DNSException constructor and substituted into `fmt`.
    supp_kwargs = {'zone', 'ns'}
    fmt = (u"DNS zone {zone} already exists in DNS "
           "and is handled by server(s): {ns}")
@six.python_2_unicode_compatible
class DNSName(dns.name.Name):
    """dns.name.Name subclass that accepts text/Name input and keeps
    the results of name operations typed as DNSName."""
    labels = None  # make pylint happy

    @classmethod
    def from_text(cls, labels, origin=None):
        # Alternate constructor: parse a textual DNS name.
        return cls(dns.name.from_text(labels, origin))

    def __init__(self, labels, origin=None):
        try:
            # Accept a text name, another Name instance, or a raw
            # labels tuple.
            if isinstance(labels, str):
                labels = dns.name.from_text(unicode(labels), origin).labels
            elif isinstance(labels, dns.name.Name):
                labels = labels.labels
            super(DNSName, self).__init__(labels)
        except UnicodeError as e:
            # dnspython bug, an invalid domain name returns the UnicodeError
            # instead of a dns.exception
            raise dns.exception.SyntaxError(e)

    def __bool__(self):
        # dns.name.from_text('@') is represented like empty tuple;
        # we need '@' to act as a nonzero (truthy) value.
        return True
    __nonzero__ = __bool__  # for Python 2

    def __copy__(self):
        return DNSName(self.labels)

    def __deepcopy__(self, memo):
        return DNSName(copy.deepcopy(self.labels, memo))

    def __str__(self):
        return self.to_unicode()

    # method ToASCII named by RFC 3490 and python standard library
    if six.PY2:
        def ToASCII(self):
            # must be unicode string in Py2
            return self.to_text().decode('ascii')
    else:
        def ToASCII(self):
            return self.to_text()

    # The wrappers below re-wrap dns.name.Name results so that chained
    # operations keep returning DNSName instances.
    def canonicalize(self):
        return DNSName(super(DNSName, self).canonicalize())

    def concatenate(self, other):
        return DNSName(super(DNSName, self).concatenate(other))

    def relativize(self, origin):
        return DNSName(super(DNSName, self).relativize(origin))

    def derelativize(self, origin):
        return DNSName(super(DNSName, self).derelativize(origin))

    def choose_relativity(self, origin=None, relativize=True):
        return DNSName(super(DNSName, self).choose_relativity(
            origin=origin, relativize=relativize))

    def make_absolute(self):
        return self.derelativize(self.root)

    def is_idn(self):
        # Internationalized labels carry the ACE prefix "xn--".
        return any(label.startswith('xn--') for label in self.labels)

    def is_ip4_reverse(self):
        return self.is_subdomain(self.ip4_rev_zone)

    def is_ip6_reverse(self):
        return self.is_subdomain(self.ip6_rev_zone)

    def is_reverse(self):
        return self.is_ip4_reverse() or self.is_ip6_reverse()

    def is_empty(self):
        return len(self.labels) == 0
# DNS public constants attached to DNSName after class creation
# (they are DNSName instances themselves, so they cannot be class
# attributes defined inside the class body).
DNSName.root = DNSName(dns.name.root)  # '.'
DNSName.empty = DNSName(dns.name.empty)  # '@'
# Apexes of the IPv4 and IPv6 reverse-lookup trees.
DNSName.ip4_rev_zone = DNSName(('in-addr', 'arpa', ''))
DNSName.ip6_rev_zone = DNSName(('ip6', 'arpa', ''))
# Empty zones are defined in various RFCs. BIND is by default serving them.
# This constat should contain everything listed in
# IANA registry "Locally-Served DNS Zones"
# URL: http://www.iana.org/assignments/locally-served-dns-zones
# + AS112 zone defined in RFC 7534. It is not in the registry for some
# reason but BIND 9.10 is serving it as automatic empty zones.
EMPTY_ZONES = [DNSName(aez).make_absolute() for aez in [
# RFC 1918
"10.IN-ADDR.ARPA", "16.172.IN-ADDR.ARPA", "17.172.IN-ADDR.ARPA",
"18.172.IN-ADDR.ARPA", "19.172.IN-ADDR.ARPA", "20.172.IN-ADDR.ARPA",
"21.172.IN-ADDR.ARPA", "22.172.IN-ADDR.ARPA", "23.172.IN-ADDR.ARPA",
"24.172.IN-ADDR.ARPA", "25.172.IN-ADDR.ARPA", "26.172.IN-ADDR.ARPA",
"27.172.IN-ADDR.ARPA", "28.172.IN-ADDR.ARPA", "29.172.IN-ADDR.ARPA",
"30.172.IN-ADDR.ARPA", "31.172.IN-ADDR.ARPA", "168.192.IN-ADDR.ARPA",
# RFC 6598
"64.100.IN-ADDR.ARPA", "65.100.IN-ADDR.ARPA", "66.100.IN-ADDR.ARPA",
"67.100.IN-ADDR.ARPA", "68.100.IN-ADDR.ARPA", "69.100.IN-ADDR.ARPA",
"70.100.IN-ADDR.ARPA", "71.100.IN-ADDR.ARPA", "72.100.IN-ADDR.ARPA",
"73.100.IN-ADDR.ARPA", "74.100.IN-ADDR.ARPA", "75.100.IN-ADDR.ARPA",
"76.100.IN-ADDR.ARPA", "77.100.IN-ADDR.ARPA", "78.100.IN-ADDR.ARPA",
"79.100.IN-ADDR.ARPA", "80.100.IN-ADDR.ARPA", "81.100.IN-ADDR.ARPA",
"82.100.IN-ADDR.ARPA", "83.100.IN-ADDR.ARPA", "84.100.IN-ADDR.ARPA",
"85.100.IN-ADDR.ARPA", "86.100.IN-ADDR.ARPA", "87.100.IN-ADDR.ARPA",
"88.100.IN-ADDR.ARPA", "89.100.IN-ADDR.ARPA", "90.100.IN-ADDR.ARPA",
"91.100.IN-ADDR.ARPA", "92.100.IN-ADDR.ARPA", "93.100.IN-ADDR.ARPA",
"94.100.IN-ADDR.ARPA", "95.100.IN-ADDR.ARPA", "96.100.IN-ADDR.ARPA",
"97.100.IN-ADDR.ARPA", "98.100.IN-ADDR.ARPA", "99.100.IN-ADDR.ARPA",
"100.100.IN-ADDR.ARPA", "101.100.IN-ADDR.ARPA",
"102.100.IN-ADDR.ARPA", "103.100.IN-ADDR.ARPA",
"104.100.IN-ADDR.ARPA", "105.100.IN-ADDR.ARPA",
"106.100.IN-ADDR.ARPA", "107.100.IN-ADDR.ARPA",
"108.100.IN-ADDR.ARPA", "109.100.IN-ADDR.ARPA",
"110.100.IN-ADDR.ARPA", "111.100.IN-ADDR.ARPA",
"112.100.IN-ADDR.ARPA", "113.100.IN-ADDR.ARPA",
"114.100.IN-ADDR.ARPA", "115.100.IN-ADDR.ARPA",
"116.100.IN-ADDR.ARPA", "117.100.IN-ADDR.ARPA",
"118.100.IN-ADDR.ARPA", "119.100.IN-ADDR.ARPA",
"120.100.IN-ADDR.ARPA", "121.100.IN-ADDR.ARPA",
"122.100.IN-ADDR.ARPA", "123.100.IN-ADDR.ARPA",
"124.100.IN-ADDR.ARPA", "125.100.IN-ADDR.ARPA",
"126.100.IN-ADDR.ARPA", "127.100.IN-ADDR.ARPA",
# RFC 5735 and RFC 5737
"0.IN-ADDR.ARPA", "127.IN-ADDR.ARPA", "254.169.IN-ADDR.ARPA",
"2.0.192.IN-ADDR.ARPA", "100.51.198.IN-ADDR.ARPA",
"113.0.203.IN-ADDR.ARPA", "255.255.255.255.IN-ADDR.ARPA",
# Local IPv6 Unicast Addresses
"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA",
"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA",
# LOCALLY ASSIGNED LOCAL ADDRESS SCOPE
"D.F.IP6.ARPA", "8.E.F.IP6.ARPA", "9.E.F.IP6.ARPA", "A.E.F.IP6.ARPA",
"B.E.F.IP6.ARPA",
# Example Prefix, RFC 3849.
"8.B.D.0.1.0.0.2.IP6.ARPA",
# RFC 7534
"EMPTY.AS112.ARPA",
]]
def assert_absolute_dnsname(name):
    """Raise AssertionError if name is not DNSName or is not absolute.

    >>> assert_absolute_dnsname(DNSName('absolute.name.example.'))
    >>> assert_absolute_dnsname(DNSName('relative.name.example'))
    Traceback (most recent call last):
    ...
    AssertionError: name must be absolute, ...
    >>> assert_absolute_dnsname('absolute.string.example.')
    Traceback (most recent call last):
    ...
    AssertionError: name must be DNSName instance, ...
    """
    assert isinstance(name, DNSName), (
        "name must be DNSName instance, got '%s'" % type(name))
    assert name.is_absolute(), "name must be absolute, got '%s'" % name
def is_auto_empty_zone(zone):
    """True if specified zone name exactly matches an automatic empty zone.

    >>> is_auto_empty_zone(DNSName('in-addr.arpa.'))
    False
    >>> is_auto_empty_zone(DNSName('10.in-addr.arpa.'))
    True
    >>> is_auto_empty_zone(DNSName('1.10.in-addr.arpa.'))
    False
    >>> is_auto_empty_zone(DNSName('10.in-addr.arpa'))
    Traceback (most recent call last):
    ...
    AssertionError: ...
    """
    assert_absolute_dnsname(zone)
    return zone in EMPTY_ZONES


def inside_auto_empty_zone(name):
    """True if specified absolute name is a subdomain of an automatic empty
    zone.

    DNS domain is a subdomain of itself so this function
    returns True for zone apexes, too.

    >>> inside_auto_empty_zone(DNSName('in-addr.arpa.'))
    False
    >>> inside_auto_empty_zone(DNSName('10.in-addr.arpa.'))
    True
    >>> inside_auto_empty_zone(DNSName('1.10.in-addr.arpa.'))
    True
    >>> inside_auto_empty_zone(DNSName('1.10.in-addr.arpa'))
    Traceback (most recent call last):
    ...
    AssertionError: ...
    """
    assert_absolute_dnsname(name)
    return any(name.is_subdomain(aez) for aez in EMPTY_ZONES)


def related_to_auto_empty_zone(name):
    """True if specified absolute name is a sub/superdomain of an automatic
    empty zone.

    DNS domain is a subdomain of itself so this function
    returns True for zone apexes, too.

    >>> related_to_auto_empty_zone(DNSName('.'))
    True
    >>> related_to_auto_empty_zone(DNSName('in-addr.arpa.'))
    True
    >>> related_to_auto_empty_zone(DNSName('10.in-addr.arpa.'))
    True
    >>> related_to_auto_empty_zone(DNSName('1.10.in-addr.arpa.'))
    True
    >>> related_to_auto_empty_zone(DNSName('unrelated.example.'))
    False
    >>> related_to_auto_empty_zone(DNSName('1.10.in-addr.arpa'))
    Traceback (most recent call last):
    ...
    AssertionError: ...
    """
    assert_absolute_dnsname(name)
    acceptable = {dns.name.NAMERELN_SUBDOMAIN,
                  dns.name.NAMERELN_EQUAL,
                  dns.name.NAMERELN_SUPERDOMAIN}
    for aez in EMPTY_ZONES:
        if name.fullcompare(aez)[0] in acceptable:
            return True
    return False


def has_empty_zone_addresses(hostname):
    """Detect if given host is using IP address belonging to
    an automatic empty zone.

    Information from --ip-address option used in installed is lost by
    the time when upgrade is run. Use IP addresses from DNS as best
    approximation.

    This is brain-dead and duplicates logic from DNS installer
    but I did not find other way around.
    """
    for ip in resolve_ip_addresses(hostname):
        if inside_auto_empty_zone(DNSName(ip.reverse_dns)):
            return True
    return False
def resolve_rrsets(fqdn, rdtypes):
    """
    Get Resource Record sets for given FQDN.
    CNAME chain is followed during resolution
    but CNAMEs are not returned in the resulting rrset.

    :param fqdn: fully qualified domain name (str or DNSName)
    :param rdtypes: iterable of record type names, e.g. ['A', 'AAAA'];
        must not be empty
    :returns:
        list of dns.rrset.RRset objects, can be empty
        if the FQDN does not exist or if none of rrtypes exist
    :raises dns.exception.DNSException: on query failures other than
        NXDOMAIN / NoAnswer
    """
    # empty set of rdtypes would always return empty set of rrsets
    assert rdtypes, "rdtypes must not be empty"
    if not isinstance(fqdn, DNSName):
        fqdn = DNSName(fqdn)
    fqdn = fqdn.make_absolute()
    rrsets = []
    for rdtype in rdtypes:
        try:
            answer = resolve(fqdn, rdtype)
            logger.debug('found %d %s records for %s: %s',
                         len(answer),
                         rdtype,
                         fqdn,
                         ' '.join(str(rr) for rr in answer))
            rrsets.append(answer.rrset)
        except dns.resolver.NXDOMAIN as ex:
            logger.debug('%s', ex)
            break  # no such FQDN, do not iterate
        except dns.resolver.NoAnswer as ex:
            logger.debug('%s', ex)  # record type does not exist for given FQDN
        except dns.exception.DNSException as ex:
            logger.error('DNS query for %s %s failed: %s', fqdn, rdtype, ex)
            raise
    return rrsets
def resolve_ip_addresses(fqdn):
    """Get IP addresses from DNS A/AAAA records for given host (using DNS).

    :param fqdn: hostname to look up (str or DNSName)
    :returns:
        set of IP addresses as UnsafeIPAddress objects; empty when the
        host has no A/AAAA records
    """
    # NOTE: the docstring previously claimed a list was returned; the
    # function has always built and returned a set (deduplicated).
    rrsets = resolve_rrsets(fqdn, ['A', 'AAAA'])
    ip_addresses = set()
    for rrset in rrsets:
        # each rdata item in the rrset stringifies to the bare address
        ip_addresses.update(UnsafeIPAddress(ip) for ip in rrset)
    return ip_addresses
def check_zone_overlap(zone, raise_on_error=True):
    """Check whether a DNS zone is already served somewhere in DNS.

    :param zone: zone name to check (str or DNSName)
        NOTE(review): a DNSName argument is assumed to already be
        absolute -- only str input is made absolute here; confirm with
        callers.
    :param raise_on_error: when True, a failed DNS check raises
        ValueError (except SERVFAIL/NoNameservers, which only warns);
        when False, failures are logged as warnings
    :raises DNSZoneAlreadyExists: when the exact zone already exists
        in DNS, listing the nameservers currently serving it
    """
    logger.info("Checking DNS domain %s, please wait ...", zone)
    if not isinstance(zone, DNSName):
        zone = DNSName(zone).make_absolute()
    # automatic empty zones always exist so checking them is pointless,
    # do not report them to avoid meaningless error messages
    if is_auto_empty_zone(zone):
        return
    try:
        containing_zone = zone_for_name(zone)
    except dns.exception.DNSException as e:
        msg = ("DNS check for domain %s failed: %s." % (zone, e))
        if raise_on_error:
            if isinstance(e, dns.resolver.NoNameservers):
                # Show warning and continue in case we've got SERVFAIL
                # because we are supposedly going to create this reverse zone
                logger.warning('%s', msg)
                return
            else:
                raise ValueError(msg)
        else:
            logger.warning('%s', msg)
            return
    if containing_zone == zone:
        # The queried name is itself a zone apex: report which
        # nameservers currently serve it (best effort).
        try:
            ns = [ans.to_text() for ans in resolve(zone, 'NS')]
        except dns.exception.DNSException as e:
            logger.debug("Failed to resolve nameserver(s) for domain %s: %s",
                         zone, e)
            ns = []
        raise DNSZoneAlreadyExists(zone=zone.to_text(), ns=ns)
def _mix_weight(records):
    """Weighted population sorting for records with same priority
    (the RFC 2782 weighted-selection step).

    Orders the given records randomly, with probability influenced by
    each record's ``weight``. May shuffle the input list in place in
    the equal-weights fast path; otherwise returns a new list.
    """
    # trivial case
    if len(records) <= 1:
        return records
    # Optimization for common case: If all weights are the same (e.g. 0),
    # just shuffle the records, which is about four times faster.
    if all(rr.weight == records[0].weight for rr in records):
        random.shuffle(records)
        return records
    noweight = 0.01  # give records with 0 weight a small chance
    result = []
    records = set(records)
    while len(records) > 1:
        # Compute the sum of the weights of those RRs. Then choose a
        # uniform random number between 0 and the sum computed (inclusive).
        urn = random.uniform(0, sum(rr.weight or noweight for rr in records))
        # Select the RR whose running sum value is the first in the selected
        # order which is greater than or equal to the random number selected.
        # NOTE(review): there is no break here, so every record from the
        # selection point onward is removed in this pass (acc only grows
        # once it reaches urn). Verify this is the intended variant of
        # the RFC 2782 single-draw selection before changing it.
        acc = 0.
        for rr in records.copy():
            acc += rr.weight or noweight
            if acc >= urn:
                records.remove(rr)
                result.append(rr)
    if records:
        result.append(records.pop())
    return result
def sort_prio_weight(records):
    """RFC 2782 sorting algorithm for SRV and URI records

    RFC 2782 defines a sorting algorithms for SRV records, that is also used
    for sorting URI records. Records are sorted by priority and than randomly
    shuffled according to weight.

    This implementation also removes duplicate entries.

    :param records: iterable of SRV/URI rdata objects with ``priority``,
        ``weight``, ``target`` (and for SRV, ``port``) attributes
    :return: new list, sorted by priority, weight-shuffled within each
        priority bucket, duplicates removed
    """
    # order records by priority
    records = sorted(records, key=operator.attrgetter("priority"))
    # remove duplicate entries
    uniquerecords = []
    seen = set()
    for rr in records:
        # A SRV record has target and port, URI just has target.
        target = (rr.target, getattr(rr, "port", None))
        if target not in seen:
            uniquerecords.append(rr)
            seen.add(target)
    # weighted randomization of entries with same priority
    result = []
    sameprio = []
    for rr in uniquerecords:
        # add all items with same priority in a bucket
        if not sameprio or sameprio[0].priority == rr.priority:
            sameprio.append(rr)
        else:
            # got different priority, shuffle bucket
            result.extend(_mix_weight(sameprio))
            # start a new priority list
            sameprio = [rr]
    # add last batch of records with same priority
    if sameprio:
        result.extend(_mix_weight(sameprio))
    return result
def query_srv(qname, resolver=None, **kwargs):
    """Query SRV records and sort reply according to RFC 2782

    :param qname: query name, _service._proto.domain.
    :param resolver: optional resolver instance; the shared IPA
        resolver is used when omitted
    :return: list of dns.rdtypes.IN.SRV.SRV instances
    """
    resolve_f = (
        resolve if resolver is None
        else getattr(resolver, "resolve", resolver.query)
    )
    srv_answer = resolve_f(qname, rdtype=dns.rdatatype.SRV, **kwargs)
    return sort_prio_weight(srv_answer)
| 22,125
|
Python
|
.py
| 522
| 34.538314
| 83
| 0.635852
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,632
|
ipaldap.py
|
freeipa_freeipa/ipapython/ipaldap.py
|
# Authors: Rich Megginson <richm@redhat.com>
# Rob Crittenden <rcritten@redhat.com>
# John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import binascii
import errno
import logging
import time
from datetime import datetime
from decimal import Decimal
from copy import deepcopy
import contextlib
import os
import pwd
import warnings
from collections import OrderedDict
from cryptography import x509 as crypto_x509
from cryptography.hazmat.primitives import serialization
import ldap
import ldap.sasl
import ldap.filter
from ldap.controls import SimplePagedResultsControl, GetEffectiveRightsControl
import ldapurl
import six
# pylint: disable=ipa-forbidden-import
from ipalib import errors, x509, _
from ipalib.constants import LDAP_GENERALIZED_TIME_FORMAT
# pylint: enable=ipa-forbidden-import
from ipaplatform.paths import paths
from ipapython.ipautil import format_netloc, CIDict
from ipapython.dn import DN, RDN
from ipapython.dnsutil import DNSName
from ipapython.kerberos import Principal
from collections.abc import MutableMapping
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# Global variable to define SASL auth
SASL_GSSAPI = ldap.sasl.sasl({}, 'GSSAPI')
SASL_GSS_SPNEGO = ldap.sasl.sasl({}, 'GSS-SPNEGO')
_debug_log_ldap = False
_missing = object()
# Autobind modes
AUTOBIND_AUTO = 1
AUTOBIND_ENABLED = 2
AUTOBIND_DISABLED = 3
TRUNCATED_SIZE_LIMIT = object()
TRUNCATED_TIME_LIMIT = object()
TRUNCATED_ADMIN_LIMIT = object()
DIRMAN_DN = DN(('cn', 'directory manager'))
if six.PY2 and hasattr(ldap, 'LDAPBytesWarning'):
# XXX silence python-ldap's BytesWarnings
warnings.filterwarnings(
action="ignore",
category=ldap.LDAPBytesWarning,
)
def realm_to_serverid(realm_name):
    """Convert Kerberos realm name to 389-DS server id"""
    # Every dot becomes a dash, e.g. EXAMPLE.COM -> EXAMPLE-COM.
    return realm_name.replace(".", "-")
def realm_to_ldapi_uri(realm_name):
    """Get ldapi:// URI to 389-DS's Unix socket"""
    socketname = paths.SLAPD_INSTANCE_SOCKET_TEMPLATE % (
        realm_to_serverid(realm_name),
    )
    return 'ldapi://' + ldapurl.ldapUrlEscape(socketname)
def ldap_initialize(uri, cacertfile=None):
    """Wrapper around ldap.initialize()

    The function undoes global and local ldap.conf settings that may cause
    issues or reduce security:
    * Canonization of SASL host names is disabled.
    * With cacertfile=None, the connection uses OpenSSL's default verify
      locations, also known as system-wide trust store.
    * Cert validation is enforced.
    * SSLv2 and SSLv3 are disabled.

    :param uri: LDAP URL (ldap://, ldaps:// or ldapi://)
    :param cacertfile: optional path to a CA certificate file; must
        exist when given
    :raises IOError: when cacertfile is given but does not exist
    :return: the initialized ldap connection object
    """
    conn = ldap.initialize(uri)
    # Do not perform reverse DNS lookups to canonicalize SASL host names
    conn.set_option(ldap.OPT_X_SASL_NOCANON, ldap.OPT_ON)
    # TLS options only apply to network transports, not Unix sockets.
    if not uri.startswith('ldapi://'):
        if cacertfile:
            if not os.path.isfile(cacertfile):
                raise IOError(errno.ENOENT, cacertfile)
            conn.set_option(ldap.OPT_X_TLS_CACERTFILE, cacertfile)
        # SSLv3 and SSLv2 are insecure
        # OpenLDAP 2.4 sets minimum version with SSL_CTX_set_options(). The
        # system-wide crypto-policies for TLS minimum version are applied
        # with SSL_CTX_set_min_proto_version(). The set_option() call cannot
        # enable lower versions than allowed by crypto-policy, e.g.
        # openssl.cnf MinProtocol=TLS1.2 + OPT_X_TLS_PROTOCOL_MIN=TLS1.0
        # result in TLS 1.2 as minimum protocol version.
        conn.set_option(ldap.OPT_X_TLS_PROTOCOL_MIN, 0x301)  # TLS 1.0
        # libldap defaults to cert validation, but the default can be
        # overridden in global or user local ldap.conf.
        conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
        # reinitialize TLS context to materialize settings
        # (must run after all other TLS options are set)
        conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
    return conn
class _ServerSchema:
'''
Properties of a schema retrieved from an LDAP server.
'''
def __init__(self, server, schema):
self.server = server
self.schema = schema
self.retrieve_timestamp = time.time()
class SchemaCache:
    '''
    Cache the schema's from individual LDAP servers.

    Entries are keyed by server URL; each value is a _ServerSchema
    wrapper recording when the schema was retrieved.
    '''
    def __init__(self):
        # url -> _ServerSchema
        self.servers = {}

    def get_schema(self, url, conn, force_update=False):
        '''
        Return schema belonging to a specific LDAP server.

        For performance reasons the schema is retrieved once and
        cached unless force_update is True. force_update flushes the
        existing schema for the server from the cache and reacquires
        it.
        '''
        if force_update:
            self.flush(url)
        server_schema = self.servers.get(url)
        if server_schema is None:
            schema = self._retrieve_schema_from_server(url, conn)
            server_schema = _ServerSchema(url, schema)
            self.servers[url] = server_schema
        return server_schema.schema

    def flush(self, url):
        # Drop the cached schema for url; a missing entry is not an error.
        logger.debug('flushing %s from SchemaCache', url)
        try:
            del self.servers[url]
        except KeyError:
            pass

    def _retrieve_schema_from_server(self, url, conn):
        """
        Retrieve the LDAP schema from the provided url and determine if
        User-Private Groups (upg) are configured.

        Bind using kerberos credentials. If in the context of the
        in-tree "lite" server then use the current ccache. If in the context of
        Apache then create a new ccache and bind using the Apache HTTP service
        principal.

        If a connection is provided then it the credentials bound to it are
        used. The connection is not closed when the request is done.
        """
        assert conn is not None
        logger.debug(
            'retrieving schema for SchemaCache url=%s conn=%s', url, conn)
        try:
            try:
                schema_entry = conn.search_s('cn=schema', ldap.SCOPE_BASE,
                    attrlist=['attributetypes', 'objectclasses'])[0]
            except ldap.NO_SUCH_OBJECT:
                # try different location for schema
                # openldap has schema located in cn=subschema
                logger.debug('cn=schema not found, fallback to cn=subschema')
                schema_entry = conn.search_s('cn=subschema', ldap.SCOPE_BASE,
                    attrlist=['attributetypes', 'objectclasses'])[0]
        except ldap.SERVER_DOWN:
            raise errors.NetworkError(uri=url,
                error=u'LDAP Server Down, unable to retrieve LDAP schema')
        except ldap.LDAPError as e:
            desc = e.args[0]['desc'].strip()
            info = e.args[0].get('info', '').strip()
            raise errors.DatabaseError(desc = u'uri=%s' % url,
                info = u'Unable to retrieve LDAP schema: %s: %s' % (desc, info))
        # no 'cn=schema' entry in LDAP? some servers use 'cn=subschema'
        # TODO: DS uses 'cn=schema', support for other server?
        #       raise a more appropriate exception
        return ldap.schema.SubSchema(schema_entry[1])
schema_cache = SchemaCache()
class LDAPEntry(MutableMapping):
__slots__ = ('_conn', '_dn', '_names', '_nice', '_raw', '_sync',
'_not_list', '_orig_raw', '_raw_view',
'_single_value_view')
__hash__ = None
def __init__(self, _conn, _dn=None, _obj=None, **kwargs):
    """
    LDAPEntry constructor.

    Takes 1 to 3 positional arguments and an arbitrary number of keyword
    arguments. The 3 forms of positional arguments are:

    * LDAPEntry(entry) - create a shallow copy of an existing LDAPEntry.
    * LDAPEntry(dn, entry) - create a shallow copy of an existing
      LDAPEntry with a different DN.
    * LDAPEntry(conn, dn, mapping) - create a new LDAPEntry using the
      specified LDAPClient and DN and optionally initialize
      attributes from the specified mapping object.

    Keyword arguments can be used to override values of specific attributes.
    """
    super(LDAPEntry, self).__init__()
    # Argument shifting: when called as LDAPEntry(entry) or
    # LDAPEntry(dn, entry), pull the missing conn/dn out of the entry.
    if isinstance(_conn, LDAPEntry):
        assert _dn is None
        _dn = _conn
        _conn = _conn._conn
    assert isinstance(_conn, LDAPClient)
    if isinstance(_dn, LDAPEntry):
        assert _obj is None
        _obj = _dn
        _dn = _dn._dn
    assert isinstance(_dn, DN)
    if _obj is None:
        _obj = {}
    self._conn = _conn
    self._dn = _dn
    # Case-insensitive map resolving attribute-name aliases to the
    # canonical storage name.
    self._names = CIDict()
    # Decoded ("nice") and raw (bytes) value lists per attribute,
    # reconciled lazily by _sync_attr().
    self._nice = {}
    self._raw = {}
    self._sync = {}
    # Attribute names that were assigned a bare (non-list) value.
    self._not_list = set()
    # Raw values as originally loaded -- presumably used to compute
    # modifications against the server state (not shown in this chunk).
    self._orig_raw = {}
    self._raw_view = None
    self._single_value_view = None
    if isinstance(_obj, LDAPEntry):
        self._not_list = set(_obj._not_list)
        self._orig_raw = dict(_obj._orig_raw)
        if _obj.conn is _conn:
            # Same connection: the cached name map and decoded values
            # remain valid, copy them directly.
            self._names = CIDict(_obj._names)
            self._nice = dict(_obj._nice)
            self._raw = dict(_obj._raw)
            self._sync = dict(_obj._sync)
        else:
            # Different connection: re-import via the raw view so the
            # values are decoded against this connection.
            self.raw.update(_obj.raw)
            _obj = {}
    self.update(_obj, **kwargs)
@property
def conn(self):
    # LDAPClient this entry is bound to (read-only).
    return self._conn

@property
def dn(self):
    return self._dn

@dn.setter
def dn(self, value):
    assert isinstance(value, DN)
    self._dn = value

@property
def raw(self):
    # Lazily created view exposing attribute values as raw bytes lists.
    if self._raw_view is None:
        self._raw_view = RawLDAPEntryView(self)
    return self._raw_view

@property
def single_value(self):
    # Lazily created view exposing a single value per attribute.
    if self._single_value_view is None:
        self._single_value_view = SingleValueLDAPEntryView(self)
    return self._single_value_view

def __repr__(self):
    # Show decoded values where available, fall back to raw bytes.
    data = dict(self._raw)
    data.update((k, v) for k, v in self._nice.items() if v is not None)
    return '%s(%r, %r)' % (type(self).__name__, self._dn, data)

def copy(self):
    # Shallow copy sharing the same connection and DN.
    return LDAPEntry(self)
def _sync_attr(self, name):
    """Reconcile the decoded (`_nice`) and raw (`_raw`) value lists of
    one attribute after either side was modified.

    Additions/removals made on one side since the last sync are
    mirrored onto the other, then a deep-copied snapshot of both lists
    is stored in `_sync` as the new baseline.

    :raises ValueError: when a raw value cannot be decoded
    """
    nice = self._nice[name]
    assert isinstance(nice, list)
    raw = self._raw[name]
    assert isinstance(raw, list)
    nice_sync, raw_sync = self._sync.setdefault(name, ([], []))
    if nice == nice_sync and raw == raw_sync:
        return
    # Per-side deltas relative to the last synchronized snapshot.
    nice_adds = set(nice) - set(nice_sync)
    nice_dels = set(nice_sync) - set(nice)
    raw_adds = set(raw) - set(raw_sync)
    raw_dels = set(raw_sync) - set(raw)
    # Propagate deletions, skipping values explicitly re-added on the
    # other side.
    for value in nice_dels:
        value = self._conn.encode(value)
        if value in raw_adds:
            continue
        raw.remove(value)
    for value in raw_dels:
        try:
            value = self._conn.decode(value, name)
        except ValueError as e:
            raise ValueError("{error} in LDAP entry '{dn}'".format(
                error=e, dn=self._dn))
        if value in nice_adds:
            continue
        nice.remove(value)
    # Propagate additions in the order they were appended (key=index).
    for value in sorted(nice_adds, key=nice.index):
        value = self._conn.encode(value)
        if value in raw_dels:
            continue
        raw.append(value)
    for value in sorted(raw_adds, key=raw.index):
        try:
            value = self._conn.decode(value, name)
        except ValueError as e:
            raise ValueError("{error} in LDAP entry '{dn}'".format(
                error=e, dn=self._dn))
        if value in nice_dels:
            continue
        nice.append(value)
    # Snapshot both sides as the new baseline for future syncs.
    self._sync[name] = (deepcopy(nice), deepcopy(raw))
    if len(nice) > 1:
        # Multiple values: the attribute can no longer round-trip as a
        # bare scalar.
        self._not_list.discard(name)
    def _attr_name(self, name):
        """Validate an attribute name and normalize it to text."""
        if not isinstance(name, str):
            raise TypeError(
                "attribute name must be unicode or str, got %s object %r" % (
                    name.__class__.__name__, name))
        if isinstance(name, bytes):
            # Python 2 compatibility: str is bytes there, so decode.
            # On Python 3 this branch is unreachable.
            name = name.decode('utf-8')
        return name
    def _add_attr_name(self, name):
        """Register *name* and its schema aliases; return the storage key.

        Also re-keys any original raw value recorded under an alias so
        that modlist generation compares like with like.
        """
        if name in self._names:
            return self._names[name]
        if self._conn.schema is not None:
            if six.PY2:
                encoded_name = name.encode('utf-8')
            else:
                encoded_name = name
            attrtype = self._conn.schema.get_obj(
                ldap.schema.AttributeType, encoded_name)
            if attrtype is not None:
                # Map every schema alias of the attribute to this name.
                for altname in attrtype.names:
                    if six.PY2:
                        altname = altname.decode('utf-8')
                    self._names[altname] = name
        self._names[name] = name
        for oldname in list(self._orig_raw):
            if self._names.get(oldname) == name:
                self._orig_raw[name] = self._orig_raw.pop(oldname)
                break
        return name
    def _set_nice(self, name, value):
        """Set decoded value(s) for *name*, invalidating the raw side."""
        name = self._attr_name(name)
        name = self._add_attr_name(name)
        if not isinstance(value, list):
            if value is None:
                value = []
            else:
                value = [value]
            # Remember the caller used a scalar so reads return a scalar.
            self._not_list.add(name)
        else:
            self._not_list.discard(name)
        if self._nice.get(name) is not value:
            self._nice[name] = value
            self._raw[name] = None
            self._sync.pop(name, None)
        if self._raw[name] is not None:
            self._sync_attr(name)
    def _set_raw(self, name, value):
        """Set the raw bytes value list for *name*, invalidating nice."""
        name = self._attr_name(name)
        if not isinstance(value, list):
            raise TypeError("%s value must be list, got %s object %r" % (
                name, value.__class__.__name__, value))
        for (i, item) in enumerate(value):
            if not isinstance(item, bytes):
                raise TypeError(
                    "%s[%d] value must be bytes, got %s object %r" % (
                        name, i, item.__class__.__name__, item)
                )
        name = self._add_attr_name(name)
        if self._raw.get(name) is not value:
            self._raw[name] = value
            self._nice[name] = None
            self._sync.pop(name, None)
        if self._nice[name] is not None:
            self._sync_attr(name)
    def __setitem__(self, name, value):
        self._set_nice(name, value)
    def _get_attr_name(self, name):
        """Resolve *name* through the alias map (KeyError if unknown)."""
        name = self._attr_name(name)
        name = self._names[name]
        return name
    def _get_nice(self, name):
        """Return decoded value(s), syncing from the raw side if needed."""
        name = self._get_attr_name(name)
        value = self._nice[name]
        if value is None:
            value = self._nice[name] = []
        assert isinstance(value, list)
        if self._raw[name] is not None:
            self._sync_attr(name)
        if name in self._not_list:
            # Caller assigned a scalar; hand back a scalar (or None).
            assert len(value) <= 1
            if value:
                value = value[0]
            else:
                value = None
        return value
    def _get_raw(self, name):
        """Return raw bytes values, syncing from the nice side if needed."""
        name = self._get_attr_name(name)
        value = self._raw[name]
        if value is None:
            value = self._raw[name] = []
        assert isinstance(value, list)
        if self._nice[name] is not None:
            self._sync_attr(name)
        return value
    def __getitem__(self, name):
        return self._get_nice(name)
    def __delitem__(self, name):
        name = self._get_attr_name(name)
        # Drop every alias that resolves to this canonical name.
        for (altname, keyname) in list(self._names.items()):
            if keyname == name:
                del self._names[altname]
        del self._nice[name]
        del self._raw[name]
        self._sync.pop(name, None)
        self._not_list.discard(name)
    def clear(self):
        self._names.clear()
        self._nice.clear()
        self._raw.clear()
        self._sync.clear()
        self._not_list.clear()
    def __len__(self):
        return len(self._nice)
    def __contains__(self, name):
        # Membership includes schema aliases, not only canonical names.
        return name in self._names
    def has_key(self, name):
        # Deprecated dict-style API kept for backward compatibility.
        return name in self
    def __eq__(self, other):
        # Entries deliberately compare by identity, not by content.
        if not isinstance(other, LDAPEntry):
            return NotImplemented
        return other is self
    def __ne__(self, other):
        if not isinstance(other, LDAPEntry):
            return NotImplemented
        return other is not self
    def reset_modlist(self, other=None):
        """Snapshot raw values of *other* (default: self) as the baseline
        used by generate_modlist() to compute changes.
        """
        if other is None:
            other = self
        assert isinstance(other, LDAPEntry)
        self._orig_raw = deepcopy(dict(other.raw))
    def generate_modlist(self):
        """Build a python-ldap modlist from changes since reset_modlist().

        Returns a list of (mod_op, name, values) tuples suitable for
        modify operations.
        """
        modlist = []
        # Consider attributes present now and/or in the baseline.
        names = set(self)
        names.update(self._orig_raw)
        for name in names:
            new = self.raw.get(name, [])
            old = self._orig_raw.get(name, [])
            if old and not new:
                modlist.append((ldap.MOD_DELETE, name, None))
                continue
            if not old and new:
                modlist.append((ldap.MOD_REPLACE, name, new))
                continue
            # We used to convert to sets and use difference to calculate
            # the changes but this did not preserve order which is important
            # particularly for schema
            adds = [value for value in new if value not in old]
            dels = [value for value in old if value not in new]
            if adds and self.conn.get_attribute_single_value(name):
                if len(adds) > 1:
                    raise errors.OnlyOneValueAllowed(attr=name)
                modlist.append((ldap.MOD_REPLACE, name, adds))
            else:
                # dels before adds, in case the same value occurs in
                # both due to encoding differences
                # (https://pagure.io/freeipa/issue/7750)
                if dels:
                    modlist.append((ldap.MOD_DELETE, name, dels))
                if adds:
                    modlist.append((ldap.MOD_ADD, name, adds))
        # Usually the modlist order does not matter.
        # However, for schema updates, we want 'attributetypes' before
        # 'objectclasses'.
        # A simple sort will ensure this.
        modlist.sort(key=lambda m: m[1].lower() != 'attributetypes')
        return modlist
    def __iter__(self):
        return iter(self._nice)
class LDAPEntryView(MutableMapping):
    """Abstract base for mapping views onto an LDAPEntry.

    Subclasses supply __getitem__/__setitem__; everything else
    delegates straight to the underlying entry.
    """
    __slots__ = ('_entry',)
    def __init__(self, entry):
        assert isinstance(entry, LDAPEntry)
        self._entry = entry
    def __delitem__(self, name):
        del self._entry[name]
    def clear(self):
        self._entry.clear()
    def __iter__(self):
        return iter(self._entry)
    def __len__(self):
        return len(self._entry)
    def __contains__(self, name):
        return name in self._entry
    def has_key(self, name):
        # Deprecated dict-style API kept for backward compatibility.
        return name in self
class RawLDAPEntryView(LDAPEntryView):
    """View of an LDAPEntry exposing raw (bytes) attribute values."""
    def __getitem__(self, name):
        return self._entry._get_raw(name)
    def __setitem__(self, name, value):
        self._entry._set_raw(name, value)
class SingleValueLDAPEntryView(LDAPEntryView):
    """View of an LDAPEntry that maps each attribute to a single value.

    Reading an attribute with more than one value raises ValueError;
    an attribute with no values reads as None.  Writing wraps the
    value in a single-element list (None clears the attribute).
    """
    def __getitem__(self, name):
        values = self._entry[name]
        if not isinstance(values, list):
            # FIXME: remove when we enforce lists
            return values
        if len(values) == 1:
            return values[0]
        if not values:
            return None
        raise ValueError(
            '%s has %s values, one expected' % (name, len(values)))
    def __setitem__(self, name, value):
        self._entry[name] = None if value is None else [value]
class LDAPClient:
"""LDAP backend class
    This class abstracts an LDAP connection, providing methods that work with
    LDAPEntry objects.
The purpose of this class is to provide a boundary between IPA and
python-ldap. In IPA we use IPA defined types because they are
richer and are designed to meet our needs. We also require that we
consistently use those types without exception. On the other hand
python-ldap uses different types. The goal is to be able to have
IPA code call python-ldap methods using the types native to
IPA. This class accomplishes that goal by exposing python-ldap
methods which take IPA types, convert them to python-ldap types,
call python-ldap, and then convert the results returned by
python-ldap into IPA types.
"""
# rules for generating filters from entries
MATCH_ANY = '|' # (|(filter1)(filter2))
MATCH_ALL = '&' # (&(filter1)(filter2))
MATCH_NONE = '!' # (!(filter1)(filter2))
# search scope for find_entries()
SCOPE_BASE = ldap.SCOPE_BASE
SCOPE_ONELEVEL = ldap.SCOPE_ONELEVEL
SCOPE_SUBTREE = ldap.SCOPE_SUBTREE
_SYNTAX_MAPPING = {
'1.3.6.1.4.1.1466.115.121.1.1' : bytes, # ACI item
'1.3.6.1.4.1.1466.115.121.1.4' : bytes, # Audio
'1.3.6.1.4.1.1466.115.121.1.5' : bytes, # Binary
'1.3.6.1.4.1.1466.115.121.1.7' : bool, # Boolean
'1.3.6.1.4.1.1466.115.121.1.8' : bytes, # Certificate
'1.3.6.1.4.1.1466.115.121.1.9' : bytes, # Certificate List
'1.3.6.1.4.1.1466.115.121.1.10' : bytes, # Certificate Pair
'1.3.6.1.4.1.1466.115.121.1.12' : DN, # Distinguished Name
'1.3.6.1.4.1.1466.115.121.1.23' : bytes, # Fax
'1.3.6.1.4.1.1466.115.121.1.24' : datetime, # GeneralizedTime
'1.3.6.1.4.1.1466.115.121.1.28' : bytes, # JPEG
'1.3.6.1.4.1.1466.115.121.1.40' : bytes, # OctetString (same as Binary)
'1.3.6.1.4.1.1466.115.121.1.49' : bytes, # Supported Algorithm
'1.3.6.1.4.1.1466.115.121.1.51' : bytes, # Teletext Terminal Identifier
'1.3.6.1.4.1.5322.21.2.5' : datetime, # krbLastAdminUnlock
'2.16.840.1.113730.3.8.3.3' : DN, # enrolledBy
'2.16.840.1.113730.3.8.3.18' : DN, # managedBy
'2.16.840.1.113730.3.8.3.5' : DN, # memberUser
'2.16.840.1.113730.3.8.3.7' : DN, # memberHost
'2.16.840.1.113730.3.8.3.20' : DN, # memberService
'2.16.840.1.113730.3.8.11.4' : DN, # ipaNTFallbackPrimaryGroup
'2.16.840.1.113730.3.8.11.21' : DN, # ipaAllowToImpersonate
'2.16.840.1.113730.3.8.11.22' : DN, # ipaAllowedTarget
'2.16.840.1.113730.3.8.7.1' : DN, # memberAllowCmd
'2.16.840.1.113730.3.8.7.2' : DN, # memberDenyCmd
'2.16.840.1.113719.1.301.4.6.1' : datetime, # krbPrincipalExpiration
'2.16.840.1.113719.1.301.4.14.1' : DN, # krbRealmReferences
'2.16.840.1.113719.1.301.4.17.1' : DN, # krbKdcServers
'2.16.840.1.113719.1.301.4.18.1' : DN, # krbPwdServers
'2.16.840.1.113719.1.301.4.26.1' : DN, # krbPrincipalReferences
'2.16.840.1.113719.1.301.4.29.1' : DN, # krbAdmServers
'2.16.840.1.113719.1.301.4.36.1' : DN, # krbPwdPolicyReference
'2.16.840.1.113719.1.301.4.37.1' : datetime, # krbPasswordExpiration
'2.16.840.1.113719.1.301.4.40.1' : DN, # krbTicketPolicyReference
'2.16.840.1.113719.1.301.4.41.1' : DN, # krbSubTrees
'2.16.840.1.113719.1.301.4.45.1' : datetime, # krbLastPwdChange
'2.16.840.1.113719.1.301.4.48.1' : datetime, # krbLastSuccessfulAuth
'2.16.840.1.113719.1.301.4.49.1' : datetime, # krbLastFailedAuth
'2.16.840.1.113719.1.301.4.52.1' : DN, # krbObjectReferences
'2.16.840.1.113719.1.301.4.53.1' : DN, # krbPrincContainerRef
'2.16.840.1.113730.3.8.16.1.3' : datetime, # ipatokenNotBefore
'2.16.840.1.113730.3.8.16.1.4' : datetime, # ipatokenNotAfter
}
# In most cases we lookup the syntax from the schema returned by
# the server. However, sometimes attributes may not be defined in
# the schema (e.g. extensibleObject which permits undefined
# attributes), or the schema was incorrectly defined (i.e. giving
# an attribute the syntax DirectoryString when in fact it's really
# a DN). This (hopefully sparse) table allows us to trap these
# anomalies and force them to be the syntax we know to be in use.
#
# FWIW, many entries under cn=config are undefined :-(
_SYNTAX_OVERRIDE = CIDict({
'managedtemplate': DN,
'managedbase': DN,
'memberindirect': DN,
'memberofindirect':DN,
'originscope': DN,
'idnsname': DNSName,
'idnssoamname': DNSName,
'idnssoarname': DNSName,
'dnszoneidnsname': DNSName,
'krbcanonicalname': Principal,
'krbprincipalname': Principal,
'usercertificate': crypto_x509.Certificate,
'usercertificate;binary': crypto_x509.Certificate,
'cACertificate': crypto_x509.Certificate,
'cACertificate;binary': crypto_x509.Certificate,
'nsds5replicalastupdatestart': unicode,
'nsds5replicalastupdateend': unicode,
'nsds5replicalastinitstart': unicode,
'nsds5replicalastinitend': unicode,
})
_SINGLE_VALUE_OVERRIDE = CIDict({
'nsslapd-ssl-check-hostname': True,
'nsslapd-lookthroughlimit': True,
'nsslapd-idlistscanlimit': True,
'nsslapd-anonlimitsdn': True,
'nsslapd-minssf-exclude-rootdse': True,
'nsslapd-enable-upgrade-hash': True,
'nsslapd-db-locks': True,
'nsslapd-logging-hr-timestamps-enabled': True,
'nsslapd-ldapientrysearchbase': True,
'nsslapd-ldapidnmappingbase': True,
'nsslapd-sizelimit': True,
})
time_limit = -1.0 # unlimited
size_limit = 0 # unlimited
    def __init__(self, ldap_uri, start_tls=False, force_schema_updates=False,
                 no_schema=False, decode_attrs=True, cacert=None,
                 sasl_nocanon=True):
        """Create LDAPClient object.

        :param ldap_uri: The LDAP URI to connect to
        :param start_tls: Use STARTTLS
        :param force_schema_updates:
            If true, this object will always request a new schema from the
            server. If false, a cached schema will be reused if it exists.
            Generally, it should be true if the API context is 'installer' or
            'updates', but it must be given explicitly since the API object
            is not always available
        :param no_schema: If true, schema is never requested from the server.
        :param decode_attrs:
            If true, attributes are decoded to Python types according to their
            syntax.
        :param cacert: path to a CA certificate file used for TLS, or None
        :param sasl_nocanon: if false, turn off SASL host name
            canonicalization on the connection
        """
        if ldap_uri is not None:
            # special case for ldap2 server plugin
            self.ldap_uri = ldap_uri
            assert self.protocol in {'ldaps', 'ldapi', 'ldap'}
        self._start_tls = start_tls
        self._force_schema_updates = force_schema_updates
        self._no_schema = no_schema
        self._decode_attrs = decode_attrs
        self._cacert = cacert
        self._sasl_nocanon = sasl_nocanon
        self._has_schema = False
        self._schema = None
        if ldap_uri is not None:
            self._conn = self._connect()
    @classmethod
    def from_realm(cls, realm_name, **kwargs):
        """Create a LDAPI connection to local 389-DS instance
        """
        uri = realm_to_ldapi_uri(realm_name)
        return cls(uri, start_tls=False, cacert=None, **kwargs)
    @classmethod
    def from_hostname_secure(cls, hostname, cacert=paths.IPA_CA_CRT,
                             start_tls=True, **kwargs):
        """Create LDAP or LDAPS connection to a remote 389-DS instance

        This constructor is opinionated and doesn't let you shoot yourself in
        the foot. It always creates a secure connection. By default it
        returns a LDAP connection to port 389 and performs STARTTLS using the
        default CA cert. With start_tls=False, it creates a LDAPS connection
        to port 636 instead.

        Note: Microsoft AD does not support SASL encryption and integrity
        verification with a TLS connection. For AD, use a plain connection
        with GSSAPI and a MIN_SSF >= 56. SASL GSSAPI and SASL GSS SPNEGO
        ensure data integrity and confidentiality with SSF > 1. Also see
        https://msdn.microsoft.com/en-us/library/cc223500.aspx
        """
        if start_tls:
            uri = 'ldap://%s' % format_netloc(hostname, 389)
        else:
            uri = 'ldaps://%s' % format_netloc(hostname, 636)
        return cls(uri, start_tls=start_tls, cacert=cacert, **kwargs)
    @classmethod
    def from_hostname_plain(cls, hostname, **kwargs):
        """Create a plain LDAP connection without TLS/SSL

        Note: a plain connection should only be used in combination with
        GSSAPI bind, which provides its own integrity and confidentiality.
        """
        assert 'start_tls' not in kwargs
        assert 'cacert' not in kwargs
        uri = 'ldap://%s' % format_netloc(hostname, 389)
        return cls(uri, **kwargs)
    def __str__(self):
        return self.ldap_uri
    def modify_s(self, dn, modlist):
        """Encode modlist values and call python-ldap's modify_s."""
        # FIXME: for backwards compatibility only
        assert isinstance(dn, DN)
        dn = str(dn)
        modlist = [(a, b, self.encode(c)) for a, b, c in modlist]
        return self.conn.modify_s(dn, modlist)
@property
def conn(self):
return self._conn
@property
def protocol(self):
if self.ldap_uri:
return self.ldap_uri.split('://', 1)[0]
else:
return None
    def _get_schema(self):
        """Return the server schema, fetching and caching it on first use.

        Returns None when schema handling is disabled or the fetch fails.
        """
        if self._no_schema:
            return None
        if not self._has_schema:
            try:
                schema = schema_cache.get_schema(
                    self.ldap_uri, self.conn,
                    force_update=self._force_schema_updates)
            except (errors.ExecutionError, IndexError):
                schema = None
            # bypass ldap2's locking
            object.__setattr__(self, '_schema', schema)
            object.__setattr__(self, '_has_schema', True)
        return self._schema
    def _flush_schema(self):
        '''
        Force this instance to forget its cached schema and reacquire
        it from the schema cache.
        '''
        # Currently this is called during bind operations to assure
        # we're working with valid schema for a specific
        # connection. This causes self._get_schema() to query the
        # schema cache for the server's schema passing along a flag
        # indicating if we're in a context that requires freshly
        # loading the schema vs. returning the last cached version of
        # the schema. If we're in a mode that permits use of
        # previously cached schema the flush and reacquire is a very
        # low cost operation.
        #
        # The schema is reacquired whenever this object is
        # instantiated or when binding occurs. The schema is not
        # reacquired for operations during a bound connection, it is
        # presumed schema cannot change during this interval. This
        # provides for maximum efficiency in contexts which do need
        # schema refreshing by only performing the refresh in between
        # logical operations that have the potential to cause a schema
        # change.
        # bypass ldap2's locking
        object.__setattr__(self, '_has_schema', False)
        object.__setattr__(self, '_schema', None)
    def get_attribute_type(self, name_or_oid):
        """Return the Python type used for values of the given attribute.

        Resolution order: raw mode (bytes), hard-coded overrides, the
        server schema's syntax OID, then unicode as the fallback.
        """
        if not self._decode_attrs:
            return bytes
        if six.PY2:
            if isinstance(name_or_oid, unicode):
                name_or_oid = name_or_oid.encode('utf-8')
        # Is this a special case attribute?
        if name_or_oid in self._SYNTAX_OVERRIDE:
            return self._SYNTAX_OVERRIDE[name_or_oid]
        schema = self._get_schema()
        if schema is not None:
            # Try to lookup the syntax in the schema returned by the server
            obj = schema.get_obj(ldap.schema.AttributeType, name_or_oid)
            if obj is not None and obj.syntax in self._SYNTAX_MAPPING:
                return self._SYNTAX_MAPPING[obj.syntax]
        return unicode
    def has_dn_syntax(self, name_or_oid):
        """
        Check the schema to see if the attribute uses DN syntax.

        Returns True/False
        """
        return self.get_attribute_type(name_or_oid) is DN
    def get_attribute_single_value(self, name_or_oid):
        """
        Check the schema to see if the attribute is single-valued.

        If the attribute is in the schema then returns True/False

        If there is a problem loading the schema or the attribute is
        not in the schema return None
        """
        if six.PY2 and isinstance(name_or_oid, unicode):
            name_or_oid = name_or_oid.encode('utf-8')
        # Hard-coded overrides take precedence over the server schema.
        if name_or_oid in self._SINGLE_VALUE_OVERRIDE:
            return self._SINGLE_VALUE_OVERRIDE[name_or_oid]
        schema = self._get_schema()
        if schema is not None:
            obj = schema.get_obj(ldap.schema.AttributeType, name_or_oid)
            if obj is not None:
                return obj.single_value
        return None
    def encode(self, val):
        """
        Encode attribute value to LDAP representation (str/bytes).

        Containers (list/tuple/dict) are encoded recursively; None
        passes through; unsupported types raise TypeError.
        """
        # Booleans are both an instance of bool and int, therefore
        # test for bool before int otherwise the int clause will be
        # entered for a boolean value instead of the boolean clause.
        if isinstance(val, bool):
            if val:
                return b'TRUE'
            else:
                return b'FALSE'
        elif isinstance(val, (unicode, int, Decimal, DN, Principal)):
            return str(val).encode('utf-8')
        elif isinstance(val, DNSName):
            return val.to_text().encode('ascii')
        elif isinstance(val, bytes):
            return val
        elif isinstance(val, list):
            return [self.encode(m) for m in val]
        elif isinstance(val, tuple):
            return tuple(self.encode(m) for m in val)
        elif isinstance(val, dict):
            # key in dict must be str not bytes
            dct = dict((k, self.encode(v)) for k, v in val.items())
            return dct
        elif isinstance(val, datetime):
            return val.strftime(LDAP_GENERALIZED_TIME_FORMAT).encode('utf-8')
        elif isinstance(val, crypto_x509.Certificate):
            return val.public_bytes(x509.Encoding.DER)
        elif val is None:
            return None
        else:
            raise TypeError("attempt to pass unsupported type to ldap, value=%s type=%s" %(val, type(val)))
    def decode(self, val, attr):
        """
        Decode attribute value from LDAP representation (str/bytes).

        The target Python type is chosen per attribute via
        get_attribute_type(); containers are decoded recursively.
        Raises ValueError when a value cannot be converted.
        """
        if isinstance(val, bytes):
            target_type = self.get_attribute_type(attr)
            try:
                if target_type is bytes:
                    return val
                elif target_type is unicode:
                    return val.decode('utf-8')
                elif target_type is bool:
                    return val.decode('utf-8') == 'TRUE'
                elif target_type is datetime:
                    return datetime.strptime(
                        val.decode('utf-8'), LDAP_GENERALIZED_TIME_FORMAT)
                elif target_type is DNSName:
                    return DNSName.from_text(val.decode('utf-8'))
                elif target_type in (DN, Principal):
                    return target_type(val.decode('utf-8'))
                elif target_type is crypto_x509.Certificate:
                    return x509.load_der_x509_certificate(val)
                else:
                    return target_type(val)
            except Exception:
                msg = 'unable to convert the attribute %r value %r to type %s' % (attr, val, target_type)
                logger.error('%s', msg)
                raise ValueError(msg)
        elif isinstance(val, list):
            return [self.decode(m, attr) for m in val]
        elif isinstance(val, tuple):
            return tuple(self.decode(m, attr) for m in val)
        elif isinstance(val, dict):
            # Dict keys become str; values are decoded per their key.
            dct = {
                k.decode('utf-8'): self.decode(v, k) for k, v in val.items()
            }
            return dct
        elif val is None:
            return None
        else:
            raise TypeError("attempt to pass unsupported type from ldap, value=%s type=%s" %(val, type(val)))
    def _convert_result(self, result):
        '''
        result is a python-ldap result tuple of the form (dn, attrs),
        where dn is a string containing the dn (distinguished name) of
        the entry, and attrs is a dictionary containing the attributes
        associated with the entry. The keys of attrs are strings, and
        the associated values are lists of strings.

        We convert the tuple to an LDAPEntry object.
        '''
        ipa_result = []
        for dn_tuple in result:
            original_dn = dn_tuple[0]
            original_attrs = dn_tuple[1]
            # original_dn is None if referral instead of an entry was
            # returned from the LDAP server, we need to skip this item
            if original_dn is None:
                log_msg = 'Referral entry ignored: {ref}'\
                          .format(ref=str(original_attrs))
                logger.debug('%s', log_msg)
                continue
            ipa_entry = LDAPEntry(self, DN(original_dn))
            for attr, original_values in original_attrs.items():
                ipa_entry.raw[attr] = original_values
            # Mark the freshly loaded values as the modlist baseline.
            ipa_entry.reset_modlist()
            ipa_result.append(ipa_entry)
        if _debug_log_ldap:
            logger.debug('ldap.result: %s', ipa_result)
        return ipa_result
    @contextlib.contextmanager
    def error_handler(self, arg_desc=None):
        """Context manager that handles LDAPErrors

        The inner handler captures the server-supplied ``desc``/``info``
        strings from any LDAPError and re-raises; the outer handlers then
        map specific python-ldap exception classes to ipalib errors.

        :param arg_desc: optional description of the operation's
            arguments, appended to the diagnostic info
        """
        desc = None
        try:
            try:
                yield
            except ldap.TIMEOUT:
                raise errors.DatabaseTimeout()
            except ldap.LDAPError as e:
                desc = e.args[0]['desc'].strip()
                info = e.args[0].get('info', '').strip()
                if arg_desc is not None:
                    info = "%s arguments: %s" % (info, arg_desc)
                raise
        except ldap.NO_SUCH_OBJECT:
            raise errors.NotFound(reason=arg_desc or 'no such entry')
        except ldap.ALREADY_EXISTS:
            # entry already exists
            raise errors.DuplicateEntry()
        except ldap.TYPE_OR_VALUE_EXISTS:
            # attribute type or attribute value already exists, usually only
            # occurs, when two machines try to write at the same time.
            raise errors.DuplicateEntry(message=desc)
        except ldap.CONSTRAINT_VIOLATION:
            # This error gets thrown by the uniqueness plugin
            _msg = 'Another entry with the same attribute value already exists'
            if info.startswith(_msg):
                raise errors.DuplicateEntry()
            else:
                raise errors.DatabaseError(desc=desc, info=info)
        except ldap.INSUFFICIENT_ACCESS:
            raise errors.ACIError(info=info)
        except ldap.INVALID_CREDENTIALS:
            raise errors.ACIError(info="%s %s" % (info, desc))
        except ldap.INAPPROPRIATE_AUTH:
            raise errors.ACIError(info="%s: %s" % (desc, info))
        except ldap.NO_SUCH_ATTRIBUTE:
            # this is raised when a 'delete' attribute isn't found.
            # it indicates the previous attribute was removed by another
            # update, making the oldentry stale.
            raise errors.MidairCollision()
        except ldap.INVALID_SYNTAX:
            raise errors.InvalidSyntax(attr=info)
        except ldap.OBJECT_CLASS_VIOLATION:
            raise errors.ObjectclassViolation(info=info)
        except ldap.ADMINLIMIT_EXCEEDED:
            raise errors.AdminLimitExceeded()
        except ldap.SIZELIMIT_EXCEEDED:
            raise errors.SizeLimitExceeded()
        except ldap.TIMELIMIT_EXCEEDED:
            raise errors.TimeLimitExceeded()
        except ldap.NOT_ALLOWED_ON_RDN:
            raise errors.NotAllowedOnRDN(attr=info)
        except ldap.FILTER_ERROR:
            raise errors.BadSearchFilter(info=info)
        except ldap.NOT_ALLOWED_ON_NONLEAF:
            raise errors.NotAllowedOnNonLeaf()
        except ldap.SERVER_DOWN:
            raise errors.NetworkError(uri=self.ldap_uri,
                                      error=info)
        except ldap.LOCAL_ERROR:
            raise errors.ACIError(info=info)
        except ldap.SUCCESS:
            pass
        except ldap.CONNECT_ERROR:
            raise errors.DatabaseError(desc=desc, info=info)
        except ldap.UNWILLING_TO_PERFORM:
            raise errors.DatabaseError(desc=desc, info=info)
        except ldap.AUTH_UNKNOWN:
            raise errors.ACIError(info='%s (%s)' % (info,desc))
        except ldap.LDAPError as e:
            # Catch-all for any LDAPError subclass not mapped above.
            if 'NOT_ALLOWED_TO_DELEGATE' in info:
                raise errors.ACIError(
                    info="KDC returned NOT_ALLOWED_TO_DELEGATE")
            logger.debug(
                'Unhandled LDAPError: %s: %s', type(e).__name__, str(e))
            raise errors.DatabaseError(desc=desc, info=info)
    @staticmethod
    def handle_truncated_result(truncated):
        """Raise the matching limit error for a truncation marker.

        *truncated* is falsy (no truncation) or one of the TRUNCATED_*
        sentinels; any other truthy value raises LimitsExceeded.
        """
        if not truncated:
            return
        if truncated is TRUNCATED_ADMIN_LIMIT:
            raise errors.AdminLimitExceeded()
        elif truncated is TRUNCATED_SIZE_LIMIT:
            raise errors.SizeLimitExceeded()
        elif truncated is TRUNCATED_TIME_LIMIT:
            raise errors.TimeLimitExceeded()
        else:
            raise errors.LimitsExceeded()
    @property
    def schema(self):
        """schema associated with this LDAP server"""
        return self._get_schema()
def get_allowed_attributes(self, objectclasses, raise_on_unknown=False,
attributes="all"):
if self.schema is None:
return None
allowed_attributes = []
for oc in objectclasses:
obj = self.schema.get_obj(ldap.schema.ObjectClass, oc)
if obj is not None:
if attributes == "must":
# Only return required(must) attrs
allowed_attributes += obj.must
elif attributes == "may":
# Only return allowed(may) attrs
allowed_attributes += obj.may
else:
# Return both allowed & required attrs
allowed_attributes += obj.must + obj.may
elif raise_on_unknown:
raise errors.NotFound(
reason=_('objectclass %s not found') % oc)
return [unicode(a).lower() for a in list(set(allowed_attributes))]
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def close(self):
        """
        Close the connection.
        """
        # Dropping the reference allows python-ldap cleanup on GC.
        self._conn = None
    def _connect(self):
        """Initialize the python-ldap connection, optionally with STARTTLS."""
        with self.error_handler():
            conn = ldap_initialize(self.ldap_uri, cacertfile=self._cacert)
            # SASL_NOCANON is set to ON in Fedora's default ldap.conf and
            # in the ldap_initialize() function.
            if not self._sasl_nocanon:
                conn.set_option(ldap.OPT_X_SASL_NOCANON, ldap.OPT_OFF)
            if self._start_tls and self.protocol == 'ldap':
                # STARTTLS applies only to ldap:// connections
                conn.start_tls_s()
        return conn
    def simple_bind(self, bind_dn, bind_password, server_controls=None,
                    client_controls=None, insecure_bind=False):
        """
        Perform simple bind operation.

        :param bind_dn: DN to bind as
        :param bind_password: password for the simple bind
        :param insecure_bind: allow sending a non-empty password over an
            unencrypted ldap:// connection (off by default)
        :raises ValueError: when a non-empty password would be sent over
            an insecure connection
        """
        if (self.protocol == 'ldap' and not self._start_tls and
                bind_password and not insecure_bind):
            # non-empty bind must use a secure connection unless
            # insecure bind is explicitly enabled
            raise ValueError('simple_bind over insecure LDAP connection')
        with self.error_handler():
            self._flush_schema()
            assert isinstance(bind_dn, DN)
            bind_dn = str(bind_dn)
            bind_password = self.encode(bind_password)
            self.conn.simple_bind_s(
                bind_dn, bind_password, server_controls, client_controls)
    def external_bind(self, server_controls=None, client_controls=None):
        """
        Perform SASL bind operation using the SASL EXTERNAL mechanism.

        Identity is derived from the current effective UID's user name.
        """
        user_name = pwd.getpwuid(os.geteuid()).pw_name
        with self.error_handler():
            auth_tokens = ldap.sasl.external(user_name)
            self._flush_schema()
            self.conn.sasl_interactive_bind_s(
                '', auth_tokens, server_controls, client_controls)
    def gssapi_bind(self, server_controls=None, client_controls=None):
        """
        Perform SASL bind operation using the SASL GSSAPI mechanism.
        """
        with self.error_handler():
            # LDAPI sockets use SPNEGO; network connections use GSSAPI.
            if self.protocol == 'ldapi':
                auth_tokens = SASL_GSS_SPNEGO
            else:
                auth_tokens = SASL_GSSAPI
            self._flush_schema()
            self.conn.sasl_interactive_bind_s(
                '', auth_tokens, server_controls, client_controls)
    def unbind(self):
        """
        Perform unbind operation.
        """
        with self.error_handler():
            self._flush_schema()
            self.conn.unbind_s()
    def make_dn_from_attr(self, attr, value, parent_dn=None):
        """
        Make distinguished name from attribute.

        Keyword arguments:
        parent_dn -- DN of the parent entry (default '')
        """
        if parent_dn is None:
            parent_dn = DN()
        if isinstance(value, (list, tuple)):
            # Multi-valued attribute: use the first value for the RDN.
            value = value[0]
        return DN((attr, value), parent_dn)
def make_dn(self, entry_attrs, primary_key='cn', parent_dn=None):
"""
Make distinguished name from entry attributes.
Keyword arguments:
primary_key -- attribute from which to make RDN (default 'cn')
parent_dn -- DN of the parent entry (default '')
"""
assert primary_key in entry_attrs
assert isinstance(parent_dn, DN)
return DN((primary_key, entry_attrs[primary_key]), parent_dn)
    def make_entry(self, _dn=None, _obj=None, **kwargs):
        """Create a new LDAPEntry bound to this connection."""
        return LDAPEntry(self, _dn, _obj, **kwargs)
    # generating filters for find_entry
    # some examples:
    # f1 = ldap2.make_filter_from_attr(u'firstName', u'Pavel')
    # f2 = ldap2.make_filter_from_attr(u'lastName', u'Zuna')
    # f = ldap2.combine_filters([f1, f2], ldap2.MATCH_ALL)
    # # f should be (&(firstName=Pavel)(lastName=Zuna))
    # # it should be equivalent to:
    # entry_attrs = {u'firstName': u'Pavel', u'lastName': u'Zuna'}
    # f = ldap2.make_filter(entry_attrs, rules=ldap2.MATCH_ALL)
@classmethod
def combine_filters(cls, filters, rules='|'):
"""
Combine filters into one for ldap2.find_entries.
Keyword arguments:
rules -- see ldap2.make_filter
"""
assert isinstance(filters, (list, tuple))
filters = [fx for fx in filters if fx]
if filters and rules == cls.MATCH_NONE: # unary operator
return '(%s%s)' % (cls.MATCH_NONE,
cls.combine_filters(filters, cls.MATCH_ANY))
if len(filters) > 1:
flt = '(%s' % rules
else:
flt = ''
for f in filters:
if not f.startswith('('):
f = '(%s)' % f
flt = '%s%s' % (flt, f)
if len(filters) > 1:
flt = '%s)' % flt
return flt
    @classmethod
    def make_filter_from_attr(
            cls, attr, value, rules='|', exact=True,
            leading_wildcard=True, trailing_wildcard=True):
        """
        Make filter for ldap2.find_entries from attribute.

        Keyword arguments:
        rules -- see ldap2.make_filter
        exact -- boolean, True - make filter as (attr=value)
                 False - make filter as (attr=*value*)
        leading_wildcard -- boolean:
            True - allow heading filter wildcard when exact=False
            False - forbid heading filter wildcard when exact=False
        trailing_wildcard -- boolean:
            True - allow trailing filter wildcard when exact=False
            False - forbid trailing filter wildcard when exact=False
        """
        if isinstance(value, (list, tuple)):
            # Multi-valued: build one sub-filter per value and combine.
            flts = [
                cls.make_filter_from_attr(
                    attr, v, exact=exact,
                    leading_wildcard=leading_wildcard,
                    trailing_wildcard=trailing_wildcard)
                for v in value
            ]
            return cls.combine_filters(flts, rules)
        elif value is not None:
            if isinstance(value, crypto_x509.Certificate):
                value = value.public_bytes(serialization.Encoding.DER)
            if isinstance(value, bytes):
                # Binary values are matched as escaped hex pairs: \xx\xx...
                value = binascii.hexlify(value).decode('ascii')
                # value[-2:0] is empty string for the initial '\\'
                value = u'\\'.join(
                    value[i:i+2] for i in six.moves.range(-2, len(value), 2))
            elif isinstance(value, datetime):
                value = value.strftime(
                    LDAP_GENERALIZED_TIME_FORMAT)
                value = ldap.filter.escape_filter_chars(value)
            else:
                value = str(value)
                value = ldap.filter.escape_filter_chars(value)
            if not exact:
                template = '%s'
                if leading_wildcard:
                    template = '*' + template
                if trailing_wildcard:
                    template = template + '*'
                value = template % value
            if rules == cls.MATCH_NONE:
                return '(!(%s=%s))' % (attr, value)
            return '(%s=%s)' % (attr, value)
        return ''
    @classmethod
    def make_filter(
            cls, entry_attrs, attrs_list=None, rules='|', exact=True,
            leading_wildcard=True, trailing_wildcard=True):
        """
        Make filter for ldap2.find_entries from entry attributes.

        Keyword arguments:
        attrs_list -- list of attributes to use, all if None (default None)
        rules -- specifies how to determine a match (default ldap2.MATCH_ANY)
        exact -- boolean, True - make filter as (attr=value)
                 False - make filter as (attr=*value*)
        leading_wildcard -- boolean:
            True - allow heading filter wildcard when exact=False
            False - forbid heading filter wildcard when exact=False
        trailing_wildcard -- boolean:
            True - allow trailing filter wildcard when exact=False
            False - forbid trailing filter wildcard when exact=False

        rules can be one of the following:
        ldap2.MATCH_NONE - match entries that do not match any attribute
        ldap2.MATCH_ALL - match entries that match all attributes
        ldap2.MATCH_ANY - match entries that match any of attribute
        """
        # MATCH_NONE negates the ANY combination, so build sub-filters
        # with MATCH_ANY and let combine_filters() apply the negation.
        if rules == cls.MATCH_NONE:
            make_filter_rules = cls.MATCH_ANY
        else:
            make_filter_rules = rules
        flts = []
        if attrs_list is None:
            for (k, v) in entry_attrs.items():
                flts.append(
                    cls.make_filter_from_attr(
                        k, v, make_filter_rules, exact,
                        leading_wildcard, trailing_wildcard)
                )
        else:
            for a in attrs_list:
                value = entry_attrs.get(a, None)
                if value is not None:
                    flts.append(
                        cls.make_filter_from_attr(
                            a, value, make_filter_rules, exact,
                            leading_wildcard, trailing_wildcard)
                    )
        return cls.combine_filters(flts, rules)
    def get_entries(self, base_dn, scope=ldap.SCOPE_SUBTREE, filter=None,
                    attrs_list=None, get_effective_rights=False, **kwargs):
        """Return a list of matching entries.

        :raises: errors.LimitsExceeded if the list is truncated by the server
        :raises: errors.NotFound if result set is empty
            or base_dn doesn't exist

        :param base_dn: dn of the entry at which to start the search
        :param scope: search scope, see LDAP docs (default ldap2.SCOPE_SUBTREE)
        :param filter: LDAP filter to apply
        :param attrs_list: list of attributes to return, all if None (default)
        :param get_effective_rights: use GetEffectiveRights control
        :param kwargs: additional keyword arguments. See find_entries method
            for their description.
        """
        entries, truncated = self.find_entries(
            base_dn=base_dn, scope=scope, filter=filter, attrs_list=attrs_list,
            get_effective_rights=get_effective_rights,
            **kwargs)
        try:
            # Translate the truncation marker into the specific limit error.
            self.handle_truncated_result(truncated)
        except errors.LimitsExceeded as e:
            logger.error(
                "%s while getting entries (base DN: %s, filter: %s)",
                e, base_dn, filter
            )
            raise
        return entries
def find_entries(
        self, filter=None, attrs_list=None, base_dn=None,
        scope=ldap.SCOPE_SUBTREE, time_limit=None, size_limit=None,
        paged_search=False, get_effective_rights=False):
    """
    Return a list of entries and indication of whether the results were
    truncated ([(dn, entry_attrs)], truncated) matching specified search
    parameters followed by truncated flag. If the truncated flag is True,
    search hit a server limit and its results are incomplete.

    Keyword arguments:
    :param attrs_list: list of attributes to return, all if None
        (default None)
    :param base_dn: dn of the entry at which to start the search
        (default '')
    :param scope: search scope, see LDAP docs (default ldap2.SCOPE_SUBTREE)
    :param time_limit: time limit in seconds (default unlimited)
    :param size_limit: size (number of entries returned) limit
        (default unlimited)
    :param paged_search: search using paged results control
    :param get_effective_rights: use GetEffectiveRights control
    :raises: errors.NotFound if result set is empty
        or base_dn doesn't exist
    """
    if base_dn is None:
        base_dn = DN()
    assert isinstance(base_dn, DN)
    if not filter:
        # Match everything when no filter is given.
        filter = '(objectClass=*)'
    res = []
    truncated = False

    # Normalize limits: None means "use the instance default";
    # a time limit of 0 becomes -1.0, python-ldap's "no timeout".
    if time_limit is None:
        time_limit = self.time_limit
    if time_limit == 0:
        time_limit = -1.0
    if size_limit is None:
        size_limit = self.size_limit
    if not isinstance(size_limit, int):
        size_limit = int(size_limit)
    if not isinstance(time_limit, float):
        time_limit = float(time_limit)

    if attrs_list:
        # Deduplicate and lowercase requested attribute names.
        attrs_list = [a.lower() for a in set(attrs_list)]

    base_sctrls = []
    if get_effective_rights:
        base_sctrls.append(self.__get_effective_rights_control())

    cookie = ''
    # Keep each page strictly below the size limit so the server-side
    # limit is the one that fires; 2000 is the default page size.
    page_size = (size_limit if size_limit > 0 else 2000) - 1
    if page_size == 0:
        paged_search = False

    # pass arguments to python-ldap
    with self.error_handler():
        if six.PY2:
            filter = self.encode(filter)
            attrs_list = self.encode(attrs_list)
        while True:
            if paged_search:
                sctrls = base_sctrls + [
                    SimplePagedResultsControl(0, page_size, cookie)
                ]
            else:
                sctrls = base_sctrls or None
            try:
                id = self.conn.search_ext(
                    str(base_dn), scope, filter, attrs_list,
                    serverctrls=sctrls, timeout=time_limit,
                    sizelimit=size_limit
                )
                # Drain one page of results for this search id.
                while True:
                    result = self.conn.result3(id, 0)
                    objtype, res_list, _res_id, res_ctrls = result
                    if objtype == ldap.RES_SEARCH_RESULT:
                        # End-of-search marker for this page.
                        break
                    res_list = self._convert_result(res_list)
                    if res_list:
                        res.append(res_list[0])
                if paged_search:
                    # Get cookie for the next page
                    for ctrl in res_ctrls:
                        if isinstance(ctrl, SimplePagedResultsControl):
                            cookie = ctrl.cookie
                            break
                    else:
                        cookie = ''
            except ldap.ADMINLIMIT_EXCEEDED:
                truncated = TRUNCATED_ADMIN_LIMIT
                break
            except ldap.SIZELIMIT_EXCEEDED:
                truncated = TRUNCATED_SIZE_LIMIT
                break
            except ldap.TIMELIMIT_EXCEEDED:
                truncated = TRUNCATED_TIME_LIMIT
                break
            except ldap.LDAPError as e:
                # If paged search is in progress, try to cancel it
                # (an empty page with the current cookie aborts it).
                if paged_search and cookie:
                    sctrls = [SimplePagedResultsControl(0, 0, cookie)]
                    try:
                        self.conn.search_ext_s(
                            str(base_dn), scope, filter, attrs_list,
                            serverctrls=sctrls, timeout=time_limit,
                            sizelimit=size_limit)
                    except ldap.LDAPError as e2:
                        logger.warning(
                            "Error cancelling paged search: %s", e2)
                    cookie = ''
                # Re-raise to classify: limit errors only set the
                # truncated flag, everything else propagates.
                try:
                    raise e
                except (ldap.ADMINLIMIT_EXCEEDED, ldap.TIMELIMIT_EXCEEDED,
                        ldap.SIZELIMIT_EXCEEDED):
                    truncated = True
                    break
            if not paged_search or not cookie:
                # Last (or only) page processed.
                break

    if not res and not truncated:
        raise errors.EmptyResult(reason='no matching entry found')

    return (res, truncated)
def __get_effective_rights_control(self):
    """Construct a GetEffectiveRights control for current user.

    whoami_s() returns an authzId string of the form "dn: <bind DN>";
    the 4-character "dn: " prefix is stripped before re-embedding the
    bind DN in the control's authorization identity.
    """
    bind_dn = self.conn.whoami_s()[4:]
    return GetEffectiveRightsControl(
        True, "dn: {0}".format(bind_dn).encode('utf-8'))
def find_entry_by_attr(self, attr, value, object_class, attrs_list=None,
                       base_dn=None):
    """
    Find entry (dn, entry_attrs) by attribute and object class.

    Keyword arguments:
    attrs_list - list of attributes to return, all if None (default None)
    base_dn - dn of the entry at which to start the search (default '')

    :raises: errors.SingleMatchExpected if more than one entry matches
    """
    if base_dn is None:
        base_dn = DN()
    assert isinstance(base_dn, DN)

    # Require both the attribute value and the object class to match.
    criteria = {attr: value, 'objectClass': object_class}
    matches = self.get_entries(
        base_dn,
        filter=self.make_filter(criteria, rules=self.MATCH_ALL),
        attrs_list=attrs_list)
    if len(matches) > 1:
        raise errors.SingleMatchExpected(found=len(matches))
    return matches[0]
def get_entry(self, dn, attrs_list=None, time_limit=None,
              size_limit=None, get_effective_rights=False):
    """
    Get entry (dn, entry_attrs) by dn.

    Keyword arguments:
    attrs_list - list of attributes to return, all if None (default None)
    """
    assert isinstance(dn, DN)
    # A base-scope search on the DN itself yields exactly that entry.
    found = self.get_entries(
        dn, self.SCOPE_BASE, None, attrs_list, time_limit=time_limit,
        size_limit=size_limit, get_effective_rights=get_effective_rights,
    )
    return found[0]
def add_entry(self, entry):
    """Create a new entry.

    This should be called as add_entry(entry).

    :param entry: entry object providing ``dn`` and raw attribute values
    """
    # remove all [] values (python-ldap hates 'em)
    attrs = dict((k, v) for k, v in entry.raw.items() if v)

    with self.error_handler():
        attrs = self.encode(attrs)
        self.conn.add_s(str(entry.dn), list(attrs.items()))

    # The entry now matches server state; drop pending modifications.
    entry.reset_modlist()
def move_entry(self, dn, new_dn, del_old=True):
    """
    Move an entry (either to a new superior or/and changing relative
    distinguished name)

    Keyword arguments:
    dn: DN of the source entry
    new_dn: DN of the target entry
    del_old -- delete old RDN value (default True)

    :raises:
        errors.NotFound if source entry or target superior entry
            doesn't exist
        errors.EmptyModlist if source and target are identical
    """
    assert isinstance(dn, DN)
    assert isinstance(new_dn, DN)

    if new_dn == dn:
        raise errors.EmptyModlist()

    new_rdn = new_dn[0]
    if new_dn[1:] == dn[1:]:
        # Same superior: a plain rename, no newsuperior needed.
        new_superior = None
    else:
        new_superior = str(DN(*new_dn[1:]))

    with self.error_handler():
        self.conn.rename_s(str(dn), str(new_rdn), newsuperior=new_superior,
                           delold=int(del_old))
        time.sleep(.3)  # Give memberOf plugin a chance to work
def update_entry(self, entry):
    """Update entry's attributes.

    This should be called as update_entry(entry).

    :raises: errors.EmptyModlist if the entry has no pending changes
    """
    # generate modlist
    modlist = entry.generate_modlist()
    if not modlist:
        raise errors.EmptyModlist()
    logger.debug("update_entry modlist %s", modlist)

    # pass arguments to python-ldap
    with self.error_handler():
        modlist = [(a, str(b), self.encode(c))
                   for a, b, c in modlist]
        self.conn.modify_s(str(entry.dn), modlist)

    # Entry is now in sync with the server.
    entry.reset_modlist()
def delete_entry(self, entry_or_dn):
    """Delete an entry given either the DN or the entry itself"""
    # Accept a bare DN or anything carrying a .dn attribute.
    dn = entry_or_dn if isinstance(entry_or_dn, DN) else entry_or_dn.dn
    with self.error_handler():
        self.conn.delete_s(str(dn))
def entry_exists(self, dn):
    """
    Test whether the given object exists in LDAP.
    """
    assert isinstance(dn, DN)
    try:
        # Fetch no attributes; only the lookup result matters.
        self.get_entry(dn, attrs_list=[])
    except errors.NotFound:
        return False
    return True
def get_ldap_uri(host='', port=389, cacert=None, ldapi=False, realm=None,
                 protocol=None):
    """Build an LDAP connection URI.

    When *protocol* is None the scheme is inferred from the arguments:
    ``ldapi`` wins, then a CA certificate implies ``ldaps``, otherwise
    plain ``ldap``.

    :raises ValueError: for an unsupported protocol name
    """
    if protocol is None:
        if ldapi:
            protocol = 'ldapi'
        elif cacert is not None:
            protocol = 'ldaps'
        else:
            protocol = 'ldap'

    if protocol == 'ldapi':
        # LDAPI socket path embeds the realm with '.' mapped to '-'.
        return 'ldapi://%%2fvar%%2frun%%2fslapd-%s.socket' % (
            "-".join(realm.split(".")))
    if protocol in ('ldap', 'ldaps'):
        return '%s://%s' % (protocol, format_netloc(host, port))
    raise ValueError('Protocol %r not supported' % protocol)
class CacheEntry:
    """One LDAPCache slot: either a cached entry or a cached exception.

    Exactly one of ``entry`` / ``exception`` is expected to be set.
    """
    def __init__(self, entry=None, attrs_list=None, exception=None,
                 get_effective_rights=False, all=False):
        # NOTE(review): get_effective_rights is accepted but never
        # stored; LDAPCache bypasses the cache for such queries —
        # confirm before removing the parameter.
        self.entry = entry              # cached entry copy (positive result)
        self.attrs_list = attrs_list    # attribute names present in the copy
        self.exception = exception      # cached NotFound/EmptyResult (negative)
        self.all = all                  # True if the copy holds all attributes
class LDAPCache(LDAPClient):
    """A very basic LRU Cache using an OrderedDict

    Positive get_entry() results and NotFound/EmptyResult negative
    results are cached per-DN.  Every write operation routed through
    this class invalidates the affected DN(s).
    """
    def __init__(self, ldap_uri, start_tls=False, force_schema_updates=False,
                 no_schema=False, decode_attrs=True, cacert=None,
                 sasl_nocanon=True, enable_cache=True, cache_size=100,
                 debug_cache=False):
        self.cache = OrderedDict()
        self._enable_cache = True  # initialize to zero to satisfy pylint
        self._debug_cache = False  # initialize to zero to satisfy pylint
        # object.__setattr__ bypasses any attribute interception in the
        # LDAPClient hierarchy when storing the real values.
        object.__setattr__(self, '_cache_misses', 0)
        object.__setattr__(self, '_cache_hits', 0)
        object.__setattr__(self, '_enable_cache',
                           enable_cache and cache_size > 0)
        object.__setattr__(self, '_debug_cache', debug_cache)
        object.__setattr__(self, '_cache_size', cache_size)
        super(LDAPCache, self).__init__(
            ldap_uri, start_tls, force_schema_updates, no_schema,
            decode_attrs, cacert, sasl_nocanon
        )

    @property
    def hit(self):
        # Number of cache hits so far.
        return self._cache_hits  # pylint: disable=no-member

    @property
    def miss(self):
        # Number of cache misses so far.
        return self._cache_misses  # pylint: disable=no-member

    @property
    def max_entries(self):
        # Upper bound on cached DNs; LRU eviction keeps us below it.
        return self._cache_size  # pylint: disable=no-member

    def emit(self, msg, *args, **kwargs):
        """Log a cache-debug message when caching and debugging are on."""
        if self._enable_cache and self._debug_cache:
            logger.debug(msg, *args, **kwargs)

    def copy_entry(self, dn, entry, attrs=[]):
        """Deep-copy *entry* so cached data is never shared with callers.

        NOTE(review): ``attrs=[]`` is a mutable default, but it is only
        read, never mutated, so it is safe here.
        """
        new_entry = LDAPEntry(self, DN(dn))
        # Return either the whole entry or only those attrs requested
        if not attrs:
            new_entry.raw.update(deepcopy(dict(entry.raw)))
        else:
            for attr, original_values in entry.raw.items():
                if attr.lower() not in attrs:
                    continue
                new_entry.raw[attr.lower()] = deepcopy(original_values)
        new_entry._orig_raw = deepcopy(dict(entry.raw))
        new_entry.reset_modlist()

        return new_entry

    def add_cache_entry(self, dn, attrs_list=None, get_all=False,
                        entry=None, exception=None):
        """Insert (or refresh) the cache slot for *dn*.

        Either a positive result (entry + attrs_list) or a negative one
        (exception) is stored; volatile subtrees and banned attributes
        are never cached.
        """
        # idnsname - caching prevents delete when mod value to None
        # cospriority - in a Class of Service object, uncacheable
        # usercertificate* - caching subtypes is tricky, trade less
        #                    complexity for performance
        #
        # TODO: teach the cache about subtypes
        BANNED_ATTRS = {
            'idnsname',
            'cospriority',
            'usercertificate',
            'usercertificate;binary'
        }
        if not self._enable_cache:
            return

        # Drop any stale slot first; re-insertion marks it most recent.
        self.remove_cache_entry(dn)

        # Never cache config/kerberos/CA subtrees.
        if (
            DN('cn=config') in dn
            or DN('cn=kerberos') in dn
            or DN('o=ipaca') in dn
        ):
            return

        if exception:
            self.emit("EXC: Caching exception %s", exception)
            self.cache[dn] = CacheEntry(exception=exception)
        else:
            if not BANNED_ATTRS.intersection(attrs_list):
                self.cache[dn] = CacheEntry(
                    entry=entry.copy(),
                    attrs_list=attrs_list.copy(),
                    all=get_all,
                )
            else:
                return

        self.emit("ADD: %s: %s all=%s", dn, attrs_list, get_all)
        self.cache.move_to_end(dn)
        if len(self.cache) > self.max_entries:
            # Evict the least-recently-used slot.
            (dn, entry) = self.cache.popitem(last=False)
            self.emit("LRU: removed %s", dn)

    def clear_cache(self):
        """Drop every cached slot and reset the hit/miss counters."""
        self.cache_status('FINAL')
        object.__setattr__(self, 'cache', OrderedDict())
        object.__setattr__(self, '_cache_hits', 0)
        object.__setattr__(self, '_cache_misses', 0)

    def cache_status(self, type):
        # Debug summary line: tag, counters, and current size.
        self.emit("%s: Hits %d Misses %d Size %d",
                  type, self.hit, self.miss, len(self.cache))

    def remove_cache_entry(self, dn):
        """Invalidate the slot for *dn* (no-op if not cached)."""
        assert isinstance(dn, DN)
        self.emit('DROP: %s', dn)
        if dn in self.cache:
            del self.cache[dn]
        else:
            self.emit('DROP: not in cache %s', dn)

    # Begin LDAPClient methods
    # Each write path invalidates before delegating to LDAPClient.

    def add_entry(self, entry):
        self.emit('add_entry')
        self.remove_cache_entry(entry.dn)
        super(LDAPCache, self).add_entry(entry)

    def update_entry(self, entry):
        self.emit('update_entry')
        self.remove_cache_entry(entry.dn)
        super(LDAPCache, self).update_entry(entry)

    def delete_entry(self, entry_or_dn):
        self.emit('delete_entry')
        if isinstance(entry_or_dn, DN):
            dn = entry_or_dn
        else:
            dn = entry_or_dn.dn
        self.remove_cache_entry(dn)
        super(LDAPCache, self).delete_entry(dn)

    def move_entry(self, dn, new_dn, del_old=True):
        self.emit('move_entry')
        # Both source and destination slots become stale.
        self.remove_cache_entry(dn)
        self.remove_cache_entry(new_dn)
        super(LDAPCache, self).move_entry(dn, new_dn, del_old)

    def modify_s(self, dn, modlist):
        self.emit('modify_s')
        if not isinstance(dn, DN):
            dn = DN(dn)
        self.emit('modlist %s', modlist)
        # Membership-style attributes reference other entries whose
        # cached state (e.g. memberOf) is affected: invalidate them too.
        for (_op, attr, mod_dn) in modlist:
            if attr.lower() in ('member',
                                'ipaallowedtoperform_write_keys',
                                'managedby_host'):
                for d in mod_dn:
                    if not isinstance(d, (DN, RDN)):
                        d = DN(d.decode('utf-8'))
                    self.emit('modify_s %s', d)
                    self.remove_cache_entry(d)
        self.emit('modify_s %s', dn)
        self.remove_cache_entry(dn)
        return super(LDAPCache, self).modify_s(dn, modlist)

    def get_entry(self, dn, attrs_list=None, time_limit=None,
                  size_limit=None, get_effective_rights=False):
        """Cached counterpart of LDAPClient.get_entry().

        Serves from the cache when the slot holds all requested
        attributes; otherwise queries the server and caches the result
        (or the NotFound/EmptyResult exception).
        """
        if not self._enable_cache:
            return super(LDAPCache, self).get_entry(
                dn, attrs_list, time_limit, size_limit, get_effective_rights
            )

        self.emit("Cache lookup: %s", dn)
        entry = self.cache.get(dn)
        if get_effective_rights and entry:
            # We don't cache this so do the query but don't drop the
            # entry.
            entry = None
        if entry and entry.exception:
            # Negative cache hit: replay the stored exception.
            hits = self._cache_hits + 1  # pylint: disable=no-member
            object.__setattr__(self, '_cache_hits', hits)
            self.emit("HIT: Re-raising %s", entry.exception)
            self.cache_status('HIT')
            raise entry.exception
        self.emit("Requested attrs_list %s", attrs_list)
        if entry:
            self.emit("Cached attrs_list %s", entry.attrs_list)
        # Normalize the request: no list means "all", [''] means dn-only.
        if not attrs_list:
            attrs_list = ['*']
        elif attrs_list == ['']:
            attrs_list = ['dn']
        get_all = False
        # NOTE(review): [''] here is unreachable after the normalization
        # above (already rewritten to ['dn']) — confirm before relying
        # on that branch.
        if (
            entry
            and (attrs_list in (['*'], ['']))
            and entry.all
        ):
            get_all = True

        if entry and entry.all and get_all:
            # Full-entry hit: hand out a private copy.
            hits = self._cache_hits + 1  # pylint: disable=no-member
            object.__setattr__(self, '_cache_hits', hits)
            self.cache_status('HIT')
            return self.copy_entry(dn, entry.entry)

        # Be sure we have all the requested attributes before returning
        # a cached entry.
        if entry and attrs_list:
            req_attrs = set(attr.lower() for attr in set(attrs_list))
            cache_attrs = set(attr.lower() for attr in entry.attrs_list)
            if req_attrs.issubset(cache_attrs):
                hits = self._cache_hits + 1  # pylint: disable=no-member
                object.__setattr__(self, '_cache_hits', hits)
                self.cache_status('HIT')
                return self.copy_entry(dn, entry.entry, req_attrs)

        try:
            entry = super(LDAPCache, self).get_entry(
                dn, attrs_list, time_limit, size_limit, get_effective_rights
            )
        except (errors.NotFound, errors.EmptyResult) as e:
            # only cache these exceptions
            self.add_cache_entry(dn, exception=e)
            misses = self._cache_misses + 1  # pylint: disable=no-member
            object.__setattr__(self, '_cache_misses', misses)
            self.cache_status('MISS: %s' % e)
            raise
        # pylint: disable=try-except-raise
        except Exception:
            # re-raise anything we aren't caching
            raise
        else:
            if attrs_list in (['*'], ['']):
                get_all = True
            # Cache a private copy keyed by the attributes we now hold.
            self.add_cache_entry(
                dn,
                attrs_list=set(k.lower() for k in entry._names.keys()),
                get_all=get_all,
                entry=self.copy_entry(dn, entry),
            )
            misses = self._cache_misses + 1  # pylint: disable=no-member
            object.__setattr__(self, '_cache_misses', misses)
            self.cache_status('MISS')
        return entry
| 73,826
|
Python
|
.py
| 1,711
| 32.005845
| 109
| 0.578535
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,633
|
dn_ctypes.py
|
freeipa_freeipa/ipapython/dn_ctypes.py
|
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
"""ctypes wrapper for libldap_str2dn
"""
from __future__ import absolute_import
import ctypes
import ctypes.util
import six
__all__ = ("str2dn", "dn2str", "DECODING_ERROR", "LDAPError")
# load reentrant ldap client library (libldap_r-*.so.2 or libldap.so.2)
ldap_lib_filename = next(
    filter(None, map(ctypes.util.find_library, ["ldap_r-2", "ldap"])), None
)
if ldap_lib_filename is None:
    raise ImportError("libldap_r or libldap shared library missing")
try:
    lib = ctypes.CDLL(ldap_lib_filename)
except OSError as e:
    # Surface loader failures as ImportError so importing this module
    # fails the same way a missing Python module would.
    raise ImportError(str(e))

# constants (mirroring ldap.h)
LDAP_AVA_FREE_ATTR = 0x0010
LDAP_AVA_FREE_VALUE = 0x0020
LDAP_DECODING_ERROR = -4

# mask for AVA flags: strips libldap's memory-ownership bits so only
# semantic flags are exposed to Python callers.
AVA_MASK = ~(LDAP_AVA_FREE_ATTR | LDAP_AVA_FREE_VALUE)
class berval(ctypes.Structure):
    """ctypes mirror of libldap's ``struct berval`` (length + data)."""
    __slots__ = ()
    _fields_ = [("bv_len", ctypes.c_ulong), ("bv_value", ctypes.c_char_p)]

    def __bytes__(self):
        # Copy bv_len bytes out of the C buffer.
        # NOTE(review): reading a c_char_p field stops at the first NUL,
        # so values with embedded NUL bytes would be truncated — confirm
        # that AVA attribute/value data here is always NUL-free.
        buf = ctypes.create_string_buffer(self.bv_value, self.bv_len)
        return buf.raw

    def __str__(self):
        return self.__bytes__().decode("utf-8")

    if six.PY2:
        # On Python 2, str is the bytes type; swap the methods around.
        __unicode__ = __str__
        __str__ = __bytes__
class LDAPAVA(ctypes.Structure):
    """ctypes mirror of libldap's LDAPAVA (attr=value assertion)."""
    __slots__ = ()
    _fields_ = [
        ("la_attr", berval),
        ("la_value", berval),
        ("la_flags", ctypes.c_uint16),
    ]


# typedef LDAPAVA** LDAPRDN;
LDAPRDN = ctypes.POINTER(ctypes.POINTER(LDAPAVA))
# typedef LDAPRDN* LDAPDN;
LDAPDN = ctypes.POINTER(LDAPRDN)
def errcheck(result, func, arguments):
    """ctypes errcheck hook translating libldap return codes.

    A zero result is passed through; LDAP_DECODING_ERROR becomes
    DECODING_ERROR, any other non-zero code becomes LDAPError with the
    libldap error string.
    """
    if result == 0:
        return result
    if result == LDAP_DECODING_ERROR:
        raise DECODING_ERROR
    raise LDAPError(ldap_err2string(result).decode("utf-8"))
# int ldap_str2dn(const char *str, LDAPDN *dn, unsigned flags)
ldap_str2dn = lib.ldap_str2dn
ldap_str2dn.argtypes = (
    ctypes.c_char_p,
    ctypes.POINTER(LDAPDN),
    ctypes.c_uint16,
)
ldap_str2dn.restype = ctypes.c_int16
# Non-zero return codes are turned into Python exceptions by errcheck.
ldap_str2dn.errcheck = errcheck

# void ldap_dnfree(LDAPDN dn) — releases the C-allocated DN structure.
ldap_dnfree = lib.ldap_dnfree
ldap_dnfree.argtypes = (LDAPDN,)
ldap_dnfree.restype = None

# char *ldap_err2string(int err) — human-readable message for a code.
ldap_err2string = lib.ldap_err2string
ldap_err2string.argtypes = (ctypes.c_int16,)
ldap_err2string.restype = ctypes.c_char_p
class LDAPError(Exception):
    """Generic failure reported by a wrapped libldap call."""
    pass


class DECODING_ERROR(LDAPError):
    """libldap could not parse the input (LDAP_DECODING_ERROR)."""
    pass
# RFC 4514, 2.4
_ESCAPE_CHARS = {'"', "+", ",", ";", "<", ">", "'", "\x00"}
def _escape_dn(dn):
if not dn:
return ""
result = []
# a space or number sign occurring at the beginning of the string
if dn[0] in {"#", " "}:
result.append("\\")
for c in dn:
if c in _ESCAPE_CHARS:
result.append("\\")
result.append(c)
# a space character occurring at the end of the string
if len(dn) > 1 and result[-1] == " ":
# insert before last entry
result.insert(-1, "\\")
return "".join(result)
def dn2str(dn):
return ",".join(
"+".join(
"=".join((attr, _escape_dn(value))) for attr, value, _flag in rdn
)
for rdn in dn
)
def str2dn(dn, flags=0):
    """Parse a DN string into [[(attr, value, flags), ...], ...].

    Mirrors python-ldap's ``ldap.dn.str2dn``: the outer list holds the
    RDNs, each RDN being a list of (attr, value, flags) AVA triples.

    :param dn: text or UTF-8 bytes; None parses as the empty DN
    :param flags: parser flags passed straight through to libldap
    :raises DECODING_ERROR: if libldap rejects the input
    """
    if dn is None:
        return []
    if isinstance(dn, six.text_type):
        dn = dn.encode("utf-8")

    ldapdn = LDAPDN()
    try:
        ldap_str2dn(dn, ctypes.byref(ldapdn), flags)
        result = []
        if not ldapdn:
            # empty DN, str2dn("") == []
            return result
        for rdn in ldapdn:
            # The C arrays are NULL-terminated; stop at the sentinel.
            if not rdn:
                break
            avas = []
            for ava_p in rdn:
                if not ava_p:
                    break
                ava = ava_p[0]
                avas.append(
                    (
                        six.text_type(ava.la_attr),
                        six.text_type(ava.la_value),
                        # hide libldap's memory-ownership flag bits
                        ava.la_flags & AVA_MASK,
                    )
                )
            result.append(avas)
        return result
    finally:
        # Always release the C-allocated DN, even on parse failure.
        ldap_dnfree(ldapdn)
| 3,905
|
Python
|
.py
| 130
| 23.392308
| 77
| 0.585764
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,634
|
util.py
|
freeipa_freeipa/ipapython/install/util.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
Utilities.
"""
import sys
import six
class from_:
    """
    Marker object wrapping a subgenerator for delegation.

    See `run_generator_with_yield_from`.
    """
    __slots__ = ('obj',)

    def __init__(self, obj):
        self.obj = obj
def run_generator_with_yield_from(gen):
    """
    Iterate over a generator object with subgenerator delegation.

    This implements Python 3's ``yield from`` expressions, using Python 2
    syntax:

    >>> def subgen():
    ...     yield 'B'
    ...     yield 'C'
    ...
    >>> def gen():
    ...     yield 'A'
    ...     yield from_(subgen())
    ...     yield 'D'
    ...
    >>> list(run_generator_with_yield_from(gen()))
    ['A', 'B', 'C', 'D']

    Returning value from a subgenerator is not supported.
    """
    exc_info = None
    value = None

    # Explicit generator stack replaces the interpreter's frame chain:
    # the innermost delegated generator is always stack[-1].
    stack = [gen]
    while stack:
        # Shift last iteration's outcome into prev_* and reset current.
        prev_exc_info, exc_info = exc_info, None
        prev_value, value = value, None

        gen = stack[-1]
        try:
            if prev_exc_info is None:
                # Resume with the value sent in by our consumer.
                value = gen.send(prev_value)
            else:
                # Forward an exception raised downstream/by the consumer.
                value = gen.throw(*prev_exc_info)
        except StopIteration:
            # Generator finished cleanly; resume its delegating parent.
            stack.pop()
            continue
        except BaseException:
            # Propagate the failure into the parent on the next pass.
            exc_info = sys.exc_info()
            stack.pop()
            continue
        else:
            if isinstance(value, from_):
                # Delegation request: descend into the subgenerator.
                stack.append(value.obj)
                value = None
                continue

        try:
            value = (yield value)
        except BaseException:
            # Exception thrown in by our consumer; deliver it next loop.
            exc_info = sys.exc_info()

    if exc_info is not None:
        # The outermost generator died with an exception; re-raise it.
        six.reraise(*exc_info)
class InnerClassMeta(type):
    """Metaclass making a class act as a lazily-instantiated inner class.

    A class using this metaclass, declared as an attribute of an outer
    class, behaves like a descriptor: the first attribute access on an
    outer *instance* constructs the inner class with that instance and
    caches the result in the instance's __dict__.  If the inner instance
    is itself a descriptor, access is delegated to it.
    """
    # pylint: disable=no-value-for-parameter

    def __new__(cls, name, bases, class_dict):
        # Binding metadata is recomputed lazily by __bind; never inherit
        # a parent's binding.
        class_dict.pop('__outer_class__', None)
        class_dict.pop('__outer_name__', None)

        return super(InnerClassMeta, cls).__new__(cls, name, bases, class_dict)

    def __get__(cls, obj, obj_type):
        outer_class, outer_name = cls.__bind(obj_type)
        if obj is None:
            # Class-level access returns the inner class itself.
            return cls
        assert isinstance(obj, outer_class)

        try:
            # Cached inner instance from an earlier access.
            return obj.__dict__[outer_name]
        except KeyError:
            inner = cls(obj)
            try:
                getter = inner.__get__
            except AttributeError:
                return inner
            else:
                # Inner instance is itself a descriptor; delegate to it.
                return getter(obj, obj_type)

    def __set__(cls, obj, value):
        outer_class, outer_name = cls.__bind(obj.__class__)
        assert isinstance(obj, outer_class)

        inner = cls(obj)
        try:
            setter = inner.__set__
        except AttributeError:
            try:
                inner.__delete__
            except AttributeError:
                # Plain data attribute: store directly on the instance.
                obj.__dict__[outer_name] = value
            else:
                # Delete-only descriptor: setting is not supported.
                raise AttributeError('__set__')
        else:
            setter(obj, value)

    def __delete__(cls, obj):
        outer_class, outer_name = cls.__bind(obj.__class__)
        assert isinstance(obj, outer_class)

        inner = cls(obj)
        try:
            deleter = inner.__delete__
        except AttributeError:
            try:
                inner.__set__
            except AttributeError:
                # Plain data attribute: remove the cached value.
                try:
                    del obj.__dict__[outer_name]
                except KeyError:
                    raise AttributeError(outer_name)
            else:
                # Set-only descriptor: deletion is not supported.
                raise AttributeError('__delete__')
        else:
            deleter(obj)

    def __bind(cls, obj_type):
        """Locate (and memoize) the outer class/attribute holding cls."""
        try:
            outer_class = cls.__dict__['__outer_class__']
            name = cls.__dict__['__outer_name__']
        except KeyError:
            # First access: scan the MRO for the attribute whose value
            # is this very class, then memoize the result.
            outer_class, name, value = None, None, None
            for outer_class in obj_type.__mro__:
                for name, value in six.iteritems(outer_class.__dict__):
                    if value is cls:
                        break
                if value is cls:
                    break
            assert value is cls

            cls.__outer_class__ = outer_class
            cls.__outer_name__ = name
            # Qualify the class name as Outer.name for nicer repr/debug.
            cls.__name__ = '.'.join((outer_class.__name__, name))
            cls.__qualname__ = cls.__name__

        return outer_class, name
| 4,302
|
Python
|
.py
| 136
| 21.566176
| 79
| 0.509548
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,635
|
cli.py
|
freeipa_freeipa/ipapython/install/cli.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
Command line support.
"""
import collections
import enum
import logging
import optparse # pylint: disable=deprecated-module
import signal
import six
from ipapython import admintool
from ipapython.ipa_log_manager import standard_logging_setup
from ipapython.ipautil import (CheckedIPAddress, CheckedIPAddressLoopback,
private_ccache)
from . import core, common
__all__ = ['install_tool', 'uninstall_tool']

if six.PY3:
    # Python 3 has no separate long type; alias it for the option
    # type dispatch below.
    long = int

# Convenience alias; type(None) is used to mark flag-style knobs.
NoneType = type(None)

logger = logging.getLogger(__name__)
def _get_usage(configurable_class):
usage = '%prog [options]'
for owner_cls, name in configurable_class.knobs():
knob_cls = getattr(owner_cls, name)
if knob_cls.is_cli_positional():
if knob_cls.cli_metavar is not None:
metavar = knob_cls.cli_metavar
elif knob_cls.cli_names:
metavar = knob_cls.cli_names[0].upper()
else:
metavar = name.replace('_', '-').upper()
try:
knob_cls.default
except AttributeError:
fmt = ' {}'
else:
fmt = ' [{}]'
usage += fmt.format(metavar)
return usage
def install_tool(configurable_class, command_name, log_file_name,
                 debug_option=False, verbose=False, console_format=None,
                 use_private_ccache=True, uninstall_log_file_name=None):
    """
    Some commands represent multiple related tools, e.g.
    ``ipa-server-install`` and ``ipa-server-install --uninstall`` would be
    represented by separate classes. Only their options are the same.

    :param configurable_class: the command class for options
    :param command_name: the command name shown in logs/output
    :param log_file_name: if None, logging is to stderr only
    :param debug_option: log level is DEBUG
    :param verbose: log level is INFO
    :param console_format: logging format for stderr
    :param use_private_ccache: a temporary ccache is created and used
    :param uninstall_log_file_name: if not None the log for uninstall
    :return: a dynamically created InstallTool subclass
    """
    if uninstall_log_file_name is not None:
        # Keyword arguments for building the matching uninstall tool;
        # InstallTool uses these when invoked with --uninstall.
        uninstall_kwargs = dict(
            configurable_class=configurable_class,
            command_name=command_name,
            log_file_name=uninstall_log_file_name,
            debug_option=debug_option,
            verbose=verbose,
            console_format=console_format,
        )
    else:
        uninstall_kwargs = None

    return type(
        'install_tool({0})'.format(configurable_class.__name__),
        (InstallTool,),
        dict(
            configurable_class=configurable_class,
            command_name=command_name,
            log_file_name=log_file_name,
            usage=_get_usage(configurable_class),
            debug_option=debug_option,
            verbose=verbose,
            console_format=console_format,
            uninstall_kwargs=uninstall_kwargs,
            use_private_ccache=use_private_ccache,
        )
    )
def uninstall_tool(configurable_class, command_name, log_file_name,
                   debug_option=False, verbose=False, console_format=None):
    """Create an UninstallTool subclass for *configurable_class*.

    Parameters mirror install_tool() (minus the uninstall/ccache ones).

    :return: a dynamically created UninstallTool subclass
    """
    return type(
        'uninstall_tool({0})'.format(configurable_class.__name__),
        (UninstallTool,),
        dict(
            configurable_class=configurable_class,
            command_name=command_name,
            log_file_name=log_file_name,
            usage=_get_usage(configurable_class),
            debug_option=debug_option,
            verbose=verbose,
            console_format=console_format,
        )
    )
class ConfigureTool(admintool.AdminTool):
    """Admin tool that drives a Configurable: maps its knobs to CLI
    options, parses them, then instantiates and runs the configurator.
    """
    configurable_class = None
    debug_option = False
    verbose = False
    console_format = None
    use_private_ccache = True

    @staticmethod
    def _transform(configurable_class):
        # Subclasses convert the configurable into its installer or
        # uninstaller form (see InstallTool / UninstallTool).
        raise NotImplementedError

    @classmethod
    def add_options(  # pylint: disable=arguments-renamed
        cls, parser, positional=False
    ):
        """Add one optparse option per knob.

        :param parser: the optparse parser (or a fake one, see __init__)
        :param positional: if True, only positional knobs are added;
            if False, only non-positional ones
        """
        transformed_cls = cls._transform(cls.configurable_class)

        if issubclass(transformed_cls, common.Interactive):
            parser.add_option(
                '-U', '--unattended',
                dest='unattended',
                default=False,
                action='store_true',
                help="unattended (un)installation never prompts the user",
            )

        groups = collections.OrderedDict()
        # if no group is defined, add the option to the parser top level
        groups[None] = parser

        for owner_cls, name in transformed_cls.knobs():
            knob_cls = getattr(owner_cls, name)
            if knob_cls.is_cli_positional() is not positional:
                continue

            group_cls = knob_cls.group()
            try:
                opt_group = groups[group_cls]
            except KeyError:
                # First knob of this group: create its OptionGroup.
                opt_group = groups[group_cls] = optparse.OptionGroup(
                    parser, "{0} options".format(group_cls.description))
                parser.add_option_group(opt_group)

            knob_type = knob_cls.type
            if issubclass(knob_type, list):
                try:
                    # typing.List[X].__parameters__ == (X,)
                    knob_scalar_type = knob_type.__parameters__[0]
                except AttributeError:
                    knob_scalar_type = str
            else:
                knob_scalar_type = knob_type

            # Map the knob's scalar type to an optparse option type.
            kwargs = dict()
            if knob_scalar_type is NoneType:
                # Flag-style knob: presence stores True, no argument.
                kwargs['type'] = None
                kwargs['const'] = True
                kwargs['default'] = False
            elif knob_scalar_type is str:
                kwargs['type'] = 'string'
            elif knob_scalar_type is int:
                kwargs['type'] = 'int'
            elif knob_scalar_type is long:
                kwargs['type'] = 'long'
            elif knob_scalar_type is CheckedIPAddressLoopback:
                kwargs['type'] = 'ip_with_loopback'
            elif knob_scalar_type is CheckedIPAddress:
                kwargs['type'] = 'ip'
            elif issubclass(knob_scalar_type, enum.Enum):
                kwargs['type'] = 'choice'
                kwargs['choices'] = [i.value for i in knob_scalar_type]
                kwargs['metavar'] = "{{{0}}}".format(
                    ",".join(kwargs['choices']))
            else:
                # Arbitrary type: optparse calls it as a constructor.
                kwargs['type'] = 'constructor'
                kwargs['constructor'] = knob_scalar_type
            kwargs['dest'] = name
            # List knobs accumulate values; scalars store the last one.
            if issubclass(knob_type, list):
                if kwargs['type'] is None:
                    kwargs['action'] = 'append_const'
                else:
                    kwargs['action'] = 'append'
            else:
                if kwargs['type'] is None:
                    kwargs['action'] = 'store_const'
                else:
                    kwargs['action'] = 'store'
            if knob_cls.sensitive:
                kwargs['sensitive'] = True
            if knob_cls.cli_metavar:
                kwargs['metavar'] = knob_cls.cli_metavar

            # (hidden, names) pairs: deprecated names are hidden from
            # --help but still accepted.
            if not positional:
                cli_info = (
                    (knob_cls.deprecated, knob_cls.cli_names),
                    (True, knob_cls.cli_deprecated_names),
                )
            else:
                cli_info = (
                    (knob_cls.deprecated, (None,)),
                )
            for hidden, cli_names in cli_info:
                opt_strs = []
                for cli_name in cli_names:
                    if cli_name is None:
                        # Derive "--knob-name" from the attribute name.
                        cli_name = '--{}'.format(name.replace('_', '-'))
                    opt_strs.append(cli_name)
                if not opt_strs:
                    continue

                if not hidden:
                    help = knob_cls.description
                else:
                    help = optparse.SUPPRESS_HELP

                opt_group.add_option(
                    *opt_strs,
                    help=help,
                    **kwargs
                )

        super(ConfigureTool, cls).add_options(parser,
                                              debug_option=cls.debug_option)

    def __init__(self, options, args):
        super(ConfigureTool, self).__init__(options, args)

        self.transformed_cls = self._transform(self.configurable_class)
        self.positional_arguments = []

        for owner_cls, name in self.transformed_cls.knobs():
            knob_cls = getattr(owner_cls, name)
            if knob_cls.is_cli_positional():
                self.positional_arguments.append(name)

        # fake option parser to parse positional arguments
        # (because optparse does not support positional argument parsing)
        fake_option_parser = optparse.OptionParser()
        self.add_options(fake_option_parser, True)

        fake_option_map = {option.dest: option
                           for group in fake_option_parser.option_groups
                           for option in group.option_list}

        # Consume leading positional args in declaration order, feeding
        # each through its fake option so type conversion still applies.
        for index, name in enumerate(self.positional_arguments):
            try:
                value = self.args.pop(0)
            except IndexError:
                break

            fake_option = fake_option_map[name]
            fake_option.process('argument {}'.format(index + 1),
                                value,
                                self.options,
                                self.option_parser)

    def validate_options(self, needs_root=True):
        super(ConfigureTool, self).validate_options(needs_root=needs_root)

        if self.args:
            # Anything left over after positional parsing is an error.
            self.option_parser.error("Too many arguments provided")

    def _setup_logging(self, log_file_mode='w', no_file=False):
        """Configure logging to stderr and (optionally) a log file."""
        if no_file:
            log_file_name = None
        elif self.options.log_file:
            # --log-file on the command line overrides the class default.
            log_file_name = self.options.log_file
        else:
            log_file_name = self.log_file_name
        standard_logging_setup(
            log_file_name,
            verbose=self.verbose,
            debug=self.options.verbose,
            console_format=self.console_format)
        if log_file_name:
            logger.debug('Logging to %s', log_file_name)
        elif not no_file:
            logger.debug('Not logging to a file')

    def init_configurator(self):
        """Executes transformation, getting a flattened Installer object

        :returns: common.installer.Installer object
        """
        kwargs = {}

        transformed_cls = self._transform(self.configurable_class)
        knob_classes = {n: getattr(c, n) for c, n in transformed_cls.knobs()}
        # Only pass knobs the user actually set.
        for name in knob_classes:
            value = getattr(self.options, name, None)
            if value is not None:
                kwargs[name] = value

        if (issubclass(self.configurable_class, common.Interactive) and
                not self.options.unattended):
            kwargs['interactive'] = True

        try:
            return transformed_cls(**kwargs)
        except core.KnobValueError as e:
            # Report the failure against the CLI name the user typed,
            # whether it arrived as an option or a positional argument.
            knob_cls = knob_classes[e.name]
            try:
                index = self.positional_arguments.index(e.name)
            except ValueError:
                cli_name = knob_cls.cli_names[0] or e.name.replace('_', '-')
                desc = "option {0}".format(cli_name)
            else:
                desc = "argument {0}".format(index + 1)
            self.option_parser.error("{0}: {1}".format(desc, e))
        except RuntimeError as e:
            self.option_parser.error(str(e))
        # Not reached: option_parser.error() exits the process; this
        # keeps the return paths consistent for static analysis.
        return None

    def run(self):
        cfgr = self.init_configurator()

        # Treat SIGTERM like Ctrl+C so cleanup handlers run.
        signal.signal(signal.SIGTERM, self.__signal_handler)

        if self.use_private_ccache:
            # Isolate Kerberos credentials in a throwaway ccache.
            with private_ccache():
                super(ConfigureTool, self).run()
                return cfgr.run()
        else:
            super(ConfigureTool, self).run()
            return cfgr.run()

    @staticmethod
    def __signal_handler(signum, frame):
        raise KeyboardInterrupt
class InstallTool(ConfigureTool):
    """Command-line tool running a Configurable in install mode.

    When built with uninstall_kwargs (see install_tool()), it also
    accepts --uninstall and delegates to the matching UninstallTool.
    """
    uninstall_kwargs = None

    _transform = staticmethod(common.installer)

    @classmethod
    def add_options(cls, parser, positional=False):
        super(InstallTool, cls).add_options(parser, positional)

        if cls.uninstall_kwargs is not None:
            parser.add_option(
                '--uninstall',
                dest='uninstall',
                default=False,
                action='store_true',
                help=("uninstall an existing installation. The uninstall can "
                      "be run with --unattended option"),
            )

    @classmethod
    def get_command_class(cls, options, args):
        if cls.uninstall_kwargs is not None and options.uninstall:
            # --uninstall: hand over to a freshly built uninstall tool,
            # reusing this tool's option parser for error reporting.
            uninstall_cls = uninstall_tool(**cls.uninstall_kwargs)
            uninstall_cls.option_parser = cls.option_parser
            return uninstall_cls
        else:
            return super(InstallTool, cls).get_command_class(options, args)
class UninstallTool(ConfigureTool):
    """Command-line tool running a Configurable in uninstall mode."""
    _transform = staticmethod(common.uninstaller)
| 13,174
|
Python
|
.py
| 321
| 28.797508
| 78
| 0.56466
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,636
|
__init__.py
|
freeipa_freeipa/ipapython/install/__init__.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
Installer framework.
"""
| 101
|
Python
|
.py
| 6
| 15.666667
| 66
| 0.755319
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,637
|
core.py
|
freeipa_freeipa/ipapython/install/core.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
The framework core.
"""
import abc
import collections
import functools
import itertools
import sys
import six
from . import util
from .util import from_
__all__ = ['InvalidStateError', 'KnobValueError', 'Property', 'knob',
'Configurable', 'group', 'Component', 'Composite']
NoneType = type(None)

# `type` is commonly shadowed by knob parameters in this module; keep
# a reference to the builtin.
builtin_type = type

# Configurable states
_VALIDATE_PENDING = 'VALIDATE_PENDING'
_VALIDATE_RUNNING = 'VALIDATE_RUNNING'
_EXECUTE_PENDING = 'EXECUTE_PENDING'
_EXECUTE_RUNNING = 'EXECUTE_RUNNING'
_STOPPED = 'STOPPED'
_FAILED = 'FAILED'
_CLOSED = 'CLOSED'

# Sentinel distinguishing "argument not given" from an explicit None.
_missing = object()

# Monotonic counter; presumably used to preserve knob declaration
# order (cf. PropertyBase._order) — confirm against the full module.
_counter = itertools.count()
@functools.cmp_to_key
def _class_key(a, b):
if a is b:
return 0
elif issubclass(a, b):
return -1
elif issubclass(b, a):
return 1
else:
return 0
class InvalidStateError(Exception):
    """Raised when an operation is attempted in an invalid state."""
    pass
class KnobValueError(ValueError):
    """ValueError that remembers which knob (by name) was invalid."""

    def __init__(self, name, message):
        self.name = name
        super(KnobValueError, self).__init__(message)
class PropertyBase(metaclass=util.InnerClassMeta):
    """Data descriptor storing its value in the instance ``__dict__``.

    On read it walks the owner's fallback chain (``_get_fallback``) before
    resorting to ``default``.  Concrete classes are built by :func:`Property`.
    """
    # shut up pylint
    __outer_class__ = None
    __outer_name__ = None
    _order = None
    @property
    def default(self):
        # No default unless a subclass supplies one; the AttributeError
        # makes __get__ report the property name as missing instead.
        raise AttributeError('default')
    def __init__(self, outer):
        pass
    def __get__(self, obj, obj_type):
        # Look on the object itself, then along its fallback chain.
        while obj is not None:
            try:
                return obj.__dict__[self.__outer_name__]
            except KeyError:
                pass
            obj = obj._get_fallback()
        try:
            return self.default
        except AttributeError:
            raise AttributeError(self.__outer_name__)
    def __set__(self, obj, value):
        try:
            obj.__dict__[self.__outer_name__] = value
        except KeyError:
            # NOTE(review): plain dict assignment never raises KeyError;
            # presumably guards mapping-like __dict__ replacements — verify.
            raise AttributeError(self.__outer_name__)
    def __delete__(self, obj):
        try:
            del obj.__dict__[self.__outer_name__]
        except KeyError:
            # Deleting an unset value reports the property name instead.
            raise AttributeError(self.__outer_name__)
def Property(default=_missing):
    """Create a new property inner class, optionally with a default value."""
    attrs = {}
    if default is not _missing:
        attrs['default'] = default
    return util.InnerClassMeta('Property', (PropertyBase,), attrs)
class KnobBase(PropertyBase):
    """Property that additionally carries CLI and validation metadata.

    Concrete knob classes are built by :func:`knob` / :func:`extend_knob`.
    """
    type = None
    sensitive = False
    deprecated = False
    description = None
    cli_names = (None,)
    cli_deprecated_names = ()
    cli_metavar = None
    def __init__(self, outer):
        self.outer = outer
    def validate(self, value):
        """Validate *value*; subclasses raise ValueError on bad input."""
        pass
    @classmethod
    def group(cls):
        # Knobs belong to the option group of their enclosing configurable.
        return cls.__outer_class__.group()
    @classmethod
    def is_cli_positional(cls):
        # Positional iff every CLI name is a bare word (no leading dash).
        return all(n is not None and not n.startswith('-')
                   for n in cls.cli_names)
    @classmethod
    def default_getter(cls, func):
        """Class decorator installing *func* as the computed default."""
        @property
        def default(self):
            return func(self.outer)
        cls.default = default
        return cls
    @classmethod
    def validator(cls, func):
        """Class decorator chaining *func* before the inherited validation."""
        def validate(self, value):
            func(self.outer, value)
            super(cls, self).validate(value)
        cls.validate = validate
        return cls
def _knob(type=_missing, default=_missing, bases=_missing, _order=_missing,
          sensitive=_missing, deprecated=_missing, description=_missing,
          group=_missing, cli_names=_missing, cli_deprecated_names=_missing,
          cli_metavar=_missing):
    """Build a ``Knob`` inner class from the given attribute overrides.

    Arguments left at the ``_missing`` sentinel are omitted from the class
    dict so the corresponding attribute is inherited from *bases*.
    """
    # type=None means a valueless flag knob; represent it as NoneType.
    if type is None:
        type = NoneType
    if bases is _missing:
        bases = (KnobBase,)
    elif isinstance(bases, builtin_type):
        bases = (bases,)
    # Normalize the CLI name specifications to tuples.
    if cli_names is None or isinstance(cli_names, str):
        cli_names = (cli_names,)
    elif cli_names is not _missing:
        cli_names = tuple(cli_names)
    if isinstance(cli_deprecated_names, str):
        cli_deprecated_names = (cli_deprecated_names,)
    elif cli_deprecated_names is not _missing:
        cli_deprecated_names = tuple(cli_deprecated_names)
    overrides = (
        ('type', type),
        ('default', default),
        ('_order', _order),
        ('sensitive', sensitive),
        ('deprecated', deprecated),
        ('description', description),
        ('group', group),
        ('cli_names', cli_names),
        ('cli_deprecated_names', cli_deprecated_names),
        ('cli_metavar', cli_metavar),
    )
    class_dict = {key: value
                  for key, value in overrides if value is not _missing}
    return util.InnerClassMeta('Knob', bases, class_dict)
def knob(type, default=_missing, **kwargs):
    """Define a new knob, stamped with the next global definition order."""
    order = next(_counter)
    return _knob(type, default, _order=order, **kwargs)
def extend_knob(base, default=_missing, bases=_missing, group=_missing,
                **kwargs):
    """Extend an existing knob *base*, keeping its definition order."""
    if bases is _missing:
        bases = (base,)
    # By default the extension stays in the original knob's option group.
    if group is _missing:
        group = staticmethod(base.group)
    return _knob(_missing, default, bases=bases, _order=_missing,
                 group=group, **kwargs)
class Configurable(metaclass=abc.ABCMeta):
    """
    Base class of all configurables.

    The life cycle is driven by the ``_configure`` coroutine: everything up
    to its internal state transition is the validation phase, everything
    after it is the execution phase.  ``run()`` (or ``validate()`` plus
    ``execute()``) advances the coroutine through both phases.
    Configuration values are declared as properties and knobs.
    """
    @classmethod
    def properties(cls):
        """
        Iterate over properties defined for the configurable.

        Walks the MRO, yielding ``(owner class, name)`` for every
        PropertyBase subclass attribute, each class's properties sorted
        by definition order (the ``_order`` counter).
        """
        assert not hasattr(super(Configurable, cls), 'properties')
        seen = set()
        for owner_cls in cls.__mro__:
            result = []
            for name, prop_cls in owner_cls.__dict__.items():
                if name in seen:
                    continue
                seen.add(name)
                if not isinstance(prop_cls, type):
                    continue
                if not issubclass(prop_cls, PropertyBase):
                    continue
                result.append((prop_cls._order, owner_cls, name))
            result = sorted(result, key=lambda r: r[0])
            for _order, owner_cls, name in result:
                yield owner_cls, name
    @classmethod
    def knobs(cls):
        # Like properties(), restricted to knob attributes.
        for owner_cls, name in cls.properties():
            prop_cls = getattr(owner_cls, name)
            if issubclass(prop_cls, KnobBase):
                yield owner_cls, name
    @classmethod
    def group(cls):
        # Root of the group chain: no group by default (returns None).
        assert not hasattr(super(Configurable, cls), 'group')
    def __init__(self, **kwargs):
        """
        Initialize the configurable.

        Keyword arguments matching declared properties are set on the
        instance, knob values are validated, and any leftover keyword
        raises TypeError.
        """
        cls = self.__class__
        for owner_cls, name in cls.properties():
            if name.startswith('_'):
                continue
            prop_cls = getattr(owner_cls, name)
            if not isinstance(prop_cls, type):
                continue
            if not issubclass(prop_cls, PropertyBase):
                continue
            try:
                value = kwargs.pop(name)
            except KeyError:
                pass
            else:
                setattr(self, name, value)
        for owner_cls, name in cls.knobs():
            if name.startswith('_'):
                continue
            if not isinstance(self, owner_cls):
                continue
            value = getattr(self, name, None)
            if value is None:
                continue
            prop_cls = getattr(owner_cls, name)
            prop = prop_cls(self)
            try:
                prop.validate(value)
            except ValueError as e:
                # Attach the knob name for CLI error reporting.
                raise KnobValueError(name, str(e))
        if kwargs:
            extra = sorted(kwargs)
            raise TypeError(
                "{0}() got {1} unexpected keyword arguments: {2}".format(
                    type(self).__name__,
                    len(extra),
                    ', '.join(repr(name) for name in extra)))
        self._reset()
    def _reset(self):
        # (Re)arm the configure coroutine; start in the validation state.
        assert not hasattr(super(Configurable, self), '_reset')
        self.__state = _VALIDATE_PENDING
        self.__gen = util.run_generator_with_yield_from(self._configure())
    def _get_components(self):
        assert not hasattr(super(Configurable, self), '_get_components')
        raise TypeError("{0} is not composite".format(self))
    def _get_fallback(self):
        # No fallback object for property lookup by default.
        pass
    @abc.abstractmethod
    def _configure(self):
        """
        Coroutine which defines the logic of the configurable.

        Code before the transition below runs during validation; code
        after it runs during execution.
        """
        assert not hasattr(super(Configurable, self), '_configure')
        self.__transition(_VALIDATE_RUNNING, _EXECUTE_PENDING)
        while self.__state != _EXECUTE_RUNNING:
            yield
    def run(self):
        """
        Run the configurable.

        Validates first; executes only if validation left the object
        ready for execution.  Returns execute()'s result or None.
        """
        self.validate()
        if self.__state == _EXECUTE_PENDING:
            return self.execute()
        return None
    def validate(self):
        """
        Run the validation part of the configurable.
        """
        for _nothing in self._validator():
            pass
    def _validator(self):
        """
        Coroutine which runs the validation part of the configurable.
        """
        return self.__runner(_VALIDATE_PENDING,
                             _VALIDATE_RUNNING,
                             self._handle_validate_exception)
    def execute(self):
        """
        Run the execution part of the configurable.

        Returns the highest numeric value yielded by execution steps
        (zero by default).
        """
        return_value = 0
        for rval in self._executor():
            if rval is not None and rval > return_value:
                return_value = rval
        return return_value
    def _executor(self):
        """
        Coroutine which runs the execution part of the configurable.
        """
        return self.__runner(_EXECUTE_PENDING,
                             _EXECUTE_RUNNING,
                             self._handle_execute_exception)
    def done(self):
        """
        Return True if the configurable has finished.
        """
        return self.__state in (_STOPPED, _FAILED, _CLOSED)
    def run_until_executing(self, gen):
        # Drain *gen* until this configurable enters its execution phase.
        while self.__state != _EXECUTE_RUNNING:
            try:
                yield next(gen)
            except StopIteration:
                break
    def __runner(self, pending_state, running_state, exc_handler):
        # Drive self.__gen through one phase.  Exceptions raised by the
        # generator go through exc_handler; exceptions thrown into this
        # runner are forwarded into the generator via throw().
        self.__transition(pending_state, running_state)
        def step_next():
            return next(self.__gen)
        step = step_next
        while True:
            try:
                step()
            except StopIteration:
                self.__transition(running_state, _STOPPED)
                break
            except GeneratorExit:
                self.__transition(running_state, _CLOSED)
                break
            except BaseException:
                exc_info = sys.exc_info()
                try:
                    exc_handler(exc_info)
                except BaseException:
                    self.__transition(running_state, _FAILED)
                    raise
            # Phase is over once _configure moved on to the next state.
            if self.__state != running_state:
                break
            try:
                yield
            except BaseException:
                exc_info = sys.exc_info()
                def step_throw():
                    return self.__gen.throw(*exc_info)
                step = step_throw
            else:
                step = step_next
    def _handle_exception(self, exc_info):
        assert not hasattr(super(Configurable, self), '_handle_exception')
        six.reraise(*exc_info)
    def _handle_validate_exception(self, exc_info):
        assert not hasattr(super(Configurable, self),
                           '_handle_validate_exception')
        self._handle_exception(exc_info)
    def _handle_execute_exception(self, exc_info):
        assert not hasattr(super(Configurable, self),
                           '_handle_execute_exception')
        self._handle_exception(exc_info)
    def __transition(self, from_state, to_state):
        # Enforce the legal state machine; anything else is a caller bug.
        if self.__state != from_state:
            raise InvalidStateError(self.__state)
        self.__state = to_state
def group(cls):
    """Class decorator making *cls* the root of its own option group."""
    def _return_cls():
        return cls
    cls.group = staticmethod(_return_cls)
    return cls
class ComponentMeta(util.InnerClassMeta, abc.ABCMeta):
    """Combined metaclass so components are both inner classes and ABCs."""
    pass
class ComponentBase(Configurable, metaclass=ComponentMeta):
    """Configurable bound as an inner-class attribute of a parent composite.

    Property lookups that miss on the component fall back to the parent.
    """
    # shut up pylint
    __outer_class__ = None
    __outer_name__ = None
    _order = None
    @classmethod
    def group(cls):
        # Prefer an own group; otherwise inherit the enclosing class's.
        result = super(ComponentBase, cls).group()
        if result is not None:
            return result
        else:
            return cls.__outer_class__.group()
    def __init__(self, parent, **kwargs):
        self.__parent = parent
        super(ComponentBase, self).__init__(**kwargs)
    @property
    def parent(self):
        return self.__parent
    def __get__(self, obj, obj_type):
        # Cache this component on *obj* under its attribute name and
        # return it, so later attribute access is a plain dict lookup.
        obj.__dict__[self.__outer_name__] = self
        return self
    def _get_fallback(self):
        # Property lookups fall through to the parent composite.
        return self.__parent
    def _handle_exception(self, exc_info):
        # If our own handler re-raises, give the parent a chance.
        try:
            super(ComponentBase, self)._handle_exception(exc_info)
        except BaseException:
            exc_info = sys.exc_info()
            self.__parent._handle_exception(exc_info)
def Component(cls):
    """Wrap *cls* into a component inner class with the next ordering index."""
    attrs = {'_order': next(_counter)}
    return ComponentMeta('Component', (ComponentBase, cls), attrs)
class Composite(Configurable):
    """
    Configurable composed of any number of components.
    Provides knobs of all child components.
    """
    @classmethod
    def properties(cls):
        """Yield own properties plus the knobs of all components.

        When several owners define the same knob name, the most derived
        definition wins; unrelated conflicting definitions raise
        TypeError.
        """
        name_dict = {}
        owner_dict = collections.OrderedDict()
        for owner_cls, name in super(Composite, cls).properties():
            name_dict[name] = owner_cls
            owner_dict.setdefault(owner_cls, []).append(name)
        for owner_cls, name in cls.components():
            comp_cls = getattr(cls, name)
            for owner_cls, name in comp_cls.knobs():
                if hasattr(cls, name):
                    continue
                try:
                    last_owner_cls = name_dict[name]
                except KeyError:
                    name_dict[name] = owner_cls
                    owner_dict.setdefault(owner_cls, []).append(name)
                else:
                    knob_cls = getattr(owner_cls, name)
                    last_knob_cls = getattr(last_owner_cls, name)
                    if issubclass(knob_cls, last_knob_cls):
                        # More derived definition: replace the earlier one.
                        name_dict[name] = owner_cls
                        owner_dict[last_owner_cls].remove(name)
                        owner_dict.setdefault(owner_cls, [])
                        if name not in owner_dict[owner_cls]:
                            owner_dict[owner_cls].append(name)
                    elif not issubclass(last_knob_cls, knob_cls):
                        raise TypeError("{0}.knobs(): conflicting definitions "
                                        "of '{1}' in {2} and {3}".format(
                                            cls.__name__,
                                            name,
                                            last_owner_cls.__name__,
                                            owner_cls.__name__))
        for owner_cls in sorted(owner_dict, key=_class_key):
            for name in owner_dict[owner_cls]:
                yield owner_cls, name
    @classmethod
    def components(cls):
        # Yield (owner class, name) for every ComponentBase attribute,
        # per MRO class, each class's entries sorted by definition order.
        assert not hasattr(super(Composite, cls), 'components')
        seen = set()
        for owner_cls in cls.__mro__:
            result = []
            for name, comp_cls in owner_cls.__dict__.items():
                if name in seen:
                    continue
                seen.add(name)
                if not isinstance(comp_cls, type):
                    continue
                if not issubclass(comp_cls, ComponentBase):
                    continue
                result.append((comp_cls._order, owner_cls, name))
            result = sorted(result, key=lambda r: r[0])
            for _order, owner_cls, name in result:
                yield owner_cls, name
    def __getattr__(self, name):
        # Delegate unknown attributes to the component owning the knob
        # of the same name.
        for owner_cls, knob_name in self.knobs():
            if knob_name == name:
                break
        else:
            raise AttributeError(name)
        for component in self.__components:
            if isinstance(component, owner_cls):
                break
        else:
            raise AttributeError(name)
        return getattr(component, name)
    def _reset(self):
        # Materialize child components before arming our own coroutine.
        self.__components = list(self._get_components())
        super(Composite, self)._reset()
    def _get_components(self):
        for _owner_cls, name in self.components():
            yield getattr(self, name)
    def _configure(self):
        # Round-robin the children's validators until all are done...
        validate = [(c, c._validator()) for c in self.__components]
        while True:
            new_validate = []
            for child, validator in validate:
                try:
                    next(validator)
                except StopIteration:
                    pass
                else:
                    new_validate.append((child, validator))
            if not new_validate:
                break
            validate = new_validate
            yield
        if not self.__components:
            return
        # ...switch ourselves into the execution phase...
        yield from_(super(Composite, self)._configure())
        # ...then round-robin the children's executors the same way.
        execute = [(c, c._executor()) for c in self.__components
                   if not c.done()]
        while True:
            new_execute = []
            for child, executor in execute:
                try:
                    next(executor)
                except StopIteration:
                    pass
                else:
                    new_execute.append((child, executor))
            if not new_execute:
                break
            execute = new_execute
            yield
| 18,119
|
Python
|
.py
| 517
| 24.388781
| 79
| 0.552359
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,638
|
common.py
|
freeipa_freeipa/ipapython/install/common.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
Common stuff.
"""
import logging
from . import core
from .util import from_
__all__ = ['step', 'Installable', 'Interactive', 'installer',
'uninstaller']
logger = logging.getLogger(__name__)
def step():
    """Decorator factory turning a generator function into a Step component."""
    def decorator(func):
        step_cls = core.Component(Step)
        step_cls._installer = staticmethod(func)
        return step_cls
    return decorator
class Installable(core.Configurable):
    """
    Configurable which does install or uninstall.
    """
    # True when running in uninstall mode; selects both the component
    # order and which coroutine (_install/_uninstall) is used.
    uninstalling = core.Property(False)
    def _get_components(self):
        components = super(Installable, self)._get_components()
        if self.uninstalling:  # pylint: disable=using-constant-test
            # Uninstall in the reverse order of installation.
            components = reversed(list(components))
        return components
    def _configure(self):
        if self.uninstalling:  # pylint: disable=using-constant-test
            return self._uninstall()
        else:
            return self._install()
    def _install(self):
        assert not hasattr(super(Installable, self), '_install')
        return super(Installable, self)._configure()
    def _uninstall(self):
        assert not hasattr(super(Installable, self), '_uninstall')
        return super(Installable, self)._configure()
class Step(Installable):
    """Installable wrapping one install/uninstall generator function.

    Instances are created via core.Component(Step) by the step()
    decorator in this module.
    """
    @property
    def parent(self):
        # Stub for the unbound class; the component wrapper supplies the
        # real parent (see core.ComponentBase.parent).
        raise AttributeError('parent')
    def _install(self):
        # Advance the wrapped installer one yield at a time, interleaving
        # with the superclass phase machinery.
        for unused in self._installer(self.parent):
            yield from_(super(Step, self)._install())
    @staticmethod
    def _installer(obj):
        yield
    def _uninstall(self):
        for unused in self._uninstaller(self.parent):
            yield from_(super(Step, self)._uninstall())
    @staticmethod
    def _uninstaller(obj):
        yield
    @classmethod
    def uninstaller(cls, func):
        """Class decorator registering *func* as this step's uninstaller."""
        cls._uninstaller = staticmethod(func)
        return cls
class Interactive(core.Configurable):
    """Mix-in declaring the ``interactive`` property (defaults to False)."""
    # presumably gates user prompting; consumers live outside this module
    interactive = core.Property(False)
def installer(cls):
    """Return a subclass of *cls* locked to install mode.

    The subclass forces ``uninstalling=False`` at construction time.
    """
    class Installer(cls, Installable):
        def __init__(self, **kwargs):
            super(Installer, self).__init__(uninstalling=False,
                                            **kwargs)
    Installer.__name__ = 'installer({0})'.format(cls.__name__)
    return Installer
def uninstaller(cls):
    """Return a subclass of *cls* locked to uninstall mode.

    The subclass forces ``uninstalling=True`` at construction time.
    """
    class Uninstaller(cls, Installable):
        def __init__(self, **kwargs):
            super(Uninstaller, self).__init__(uninstalling=True,
                                              **kwargs)
    Uninstaller.__name__ = 'uninstaller({0})'.format(cls.__name__)
    return Uninstaller
| 2,603
|
Python
|
.py
| 75
| 27.133333
| 68
| 0.63141
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,639
|
typing.py
|
freeipa_freeipa/ipapython/install/typing.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import weakref
# Cache of parametrized List classes, keyed by (base class, parameter).
# Values are held weakly so unused parametrizations can be collected.
_cache = weakref.WeakValueDictionary()


class ListMeta(type):
    """Metaclass providing ``cls[T]`` subscription for :class:`List`."""

    def __getitem__(cls, key):
        if not isinstance(key, type):
            raise TypeError("Parameters to generic types must be types. "
                            "Got {!r}.".format(key))
        t = ListMeta(
            cls.__name__,
            cls.__bases__,
            {
                '__parameters__': (key,),
                '__init__': cls.__init__,
            }
        )
        # Bug fix: the cache was only ever read (_cache.get) and never
        # populated, so every subscription built a brand-new class.
        # setdefault() stores the freshly built class on first use and
        # returns the cached one afterwards; keying on (cls, key) keeps
        # distinct generic bases from sharing entries.
        return _cache.setdefault((cls, key), t)
class List(list, metaclass=ListMeta):
    """Generic list type: subscript as ``List[T]``; instantiate via ``list()``."""
    # Filled with the parameter tuple by ListMeta.__getitem__.
    __parameters__ = ()
    def __init__(self, *_args, **_kwargs):
        raise TypeError("Type List cannot be instantiated; use list() instead")
| 769
|
Python
|
.py
| 23
| 24.521739
| 79
| 0.539402
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,640
|
test_placeholder.py
|
freeipa_freeipa/pypi/test_placeholder.py
|
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
import importlib
import pkg_resources
import pytest
@pytest.mark.parametrize("modname", [
    # placeholder packages raise ImportError
    'ipaserver',
    'ipatests',
    # PyPI packages do not have install subpackage
    'ipaclient.install',
    'ipalib.install',
    'ipapython.install',
    # override module should not be shipped in wheels
    'ipaplatform.override',
])
def test_fail_import(modname):
    """Placeholder and stripped-out modules must not be importable."""
    # pytest.raises replaces the manual try/except/else/pytest.fail dance
    # and keeps the assertion scoped to the import itself.
    with pytest.raises(ImportError):
        importlib.import_module(modname)
@pytest.mark.parametrize("modname", [
    'ipaclient',
    'ipalib',
    'ipaplatform',
    'ipapython',
])
def test_import(modname):
    """The published client-side packages must import cleanly."""
    importlib.import_module(modname)
@pytest.mark.parametrize("pkgname", [
    'ipaclient',
    'ipalib',
    'ipaplatform',
    'ipapython',
    'ipaserver',
    'ipatests',
])
def test_package_installed(pkgname):
    """Every IPA distribution must be resolvable from installed metadata."""
    # NOTE(review): pkg_resources is deprecated upstream; consider
    # importlib.metadata once the supported Python baseline allows it.
    pkg_resources.require(pkgname)
| 1,028
|
Python
|
.py
| 40
| 21.5
| 66
| 0.70102
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,641
|
setup.py
|
freeipa_freeipa/pypi/ipa/setup.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
"""Dummy package for FreeIPA
Please install ipaclient instead.
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
# include ../../ for ipasetup.py
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
from ipasetup import ipasetup # noqa: E402
ipasetup(
name='ipa',
doc = __doc__,
install_requires=[
"ipaclient",
]
)
| 492
|
Python
|
.py
| 19
| 21.315789
| 65
| 0.633262
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,642
|
setup.py
|
freeipa_freeipa/pypi/ipatests/setup.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
"""Dummy package for FreeIPA
ipatests is not yet available as PyPI package.
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
# include ../../ for ipasetup.py
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
from ipasetup import ipasetup # noqa: E402
ipasetup(
name='ipatests',
doc = __doc__,
packages=[
"ipatests",
],
install_requires=[
"ipaclient",
]
)
| 564
|
Python
|
.py
| 22
| 20.272727
| 65
| 0.607807
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,643
|
__init__.py
|
freeipa_freeipa/pypi/ipatests/ipatests/__init__.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
raise ImportError("ipatests is not yet supported as PyPI package.")
| 139
|
Python
|
.py
| 4
| 33.5
| 67
| 0.798507
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,644
|
setup.py
|
freeipa_freeipa/pypi/ipaserver/setup.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
"""Dummy package for FreeIPA
ipaserver is not yet available as PyPI package.
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
    # include ../../ for ipasetup.py
    sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
    from ipasetup import ipasetup  # noqa: E402
    ipasetup(
        name='ipaserver',
        # module docstring doubles as the package description;
        # it previously said "ipatests" (copy-paste from ipatests/setup.py)
        doc=__doc__,
        packages=[
            "ipaserver",
        ],
        install_requires=[
            "ipaclient",
        ]
    )
| 566
|
Python
|
.py
| 22
| 20.363636
| 65
| 0.609259
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,645
|
__init__.py
|
freeipa_freeipa/pypi/ipaserver/ipaserver/__init__.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
raise ImportError("ipaserver is not yet supported as PyPI package.")
| 140
|
Python
|
.py
| 4
| 33.75
| 68
| 0.8
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,646
|
setup.py
|
freeipa_freeipa/pypi/freeipa/setup.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
"""Dummy package for FreeIPA
Please install ipaclient instead.
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
# include ../../ for ipasetup.py
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
from ipasetup import ipasetup # noqa: E402
ipasetup(
name='freeipa',
doc = __doc__,
install_requires=[
"ipaclient",
]
)
| 496
|
Python
|
.py
| 19
| 21.526316
| 65
| 0.636364
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,647
|
messages.py
|
freeipa_freeipa/ipalib/messages.py
|
# Authors:
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Custom message (debug, info, warning) classes passed through RPC.
These are added to the "messages" entry in a RPC response, and printed to the
user as log messages.
Each message class has a unique numeric "errno" attribute from the 10000-10999
range, so that it does not clash with PublicError numbers.
Messages also have the 'type' argument, set to one of 'debug', 'info',
'warning', 'error'. This determines the severity of the message.
"""
from __future__ import print_function
from inspect import isclass
import six
from ipalib.constants import TYPE_ERROR
from ipalib.text import _ as ugettext
from ipalib.text import Gettext, NGettext
from ipalib.capabilities import client_has_capability
if six.PY3:
unicode = str
def add_message(version, result, message):
    """Append *message* (as a dict) to result['messages'] when the client's
    API *version* supports the 'messages' capability; otherwise do nothing.
    """
    if not client_has_capability(version, 'messages'):
        return
    result.setdefault('messages', []).append(message.to_dict())
def process_message_arguments(obj, format=None, message=None, **kw):
    """Populate *obj* (a message/error instance) from its constructor args.

    Sets obj.kw, obj.forwarded, obj.msg and obj.strerror, and mirrors every
    keyword onto obj as an attribute.  Either *format* (with substitution
    from **kw) or a pre-built unicode *message* may be given, not both.
    """
    # Coerce non-int keyword values to unicode for safe %-substitution.
    for key, value in kw.items():
        if not isinstance(value, int):
            try:
                kw[key] = unicode(value)
            except UnicodeError:
                pass
    obj.kw = kw
    name = obj.__class__.__name__
    if obj.format is not None and format is not None:
        raise ValueError(
            'non-generic %r needs format=None; got format=%r' % (
                name, format)
        )
    if message is None:
        if obj.format is None:
            if format is None:
                raise ValueError(
                    '%s.format is None yet format=None, message=None' % name
                )
            obj.format = format
        obj.forwarded = False
        obj.msg = obj.format % kw
        # strerror is the translated variant when format is a plain str.
        if isinstance(obj.format, str):
            obj.strerror = ugettext(obj.format) % kw
        else:
            obj.strerror = obj.format % kw
        if 'instructions' in kw:
            # A list of instructions is rendered one per line.
            def convert_instructions(value):
                if isinstance(value, list):
                    result = u'\n'.join(unicode(line) for line in value)
                    return result
                return value
            instructions = u'\n'.join((unicode(_('Additional instructions:')),
                                      convert_instructions(kw['instructions'])))
            obj.strerror = u'\n'.join((obj.strerror, instructions))
    else:
        # Pre-built message: must already be unicode (or translatable).
        if isinstance(message, (Gettext, NGettext)):
            message = unicode(message)
        elif type(message) is not unicode:
            raise TypeError(
                TYPE_ERROR % ('message', unicode, message, type(message))
            )
        obj.forwarded = True
        obj.msg = message
        obj.strerror = message
    # Expose keywords as attributes, refusing to clobber existing ones.
    for (key, value) in kw.items():
        assert not hasattr(obj, key), 'conflicting kwarg %s.%s = %r' % (
            name, key, value,
        )
        setattr(obj, key, value)
_texts = []
def _(message):
_texts.append(message)
return message
class PublicMessage(UserWarning):
    """
    **10000** Base class for messages that can be forwarded in an RPC response.

    Subclasses set ``errno``, ``type`` (severity) and ``format``.
    """
    def __init__(self, format=None, message=None, **kw):
        process_message_arguments(self, format, message, **kw)
        super(PublicMessage, self).__init__(self.msg)
    # Unique numeric code; concrete messages use the 13xxx range.
    errno = 10000
    format = None
    def to_dict(self):
        """Export this message to a dict that can be sent through RPC"""
        return dict(
            type=unicode(self.type),
            name=unicode(type(self).__name__),
            message=self.strerror,
            code=self.errno,
            data=self.kw,
        )
class VersionMissing(PublicMessage):
"""
**13001** Used when client did not send the API version.
For example:
>>> VersionMissing(server_version='2.123').strerror
u"API Version number was not sent, forward compatibility not guaranteed. Assuming server's API version, 2.123"
"""
errno = 13001
type = 'warning'
format = _("API Version number was not sent, forward compatibility not "
"guaranteed. Assuming server's API version, %(server_version)s")
class ForwardersWarning(PublicMessage):
"""
**13002** Used when (master) zone contains forwarders
"""
errno = 13002
type = 'warning'
format = _(
u"DNS forwarder semantics changed since IPA 4.0.\n"
u"You may want to use forward zones (dnsforwardzone-*) instead.\n"
u"For more details read the docs.")
class DNSSECWarning(PublicMessage):
"""
**13003** Used when user change DNSSEC settings
"""
errno = 13003
type = "warning"
format = _("DNSSEC support is experimental.\n%(additional_info)s")
class OptionDeprecatedWarning(PublicMessage):
"""
**13004** Used when user uses a deprecated option
"""
errno = 13004
type = "warning"
format = _(u"'%(option)s' option is deprecated. %(additional_info)s")
class OptionSemanticChangedWarning(PublicMessage):
"""
**13005** Used when option which recently changes its semantic is used
"""
errno = 13005
type = "warning"
format = _(u"Semantic of %(label)s was changed. %(current_behavior)s\n"
u"%(hint)s")
class DNSServerValidationWarning(PublicMessage):
"""
**13006** Used when a DNS server is not to able to resolve query
"""
errno = 13006
type = "warning"
format = _(u"DNS server %(server)s: %(error)s.")
class DNSServerDoesNotSupportDNSSECWarning(PublicMessage):
"""
**13007** Used when a DNS server does not support DNSSEC validation
"""
errno = 13007
type = "warning"
format = _(u"DNS server %(server)s does not support DNSSEC: %(error)s.\n"
u"If DNSSEC validation is enabled on IPA server(s), "
u"please disable it.")
class ForwardzoneIsNotEffectiveWarning(PublicMessage):
"""
**13008** Forwardzone is not effective, forwarding will not work because
there is authoritative parent zone, without proper NS delegation
"""
errno = 13008
type = "warning"
format = _(u"forward zone \"%(fwzone)s\" is not effective because of "
u"missing proper NS delegation in authoritative zone "
u"\"%(authzone)s\". Please add NS record "
u"\"%(ns_rec)s\" to parent zone \"%(authzone)s\".")
class DNSServerDoesNotSupportEDNS0Warning(PublicMessage):
"""
**13009** Used when a DNS server does not support EDNS0, required for
DNSSEC support
"""
errno = 13009
type = "warning"
format = _(u"DNS server %(server)s does not support EDNS0 (RFC 6891): "
u"%(error)s.\n"
u"If DNSSEC validation is enabled on IPA server(s), "
u"please disable it.")
class DNSSECValidationFailingWarning(PublicMessage):
"""
**13010** Used when a DNSSEC validation failed on IPA DNS server
"""
errno = 13010
type = "warning"
format = _(u"DNSSEC validation failed: %(error)s.\n"
u"Please verify your DNSSEC configuration or disable DNSSEC "
u"validation on all IPA servers.")
class KerberosTXTRecordCreationFailure(PublicMessage):
"""
**13011** Used when a _kerberos TXT record could not be added to
a DNS zone.
"""
errno = 13011
type = "warning"
format = _(
"The _kerberos TXT record from domain %(domain)s could not be created "
"(%(error)s).\nThis can happen if the zone is not managed by IPA. "
"Please create the record manually, containing the following "
"value: '%(realm)s'"
)
class KerberosTXTRecordDeletionFailure(PublicMessage):
"""
**13012** Used when a _kerberos TXT record could not be removed from
a DNS zone.
"""
errno = 13012
type = "warning"
format = _(
"The _kerberos TXT record from domain %(domain)s could not be removed "
"(%(error)s).\nThis can happen if the zone is not managed by IPA. "
"Please remove the record manually."
)
class DNSSECMasterNotInstalled(PublicMessage):
"""
**13013** Used when a DNSSEC is not installed on system (no DNSSEC
master server is installed).
"""
errno = 13013
type = "warning"
format = _(
"No DNSSEC key master is installed. DNSSEC zone signing will not work "
"until the DNSSEC key master is installed."
)
class DNSSuspiciousRelativeName(PublicMessage):
"""
**13014** Relative name "record.zone" is being added into zone "zone.",
which is probably a mistake. User probably wanted to either specify
relative name "record" or use FQDN "record.zone.".
"""
errno = 13014
type = "warning"
format = _(
"Relative record name '%(record)s' contains the zone name '%(zone)s' "
"as a suffix, which results in FQDN '%(fqdn)s'. This is usually a "
"mistake caused by a missing dot at the end of the name specification."
)
class CommandDeprecatedWarning(PublicMessage):
"""
**13015** Used when user uses a deprecated option
"""
errno = 13015
type = "warning"
format = _(u"'%(command)s' is deprecated. %(additional_info)s")
class ExternalCommandOutput(PublicMessage):
"""
**13016** Line of output from an external command.
"""
errno = 13016
type = "info"
format = _("%(line)s")
class SearchResultTruncated(PublicMessage):
"""
**13017** Results of LDAP search has been truncated
"""
errno = 13017
type = "warning"
format = _("Search result has been truncated: %(reason)s")
class BrokenTrust(PublicMessage):
"""
**13018** Trust for a specified domain is broken
"""
errno = 13018
type = "warning"
format = _("Your trust to %(domain)s is broken. Please re-create it by "
"running 'ipa trust-add' again.")
class ResultFormattingError(PublicMessage):
"""
**13019** Unable to correctly format some part of the result
"""
type = "warning"
errno = 13019
class FailedToRemoveHostDNSRecords(PublicMessage):
"""
**13020** Failed to remove host DNS records
"""
errno = 13020
type = "warning"
format = _("DNS record(s) of host %(host)s could not be removed. "
"(%(reason)s)")
class DNSForwardPolicyConflictWithEmptyZone(PublicMessage):
"""
**13021** Forward zone 1.10.in-addr.arpa with policy "first"
will not forward anything because BIND automatically prefers
empty zone "10.in-addr.arpa.".
"""
errno = 13021
type = "warning"
format = _(
"Forwarding policy conflicts with some automatic empty zones. "
"Queries for zones specified by RFC 6303 will ignore "
"forwarding and recursion and always result in NXDOMAIN answers. "
"To override this behavior use forward policy 'only'."
)
class DNSUpdateOfSystemRecordFailed(PublicMessage):
"""
**13022** Update of a DNS system record failed
"""
errno = 13022
type = "warning"
format = _(
"Update of system record '%(record)s' failed with error: %(error)s"
)
# Public warning/info messages 13023-13032. Conventions in each class:
# ``errno`` is the unique message number (matching the **NNNNN** marker in
# the docstring), ``type`` is the severity string ("warning"/"info"/"error";
# note it shadows the ``type`` builtin in class scope only), and ``format``
# is the translatable %-style template filled from constructor kwargs.
class DNSUpdateNotIPAManagedZone(PublicMessage):
    """
    **13023** Zone for system records is not managed by IPA
    """
    errno = 13023
    type = "warning"
    format = _(
        "IPA does not manage the zone %(zone)s, please add records "
        "to your DNS server manually"
    )
class AutomaticDNSRecordsUpdateFailed(PublicMessage):
    """
    **13024** Automatic update of DNS records failed
    """
    errno = 13024
    type = "warning"
    format = _(
        "Automatic update of DNS system records failed. "
        "Please re-run update of system records manually to get list of "
        "missing records."
    )
class ServiceRestartRequired(PublicMessage):
    """
    **13025** Service restart is required
    """
    errno = 13025
    type = "warning"
    format = _(
        "Service %(service)s requires restart on IPA server %(server)s to "
        "apply configuration changes."
    )
class LocationWithoutDNSServer(PublicMessage):
    """
    **13026** Location without DNS server
    """
    errno = 13026
    type = "warning"
    format = _(
        "No DNS servers in IPA location %(location)s. Without DNS servers "
        "location is not working as expected."
    )
# The two server-removal messages below define no ``format``: presumably
# callers pass an explicit message= argument -- TODO confirm against callers.
class ServerRemovalInfo(PublicMessage):
    """
    **13027** Informative message printed during removal of IPA server
    """
    errno = 13027
    type = "info"
class ServerRemovalWarning(PublicMessage):
    """
    **13028** Warning raised during removal of IPA server
    """
    errno = 13028
    type = "warning"
class CertificateInvalid(PublicMessage):
    """
    **13029** Failed to parse a certificate
    """
    errno = 13029
    type = "error"
    format = _("%(subject)s: Malformed certificate. "
               "%(reason)s")
class FailedToAddHostDNSRecords(PublicMessage):
    """
    **13030** Failed to add host DNS records
    """
    errno = 13030
    type = "warning"
    format = _("The host was added but the DNS update failed with: "
               "%(reason)s")
class LightweightCACertificateNotAvailable(PublicMessage):
    """
    **13031** Certificate is not available
    """
    errno = 13031
    type = "error"
    format = _("The certificate for %(ca)s is not available on this server.")
class MissingTargetAttributesinPermission(PublicMessage):
    """
    **13032** A permission was added with no target attributes
    """
    errno = 13032
    type = "warning"
    format = _("The permission has %(right)s rights but no attributes "
               "are set.")
def iter_messages(variables, base):
    """Yield every public class in *variables* that subclasses *base*.

    Entries whose name starts with an underscore and values that are not
    classes are skipped.  Note this is a generator, not a tuple (the old
    docstring was misleading); callers materialize it as needed.
    """
    for name, obj in variables.items():
        if name.startswith('_'):
            continue
        if isclass(obj) and issubclass(obj, base):
            yield obj
# Immutable registry of every PublicMessage subclass defined in this module,
# sorted by errno; built once at import time from the module globals.
public_messages = tuple(sorted(
    iter_messages(globals(), PublicMessage), key=lambda E: E.errno))
def print_report(label, classes):
    """Print one ``errno<TAB>name`` line per class, then a count summary.

    ``label`` is the noun used in the trailing "(N label)" summary line.
    """
    lines = ['%d\t%s' % (cls.errno, cls.__name__) for cls in classes]
    lines.append('(%d %s)' % (len(classes), label))
    print('\n'.join(lines))
if __name__ == '__main__':
    # Running this module as a script prints the message-number registry.
    print_report('public messages', public_messages)
| 14,980
|
Python
|
.py
| 417
| 29.601918
| 114
| 0.646139
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,648
|
errors.py
|
freeipa_freeipa/ipalib/errors.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008-2016 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Custom exception classes (some which are RPC transparent).
`PrivateError` and its subclasses are custom IPA exceptions that will *never* be
forwarded in a Remote Procedure Call (RPC) response.
On the other hand, `PublicError` and its subclasses can be forwarded in an RPC
response. These public errors each carry a unique integer error code as well as
a gettext translated error message (translated at the time the exception is
raised). The purpose of the public errors is to relay information about
*expected* user errors, service availability errors, and so on. They should
*never* be used for *unexpected* programmatic or run-time errors.
For security reasons it is *extremely* important that arbitrary exceptions *not*
be forwarded in an RPC response. Unexpected exceptions can easily contain
compromising information in their error messages. Any time the server catches
any exception that isn't a `PublicError` subclass, it should raise an
`InternalError`, which itself always has the same, static error message (and
therefore cannot be populated with information about the true exception).
The public errors are arranged into five main blocks of error code ranges:
============= ========================================
Error codes Exceptions
============= ========================================
1000 - 1999 `AuthenticationError` and its subclasses
2000 - 2999 `AuthorizationError` and its subclasses
3000 - 3999 `InvocationError` and its subclasses
4000 - 4999 `ExecutionError` and its subclasses
5000 - 5999 `GenericError` and its subclasses
============= ========================================
Within these five blocks some sub-ranges are already allocated for certain types
of error messages, while others are reserved for future use. Here are the
current block assignments:
- **900-5999** `PublicError` and its subclasses
- **901 - 907** Assigned to special top-level public errors
- **908 - 999** *Reserved for future use*
- **1000 - 1999** `AuthenticationError` and its subclasses
- **1001 - 1099** Open for general authentication errors
- **1100 - 1199** `KerberosError` and its subclasses
- **1200 - 1299** `SessionError` and its subclasses
- **1300 - 1999** *Reserved for future use*
- **2000 - 2999** `AuthorizationError` and its subclasses
- **2001 - 2099** Open for general authorization errors
- **2100 - 2199** `ACIError` and its subclasses
- **2200 - 2999** *Reserved for future use*
- **3000 - 3999** `InvocationError` and its subclasses
- **3001 - 3099** Open for general invocation errors
- **3100 - 3199** *Reserved for future use*
- **4000 - 4999** `ExecutionError` and its subclasses
- **4001 - 4099** Open for general execution errors
- **4100 - 4199** `BuiltinError` and its subclasses
- **4200 - 4299** `LDAPError` and its subclasses
- **4300 - 4399** `CertificateError` and its subclasses
- **4400 - 4499** `DNSError` and (some of) its subclasses
- **4500 - 4999** *Reserved for future use*
- **5000 - 5999** `GenericError` and its subclasses
- **5001 - 5099** Open for generic errors
- **5100 - 5999** *Reserved for future use*
"""
import six
from ipalib.text import ngettext as ungettext
from ipalib import messages
class PrivateError(Exception):
    """
    Base class for exceptions that are *never* forwarded in an RPC response.
    """
    # %-style template; subclasses override it and the constructor kwargs
    # fill it in.
    format = ''
    def __init__(self, **kw):
        self.msg = self.format % kw
        self.kw = kw
        for (key, value) in kw.items():
            # Each kwarg becomes an instance attribute below, so refuse any
            # name that would clobber an existing attribute (``msg``, ``kw``,
            # ``format``, or anything inherited from Exception).
            assert not hasattr(self, key), 'conflicting kwarg %s.%s = %r' % (
                self.__class__.__name__, key, value,
            )
            setattr(self, key, value)
        Exception.__init__(self, self.msg)
    if six.PY3:
        @property
        def message(self):
            # Python 2 compatibility: exceptions lost ``.message`` in py3,
            # so re-create it as the stringified exception.
            return str(self)
class SubprocessError(PrivateError):
    """
    Raised when ``subprocess.call()`` returns a non-zero exit status.
    This custom exception is needed because Python 2.4 doesn't have the
    ``subprocess.CalledProcessError`` exception (which was added in Python 2.5).
    For example:
    >>> raise SubprocessError(returncode=2, argv=('ls', '-lh', '/no-foo/'))
    Traceback (most recent call last):
    ...
    SubprocessError: return code 2 from ('ls', '-lh', '/no-foo/')
    The exit code of the sub-process is available via the ``returncode``
    instance attribute. For example:
    >>> e = SubprocessError(returncode=1, argv=('/bin/false',))
    >>> e.returncode
    1
    >>> e.argv # argv is also available
    ('/bin/false',)
    """
    # NOTE(review): the Python 2.4 rationale above is historical; the class is
    # presumably kept for API compatibility -- confirm before removing it.
    format = 'return code %(returncode)d from %(argv)r'
# The plugin-framework errors below are PrivateError subclasses: they are
# raised during plugin registration/loading and are never sent over RPC,
# which is why their ``format`` templates are plain (untranslated) strings.
class PluginSubclassError(PrivateError):
    """
    Raised when a plugin doesn't subclass from an allowed base.
    For example:
    >>> raise PluginSubclassError(plugin='bad', bases=('base1', 'base2'))
    Traceback (most recent call last):
    ...
    PluginSubclassError: 'bad' not subclass of any base in ('base1', 'base2')
    """
    format = '%(plugin)r not subclass of any base in %(bases)r'
class PluginDuplicateError(PrivateError):
    """
    Raised when the same plugin class is registered more than once.
    For example:
    >>> raise PluginDuplicateError(plugin='my_plugin')
    Traceback (most recent call last):
    ...
    PluginDuplicateError: 'my_plugin' was already registered
    """
    format = '%(plugin)r was already registered'
class PluginOverrideError(PrivateError):
    """
    Raised when a plugin overrides another without using ``override=True``.
    For example:
    >>> raise PluginOverrideError(base='Command', name='env', plugin='my_env')
    Traceback (most recent call last):
    ...
    PluginOverrideError: unexpected override of Command.env with 'my_env'
    """
    format = 'unexpected override of %(base)s.%(name)s with %(plugin)r'
class PluginMissingOverrideError(PrivateError):
    """
    Raised when a plugin overrides another that has not been registered.
    For example:
    >>> raise PluginMissingOverrideError(base='Command', name='env', plugin='my_env')
    Traceback (most recent call last):
    ...
    PluginMissingOverrideError: Command.env not registered, cannot override with 'my_env'
    """
    format = '%(base)s.%(name)s not registered, cannot override with %(plugin)r'
class SkipPluginModule(PrivateError):
    """
    Raised to abort the loading of a plugin module.
    """
    format = '%(reason)s'
class PluginsPackageError(PrivateError):
    """
    Raised when ``package.plugins`` is a module instead of a sub-package.
    """
    format = '%(name)s must be sub-package, not module: %(file)r'
class PluginModuleError(PrivateError):
    """
    Raised when a module is not a valid plugin module.
    """
    format = '%(name)s is not a valid plugin module'
class KrbPrincipalWrongFAST(PrivateError):
    """
    Raised when it is not possible to use our FAST armor for kinit
    """
    format = '%(principal)s cannot use Anonymous PKINIT as a FAST armor'
##############################################################################
# Public errors:
_texts = []
def _(message):
_texts.append(message)
return message
class PublicError(Exception):
    """
    **900** Base class for exceptions that can be forwarded in an RPC response.
    """
    def __init__(self, format=None, message=None, **kw):
        # All format/message/kwarg handling (template selection,
        # interpolation, attribute assignment) is delegated to the helper
        # shared with ipalib.messages.
        messages.process_message_arguments(self, format, message, **kw)
        super(PublicError, self).__init__(self.msg)
    # Unique public error code; subclasses override (see the range table in
    # the module docstring).
    errno = 900
    # Numeric result value associated with the error -- presumably used as a
    # CLI exit status; TODO confirm against the command-line front end.
    rval = 1
    format = None
    if six.PY3:
        @property
        def message(self):
            # Python 2 compatibility shim: exceptions lost ``.message`` in py3.
            return str(self)
class VersionError(PublicError):
"""
**901** Raised when client and server versions are incompatible.
For example:
>>> raise VersionError(cver='2.0', sver='2.1', server='https://localhost')
Traceback (most recent call last):
...
VersionError: 2.0 client incompatible with 2.1 server at 'https://localhost'
"""
errno = 901
format = _("%(cver)s client incompatible with %(sver)s server at '%(server)s'")
class UnknownError(PublicError):
"""
**902** Raised when client does not know error it caught from server.
For example:
>>> raise UnknownError(code=57, server='localhost', error=u'a new error')
...
Traceback (most recent call last):
...
UnknownError: unknown error 57 from localhost: a new error
"""
errno = 902
format = _('unknown error %(code)d from %(server)s: %(error)s')
class InternalError(PublicError):
    """
    **903** Raised to conceal a non-public exception.
    For example:
    >>> raise InternalError()
    Traceback (most recent call last):
    ...
    InternalError: an internal error has occurred
    """
    errno = 903
    format = _('an internal error has occurred')
    def __init__(self, message=None):
        """
        Security issue: ignore any information given to constructor.
        """
        # ``message`` is deliberately dropped so details of the concealed
        # exception can never leak into an RPC response.
        PublicError.__init__(self)
class ServerInternalError(PublicError):
"""
**904** Raised when client catches an `InternalError` from server.
For example:
>>> raise ServerInternalError(server='https://localhost')
Traceback (most recent call last):
...
ServerInternalError: an internal error has occurred on server at 'https://localhost'
"""
errno = 904
format = _("an internal error has occurred on server at '%(server)s'")
class CommandError(PublicError):
"""
**905** Raised when an unknown command is called.
For example:
>>> raise CommandError(name='foobar')
Traceback (most recent call last):
...
CommandError: unknown command 'foobar'
"""
errno = 905
format = _("unknown command '%(name)s'")
class ServerCommandError(PublicError):
"""
**906** Raised when client catches a `CommandError` from server.
For example:
>>> e = CommandError(name='foobar')
>>> raise ServerCommandError(error=str(e), server='https://localhost')
Traceback (most recent call last):
...
ServerCommandError: error on server 'https://localhost': unknown command 'foobar'
"""
errno = 906
format = _("error on server '%(server)s': %(error)s")
class NetworkError(PublicError):
"""
**907** Raised when a network connection cannot be created.
For example:
>>> raise NetworkError(uri='ldap://localhost:389', error=_(u'Connection refused'))
Traceback (most recent call last):
...
NetworkError: cannot connect to 'ldap://localhost:389': Connection refused
"""
errno = 907
format = _("cannot connect to '%(uri)s': %(error)s")
class ServerNetworkError(PublicError):
"""
**908** Raised when client catches a `NetworkError` from server.
"""
errno = 908
format = _("error on server '%(server)s': %(error)s")
class JSONError(PublicError):
"""
**909** Raised when server received a malformed JSON-RPC request.
"""
errno = 909
format = _('Invalid JSON-RPC request: %(error)s')
class XMLRPCMarshallError(PublicError):
"""
**910** Raised when the XML-RPC lib cannot marshall the request
For example:
>>> raise XMLRPCMarshallError(error=_('int exceeds XML-RPC limits'))
Traceback (most recent call last):
...
XMLRPCMarshallError: error marshalling data for XML-RPC transport: int exceeds XML-RPC limits
"""
errno = 910
format = _('error marshalling data for XML-RPC transport: %(error)s')
class RefererError(PublicError):
"""
**911** Raised when the request does not contain an HTTP referer
For example:
>>> raise RefererError(referer='referer')
Traceback (most recent call last):
...
RefererError: Missing or invalid HTTP Referer, referer
"""
errno = 911
format = _('Missing or invalid HTTP Referer, %(referer)s')
class EnvironmentError(PublicError):
    """
    **912** Raised when a command is called with invalid environment settings
    """
    # NOTE(review): this shadows the ``EnvironmentError`` builtin (an alias of
    # OSError) within this module; renaming would break the public API, so
    # callers must reference it as ``errors.EnvironmentError``.
    errno = 912
class SystemEncodingError(PublicError):
"""
**913** Raised when system encoding is not UTF-8
"""
errno = 913
format = _(
"System encoding must be UTF-8, '%(encoding)s' is not supported. "
"Set LC_ALL=\"C.UTF-8\", or LC_ALL=\"\" and LC_CTYPE=\"C.UTF-8\"."
)
##############################################################################
# 1000 - 1999: Authentication errors
class AuthenticationError(PublicError):
"""
**1000** Base class for authentication errors (*1000 - 1999*).
"""
errno = 1000
class KerberosError(AuthenticationError):
    """
    **1100** Base class for Kerberos authentication errors (*1100 - 1199*).

    For example:

    >>> raise KerberosError(major=_('Unspecified GSS failure. Minor code may provide more information'), minor=_('No credentials cache found'))
    Traceback (most recent call last):
    ...
    KerberosError: Kerberos error: Unspecified GSS failure. Minor code may provide more information/No credentials cache found
    """
    errno = 1100
    format = _('Kerberos error: %(major)s/%(minor)s')
class CCacheError(KerberosError):
    """
    **1101** Raised when the server does not receive Kerberos credentials.
    For example:
    >>> raise CCacheError()
    Traceback (most recent call last):
    ...
    CCacheError: did not receive Kerberos credentials
    """
    errno = 1101
    format = _('did not receive Kerberos credentials')
class ServiceError(KerberosError):
"""
**1102** Raised when service is not found in Kerberos DB.
For example:
>>> raise ServiceError(service='HTTP@localhost')
Traceback (most recent call last):
...
ServiceError: Service 'HTTP@localhost' not found in Kerberos database
"""
errno = 1102
format = _("Service '%(service)s' not found in Kerberos database")
class NoCCacheError(KerberosError):
"""
**1103** Raised when a client attempts to use Kerberos without a ccache.
For example:
>>> raise NoCCacheError()
Traceback (most recent call last):
...
NoCCacheError: No credentials cache found
"""
errno = 1103
format = _('No credentials cache found')
class TicketExpired(KerberosError):
"""
**1104** Raised when a client attempts to use an expired ticket
For example:
>>> raise TicketExpired()
Traceback (most recent call last):
...
TicketExpired: Ticket expired
"""
errno = 1104
format = _('Ticket expired')
class BadCCachePerms(KerberosError):
"""
**1105** Raised when a client has bad permissions on their ccache
For example:
>>> raise BadCCachePerms()
Traceback (most recent call last):
...
BadCCachePerms: Credentials cache permissions incorrect
"""
errno = 1105
format = _('Credentials cache permissions incorrect')
class BadCCacheFormat(KerberosError):
"""
**1106** Raised when a client has a misformated ccache
For example:
>>> raise BadCCacheFormat()
Traceback (most recent call last):
...
BadCCacheFormat: Bad format in credentials cache
"""
errno = 1106
format = _('Bad format in credentials cache')
class CannotResolveKDC(KerberosError):
"""
**1107** Raised when the KDC can't be resolved
For example:
>>> raise CannotResolveKDC()
Traceback (most recent call last):
...
CannotResolveKDC: Cannot resolve KDC for requested realm
"""
errno = 1107
format = _('Cannot resolve KDC for requested realm')
class SessionError(AuthenticationError):
    """
    **1200** Base class for Session errors (*1200 - 1299*).
    """
    errno = 1200
    format = _('Session error')
class InvalidSessionPassword(SessionError):
    """
    **1201** Raised when we cannot obtain a TGT for a principal.
    """
    errno = 1201
    format = _('Principal %(principal)s cannot be authenticated: %(message)s')
class PasswordExpired(InvalidSessionPassword):
"""
**1202** Raised when we cannot obtain a TGT for a principal because the password is expired.
"""
errno = 1202
class KrbPrincipalExpired(SessionError):
"""
**1203** Raised when Kerberos Principal is expired.
"""
errno = 1203
class UserLocked(SessionError):
"""
**1204** Raised when a user account is locked.
"""
errno = 1204
##############################################################################
# 2000 - 2999: Authorization errors
class AuthorizationError(PublicError):
"""
**2000** Base class for authorization errors (*2000 - 2999*).
"""
errno = 2000
class ACIError(AuthorizationError):
"""
**2100** Base class for ACI authorization errors (*2100 - 2199*).
"""
errno = 2100
format = _('Insufficient access: %(info)s')
##############################################################################
# 3000 - 3999: Invocation errors
class InvocationError(PublicError):
"""
**3000** Base class for command invocation errors (*3000 - 3999*).
"""
errno = 3000
class EncodingError(InvocationError):
"""
**3001** Raised when received text is incorrectly encoded.
"""
errno = 3001
class BinaryEncodingError(InvocationError):
"""
**3002** Raised when received binary data is incorrectly encoded.
"""
errno = 3002
class ZeroArgumentError(InvocationError):
"""
**3003** Raised when a command is called with arguments but takes none.
For example:
>>> raise ZeroArgumentError(name='ping')
Traceback (most recent call last):
...
ZeroArgumentError: command 'ping' takes no arguments
"""
errno = 3003
format = _("command '%(name)s' takes no arguments")
class MaxArgumentError(InvocationError):
    """
    **3004** Raised when a command is called with too many arguments.
    For example:
    >>> raise MaxArgumentError(name='user_add', count=2)
    Traceback (most recent call last):
    ...
    MaxArgumentError: command 'user_add' takes at most 2 arguments
    """
    errno = 3004
    def __init__(self, message=None, **kw):
        if message is None:
            # Select the singular or plural template based on kw['count'],
            # via gettext's ngettext (imported above as ``ungettext``).
            # A KeyError here means the caller forgot the required ``count``.
            format = ungettext(
                "command '%(name)s' takes at most %(count)d argument",
                "command '%(name)s' takes at most %(count)d arguments",
                kw['count']
            )
        else:
            # An explicit message overrides the generated template.
            format = None
        InvocationError.__init__(self, format, message, **kw)
class OptionError(InvocationError):
"""
**3005** Raised when a command is called with unknown options.
"""
errno = 3005
class OverlapError(InvocationError):
"""
**3006** Raised when arguments and options overlap.
For example:
>>> raise OverlapError(names=['givenname', 'login'])
Traceback (most recent call last):
...
OverlapError: overlapping arguments and options: ['givenname', 'login']
"""
errno = 3006
format = _("overlapping arguments and options: %(names)s")
class RequirementError(InvocationError):
"""
**3007** Raised when a required parameter is not provided.
For example:
>>> raise RequirementError(name='givenname')
Traceback (most recent call last):
...
RequirementError: 'givenname' is required
"""
errno = 3007
format = _("'%(name)s' is required")
class ConversionError(InvocationError):
"""
**3008** Raised when parameter value can't be converted to correct type.
For example:
>>> raise ConversionError(name='age', error=_(u'must be an integer'))
Traceback (most recent call last):
...
ConversionError: invalid 'age': must be an integer
"""
errno = 3008
format = _("invalid '%(name)s': %(error)s")
class ValidationError(InvocationError):
"""
**3009** Raised when a parameter value fails a validation rule.
For example:
>>> raise ValidationError(name='sn', error=_(u'can be at most 128 characters'))
Traceback (most recent call last):
...
ValidationError: invalid 'sn': can be at most 128 characters
"""
errno = 3009
format = _("invalid '%(name)s': %(error)s")
class NoSuchNamespaceError(InvocationError):
"""
**3010** Raised when an unknown namespace is requested.
For example:
>>> raise NoSuchNamespaceError(name='Plugins')
Traceback (most recent call last):
...
NoSuchNamespaceError: api has no such namespace: 'Plugins'
"""
errno = 3010
format = _("api has no such namespace: '%(name)s'")
class PasswordMismatch(InvocationError):
"""
**3011** Raise when password and password confirmation don't match.
"""
errno = 3011
format = _('Passwords do not match')
class NotImplementedError(InvocationError):
"""
**3012** Raise when a function hasn't been implemented.
"""
errno = 3012
format = _('Command not implemented')
class NotConfiguredError(InvocationError):
"""
**3013** Raise when there is no configuration
"""
errno = 3013
format = _('Client is not configured. Run ipa-client-install.')
class PromptFailed(InvocationError):
"""
**3014** Raise when an interactive prompt failed.
"""
errno = 3014
format = _('Could not get %(name)s interactively')
class DeprecationError(InvocationError):
"""
**3015** Raise when a command has been deprecated
For example:
>>> raise DeprecationError(name='hbacrule_add_sourcehost')
Traceback (most recent call last):
...
DeprecationError: Command 'hbacrule_add_sourcehost' has been deprecated
"""
errno = 3015
format = _("Command '%(name)s' has been deprecated")
class NotAForestRootError(InvocationError):
"""
**3016** Raised when an attempt to establish trust is done against non-root domain
Forest root domain has the same name as the forest itself
For example:
>>> raise NotAForestRootError(forest='example.test', domain='jointops.test')
Traceback (most recent call last):
...
NotAForestRootError: Domain 'jointops.test' is not a root domain for forest 'example.test'
"""
errno = 3016
format = _("Domain '%(domain)s' is not a root domain for forest '%(forest)s'")
##############################################################################
# 4000 - 4999: Execution errors
class ExecutionError(PublicError):
"""
**4000** Base class for execution errors (*4000 - 4999*).
"""
errno = 4000
class NotFound(ExecutionError):
"""
**4001** Raised when an entry is not found.
For example:
>>> raise NotFound(reason='no such user')
Traceback (most recent call last):
...
NotFound: no such user
"""
errno = 4001
rval = 2
format = _('%(reason)s')
class DuplicateEntry(ExecutionError):
"""
**4002** Raised when an entry already exists.
For example:
>>> raise DuplicateEntry
Traceback (most recent call last):
...
DuplicateEntry: This entry already exists
"""
errno = 4002
format = _('This entry already exists')
class HostService(ExecutionError):
"""
**4003** Raised when a host service principal is requested
For example:
>>> raise HostService
Traceback (most recent call last):
...
HostService: You must enroll a host in order to create a host service
"""
errno = 4003
format = _('You must enroll a host in order to create a host service')
class MalformedServicePrincipal(ExecutionError):
"""
**4004** Raised when a service principal is not of the form: service/fully-qualified host name
For example:
>>> raise MalformedServicePrincipal(reason=_('missing service'))
Traceback (most recent call last):
...
MalformedServicePrincipal: Service principal is not of the form: service/fully-qualified host name: missing service
"""
errno = 4004
format = _('Service principal is not of the form: service/fully-qualified host name: %(reason)s')
class RealmMismatch(ExecutionError):
"""
**4005** Raised when the requested realm does not match the IPA realm
For example:
>>> raise RealmMismatch
Traceback (most recent call last):
...
RealmMismatch: The realm for the principal does not match the realm for this IPA server
"""
errno = 4005
format = _('The realm for the principal does not match the realm for this IPA server')
class RequiresRoot(ExecutionError):
"""
**4006** Raised when a command requires the unix super-user to run
For example:
>>> raise RequiresRoot
Traceback (most recent call last):
...
RequiresRoot: This command requires root access
"""
errno = 4006
format = _('This command requires root access')
class AlreadyPosixGroup(ExecutionError):
"""
**4007** Raised when a group is already a posix group
For example:
>>> raise AlreadyPosixGroup
Traceback (most recent call last):
...
AlreadyPosixGroup: This is already a posix group
"""
errno = 4007
format = _('This is already a posix group')
class MalformedUserPrincipal(ExecutionError):
"""
**4008** Raised when a user principal is not of the form: user@REALM
For example:
>>> raise MalformedUserPrincipal(principal='jsmith@@EXAMPLE.COM')
Traceback (most recent call last):
...
MalformedUserPrincipal: Principal is not of the form user@REALM: 'jsmith@@EXAMPLE.COM'
"""
errno = 4008
format = _("Principal is not of the form user@REALM: '%(principal)s'")
class AlreadyActive(ExecutionError):
"""
**4009** Raised when an entry is made active that is already active
For example:
>>> raise AlreadyActive()
Traceback (most recent call last):
...
AlreadyActive: This entry is already enabled
"""
errno = 4009
format = _('This entry is already enabled')
class AlreadyInactive(ExecutionError):
"""
**4010** Raised when an entry is made inactive that is already inactive
For example:
>>> raise AlreadyInactive()
Traceback (most recent call last):
...
AlreadyInactive: This entry is already disabled
"""
errno = 4010
format = _('This entry is already disabled')
class HasNSAccountLock(ExecutionError):
"""
**4011** Raised when an entry has the nsAccountLock attribute set
For example:
>>> raise HasNSAccountLock()
Traceback (most recent call last):
...
HasNSAccountLock: This entry cannot be enabled or disabled
"""
errno = 4011
format = _('This entry cannot be enabled or disabled')
class NotGroupMember(ExecutionError):
"""
**4012** Raised when a non-member is attempted to be removed from a group
For example:
>>> raise NotGroupMember()
Traceback (most recent call last):
...
NotGroupMember: This entry is not a member
"""
errno = 4012
format = _('This entry is not a member')
class RecursiveGroup(ExecutionError):
"""
**4013** Raised when a group is added as a member of itself
For example:
>>> raise RecursiveGroup()
Traceback (most recent call last):
...
RecursiveGroup: A group may not be a member of itself
"""
errno = 4013
format = _('A group may not be a member of itself')
class AlreadyGroupMember(ExecutionError):
"""
**4014** Raised when a member is attempted to be re-added to a group
For example:
>>> raise AlreadyGroupMember()
Traceback (most recent call last):
...
AlreadyGroupMember: This entry is already a member
"""
errno = 4014
format = _('This entry is already a member')
class Base64DecodeError(ExecutionError):
"""
**4015** Raised when a base64-encoded blob cannot decoded
For example:
>>> raise Base64DecodeError(reason=_('Incorrect padding'))
Traceback (most recent call last):
...
Base64DecodeError: Base64 decoding failed: Incorrect padding
"""
errno = 4015
format = _('Base64 decoding failed: %(reason)s')
class RemoteRetrieveError(ExecutionError):
"""
**4016** Raised when retrieving data from a remote server fails
For example:
>>> raise RemoteRetrieveError(reason=_("Failed to get certificate chain."))
Traceback (most recent call last):
...
RemoteRetrieveError: Failed to get certificate chain.
"""
errno = 4016
format = _('%(reason)s')
class SameGroupError(ExecutionError):
"""
**4017** Raised when adding a group as a member of itself
For example:
>>> raise SameGroupError()
Traceback (most recent call last):
...
SameGroupError: A group may not be added as a member of itself
"""
errno = 4017
format = _('A group may not be added as a member of itself')
class DefaultGroupError(ExecutionError):
"""
**4018** Raised when removing the default user group
For example:
>>> raise DefaultGroupError()
Traceback (most recent call last):
...
DefaultGroupError: The default users group cannot be removed
"""
errno = 4018
format = _('The default users group cannot be removed')
class ManagedGroupError(ExecutionError):
"""
**4020** Raised when a managed group is deleted
For example:
>>> raise ManagedGroupError()
Traceback (most recent call last):
...
ManagedGroupError: Deleting a managed group is not allowed. It must be detached first.
"""
errno = 4020
format = _('Deleting a managed group is not allowed. It must be detached first.')
class ManagedPolicyError(ExecutionError):
"""
**4021** Raised when password policy is assigned to a managed group
For example:
>>> raise ManagedPolicyError()
Traceback (most recent call last):
...
ManagedPolicyError: A managed group cannot have a password policy.
"""
errno = 4021
format = _('A managed group cannot have a password policy.')
class FileError(ExecutionError):
"""
**4022** Errors when dealing with files
For example:
>>> raise FileError(reason=_("cannot write file \'test\'"))
Traceback (most recent call last):
...
FileError: cannot write file 'test'
"""
errno = 4022
format = _('%(reason)s')
class NoCertificateError(ExecutionError):
"""
**4023** Raised when trying to retrieve a certificate that doesn't exist.
For example:
>>> raise NoCertificateError(entry='ipa.example.com')
Traceback (most recent call last):
...
NoCertificateError: 'ipa.example.com' doesn't have a certificate.
"""
errno = 4023
format = _('\'%(entry)s\' doesn\'t have a certificate.')
class ManagedGroupExistsError(ExecutionError):
"""
**4024** Raised when adding a user and its managed group exists
For example:
>>> raise ManagedGroupExistsError(group=u'engineering')
Traceback (most recent call last):
...
ManagedGroupExistsError: Unable to create private group. A group 'engineering' already exists.
"""
errno = 4024
format = _('Unable to create private group. A group \'%(group)s\' already exists.')
class ReverseMemberError(ExecutionError):
"""
**4025** Raised when verifying that all reverse members have been added or removed.
For example:
>>> raise ReverseMemberError(verb=_('added'), exc=_("Group 'foo' not found."))
Traceback (most recent call last):
...
ReverseMemberError: A problem was encountered when verifying that all members were added: Group 'foo' not found.
"""
errno = 4025
format = _('A problem was encountered when verifying that all members were %(verb)s: %(exc)s')
class AttrValueNotFound(ExecutionError):
"""
**4026** Raised when an Attribute/Value pair is not found.
For example:
>>> raise AttrValueNotFound(attr='ipasudoopt', value='authenticate')
Traceback (most recent call last):
...
AttrValueNotFound: ipasudoopt does not contain 'authenticate'
"""
errno = 4026
rval = 1
format = _('%(attr)s does not contain \'%(value)s\'')
class SingleMatchExpected(ExecutionError):
"""
**4027** Raised when a search should return a single match
For example:
>>> raise SingleMatchExpected(found=9)
Traceback (most recent call last):
...
SingleMatchExpected: The search criteria was not specific enough. Expected 1 and found 9.
"""
errno = 4027
rval = 1
format = _('The search criteria was not specific enough. Expected 1 and found %(found)d.')
class AlreadyExternalGroup(ExecutionError):
"""
**4028** Raised when a group is already an external member group
For example:
>>> raise AlreadyExternalGroup
Traceback (most recent call last):
...
AlreadyExternalGroup: This group already allows external members
"""
errno = 4028
format = _('This group already allows external members')
class ExternalGroupViolation(ExecutionError):
"""
**4029** Raised when a group is already an external member group
and an attempt is made to use it as posix group
For example:
>>> raise ExternalGroupViolation
Traceback (most recent call last):
...
ExternalGroupViolation: This group cannot be posix because it is external
"""
errno = 4029
format = _('This group cannot be posix because it is external')
class PosixGroupViolation(ExecutionError):
"""
**4030** Raised when a group is already a posix group
and cannot be converted to external
For example:
>>> raise PosixGroupViolation
Traceback (most recent call last):
...
PosixGroupViolation: This is already a posix group and cannot be converted to external one
"""
errno = 4030
format = _('This is already a posix group and cannot be converted to external one')
class EmptyResult(NotFound):
"""
**4031** Raised when a LDAP search returned no results.
For example:
>>> raise EmptyResult(reason='no matching entry found')
Traceback (most recent call last):
...
EmptyResult: no matching entry found
"""
errno = 4031
class InvalidDomainLevelError(ExecutionError):
"""
**4032** Raised when a operation could not be completed due to a invalid
domain level.
For example:
>>> raise InvalidDomainLevelError(reason='feature requires domain level 4')
Traceback (most recent call last):
...
InvalidDomainLevelError: feature requires domain level 4
"""
errno = 4032
format = _('%(reason)s')
class ServerRemovalError(ExecutionError):
"""
**4033** Raised when a removal of IPA server from managed topology fails
For example:
>>> raise ServerRemovalError(reason='Removal disconnects topology')
Traceback (most recent call last):
...
ServerRemovalError: Server removal aborted: Removal disconnects topology
"""
errno = 4033
format = _('Server removal aborted: %(reason)s.')
class OperationNotSupportedForPrincipalType(ExecutionError):
"""
**4034** Raised when an operation is not supported for a principal type
"""
errno = 4034
format = _(
'%(operation)s is not supported for %(principal_type)s principals')
class HTTPRequestError(RemoteRetrieveError):
"""
**4035** Raised when an HTTP request fails. Includes the response
status in the ``status`` attribute.
"""
errno = 4035
format = _('Request failed with status %(status)s: %(reason)s')
class RedundantMappingRule(SingleMatchExpected):
"""
**4036** Raised when more than one rule in a CSR generation ruleset matches
a particular helper.
For example:
>>> raise RedundantMappingRule(ruleset='syntaxSubject', helper='certutil')
Traceback (most recent call last):
...
RedundantMappingRule: Mapping ruleset "syntaxSubject" has more than one
rule for the certutil helper.
"""
errno = 4036
format = _('Mapping ruleset "%(ruleset)s" has more than one rule for the'
' %(helper)s helper')
class CSRTemplateError(ExecutionError):
"""
**4037** Raised when evaluation of a CSR generation template fails
"""
errno = 4037
format = _('%(reason)s')
class AlreadyContainsValueError(ExecutionError):
"""
**4038** Raised when BaseLDAPAddAttribute operation fails because one
or more new values are already present.
"""
errno = 4038
format = _("'%(attr)s' already contains one or more values")
class BuiltinError(ExecutionError):
"""
**4100** Base class for builtin execution errors (*4100 - 4199*).
"""
errno = 4100
class HelpError(BuiltinError):
"""
**4101** Raised when requesting help for an unknown topic.
For example:
>>> raise HelpError(topic='newfeature')
Traceback (most recent call last):
...
HelpError: no command nor help topic 'newfeature'
"""
errno = 4101
format = _("no command nor help topic '%(topic)s'")
class LDAPError(ExecutionError):
"""
**4200** Base class for LDAP execution errors (*4200 - 4299*).
"""
errno = 4200
class MidairCollision(ExecutionError):
"""
**4201** Raised when a change collides with another change
For example:
>>> raise MidairCollision()
Traceback (most recent call last):
...
MidairCollision: change collided with another change
"""
errno = 4201
format = _('change collided with another change')
class EmptyModlist(ExecutionError):
"""
**4202** Raised when an LDAP update makes no changes
For example:
>>> raise EmptyModlist()
Traceback (most recent call last):
...
EmptyModlist: no modifications to be performed
"""
errno = 4202
format = _('no modifications to be performed')
class DatabaseError(ExecutionError):
"""
**4203** Raised when an LDAP error is not otherwise handled
For example:
>>> raise DatabaseError(desc=_("Can't contact LDAP server"), info=_('Info goes here'))
Traceback (most recent call last):
...
DatabaseError: Can't contact LDAP server: Info goes here
"""
errno = 4203
format = _('%(desc)s: %(info)s')
class LimitsExceeded(ExecutionError):
"""
**4204** Raised when search limits are exceeded.
For example:
>>> raise LimitsExceeded()
Traceback (most recent call last):
...
LimitsExceeded: limits exceeded for this query
"""
errno = 4204
format = _('limits exceeded for this query')
class ObjectclassViolation(ExecutionError):
"""
**4205** Raised when an entry is missing a required attribute or objectclass
For example:
>>> raise ObjectclassViolation(info=_('attribute "krbPrincipalName" not allowed'))
Traceback (most recent call last):
...
ObjectclassViolation: attribute "krbPrincipalName" not allowed
"""
errno = 4205
format = _('%(info)s')
class NotAllowedOnRDN(ExecutionError):
"""
**4206** Raised when an RDN value is modified.
For example:
>>> raise NotAllowedOnRDN()
Traceback (most recent call last):
...
NotAllowedOnRDN: modifying primary key is not allowed
"""
errno = 4206
format = _('modifying primary key is not allowed')
class OnlyOneValueAllowed(ExecutionError):
"""
**4207** Raised when trying to set more than one value to single-value attributes
For example:
>> raise OnlyOneValueAllowed(attr='ipasearchtimelimit')
Traceback (most recent call last):
...
OnlyOneValueAllowed: ipasearchtimelimit: Only one value allowed.
"""
errno = 4207
format = _('%(attr)s: Only one value allowed.')
class InvalidSyntax(ExecutionError):
"""
**4208** Raised when an value does not match the required syntax
For example:
>> raise InvalidSyntax(attr='ipahomesrootdir')
Traceback (most recent call last):
...
InvalidSyntax: ipahomesrootdir: Invalid syntax
"""
errno = 4208
format = _('%(attr)s: Invalid syntax.')
class BadSearchFilter(ExecutionError):
"""
**4209** Raised when an invalid LDAP search filter is used
For example:
>>> raise BadSearchFilter(info=_('invalid syntax'))
Traceback (most recent call last):
...
BadSearchFilter: Bad search filter invalid syntax
"""
errno = 4209
format = _('Bad search filter %(info)s')
class NotAllowedOnNonLeaf(ExecutionError):
"""
**4210** Raised when operation is not allowed on a non-leaf entry
For example:
>>> raise NotAllowedOnNonLeaf()
Traceback (most recent call last):
...
NotAllowedOnNonLeaf: Not allowed on non-leaf entry
"""
errno = 4210
format = _('Not allowed on non-leaf entry')
class DatabaseTimeout(DatabaseError):
"""
**4211** Raised when an LDAP call times out
For example:
>>> raise DatabaseTimeout()
Traceback (most recent call last):
...
DatabaseTimeout: LDAP timeout
"""
errno = 4211
format = _('LDAP timeout')
class TaskTimeout(DatabaseError):
"""
**4213** Raised when an LDAP task times out
For example:
>>> raise TaskTimeout(task='Automember', task_dn='')
Traceback (most recent call last):
...
TaskTimeout: Automember LDAP task timeout, Task DN: ''
"""
errno = 4213
format = _("%(task)s LDAP task timeout, Task DN: '%(task_dn)s'")
class TimeLimitExceeded(LimitsExceeded):
"""
**4214** Raised when time limit for the operation is exceeded.
"""
errno = 4214
format = _('Configured time limit exceeded')
class SizeLimitExceeded(LimitsExceeded):
"""
**4215** Raised when size limit for the operation is exceeded.
"""
errno = 4215
format = _('Configured size limit exceeded')
class AdminLimitExceeded(LimitsExceeded):
"""
**4216** Raised when server limit imposed by administrative authority was
exceeded
"""
errno = 4216
format = _('Configured administrative server limit exceeded')
class CertificateError(ExecutionError):
"""
**4300** Base class for Certificate execution errors (*4300 - 4399*).
"""
errno = 4300
class CertificateOperationError(CertificateError):
"""
**4301** Raised when a certificate operation cannot be completed
For example:
>>> raise CertificateOperationError(error=_(u'bad serial number'))
Traceback (most recent call last):
...
CertificateOperationError: Certificate operation cannot be completed: bad serial number
"""
errno = 4301
format = _('Certificate operation cannot be completed: %(error)s')
class CertificateFormatError(CertificateError):
    """
    **4302** Raised when a certificate is badly formatted
    For example:
    >>> raise CertificateFormatError(error=_(u'improperly formatted DER-encoded certificate'))
    Traceback (most recent call last):
      ...
    CertificateFormatError: Certificate format error: improperly formatted DER-encoded certificate
    """
    errno = 4302
    format = _('Certificate format error: %(error)s')
class MutuallyExclusiveError(ExecutionError):
"""
**4303** Raised when an operation would result in setting two attributes which are mutually exlusive.
For example:
>>> raise MutuallyExclusiveError(reason=_(u'hosts may not be added when hostcategory=all'))
Traceback (most recent call last):
...
MutuallyExclusiveError: hosts may not be added when hostcategory=all
"""
errno = 4303
format = _('%(reason)s')
class NonFatalError(ExecutionError):
"""
**4304** Raised when part of an operation succeeds and the part that failed isn't critical.
For example:
>>> raise NonFatalError(reason=_(u'The host was added but the DNS update failed'))
Traceback (most recent call last):
...
NonFatalError: The host was added but the DNS update failed
"""
errno = 4304
format = _('%(reason)s')
class AlreadyRegisteredError(ExecutionError):
"""
**4305** Raised when registering a user that is already registered.
For example:
>>> raise AlreadyRegisteredError()
Traceback (most recent call last):
...
AlreadyRegisteredError: Already registered
"""
errno = 4305
format = _('Already registered')
class NotRegisteredError(ExecutionError):
"""
**4306** Raised when not registered and a registration is required
For example:
>>> raise NotRegisteredError()
Traceback (most recent call last):
...
NotRegisteredError: Not registered yet
"""
errno = 4306
format = _('Not registered yet')
class DependentEntry(ExecutionError):
"""
**4307** Raised when an entry being deleted has dependencies
For example:
>>> raise DependentEntry(label=u'SELinux User Map', key=u'test', dependent=u'test1')
Traceback (most recent call last):
...
DependentEntry: test cannot be deleted because SELinux User Map test1 requires it
"""
errno = 4307
format = _('%(key)s cannot be deleted because %(label)s %(dependent)s requires it')
class LastMemberError(ExecutionError):
"""
**4308** Raised when an entry being deleted or disabled is last member of a protected group
For example:
>>> raise LastMemberError(key=u'admin', label=u'group', container=u'admins')
Traceback (most recent call last):
...
LastMemberError: admin cannot be deleted or disabled because it is the last member of group admins
"""
errno = 4308
format = _('%(key)s cannot be deleted or disabled because it is the last member of %(label)s %(container)s')
class ProtectedEntryError(ExecutionError):
"""
**4309** Raised when an entry being deleted or modified in a forbidden way is protected
For example:
>>> raise ProtectedEntryError(label=u'group', key=u'admins', reason=_(u'privileged group'))
Traceback (most recent call last):
...
ProtectedEntryError: group admins cannot be deleted/modified: privileged group
"""
errno = 4309
format = _('%(label)s %(key)s cannot be deleted/modified: %(reason)s')
class CertificateInvalidError(CertificateError):
"""
**4310** Raised when a certificate is not valid
For example:
>>> raise CertificateInvalidError(name=_(u'CA'))
Traceback (most recent call last):
...
CertificateInvalidError: CA certificate is not valid
"""
errno = 4310
format = _('%(name)s certificate is not valid')
class SchemaUpToDate(ExecutionError):
    """
    **4311** Raised by server when client asks for metadata but
    already has current version. Exception's attribute 'fingerprint'
    identifies schema version to use. Attribute 'ttl' specifies how
    long (in seconds) before client should check for schema update.
    For example:
    >>> raise SchemaUpToDate(fingerprint=u'deadbeef', ttl=3600)
    Traceback (most recent call last):
      ...
    SchemaUpToDate: Schema is up to date (FP 'deadbeef', TTL 3600 s)
    """
    errno = 4311
    format = _("Schema is up to date (FP '%(fingerprint)s', TTL %(ttl)s s)")
class DNSError(ExecutionError):
"""
**4400** Base class for DNS execution errors (*4400 - 4499*).
These are typically wrapper exceptions around dns.exception.DNSException.
"""
errno = 4400
class DNSNotARecordError(DNSError):
"""
**4019** Raised when a hostname is not a DNS A/AAAA record
For example:
>>> raise DNSNotARecordError(hostname='x')
Traceback (most recent call last):
...
DNSNotARecordError: Host 'x' does not have corresponding DNS A/AAAA record
"""
errno = 4019 # this exception was defined before DNSError
format = _(
'Host \'%(hostname)s\' does not have corresponding DNS A/AAAA record')
class DNSDataMismatch(DNSError):
"""
**4212** Raised when an DNS query didn't return expected answer
in a configured time limit.
For example:
>>> raise DNSDataMismatch(expected="zone3.test. 86400 IN A 192.0.2.1", \
got="zone3.test. 86400 IN A 192.168.1.1")
Traceback (most recent call last):
...
DNSDataMismatch: DNS check failed: Expected {zone3.test. 86400 IN A 192.0.2.1} got {zone3.test. 86400 IN A 192.168.1.1}
"""
errno = 4212 # this exception was defined before DNSError
format = _('DNS check failed: Expected {%(expected)s} got {%(got)s}')
class DNSResolverError(DNSError):
    """
    **4401** Wrapper around dns.exception.DNSException.
    Raised when an error occurred in dns.resolver.
    For example:
    >>> raise DNSResolverError(exception=ValueError("this is bad"))
    Traceback (most recent call last):
      ...
    DNSResolverError: this is bad
    """
    errno = 4401
    format = _('%(exception)s')
class TrustError(ExecutionError):
"""
**4500** Base class for trust execution errors (*4500 - 4599*).
These are typically instantiated when there is an error in establishing or
modifying a trust to another forest.
"""
errno = 4500
class TrustTopologyConflictError(TrustError):
"""
**4501** Raised when an attempt to establish trust fails with a topology
conflict against another forest the target forest trusts
For example:
>>> raise TrustTopologyConflictError(forest='example.test',
conflict='my.ad.test',
domains=['ad.test'])
Traceback (most recent call last):
...
TrustTopologyConflictError: Forest 'example.test' has existing trust to forest(s) ['ad.test'] which prevents a trust to 'my.ad.test'
"""
errno = 4501
format = _("Forest '%(forest)s' has existing trust to forest(s) "
"%(domains)s which prevents a trust to '%(conflict)s'")
##############################################################################
# 5000 - 5999: Generic errors
class GenericError(PublicError):
"""
**5000** Base class for errors that don't fit elsewhere (*5000 - 5999*).
"""
errno = 5000
# All public error classes defined in this module, sorted by errno.
public_errors = tuple(sorted(
    messages.iter_messages(globals(), PublicError), key=lambda E: E.errno))
# errno -> error class lookup table for the classes above.
errors_by_code = dict((e.errno, e) for e in public_errors)
if __name__ == '__main__':
    messages.print_report('public errors', public_errors)
| 51,557
|
Python
|
.py
| 1,399
| 31.440315
| 144
| 0.667932
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,649
|
facts.py
|
freeipa_freeipa/ipalib/facts.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""
Facts about the installation
"""
import logging
import os
from . import sysrestore
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
# Used to determine install status
IPA_MODULES = [
'httpd', 'kadmin', 'dirsrv', 'pki-tomcatd', 'install', 'krb5kdc', 'named']
def is_ipa_configured():
    """Report whether the IPA server has been configured on this host.

    Prefers the explicit 'installation' state flag; for installations
    that predate the flag, falls back to probing per-module state and
    the sysrestore file store.
    """
    state = sysrestore.StateFile(paths.SYSRESTORE)
    if state.has_state('installation'):
        return state.get_state('installation', 'complete')

    # Fall back to older method in case this is an existing installation
    configured = False
    for module in IPA_MODULES:
        if state.has_state(module):
            logger.debug('%s is configured', module)
            configured = True
        else:
            logger.debug('%s is not configured', module)

    files = sysrestore.FileStore(paths.SYSRESTORE)
    if files.has_files():
        logger.debug('filestore has files')
        configured = True
    else:
        logger.debug('filestore is tracking no files')

    return configured
def is_ipa_client_configured(on_master=False):
    """Report whether the IPA client is configured on this host.

    The client is considered not installed when nothing is backed up
    and the default.conf file does not exist.  With on_master=True the
    existence of default.conf is not taken into consideration, since it
    has already been created by ipa-server-install.
    """
    statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)
    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)

    complete = statestore.get_state('installation', 'complete')
    if complete is not None:
        return complete

    # Fall back to the old detection: backed-up files, or (off-master)
    # the presence of default.conf.
    if fstore.has_files():
        return True
    return not on_master and os.path.exists(paths.IPA_DEFAULT_CONF)
| 1,954
|
Python
|
.py
| 55
| 30.054545
| 78
| 0.694843
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,650
|
crud.py
|
freeipa_freeipa/ipalib/crud.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base classes for standard CRUD operations.
These base classes are for `Method` plugins that provide standard
Create, Retrieve, Updated, and Delete operations (CRUD) for their corresponding
`Object` plugin. In particuar, these base classes provide logic to
automatically create the plugin args and options by inspecting the params on
their corresponding `Object` plugin. This provides a single point of definition
for LDAP attributes and enforces a simple, consistent API for CRUD operations.
For example, say we want CRUD operations on a hypothetical "user" entry. First
we need an `Object` plugin:
>>> from ipalib import Object, Str
>>> class user(Object):
... takes_params = (
... Str('login', primary_key=True),
... Str('first'),
... Str('last'),
... Str('ipauniqueid', flags=['no_create', 'no_update']),
... )
...
Next we need `Create`, `Retrieve`, `Updated`, and `Delete` plugins, and
optionally a `Search` plugin. For brevity, we'll just define `Create` and
`Retrieve` plugins:
>>> from ipalib import crud
>>> class user_add(crud.Create):
... pass
...
>>> class user_show(crud.Retrieve):
... pass
...
Now we'll register the plugins and finalize the `plugable.API` instance:
>>> from ipalib import create_api
>>> api = create_api()
>>> api.add_plugin(user)
>>> api.add_plugin(user_add)
>>> api.add_plugin(user_show)
>>> api.finalize()
First, notice that our ``user`` `Object` has the params we defined with the
``takes_params`` tuple:
>>> list(api.Object.user.params)
['login', 'first', 'last', 'ipauniqueid']
>>> api.Object.user.params.login
Str('login', primary_key=True)
Although we defined neither ``takes_args`` nor ``takes_options`` for our
``user_add`` plugin, the `Create` base class automatically generated them for
us:
>>> list(api.Command.user_add.args)
['login']
>>> list(api.Command.user_add.options)
['first', 'last', 'all', 'raw', 'version']
Notice that ``'ipauniqueid'`` isn't included in the options for our ``user_add``
plugin. This is because of the ``'no_create'`` flag we used when defining the
``ipauniqueid`` param. Often times there are LDAP attributes that are
automatically created by the server and therefor should not be supplied as an
option to the `Create` plugin. Often these same attributes shouldn't be
update-able either, in which case you can also supply the ``'no_update'`` flag,
as we did with our ``ipauniqueid`` param. Lastly, you can also use the ``'no_search'`` flag for attributes that shouldn't be search-able (because, for
example, the attribute isn't indexed).
As with our ``user_add` plugin, we defined neither ``takes_args`` nor
``takes_options`` for our ``user_show`` plugin; instead the `Retrieve` base
class created them for us:
>>> list(api.Command.user_show.args)
['login']
>>> list(api.Command.user_show.options)
['all', 'raw', 'version']
As you can see, `Retrieve` plugins take a single argument (the primary key) and
no options. If needed, you can still specify options for your `Retrieve` plugin
with a ``takes_options`` tuple.
Flags like ``'no_create'`` remove LDAP attributes from those that can be
supplied as *input* to a `Method`, but they don't effect the attributes that can
be returned as *output*. Regardless of what flags have been used, the output
entry (or list of entries) can contain all the attributes defined on the
`Object` plugin (in our case, the above ``user.params``).
For example, compare ``user.params`` with ``user_add.output_params`` and
``user_show.output_params``:
>>> list(api.Object.user.params)
['login', 'first', 'last', 'ipauniqueid']
>>> list(api.Command.user_add.output_params)
['login', 'first', 'last', 'ipauniqueid']
>>> list(api.Command.user_show.output_params)
['login', 'first', 'last', 'ipauniqueid']
Note that the above are all equal.
"""
from ipalib.frontend import Method
from ipalib import backend
from ipalib import parameters
from ipalib import output
from ipalib.text import _
class Create(Method):
    """
    Create a new entry.

    Args are derived from the object's primary key; options from the
    remaining object params, honoring the 'no_create', 'ask_create' and
    'optional_create' flags.
    """
    has_output = output.standard_entry

    def __adapt(self, param, **overrides):
        # Params flagged 'optional_create' become optional for this command.
        if 'optional_create' in param.flags:
            overrides['required'] = False
        if not overrides:
            return param
        return param.clone(**overrides)

    def get_args(self):
        if self.obj.primary_key:
            yield self.__adapt(self.obj.primary_key, attribute=True)
        for extra_arg in super(Create, self).get_args():
            yield self.__adapt(extra_arg)

    def get_options(self):
        if self.extra_options_first:
            for extra in super(Create, self).get_options():
                yield self.__adapt(extra)
        for option in self.obj.params_minus(self.args):
            if 'no_create' in option.flags:
                continue
            attribute = 'virtual_attribute' not in option.flags
            if 'ask_create' in option.flags:
                yield option.clone(
                    attribute=attribute, query=False, required=False,
                    autofill=False, alwaysask=True
                )
            else:
                yield self.__adapt(option, attribute=attribute)
        if not self.extra_options_first:
            for extra in super(Create, self).get_options():
                yield self.__adapt(extra)
class PKQuery(Method):
    """
    Base class for `Retrieve`, `Update`, and `Delete`.

    Prepends the object's primary key, in query mode, to the inherited
    argument list.
    """

    def get_args(self):
        pk = self.obj.primary_key
        if pk:
            # Don't enforce rules on the primary key so we can reference
            # any stored entry, legal or not
            yield pk.clone(attribute=True, query=True)
        for extra_arg in super(PKQuery, self).get_args():
            yield extra_arg
class Retrieve(PKQuery):
    """
    Retrieve an entry by its primary key.

    The single positional argument (the primary key) is supplied by
    `PKQuery.get_args`; the result uses the standard single-entry
    output schema.
    """
    has_output = output.standard_entry
class Update(PKQuery):
    """
    Update one or more attributes on an entry.

    Options are derived from the object's non-primary-key params,
    honoring the 'no_update', 'ask_update' and 'req_update' flags.
    """
    has_output = output.standard_entry

    def get_options(self):
        if self.extra_options_first:
            for extra in super(Update, self).get_options():
                yield extra
        for option in self.obj.params_minus_pk():
            flags = option.flags
            attribute = 'virtual_attribute' not in flags
            if option.required:
                # Required options turn into non-required, since not specifying
                # them means that they are not changed.
                # However, they cannot be empty (i.e. explicitly set to None).
                flags = flags.union(['nonempty'])
            if 'no_update' in flags:
                continue
            if 'ask_update' in flags:
                yield option.clone(
                    attribute=attribute, query=False, required=False,
                    autofill=False, alwaysask=True, flags=flags,
                )
            elif 'req_update' in flags:
                yield option.clone(
                    attribute=attribute, required=True, alwaysask=False,
                    flags=flags,
                )
            else:
                yield option.clone(
                    attribute=attribute, required=False,
                    autofill=False, flags=flags,
                )
        if not self.extra_options_first:
            for extra in super(Update, self).get_options():
                yield extra
class Delete(PKQuery):
    """
    Delete one or more entries.

    The primary-key argument is supplied by `PKQuery.get_args`; the
    result uses the standard deletion output schema.
    """
    has_output = output.standard_delete
class Search(Method):
    """
    Retrieve all entries that match a given search criteria.

    Adds an optional free-text 'criteria' argument; options are derived
    from the object's params, honoring the 'no_search' and 'ask_search'
    flags.  Searchable Flag params are retyped to Bool.
    """
    has_output = output.standard_list_of_entries

    def get_args(self):
        yield parameters.Str(
            'criteria?', noextrawhitespace=False,
            doc=_('A string searched in all relevant object attributes'))
        for extra_arg in super(Search, self).get_args():
            yield extra_arg

    def get_options(self):
        if self.extra_options_first:
            for extra in super(Search, self).get_options():
                yield extra
        for option in self.obj.params_minus(self.args):
            if 'no_search' in option.flags:
                continue
            attribute = 'virtual_attribute' not in option.flags
            if 'ask_search' in option.flags:
                yield option.clone(
                    attribute=attribute, query=True, required=False,
                    autofill=False, alwaysask=True
                )
            elif isinstance(option, parameters.Flag):
                yield option.clone_retype(
                    option.name, parameters.Bool,
                    attribute=attribute, query=True, required=False,
                    autofill=False
                )
            else:
                yield option.clone(
                    attribute=attribute, query=True, required=False,
                    autofill=False
                )
        if not self.extra_options_first:
            for extra in super(Search, self).get_options():
                yield extra
class CrudBackend(backend.Connectible):
    """
    Base class defining the generic CRUD backend API.

    Subclasses implement create/retrieve/update/delete/search against a
    concrete backing store; every method here raises NotImplementedError.
    """
    def create(self, **kw):
        """
        Create a new entry.
        This method should take keyword arguments representing the
        attributes the created entry will have.
        If this method constructs the primary_key internally, it should raise
        an exception if the primary_key was passed. Likewise, if this method
        requires the primary_key to be passed in from the caller, it should
        raise an exception if the primary key was *not* passed.
        This method should return a dict of the exact entry as it was created
        in the backing store, including any automatically created attributes.
        """
        raise NotImplementedError('%s.create()' % self.name)
    def retrieve(self, primary_key, attributes):
        """
        Retrieve an existing entry.
        This method should take two arguments: the primary_key of the
        entry in question and a list of the attributes to be retrieved.
        If the list of attributes is None then all non-operational
        attributes will be returned.
        If such an entry exists, this method should return a dict
        representing that entry. If no such entry exists, this method
        should return None.
        """
        raise NotImplementedError('%s.retrieve()' % self.name)
    def update(self, primary_key, **kw):
        """
        Update an existing entry.
        This method should take one required argument, the primary_key of the
        entry to modify, plus optional keyword arguments for each of the
        attributes being updated.
        This method should return a dict representing the entry as it now
        exists in the backing store. If no such entry exists, this method
        should return None.
        """
        raise NotImplementedError('%s.update()' % self.name)
    def delete(self, primary_key):
        """
        Delete an existing entry.
        This method should take one required argument, the primary_key of the
        entry to delete.
        """
        raise NotImplementedError('%s.delete()' % self.name)
    def search(self, **kw):
        """
        Return entries matching specific criteria.
        This method should take keyword arguments representing the search
        criteria. If a key is the name of an entry attribute, the value
        should be treated as a filter on that attribute. The meaning of
        keys outside this namespace is left to the implementation.
        This method should return an iterable containing the matched
        entries, where each entry is a dict. If no entries are matched,
        this method should return an empty iterable.
        """
        raise NotImplementedError('%s.search()' % self.name)
| 12,590
|
Python
|
.py
| 289
| 36.259516
| 151
| 0.656671
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,651
|
util.py
|
freeipa_freeipa/ipalib/util.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions.
"""
from __future__ import (
absolute_import,
print_function,
)
import logging
import os
import socket
import re
import decimal
import dns
import encodings
import sys
import ssl
import termios
import fcntl
import shutil
import struct
import subprocess
import netaddr
from dns import rdatatype
from dns.exception import DNSException
from dns.resolver import NXDOMAIN
from netaddr.core import AddrFormatError
import six
try:
from httplib import HTTPSConnection
except ImportError:
# Python 3
from http.client import HTTPSConnection
from ipalib import errors, messages
from ipalib.constants import (
DOMAIN_LEVEL_0,
TLS_VERSIONS, TLS_VERSION_MINIMAL, TLS_VERSION_MAXIMAL,
TLS_VERSION_DEFAULT_MIN, TLS_VERSION_DEFAULT_MAX,
)
from ipalib.facts import is_ipa_client_configured
from ipalib.text import _
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipapython import ipautil
from ipapython.ssh import SSHPublicKey
from ipapython.dn import DN, RDN
from ipapython.dnsutil import (
DNSName,
DNSResolver,
resolve,
resolve_ip_addresses,
)
from ipapython.admintool import ScriptError
from ipapython.kerberos import Principal
if sys.version_info >= (3, 2):
import reprlib
else:
reprlib = None
if six.PY3:
unicode = str
_IPA_CLIENT_SYSRESTORE = "/var/lib/ipa-client/sysrestore"
_IPA_DEFAULT_CONF = "/etc/ipa/default.conf"
logger = logging.getLogger(__name__)
def json_serialize(obj):
    """Recursively convert *obj* into JSON-serializable primitives.

    Lists/tuples and dicts are converted element-wise; int, bool, float,
    str and None pass through; bytes are decoded as UTF-8; Decimal and
    DN values are stringified.  Other objects may opt in by providing a
    __json__() method; anything else degrades to an empty string.
    """
    if isinstance(obj, (list, tuple)):
        return [json_serialize(o) for o in obj]
    if isinstance(obj, dict):
        return {k: json_serialize(v) for (k, v) in obj.items()}
    if isinstance(obj, (int, bool, float, str, type(None))):
        return obj
    if isinstance(obj, bytes):
        # Fix: this branch used to test `isinstance(obj, str)` and call
        # `obj.decode('utf-8')` -- on Python 3 that branch was unreachable
        # (text already returned above) and would crash if reached, since
        # str has no decode().  It was the Python 2 bytes branch.
        return obj.decode('utf-8')
    if isinstance(obj, (decimal.Decimal, DN)):
        return str(obj)
    if not callable(getattr(obj, '__json__', None)):
        # raise TypeError('%r is not JSON serializable')
        return ''
    return json_serialize(obj.__json__())
def verify_host_resolvable(fqdn):
    """Raise a PublicError unless *fqdn* resolves to at least one address.

    Raises DNSResolverError when resolution itself fails, and
    DNSNotARecordError when resolution succeeds but yields no addresses.
    """
    try:
        addresses = resolve_ip_addresses(fqdn)
    except dns.exception.DNSException as ex:
        # wrap DNSException in a PublicError
        raise errors.DNSResolverError(exception=ex)
    if not addresses:
        raise errors.DNSNotARecordError(hostname=fqdn)
def has_soa_or_ns_record(domain):
    """Return True when *domain* answers for an SOA or an NS record.

    Both queries are always issued; a DNS failure on either is treated
    as "record not present".
    """
    def _answers(rdtype):
        try:
            resolve(domain, rdtype)
        except DNSException:
            return False
        return True

    has_soa = _answers(rdatatype.SOA)
    has_ns = _answers(rdatatype.NS)
    return has_soa or has_ns
def normalize_name(name):
    """Split a principal-style name into lower-cased components.

    'user@domain'  -> {'name': ..., 'domain': ...}
    'FLAT\\\\user'   -> {'name': ..., 'flatname': ...}
    anything else  -> {'name': ...}
    """
    result = {}
    at_parts = name.split('@')
    if len(at_parts) == 2:
        user, realm = at_parts
        result['domain'] = unicode(realm).lower()
        result['name'] = unicode(user).lower()
        return result
    slash_parts = name.split('\\')
    if len(slash_parts) == 2:
        flat, user = slash_parts
        result['flatname'] = unicode(flat).lower()
        result['name'] = unicode(user).lower()
        return result
    result['name'] = unicode(name).lower()
    return result
def isvalid_base64(data):
    """
    Validate that *data* is well-formed base64 text.

    Whitespace is not significant and is removed first.  The remainder
    must consist only of a-z, A-Z, 0-9, '+' or '/', end with at most
    two '=' pads, and have a length divisible by 4.  This checks the
    base64 shape only; it does not guarantee the payload is meaningful.
    """
    compact = ''.join(data.split())
    if len(compact) % 4:
        return False
    return re.match(r'^[a-zA-Z0-9\+\/]+\={0,2}$', compact) is not None
def strip_csr_header(csr):
    """
    Remove the header and footer (and surrounding material) from a CSR.

    The offset paired with each marker is the marker length plus one for
    the newline that follows it.  Input without a recognized BEGIN
    marker is returned unchanged.
    """
    markers = (
        (b"-----BEGIN NEW CERTIFICATE REQUEST-----", 40),
        (b"-----BEGIN CERTIFICATE REQUEST-----", 36),
    )
    for marker, offset in markers:
        start = csr.find(marker)
        if start >= 0:
            end = csr.find(b"-----END")
            return csr[start + offset:end]
    return csr
def validate_ipaddr(ipaddr):
    """
    Check to see if the given IP address is a valid IPv4 or IPv6 address.
    Returns True or False
    """
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, ipaddr)
        except socket.error:
            continue
        return True
    return False
def check_writable_file(filename):
    """
    Determine if the file is writable. If the file doesn't exist then
    open the file to test writability.

    :raises errors.FileError: if filename is None, permission is denied,
        or opening/creating the file fails
    """
    if filename is None:
        raise errors.FileError(reason=_('Filename is empty'))
    try:
        if os.path.isfile(filename):
            if not os.access(filename, os.W_OK):
                raise errors.FileError(reason=_('Permission denied: %(file)s') % dict(file=filename))
        else:
            # NOTE: probing writability this way creates (and leaves
            # behind) an empty file when it did not exist
            fp = open(filename, 'w')
            fp.close()
    except (IOError, OSError) as e:
        raise errors.FileError(reason=str(e))
def normalize_zonemgr(zonemgr):
    """Convert a zone manager e-mail address to DNS master-file form.

    Dots in the local-part are escaped and the '@' becomes a dot; values
    without '@' (or non-strings / empty values) pass through unchanged.
    """
    if not zonemgr or not isinstance(zonemgr, str):
        return zonemgr
    if '@' not in zonemgr:
        return zonemgr
    local_part, _at, domain_part = zonemgr.partition('@')
    escaped_local = local_part.replace('.', '\\.')
    return u''.join((escaped_local, u'.', domain_part))
def normalize_zone(zone):
    """Return *zone* with a trailing dot appended when missing."""
    # zone[-1] (not endswith) keeps the original IndexError on empty input
    return zone if zone[-1] == '.' else zone + '.'
def get_proper_tls_version_span(tls_version_min, tls_version_max):
    """
    This function checks whether the given TLS versions are known in
    IPA and that these versions fulfill the requirements for minimal
    TLS version (see
    `ipalib.constants: TLS_VERSIONS, TLS_VERSION_MINIMAL`).

    :param tls_version_min:
        the lower value in the TLS min-max span, raised to the lowest
        allowed value if too low
    :param tls_version_max:
        the higher value in the TLS min-max span, raised to tls_version_min
        if lower than TLS_VERSION_MINIMAL
    :returns: inclusive list of TLS versions in the span, or None to use
        the system's default range
    :raises: ValueError
    """
    if tls_version_min is None and tls_version_max is None:
        # no defaults, use system's default TLS version range
        return None
    if tls_version_min is None:
        tls_version_min = TLS_VERSION_MINIMAL
    if tls_version_max is None:
        tls_version_max = TLS_VERSION_MAXIMAL
    min_allowed_idx = TLS_VERSIONS.index(TLS_VERSION_MINIMAL)
    try:
        min_version_idx = TLS_VERSIONS.index(tls_version_min)
    except ValueError:
        raise ValueError("tls_version_min ('{val}') is not a known "
                         "TLS version.".format(val=tls_version_min))
    try:
        max_version_idx = TLS_VERSIONS.index(tls_version_max)
    except ValueError:
        raise ValueError("tls_version_max ('{val}') is not a known "
                         "TLS version.".format(val=tls_version_max))
    if min_version_idx > max_version_idx:
        raise ValueError("tls_version_min is higher than "
                         "tls_version_max.")
    if min_version_idx < min_allowed_idx:
        min_version_idx = min_allowed_idx
        # message fixed: missing space after the comma
        logger.warning("tls_version_min set too low ('%s'), using '%s' "
                       "instead", tls_version_min,
                       TLS_VERSIONS[min_version_idx])
    if max_version_idx < min_allowed_idx:
        # clamp max up to the (already clamped) minimum
        max_version_idx = min_version_idx
        logger.warning("tls_version_max set too low ('%s'), using '%s' "
                       "instead", tls_version_max,
                       TLS_VERSIONS[max_version_idx])
    return TLS_VERSIONS[min_version_idx:max_version_idx+1]
def create_https_connection(
    host, port=HTTPSConnection.default_port,
    cafile=None,
    client_certfile=None, client_keyfile=None,
    keyfile_passwd=None,
    tls_version_min=TLS_VERSION_DEFAULT_MIN,
    tls_version_max=TLS_VERSION_DEFAULT_MAX,
    **kwargs
):
    """
    Create a customized HTTPSConnection object.

    :param host:  The host to connect to
    :param port:  The port to connect to, defaults to
               HTTPSConnection.default_port
    :param cafile:  A PEM-format file containning the trusted
                    CA certificates
    :param client_certfile:
            A PEM-format client certificate file that will be used to
            identificate the user to the server.
    :param client_keyfile:
            A file with the client private key. If this argument is not
            supplied, the key will be sought in client_certfile.
    :param keyfile_passwd:
            A path to the file which stores the password that is used to
            encrypt client_keyfile. Leave default value if the keyfile
            is not encrypted.
    :raises RuntimeError: if cafile is missing or unreadable
    :returns An established HTTPS connection to host:port
    """
    # map of TLS version name -> ssl option bit that disables it
    tls_cutoff_map = {
        "ssl2": ssl.OP_NO_SSLv2,
        "ssl3": ssl.OP_NO_SSLv3,
        "tls1.0": ssl.OP_NO_TLSv1,
        "tls1.1": ssl.OP_NO_TLSv1_1,
        "tls1.2": ssl.OP_NO_TLSv1_2,
        # OP_NO_TLSv1_3 may be absent on older ssl builds; 0 is a no-op bit
        "tls1.3": getattr(ssl, "OP_NO_TLSv1_3", 0),
    }
    if cafile is None:
        raise RuntimeError("cafile argument is required to perform server "
                           "certificate verification")
    if not os.path.isfile(cafile) or not os.access(cafile, os.R_OK):
        raise RuntimeError("cafile \'{file}\' doesn't exist or is unreadable".
                           format(file=cafile))
    # official Python documentation states that the best option to get
    # TLSv1 and later is to setup SSLContext with PROTOCOL_SSLv23
    # and then negate the insecure SSLv2 and SSLv3. However, with Python 3.10
    # PROTOCOL_SSLv23 is deprecated as well as PROTOCOL_TLS. We should use
    # PROTOCOL_TLS_CLIENT since Python 3.6
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.options |= (
        ssl.OP_ALL | ssl.OP_NO_COMPRESSION | ssl.OP_SINGLE_DH_USE |
        ssl.OP_SINGLE_ECDH_USE
    )
    if constants.TLS_HIGH_CIPHERS is not None:
        # configure ciphers, uses system crypto policies on RH platforms.
        ctx.set_ciphers(constants.TLS_HIGH_CIPHERS)
    # remove the slice of negating protocol options according to options
    tls_span = get_proper_tls_version_span(tls_version_min, tls_version_max)
    # set up the correct TLS version flags for the SSL context
    if tls_span is not None:
        for version in TLS_VERSIONS:
            if version in tls_span:
                # make sure the required TLS versions are available if Python
                # decides to modify the default TLS flags
                ctx.options &= ~tls_cutoff_map[version]
            else:
                # disable all TLS versions not in tls_span
                ctx.options |= tls_cutoff_map[version]
    # Enable TLS 1.3 post-handshake auth
    if getattr(ctx, "post_handshake_auth", None) is not None:
        ctx.post_handshake_auth = True
    # always verify the server certificate and hostname
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.check_hostname = True
    ctx.load_verify_locations(cafile)
    if client_certfile is not None:
        if keyfile_passwd is not None:
            with open(keyfile_passwd) as pwd_f:
                passwd = pwd_f.read()
        else:
            passwd = None
        ctx.load_cert_chain(client_certfile, client_keyfile, passwd)
    return HTTPSConnection(host, port, context=ctx, **kwargs)
def validate_dns_label(dns_label, allow_underscore=False, allow_slash=False):
    """Validate a single DNS label.

    :param allow_underscore: additionally permit '_' anywhere in the label
    :param allow_slash: additionally permit '/' inside (not at the edges of)
        the label
    :raises ValueError: when the label is empty, longer than 63 characters,
        or contains characters outside the allowed set
    """
    base_chars = 'a-z0-9'
    extra_chars = '_' if allow_underscore else ''
    # '-' must always be the last character inside the [...] class
    middle_chars = ('/' if allow_slash else '') + '-'
    label_regex = r'''^[%(base)s%(extra)s] # must begin with an alphanumeric
                                           # character, or underscore if
                                           # allow_underscore is True
    ([%(base)s%(extra)s%(middle)s]*        # can contain all allowed character
                                           # classes in the middle
    [%(base)s%(extra)s])*$                 # must end with alphanumeric
                                           # character or underscore if
                                           # allow_underscore is True
    ''' % dict(base=base_chars, extra=extra_chars, middle=middle_chars)
    regex = re.compile(label_regex, re.IGNORECASE | re.VERBOSE)
    if not dns_label:
        raise ValueError(_('empty DNS label'))
    if len(dns_label) > 63:
        raise ValueError(_('DNS label cannot be longer that 63 characters'))
    if regex.match(dns_label):
        return
    chars = ', '.join("'%s'" % c for c in extra_chars + middle_chars)
    chars2 = ', '.join("'%s'" % c for c in middle_chars)
    raise ValueError(_("only letters, numbers, %(chars)s are allowed. " \
                       "DNS label may not start or end with %(chars2)s") \
                     % dict(chars=chars, chars2=chars2))
def validate_domain_name(
    domain_name, allow_underscore=False,
    allow_slash=False, entity='domain'
):
    """Validate a dotted domain name label by label.

    :raises ValueError: for single-label names or any invalid label
    """
    stripped = domain_name[:-1] if domain_name.endswith('.') else domain_name
    labels = stripped.split(".")
    if len(labels) < 2:
        raise ValueError(_(
            'single label {}s are not supported'.format(entity)))
    # apply DNS name validator to every name part
    for part in labels:
        validate_dns_label(part, allow_underscore, allow_slash)
def validate_zonemgr(zonemgr):
    """Reject a zone manager DNSName containing '@' in any label."""
    assert isinstance(zonemgr, DNSName)
    contains_at = any(b'@' in label for label in zonemgr.labels)
    if contains_at:
        raise ValueError(_('too many \'@\' characters'))
def validate_zonemgr_str(zonemgr):
    """Normalize a zone manager string, validate it as an IDNA domain and
    check its labels; returns None on success, raises ValueError otherwise."""
    zonemgr = normalize_zonemgr(zonemgr)
    validate_idna_domain(zonemgr)
    zonemgr = DNSName(zonemgr)
    return validate_zonemgr(zonemgr)
def validate_hostname(hostname, check_fqdn=True, allow_underscore=False,
                      allow_slash=False, maxlen=255):
    """ See RFC 952, 1123

    Length limit of 64 imposed by MAXHOSTNAMELEN on Linux.

    DNS and other operating systems has a max length of 255. Default to
    the theoretical max unless explicitly told to limit. The cases
    where a limit would be set might include:
    * *-install --hostname
    * ipa host-add

    The *-install commands by definition are executed on Linux hosts so
    the maximum length needs to be limited.

    :param hostname Checked value
    :param check_fqdn Check if hostname is fully qualified
    """
    if len(hostname) > maxlen:
        raise ValueError(_('cannot be longer that {} characters'.format(
            maxlen)))
    if hostname.endswith('.'):
        hostname = hostname[:-1]
    if '..' in hostname:
        raise ValueError(_('hostname contains empty label (consecutive dots)'))
    if '.' in hostname:
        # multi-label name: validate as a whole domain name
        validate_domain_name(hostname, allow_underscore, allow_slash)
        return
    if check_fqdn:
        raise ValueError(_('not fully qualified'))
    validate_dns_label(hostname, allow_underscore, allow_slash)
def normalize_sshpubkey(value):
    """Re-encode an SSH public key in canonical OpenSSH form."""
    return SSHPublicKey(value).openssh()
def validate_sshpubkey(ugettext, value):
    """Return an error message if *value* is not a parseable SSH public
    key, or None when it is valid."""
    try:
        SSHPublicKey(value)
    except (ValueError, UnicodeDecodeError):
        return _('invalid SSH public key')
    return None
def validate_sshpubkey_no_options(ugettext, value):
    """Like validate_sshpubkey, but additionally reject keys that carry
    options; returns an error message or None."""
    try:
        parsed = SSHPublicKey(value)
    except (ValueError, UnicodeDecodeError):
        return _('invalid SSH public key')
    if parsed.has_options():
        return _('options are not allowed')
    return None
def convert_sshpubkey_post(entry_attrs):
    """Normalize 'ipasshpubkey' values to OpenSSH form and store their
    SHA256 fingerprints in 'sshpubkeyfp'; unparseable keys are dropped."""
    raw_keys = entry_attrs.get('ipasshpubkey')
    if not raw_keys:
        return
    normalized_keys = []
    fingerprints = []
    for raw in raw_keys:
        try:
            key = SSHPublicKey(raw)
        except (ValueError, UnicodeDecodeError):
            # skip values that do not parse as SSH public keys
            continue
        fingerprint = key.fingerprint_hex_sha256()
        comment = key.comment()
        if comment:
            fingerprint = u'%s %s' % (fingerprint, comment)
        fingerprint = u'%s (%s)' % (fingerprint, key.keytype())
        normalized_keys.append(key.openssh())
        fingerprints.append(fingerprint)
    if 'ipasshpubkey' in entry_attrs:
        entry_attrs['ipasshpubkey'] = normalized_keys or None
    if fingerprints:
        entry_attrs['sshpubkeyfp'] = fingerprints
def add_sshpubkey_to_attrs_pre(context, attrs_list):
    """
    Attribute ipasshpubkey should be added to attrs_list to be able compute
    ssh fingerprint. This attribute must be removed later if was added here
    (see remove_sshpubkey_from_output_post).
    """
    already_requested = 'ipasshpubkey' in attrs_list or '*' in attrs_list
    if not already_requested:
        # remember we injected it so the post callback can strip it again
        setattr(context, 'ipasshpubkey_added', True)
        attrs_list.append('ipasshpubkey')
def remove_sshpubkey_from_output_post(context, entry_attrs):
    """
    Remove ipasshpubkey from output if it was added in pre_callbacks
    """
    if not getattr(context, 'ipasshpubkey_added', False):
        return
    entry_attrs.pop('ipasshpubkey', None)
    delattr(context, 'ipasshpubkey_added')
def remove_sshpubkey_from_output_list_post(context, entries):
    """
    Remove ipasshpubkey from output if it was added in pre_callbacks
    """
    if not getattr(context, 'ipasshpubkey_added', False):
        return
    for entry in entries:
        entry.pop('ipasshpubkey', None)
    delattr(context, 'ipasshpubkey_added')
# regexp matching signed floating point number (group 1) followed by
# optional whitespace followed by time unit, e.g. day, hour (group 7)
time_duration_re = re.compile(r'([-+]?((\d+)|(\d+\.\d+)|(\.\d+)|(\d+\.)))\s*([a-z]+)', re.IGNORECASE)
# number of seconds in a time unit; note the fixed approximations
# used by parse_time_duration: a year is 365 days, a month is 30 days
time_duration_units = {
    'year'      : 365*24*60*60,
    'years'     : 365*24*60*60,
    'y'         : 365*24*60*60,
    'month'     : 30*24*60*60,
    'months'    : 30*24*60*60,
    'week'      : 7*24*60*60,
    'weeks'     : 7*24*60*60,
    'w'         : 7*24*60*60,
    'day'       : 24*60*60,
    'days'      : 24*60*60,
    'd'         : 24*60*60,
    'hour'      : 60*60,
    'hours'     : 60*60,
    'h'         : 60*60,
    'minute'    : 60,
    'minutes'   : 60,
    'min'       : 60,
    'second'    : 1,
    'seconds'   : 1,
    'sec'       : 1,
    's'         : 1,
}
def parse_time_duration(value):
    '''
    Parse a time duration string and return the total number of seconds
    as a float (may be negative).

    The string is one or more optionally-signed numbers, each followed by
    a time unit; whitespace and punctuation between terms is ignored.
    Units are case insensitive except the single characters 'M' (month)
    and 'm' (minute).

    Recognized units: year/years/y, month/months/M, week/weeks/w,
    day/days/d, hour/hours/h, minute/minutes/min/m, second/seconds/sec/s.

    Examples:
      "1h"                  # 1 hour
      "2 HOURS, 30 Minutes" # 2.5 hours
      "1week -1 day"        # 6 days
      ".5day"               # 12 hours
      "2M"                  # 2 months

    Note: despite the appearance you can perform arithmetic, the parser
    just sums the signed terms; only + and - are permitted and must
    appear before a digit.

    :parameters:
      value : string
        A time duration string in the specified format
    :returns:
      total number of seconds as float (may be negative)
    '''
    total_seconds = 0.0
    term_count = 0
    for term in time_duration_re.finditer(value):
        term_count += 1
        magnitude_text = term.group(1)
        unit = term.group(7)
        # only 'M' (month) and 'm' (minute) are case sensitive
        if unit == 'M':
            seconds_per_unit = 30*24*60*60
        elif unit == 'm':
            seconds_per_unit = 60
        else:
            unit = unit.lower()
            seconds_per_unit = time_duration_units.get(unit)
            if seconds_per_unit is None:
                raise ValueError('unknown time duration unit "%s"' % unit)
        total_seconds += float(magnitude_text) * seconds_per_unit
    if term_count == 0:
        raise ValueError('no time duration found in "%s"' % value)
    return total_seconds
def get_dns_forward_zone_update_policy(realm, rrtypes=('A', 'AAAA', 'SSHFP')):
    """
    Generate update policy for a forward DNS zone (idnsUpdatePolicy
    attribute). Bind uses this policy to grant/reject access for client
    machines trying to dynamically update their records.

    :param realm: A realm of the of the client
    :param rrtypes: A list of resource records types that client shall be
                    allowed to update
    """
    grants = [
        "grant %(realm)s krb5-self * %(rrtype)s"
        % dict(realm=realm, rrtype=rrtype)
        for rrtype in rrtypes
    ]
    return "; ".join(grants) + ";"
def get_dns_reverse_zone_update_policy(realm, reverse_zone, rrtypes=('PTR',)):
    """
    Generate update policy for a reverse DNS zone (idnsUpdatePolicy
    attribute). Bind uses this policy to grant/reject access for client
    machines trying to dynamically update their records.

    :param realm: A realm of the of the client
    :param reverse_zone: Name of the actual zone. All clients with IPs in this
                         sub-domain will be allowed to perform changes
    :param rrtypes: A list of resource records types that client shall be
                    allowed to update
    """
    grants = [
        "grant %(realm)s krb5-subdomain %(zone)s %(rrtype)s"
        % dict(realm=realm, zone=reverse_zone, rrtype=rrtype)
        for rrtype in rrtypes
    ]
    return "; ".join(grants) + ";"
# dictionary of valid reverse zone -> number of address components
REVERSE_DNS_ZONES = {
    DNSName.ip4_rev_zone : 4,
    DNSName.ip6_rev_zone : 32,
}
def zone_is_reverse(zone_name):
    """Return True if *zone_name* lies under a reverse-lookup DNS tree."""
    return DNSName(zone_name).is_reverse()
def get_reverse_zone_default(ip_address):
    """Return the default reverse zone for an address: a /24 zone for
    IPv4, a /64 zone for IPv6 (normalized with a trailing dot)."""
    addr = netaddr.IPAddress(str(ip_address))
    labels = addr.reverse_dns.split('.')
    if addr.version == 4:
        labels = labels[1:]  # /24 for IPv4
    elif addr.version == 6:
        labels = labels[16:]  # /64 for IPv6
    return normalize_zone('.'.join(labels))
def validate_rdn_param(ugettext, value):
    """Return an error string when *value* is not a valid RDN, else None."""
    try:
        RDN(value)
    except Exception as e:
        return str(e)
    return None
def validate_hostmask(ugettext, hostmask):
    """Return an error string when *hostmask* is not a valid CIDR
    network, else None."""
    try:
        netaddr.IPNetwork(hostmask)
    except (ValueError, AddrFormatError):
        return _('invalid hostmask')
    return None
class ForwarderValidationError(Exception):
    """Base class for DNS forwarder validation failures.

    Subclasses define *format*; keyword arguments are interpolated into
    it by messages.process_message_arguments(), which sets self.msg.
    """
    format = None
    def __init__(self, format=None, message=None, **kw):
        messages.process_message_arguments(self, format, message, **kw)
        super(ForwarderValidationError, self).__init__(self.msg)
class UnresolvableRecordError(ForwarderValidationError):
    """The forwarder could not resolve the queried record."""
    format = _("query '%(owner)s %(rtype)s': %(error)s")
class EDNS0UnsupportedError(ForwarderValidationError):
    """The forwarder failed a query with EDNS0 enabled."""
    format = _("query '%(owner)s %(rtype)s' with EDNS0: %(error)s")
class DNSSECSignatureMissingError(ForwarderValidationError):
    """The answer carried no RRSIG data."""
    format = _("answer to query '%(owner)s %(rtype)s' is missing DNSSEC "
               "signatures (no RRSIG data)")
class DNSSECValidationError(ForwarderValidationError):
    """The record failed DNSSEC validation."""
    format = _("record '%(owner)s %(rtype)s' "
               "failed DNSSEC validation on server %(ip)s")
def _log_response(e):
    """
    If exception contains response from server, log this response to debug log

    :param e: DNSException
    """
    assert isinstance(e, DNSException)
    # dnspython stores the wire response in the exception's kwargs
    response = getattr(e, 'kwargs', {}).get('response')
    if response:
        logger.debug("DNSException: %s; server response: %s", e, response)
def _resolve_record(owner, rtype, nameserver_ip=None, edns0=False,
                    dnssec=False, flag_cd=False, timeout=10):
    """
    Resolve *owner*/*rtype*, optionally against a specific nameserver and
    with EDNS0/DNSSEC flags.

    :param nameserver_ip: if None, default resolvers will be used
    :param edns0: enables EDNS0
    :param dnssec: enabled EDNS0, flags: DO
    :param flag_cd: requires dnssec=True, adds flag CD
    :raise DNSException: if error occurs
    """
    assert isinstance(nameserver_ip, str) or nameserver_ip is None
    assert isinstance(rtype, str)
    res = DNSResolver()
    if nameserver_ip:
        res.nameservers = [nameserver_ip]
    res.lifetime = timeout
    # Recursion Desired,
    # this option prevents to get answers in authority section instead of answer
    res.set_flags(dns.flags.RD)
    if dnssec:
        # advertise a 4096-byte EDNS0 buffer with the DO (DNSSEC OK) bit
        res.use_edns(0, dns.flags.DO, 4096)
        flags = dns.flags.RD
        if flag_cd:
            # CD (checking disabled) lets DNSSEC-invalid answers through
            flags = flags | dns.flags.CD
        res.set_flags(flags)
    elif edns0:
        res.use_edns(0, 0, 4096)
    return res.resolve(owner, rtype)
def _validate_edns0_forwarder(owner, rtype, ip_addr, timeout=10):
    """
    Validate if forwarder supports EDNS0

    :raise UnresolvableRecordError: record cannot be resolved
    :raise EDNS0UnsupportedError: EDNS0 is not supported by forwarder
    """
    # first a plain query: the forwarder must resolve the record at all
    try:
        _resolve_record(owner, rtype, nameserver_ip=ip_addr, timeout=timeout)
    except DNSException as e:
        _log_response(e)
        raise UnresolvableRecordError(owner=owner, rtype=rtype, ip=ip_addr,
                                      error=e)
    # then the same query with EDNS0 enabled
    try:
        _resolve_record(owner, rtype, nameserver_ip=ip_addr, edns0=True,
                        timeout=timeout)
    except DNSException as e:
        _log_response(e)
        raise EDNS0UnsupportedError(owner=owner, rtype=rtype, ip=ip_addr,
                                    error=e)
def validate_dnssec_global_forwarder(ip_addr, timeout=10):
    """Test DNS forwarder properties. against root zone.

    Global forwarders should be able return signed root zone

    :raise UnresolvableRecordError: record cannot be resolved
    :raise EDNS0UnsupportedError: EDNS0 is not supported by forwarder
    :raise DNSSECSignatureMissingError: did not receive RRSIG for root zone
    """
    ip_addr = str(ip_addr)
    owner = "."
    rtype = "SOA"
    _validate_edns0_forwarder(owner, rtype, ip_addr, timeout=timeout)
    # DNS root has to be signed
    try:
        ans = _resolve_record(owner, rtype, nameserver_ip=ip_addr, dnssec=True,
                              timeout=timeout)
    except DNSException as e:
        _log_response(e)
        raise DNSSECSignatureMissingError(owner=owner, rtype=rtype, ip=ip_addr)
    # the answer must include an RRSIG covering the root SOA record
    try:
        ans.response.find_rrset(
            ans.response.answer, dns.name.root, dns.rdataclass.IN,
            dns.rdatatype.RRSIG, dns.rdatatype.SOA
        )
    except KeyError:
        raise DNSSECSignatureMissingError(owner=owner, rtype=rtype, ip=ip_addr)
def validate_dnssec_zone_forwarder_step1(ip_addr, fwzone, timeout=10):
    """
    Only forwarders in forward zones can be validated in this way

    :raise UnresolvableRecordError: record cannot be resolved
    :raise EDNS0UnsupportedError: EDNS0 is not supported by forwarder
    """
    _validate_edns0_forwarder(fwzone, "SOA", ip_addr, timeout=timeout)
def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, timeout=10):
    """
    This step must be executed after forwarders are added into LDAP, and only
    when we are sure the forwarders work.
    Query will be send to IPA DNS server, to verify if reply passed,
    or DNSSEC validation failed.
    Only forwarders in forward zones can be validated in this way

    :raise UnresolvableRecordError: record cannot be resolved
    :raise DNSSECValidationError: response from forwarder is not DNSSEC valid
    """
    rtype = "SOA"
    # query with CD (checking disabled): validation failures still answer
    try:
        ans_cd = _resolve_record(fwzone, rtype, nameserver_ip=ipa_ip_addr,
                                 edns0=True, dnssec=True, flag_cd=True,
                                 timeout=timeout)
    except NXDOMAIN as e:
        # sometimes CD flag is ignored and NXDomain is returned
        _log_response(e)
        raise DNSSECValidationError(owner=fwzone, rtype=rtype, ip=ipa_ip_addr)
    except DNSException as e:
        _log_response(e)
        raise UnresolvableRecordError(owner=fwzone, rtype=rtype,
                                      ip=ipa_ip_addr, error=e)
    # query with DO only: a validation failure changes/denies the answer
    try:
        ans_do = _resolve_record(fwzone, rtype, nameserver_ip=ipa_ip_addr,
                                 edns0=True, dnssec=True, timeout=timeout)
    except DNSException as e:
        _log_response(e)
        raise DNSSECValidationError(owner=fwzone, rtype=rtype, ip=ipa_ip_addr)
    else:
        if (ans_do.canonical_name == ans_cd.canonical_name
                and ans_do.rrset == ans_cd.rrset):
            return
        # records received with and without CD flag are not equivalent:
        # this might be caused by an DNSSEC validation failure in cases where
        # existing zone id being 'shadowed' by another zone on forwarder
        raise DNSSECValidationError(owner=fwzone, rtype=rtype, ip=ipa_ip_addr)
def validate_idna_domain(value):
    """
    Validate if value is valid IDNA domain.

    If domain is not valid, raises ValueError

    :param value: domain name string (may contain non-ASCII labels)
    :return: None
    """
    error = None
    try:
        DNSName(value)
    except dns.name.BadEscape:
        error = _('invalid escape code in domain name')
    except dns.name.EmptyLabel:
        error = _('empty DNS label')
    except dns.name.NameTooLong:
        error = _('domain name cannot be longer than 255 characters')
    except dns.name.LabelTooLong:
        error = _('DNS label cannot be longer than 63 characters')
    except dns.exception.SyntaxError:
        error = _('invalid domain name')
    else:
        #compare if IDN normalized and original domain match
        #there is N:1 mapping between unicode and IDNA names
        #user should use normalized names to avoid mistakes
        # split on ASCII '.' as well as the fullwidth/ideographic dots
        # that IDNA treats as label separators
        labels = re.split(u'[.\uff0e\u3002\uff61]', value, flags=re.UNICODE)
        try:
            for label in labels:
                label.encode("ascii")
        except UnicodeError:
            # IDNA
            is_nonnorm = any(encodings.idna.nameprep(x) != x for x in labels)
            if is_nonnorm:
                error = _("domain name '%(domain)s' should be normalized to"
                          ": %(normalized)s") % {
                    'domain': value,
                    'normalized': '.'.join([encodings.idna.nameprep(x)
                                            for x in labels])}
    if error:
        raise ValueError(error)
def detect_dns_zone_realm_type(api, domain):
    """
    Detects the type of the realm that the given DNS zone belongs to.

    Note: This method is heuristic. Possible values:
    - 'current': For IPA domains belonging in the current realm.
    - 'foreign': For domains belonging in a foreign kerberos realm.
    - 'unknown': For domains whose allegiance could not be detected.
    """
    # First, try to detect _kerberos TXT record in the domain
    # This would indicate that the domain belongs to IPA realm
    kerberos_prefix = DNSName('_kerberos')
    domain_suffix = DNSName(domain)
    kerberos_record_name = kerberos_prefix + domain_suffix
    try:
        result = resolve(kerberos_record_name, rdatatype.TXT)
        answer = result.response.answer
        # IPA domain will have only one _kerberos TXT record
        if (len(answer) == 1 and
            len(answer[0]) == 1 and
            answer[0].rdtype == rdatatype.TXT):
            record = answer[0][0]
            # If the record contains our current realm, it is 'ipa-current'
            if record.to_text() == '"{0}"'.format(api.env.realm):
                return 'current'
            else:
                return 'foreign'
    except DNSException:
        # no _kerberos TXT record; fall through to the AD heuristic
        pass
    # Try to detect AD specific record in the zone.
    # This would indicate that the domain belongs to foreign (AD) realm
    gc_prefix = DNSName('_ldap._tcp.gc._msdcs')
    ad_specific_record_name = gc_prefix + domain_suffix
    try:
        # The presence of this record is enough, return foreign in such case
        resolve(ad_specific_record_name, rdatatype.SRV)
    except DNSException:
        # If we could not detect type with certainty, return unknown
        return 'unknown'
    else:
        return 'foreign'
def has_managed_topology(api):
    """Return True when the server's domain level is above level 0."""
    domainlevel = api.Command['domainlevel_get']().get('result', DOMAIN_LEVEL_0)
    return domainlevel > DOMAIN_LEVEL_0
def print_replication_status(entry, verbose):
    """Pretty print nsds5replicalastinitstatus, nsds5replicalastinitend,
    nsds5replicalastupdatestatus, nsds5replicalastupdateend for a
    replication agreement.

    Only prints anything when *verbose* is True.
    """
    if verbose:
        initstatus = entry.single_value.get('nsds5replicalastinitstatus')
        if initstatus is not None:
            print("  last init status: %s" % initstatus)
            # the end timestamp is only shown when an init status exists
            print("  last init ended: %s" % str(
                ipautil.parse_generalized_time(
                    entry.single_value['nsds5replicalastinitend'])))
        updatestatus = entry.single_value.get(
            'nsds5replicalastupdatestatus'
        )
        if updatestatus is not None:
            print("  last update status: %s" % updatestatus)
            print("  last update ended: %s" % str(
                ipautil.parse_generalized_time(
                    entry.single_value['nsds5replicalastupdateend']
                ))
            )
class classproperty:
__slots__ = ('__doc__', 'fget')
def __init__(self, fget=None, doc=None):
assert isinstance(fget, classmethod)
if doc is None and fget is not None:
doc = fget.__doc__
self.fget = fget
self.__doc__ = doc
def __get__(self, obj, obj_type):
if self.fget is not None:
return self.fget.__get__(obj, obj_type)()
raise AttributeError("unreadable attribute")
def __set__(self, obj, value):
raise AttributeError("can't set attribute")
def __delete__(self, obj):
raise AttributeError("can't delete attribute")
def getter(self, fget):
self.fget = fget
return self
class classobjectproperty(classproperty):
    # A class property that also passes the object to the getter
    # obj is None for class objects and 'self' for instance objects.
    # NOTE(review): '__doc__' is re-declared from the parent's slots,
    # hence the pylint pragma — redundant but harmless.
    __slots__ = ('__doc__',)  # pylint: disable=redefined-slots-in-subclass
    def __get__(self, obj, obj_type):
        if self.fget is not None:
            # extra trailing call argument: the instance (or None)
            return self.fget.__get__(obj, obj_type)(obj)
        raise AttributeError("unreadable attribute")
def normalize_hostname(hostname):
    """Use common fqdn form without the trailing dot"""
    stripped = hostname[:-1] if hostname.endswith(u'.') else hostname
    return stripped.lower()
def hostname_validator(ugettext, value, maxlen=255):
    """Validator used by plugins to ensure hostname compliance.

    In Linux the maximum hostname length is 64. In DNS and
    other operaring systems (Solaris) it is 255. If not explicitly
    checking a Linux hostname (e.g. the server) use the DNS
    default.

    :returns: an error message string on failure, None on success
    """
    try:
        validate_hostname(value, maxlen=maxlen)
    except ValueError as e:
        return _('invalid domain-name: %s') % unicode(e)
    return None
def ipaddr_validator(ugettext, ipaddr, ip_version=None):
    """Validate an IP address string, optionally requiring a specific
    IP version; returns an error message string or None."""
    try:
        addr = netaddr.IPAddress(str(ipaddr), flags=netaddr.INET_PTON)
    except (netaddr.AddrFormatError, ValueError):
        return _('invalid IP address format')
    if ip_version is not None and addr.version != ip_version:
        return _(
            'invalid IP address version (is %(value)d, must be '
            '%(required_value)d)!') % dict(
                value=addr.version,
                required_value=ip_version
            )
    return None
def validate_bind_forwarder(ugettext, forwarder):
    """Validate a BIND forwarder specification: "IP" or "IP port N".

    :returns: an error message string on failure, None on success
    """
    ip_address, sep, port = forwarder.partition(u' port ')
    ip_address_validation = ipaddr_validator(ugettext, ip_address)
    if ip_address_validation is not None:
        return ip_address_validation
    if sep:
        try:
            port = int(port)
            if port < 0 or port > 65535:
                raise ValueError()
        except ValueError:
            # look the message up for translation FIRST, then interpolate;
            # formatting before the gettext lookup meant the catalog entry
            # could never match (same English output either way)
            return _('%(port)s is not a valid port') % dict(port=port)
    return None
def set_krbcanonicalname(entry_attrs):
    """Default krbcanonicalname to krbprincipalname on entries carrying
    the krbprincipalaux objectclass (case-insensitive match)."""
    lowered_classes = {oc.lower() for oc in entry_attrs['objectclass']}
    if 'krbprincipalaux' not in lowered_classes:
        return
    has_principal = 'krbprincipalname' in entry_attrs
    has_canonical = 'krbcanonicalname' in entry_attrs
    if has_principal and not has_canonical:
        entry_attrs['krbcanonicalname'] = entry_attrs['krbprincipalname']
def ensure_last_krbprincipalname(ldap, entry_attrs, *keys):
    """
    ensure that the LDAP entry has at least one value of krbprincipalname
    and that this value is equal to krbcanonicalname

    :param ldap: LDAP connection object
    :param entry_attrs: LDAP entry made prior to update
    :param keys: command arguments; keys[-1] is presumably the list of
        principal names being removed — verify against callers
    :raises errors.ValidationError: when the canonical principal name is
        among the values being removed
    """
    entry = ldap.get_entry(
        entry_attrs.dn, ['krbcanonicalname', 'krbprincipalname'])
    krbcanonicalname = entry.single_value.get('krbcanonicalname', None)
    if krbcanonicalname in keys[-1]:
        raise errors.ValidationError(
            name='krbprincipalname',
            error=_('at least one value equal to the canonical '
                    'principal name must be present')
        )
def ensure_krbcanonicalname_set(ldap, entry_attrs):
    """Backfill krbcanonicalname on an existing entry when it is absent,
    copying it from krbprincipalname via set_krbcanonicalname."""
    stored_entry = ldap.get_entry(
        entry_attrs.dn,
        ['krbcanonicalname', 'krbprincipalname', 'objectclass'])
    if stored_entry.single_value.get('krbcanonicalname', None) is not None:
        # already present, nothing to backfill
        return
    set_krbcanonicalname(stored_entry)
    stored_entry.pop('krbprincipalname', None)
    stored_entry.pop('objectclass', None)
    entry_attrs.update(stored_entry)
def check_client_configuration(env=None):
    """
    Check if IPA client is configured on the system.

    This is a convenience wrapper that also supports using
    a custom configuration via IPA_CONFDIR.

    Raises a ScriptError exception if the client is not
    configured.

    Hardcode return code to avoid recursive imports
    """
    CLIENT_NOT_CONFIGURED = 2
    if env is not None and env.confdir != paths.ETC_IPA:
        # custom IPA conf dir, check for custom conf_default
        if os.path.isfile(env.conf_default):
            return True
        else:
            raise ScriptError(
                f'IPA client is not configured on this system (confdir '
                f'{env.confdir} is missing {env.conf_default})',
                CLIENT_NOT_CONFIGURED
            )
    # standard confdir: defer to the platform check
    if is_ipa_client_configured():
        return True
    else:
        raise ScriptError(
            'IPA client is not configured on this system',
            CLIENT_NOT_CONFIGURED
        )
def _collect_trust_namespaces(api_instance, add_local=False):
    """
    Return UPNs and realm names of trusted forests.

    :param api_instance: API instance
    :param add_local: if True, also include our own realm name
    :return: set of lower-cased namespace names as strings
    """
    trusts = api_instance.Command.trust_find(u'', sizelimit=0)['result']
    namespaces = set()
    for trust in trusts:
        namespaces.update(
            upn.lower() for upn in trust.get('ipantadditionalsuffixes', []))
        if 'ipantflatname' in trust:
            namespaces.add(trust['ipantflatname'][0].lower())
        namespaces.add(trust['cn'][0].lower())
    if add_local:
        namespaces.add(api_instance.env.realm.lower())
    return namespaces
def check_principal_realm_in_trust_namespace(api_instance, *suffixes,
                                             attr_name='krbprincipalname'):
    """
    Check that principal name's suffix does not overlap with UPNs and realm
    names of trusted forests.

    :param api_instance: API instance
    :param suffixes: principal suffixes
    :raises: ValidationError if the suffix coincides with realm name, UPN
        suffix or netbios name of trusted domains
    """
    namespaces = _collect_trust_namespaces(api_instance, add_local=False)
    for candidate in suffixes[-1]:
        principal = Principal(candidate, realm=api_instance.env.realm)
        upn = principal.upn_suffix if principal.is_enterprise else None
        if principal.realm in namespaces or upn in namespaces:
            raise errors.ValidationError(
                name=attr_name,
                error=_('realm or UPN suffix overlaps with trusted domain '
                        'namespace'))
def check_principal_realm_supported(api_instance, *suffixes,
                                    attr_name='krbprincipalname'):
    """
    Check that principal name's suffix matches either our own realm or a
    trusted-forest namespace (realm name, UPN suffix or netbios name).

    :param api_instance: API instance
    :param suffixes: principal suffixes
    :raises: ValidationError if the suffix does not match with realm name, UPN
        suffix or netbios name of trusted domains or IPA domain
    """
    trust_suffix_namespace = _collect_trust_namespaces(api_instance,
                                                       add_local=True)
    for p in suffixes[-1]:
        principal = Principal(p, realm=api_instance.env.realm)
        realm = principal.realm
        upn = principal.upn_suffix if principal.is_enterprise else None
        # reject when the realm is outside the namespace, or an enterprise
        # UPN suffix exists and is outside the namespace
        conditions = [(realm.lower() not in trust_suffix_namespace),
                      (upn is not None and (
                          upn.lower() not in trust_suffix_namespace))]
        if any(conditions):
            raise errors.ValidationError(
                name=attr_name,
                error=_('realm or UPN suffix outside of supported realm '
                        'domains or trusted domains namespace'))
def no_matching_interface_for_ip_address_warning(addr_list):
    """Warn (log and stderr) about each address in *addr_list* that has
    no matching local network interface."""
    unmatched = (ip for ip in addr_list if not ip.get_matching_interface())
    for ip in unmatched:
        logger.warning(
            "No network interface matches the IP address %s", ip)
        # fixme: once when loggers will be fixed, we can remove this
        # print
        print(
            "WARNING: No network interface matches the IP address "
            "{}".format(ip),
            file=sys.stderr
        )
def get_terminal_height(fd=1):
"""
Get current terminal height
Args:
fd (int): file descriptor. Default: 1 (stdout)
Returns:
int: Terminal height
"""
try:
return struct.unpack(
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'1234'))[0]
except (IOError, OSError, struct.error):
return os.environ.get("LINES", 25)
def get_pager():
    """ Get path to a pager

    Honours the ``$PAGER`` environment variable and falls back to ``less``.

    :return: path to the file if it exists otherwise None
    :rtype: str or None
    """
    return shutil.which(os.environ.get('PAGER', 'less'))
def open_in_pager(data, pager):
    """
    Open text data in pager

    Args:
        data (bytes): data to view in pager
        pager (str): path to the pager

    Returns:
        None
    """
    proc = subprocess.Popen([pager], stdin=subprocess.PIPE)
    try:
        proc.stdin.write(data)
        proc.communicate()
    except IOError:
        # the pager may exit before reading everything (e.g. user quits
        # early); a broken pipe here is not an error worth reporting
        pass
if reprlib is not None:
    class APIRepr(reprlib.Repr):
        # types rendered with the legacy "<type '...'>" notation
        builtin_types = {
            bool, int, float,
            str, bytes,
            dict, tuple, list, set, frozenset,
            type(None),
        }

        def __init__(self):
            super(APIRepr, self).__init__()
            # lift every max* truncation limit so nothing is shortened
            for attr, limit in self.__dict__.items():
                if isinstance(limit, int):
                    setattr(self, attr, sys.maxsize)

        def repr_str(self, x, level):
            """Output with u'' prefix"""
            return 'u' + repr(x)

        def repr_type(self, x, level):
            """Render type objects in Python 2 style for stable API output."""
            if x is str:
                return "<type 'unicode'>"
            if x in self.builtin_types:
                return "<type '{}'>".format(x.__name__)
            return repr(x)

    apirepr = APIRepr().repr
else:
    apirepr = repr
| 45,962
|
Python
|
.py
| 1,136
| 32.543134
| 101
| 0.638427
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,652
|
setup.py
|
freeipa_freeipa/ipalib/setup.py
|
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""FreeIPA common python library
FreeIPA is a server for identity, policy, and audit.
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
    # include ../ for ipasetup.py
    # (the shared build helper lives one directory above this package)
    sys.path.append(dirname(dirname(abspath(__file__))))
    from ipasetup import ipasetup  # noqa: E402
    # Delegate to the project-wide ipasetup() wrapper which fills in the
    # common packaging boilerplate shared by all FreeIPA subpackages.
    ipasetup(
        name="ipalib",
        doc=__doc__,  # module docstring above becomes the description
        package_dir={'ipalib': ''},
        packages=[
            "ipalib",
            "ipalib.install",
        ],
        install_requires=[
            "ipaplatform",
            "ipapython",
            "netaddr",
            "pyasn1",
            "pyasn1-modules",
            "six",
            "urllib3",
        ],
        extras_require={
            "install": ["dbus-python"],  # for certmonger and resolve1
        },
    )
| 1,526
|
Python
|
.py
| 46
| 27.478261
| 71
| 0.642276
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,653
|
config.py
|
freeipa_freeipa/ipalib/config.py
|
# Authors:
# Martin Nagy <mnagy@redhat.com>
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Process-wide static configuration and environment.
The standard run-time instance of the `Env` class is initialized early in the
`ipalib` process and is then locked into a read-only state, after which no
further changes can be made to the environment throughout the remaining life
of the process.
For the per-request thread-local information, see `ipalib.request`.
"""
from __future__ import absolute_import
import os
from os import path
import sys
from urllib.parse import urlparse, urlunparse
from configparser import RawConfigParser, ParsingError
import six
from ipaplatform.tasks import tasks
from ipapython.dn import DN
from ipalib.base import check_name
from ipalib.constants import (
CONFIG_SECTION,
OVERRIDE_ERROR, SET_ERROR, DEL_ERROR,
TLS_VERSIONS, TLS_VERSION_DEFAULT_MIN, TLS_VERSION_DEFAULT_MAX,
USER_CACHE_PATH
)
from ipalib import errors
# Python 3 has no ``unicode`` builtin; alias it to ``str`` so py2-era
# references later in this module (e.g. Env.__setitem__'s type check)
# keep working.
if six.PY3:
    unicode = str
class Env:
"""
Store and retrieve environment variables.
    First and foremost, the `Env` class provides a handy container for
environment variables. These variables can be both set *and* retrieved
either as attributes *or* as dictionary items.
For example, you can set a variable as an attribute:
>>> env = Env()
>>> env.attr = 'I was set as an attribute.'
>>> env.attr
u'I was set as an attribute.'
>>> env['attr'] # Also retrieve as a dictionary item
u'I was set as an attribute.'
Or you can set a variable as a dictionary item:
>>> env['item'] = 'I was set as a dictionary item.'
>>> env['item']
u'I was set as a dictionary item.'
>>> env.item # Also retrieve as an attribute
u'I was set as a dictionary item.'
The variable names must be valid lower-case Python identifiers that neither
start nor end with an underscore. If your variable name doesn't meet these
criteria, a ``ValueError`` will be raised when you try to set the variable
(compliments of the `base.check_name()` function). For example:
>>> env.BadName = 'Wont work as an attribute'
Traceback (most recent call last):
...
ValueError: name must match '^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'; got 'BadName'
>>> env['BadName'] = 'Also wont work as a dictionary item'
Traceback (most recent call last):
...
ValueError: name must match '^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'; got 'BadName'
The variable values can be ``str``, ``int``, or ``float`` instances, or the
``True``, ``False``, or ``None`` constants. When the value provided is an
``str`` instance, some limited automatic type conversion is performed, which
allows values of specific types to be set easily from configuration files or
command-line options.
So in addition to their actual values, the ``True``, ``False``, and ``None``
constants can be specified with an ``str`` equal to what ``repr()`` would
return. For example:
>>> env.true = True
>>> env.also_true = 'True' # Equal to repr(True)
>>> env.true
True
>>> env.also_true
True
Note that the automatic type conversion is case sensitive. For example:
>>> env.not_false = 'false' # Not equal to repr(False)!
>>> env.not_false
u'false'
If an ``str`` value looks like an integer, it's automatically converted to
the ``int`` type.
>>> env.lucky = '7'
>>> env.lucky
7
Leading and trailing white-space is automatically stripped from ``str``
values. For example:
>>> env.message = ' Hello! ' # Surrounded by double spaces
>>> env.message
u'Hello!'
>>> env.number = ' 42 ' # Still converted to an int
>>> env.number
42
>>> env.false = ' False ' # Still equal to repr(False)
>>> env.false
False
Also, empty ``str`` instances are converted to ``None``. For example:
>>> env.empty = ''
>>> env.empty is None
True
`Env` variables are all set-once (first-one-wins). Once a variable has been
set, trying to override it will raise an ``AttributeError``. For example:
>>> env.date = 'First'
>>> env.date = 'Second'
Traceback (most recent call last):
...
AttributeError: cannot override Env.date value u'First' with 'Second'
An `Env` instance can be *locked*, after which no further variables can be
set. Trying to set variables on a locked `Env` instance will also raise
an ``AttributeError``. For example:
>>> env = Env()
>>> env.okay = 'This will work.'
>>> env.__lock__()
>>> env.nope = 'This wont work!'
Traceback (most recent call last):
...
AttributeError: locked: cannot set Env.nope to 'This wont work!'
`Env` instances also provide standard container emulation for membership
testing, counting, and iteration. For example:
>>> env = Env()
>>> 'key1' in env # Has key1 been set?
False
>>> env.key1 = 'value 1'
>>> 'key1' in env
True
>>> env.key2 = 'value 2'
>>> len(env) # How many variables have been set?
2
>>> list(env) # What variables have been set?
['key1', 'key2']
Lastly, in addition to all the handy container functionality, the `Env`
    class provides high-level methods for bootstrapping a fresh `Env` instance
into one containing all the run-time and configuration information needed
by the built-in freeIPA plugins.
    These are the `Env` bootstrapping methods, in the order they must be called:
1. `Env._bootstrap()` - initialize the run-time variables and then
merge-in variables specified on the command-line.
2. `Env._finalize_core()` - merge-in variables from the configuration
files and then merge-in variables from the internal defaults, after
which at least all the standard variables will be set. After this
method is called, the plugins will be loaded, during which
third-party plugins can merge-in defaults for additional variables
they use (likely using the `Env._merge()` method).
3. `Env._finalize()` - one last chance to merge-in variables and then
the instance is locked. After this method is called, no more
environment variables can be set during the remaining life of the
process.
    However, normally none of these three bootstrapping methods are called
directly and instead only `plugable.API.bootstrap()` is called, which itself
takes care of correctly calling the `Env` bootstrapping methods.
"""
__locked = False
def __init__(self, **initialize):
object.__setattr__(self, '_Env__d', {})
object.__setattr__(self, '_Env__done', set())
if initialize:
self._merge(**initialize)
def __lock__(self):
"""
Prevent further changes to environment.
"""
if self.__locked is True:
raise Exception(
'%s.__lock__() already called' % self.__class__.__name__
)
object.__setattr__(self, '_Env__locked', True)
def __islocked__(self):
"""
Return ``True`` if locked.
"""
return self.__locked
def __setattr__(self, name, value):
"""
Set the attribute named ``name`` to ``value``.
This just calls `Env.__setitem__()`.
"""
self[name] = value
    def __setitem__(self, key, value):
        """
        Set ``key`` to ``value``.

        Raises ``AttributeError`` if the instance is locked or if ``key``
        was already set (variables are write-once), and ``ValueError`` if
        ``key`` is not a valid variable name.

        ``str`` values receive limited automatic conversion: surrounding
        whitespace is stripped; ``'True'``/``'False'``/``'None'``/``''``
        become the corresponding constants; digit strings become ``int``;
        and for the ``basedn`` key the value is parsed into a ``DN``.
        """
        if self.__locked:
            raise AttributeError(
                SET_ERROR % (self.__class__.__name__, key, value)
            )
        check_name(key)
        # pylint: disable=no-member
        if key in self.__d:
            raise AttributeError(OVERRIDE_ERROR %
                (self.__class__.__name__, key, self.__d[key], value)
            )
        # pylint: enable=no-member
        # a variable must not shadow an existing attribute or method
        assert not hasattr(self, key)
        if isinstance(value, str):
            value = value.strip()
            # legacy py2 path: a str can never be bytes on py3
            if isinstance(value, bytes):
                value = value.decode('utf-8')
            m = {
                'True': True,
                'False': False,
                'None': None,
                '': None,
            }
            if value in m:
                value = m[value]
            elif value.isdigit():
                value = int(value)
            elif key == 'basedn':
                value = DN(value)
        if type(value) not in (unicode, int, float, bool, type(None), DN):
            raise TypeError(key, value)
        # mirror the value both as a real attribute and in the backing dict
        object.__setattr__(self, key, value)
        # pylint: disable=no-member
        self.__d[key] = value
        # pylint: enable=no-member
def __getitem__(self, key):
"""
Return the value corresponding to ``key``.
"""
return self.__d[key] # pylint: disable=no-member
def __delattr__(self, name):
"""
Raise an ``AttributeError`` (deletion is never allowed).
For example:
>>> env = Env()
>>> env.name = 'A value'
>>> del env.name
Traceback (most recent call last):
...
AttributeError: locked: cannot delete Env.name
"""
raise AttributeError(
DEL_ERROR % (self.__class__.__name__, name)
)
def __contains__(self, key):
"""
Return True if instance contains ``key``; otherwise return False.
"""
return key in self.__d # pylint: disable=no-member
def __len__(self):
"""
Return number of variables currently set.
"""
return len(self.__d) # pylint: disable=no-member
def __iter__(self):
"""
Iterate through keys in ascending order.
"""
for key in sorted(self.__d): # pylint: disable=no-member
yield key
def _merge(self, **kw):
"""
Merge variables from ``kw`` into the environment.
Any variables in ``kw`` that have already been set will be ignored
(meaning this method will *not* try to override them, which would raise
an exception).
This method returns a ``(num_set, num_total)`` tuple containing first
the number of variables that were actually set, and second the total
number of variables that were provided.
For example:
>>> env = Env()
>>> env._merge(one=1, two=2)
(2, 2)
>>> env._merge(one=1, three=3)
(1, 2)
>>> env._merge(one=1, two=2, three=3)
(0, 3)
Also see `Env._merge_from_file()`.
:param kw: Variables provides as keyword arguments.
"""
i = 0
for (key, value) in kw.items():
if key not in self:
self[key] = value
i += 1
return (i, len(kw))
def _merge_from_file(self, config_file):
"""
Merge variables from ``config_file`` into the environment.
Any variables in ``config_file`` that have already been set will be
ignored (meaning this method will *not* try to override them, which
would raise an exception).
If ``config_file`` does not exist or is not a regular file, or if there
is an error parsing ``config_file``, ``None`` is returned.
Otherwise this method returns a ``(num_set, num_total)`` tuple
containing first the number of variables that were actually set, and
second the total number of variables found in ``config_file``.
Also see `Env._merge()`.
:param config_file: Path of the configuration file to load.
"""
if not path.isfile(config_file):
return None
parser = RawConfigParser()
try:
parser.read(config_file)
except ParsingError:
return None
if not parser.has_section(CONFIG_SECTION):
parser.add_section(CONFIG_SECTION)
items = parser.items(CONFIG_SECTION)
if len(items) == 0:
return 0, 0
i = 0
for (key, value) in items:
if key not in self:
self[key] = value
i += 1
if 'config_loaded' not in self: # we loaded at least 1 file
self['config_loaded'] = True
return i, len(items)
def _join(self, key, *parts):
"""
Append path components in ``parts`` to base path ``self[key]``.
For example:
>>> env = Env()
>>> env.home = '/people/joe'
>>> env._join('home', 'Music', 'favourites')
u'/people/joe/Music/favourites'
"""
if key in self and self[key] is not None:
return path.join(self[key], *parts)
else:
return None
def __doing(self, name):
# pylint: disable=no-member
if name in self.__done:
raise Exception(
'%s.%s() already called' % (self.__class__.__name__, name)
)
self.__done.add(name)
def __do_if_not_done(self, name):
if name not in self.__done: # pylint: disable=no-member
getattr(self, name)()
def _isdone(self, name):
return name in self.__done # pylint: disable=no-member
    def _bootstrap(self, **overrides):
        """
        Initialize basic environment.

        This method will perform the following steps:

            1. Initialize certain run-time variables.  These run-time
               variables are strictly determined by the external environment
               the process is running in; they cannot be specified on the
               command-line nor in the configuration files.

            2. Merge-in the variables in ``overrides`` by calling
               `Env._merge()`.  The intended use of ``overrides`` is to
               merge-in variables specified on the command-line.

            3. Intelligently fill-in the *in_tree*, *context*, *conf*, and
               *conf_default* variables if they haven't been set already.

        Also see `Env._finalize_core()`, the next method in the bootstrap
        sequence.

        :param overrides: Variables specified via command-line options.
        :raises errors.EnvironmentError: when confdir configuration is
            contradictory or when ``tls_ca_cert`` is not an absolute path.
        """
        self.__doing('_bootstrap')
        # Set run-time variables (cannot be overridden):
        self.ipalib = path.dirname(path.abspath(__file__))
        self.site_packages = path.dirname(self.ipalib)
        self.script = path.abspath(sys.argv[0])
        self.bin = path.dirname(self.script)
        # expanduser('~') returns '~' unchanged when no home is resolvable
        home = os.path.expanduser('~')
        self.home = home if not home.startswith('~') else None
        self.fips_mode = tasks.is_fips_enabled()
        # Merge in overrides:
        self._merge(**overrides)
        # Determine if running in source tree. The root directory of
        # IPA source directory contains ipasetup.py.in.
        if 'in_tree' not in self:
            self.in_tree = os.path.isfile(
                os.path.join(self.site_packages, "ipasetup.py.in")
            )
        if self.in_tree and 'mode' not in self:
            self.mode = 'developer'
        # Set dot_ipa (per-user config dir; None when home is unknown):
        if 'dot_ipa' not in self:
            self.dot_ipa = self._join('home', '.ipa')
        # Set context
        if 'context' not in self:
            self.context = 'default'
        # Set confdir: an explicit confdir and the IPA_CONFDIR env var are
        # mutually exclusive.
        self.env_confdir = os.environ.get('IPA_CONFDIR')
        if 'confdir' in self and self.env_confdir is not None:
            raise errors.EnvironmentError(
                "IPA_CONFDIR env cannot be set because explicit confdir "
                "is used")
        if 'confdir' not in self:
            if self.env_confdir is not None:
                if (not path.isabs(self.env_confdir)
                        or not path.isdir(self.env_confdir)):
                    raise errors.EnvironmentError(
                        "IPA_CONFDIR env var must be an absolute path to an "
                        "existing directory, got '{}'.".format(
                            self.env_confdir))
                self.confdir = self.env_confdir
            elif self.in_tree:
                self.confdir = self.dot_ipa
            else:
                self.confdir = path.join('/', 'etc', 'ipa')
        # Set conf (config file for this context):
        if 'conf' not in self:
            self.conf = self._join('confdir', '%s.conf' % self.context)
        # Set conf_default (default base config used in all contexts):
        if 'conf_default' not in self:
            self.conf_default = self._join('confdir', 'default.conf')
        if 'nss_dir' not in self:
            self.nss_dir = self._join('confdir', 'nssdb')
        # user cache dir for IPA, defaults to '$XDG_CACHE_HOME/ipa' or
        # '~/.cache/ipa'. XDG_CACHE_HOME env var is cached at import time.
        if 'cache_dir' not in self:
            self.cache_dir = os.path.join(USER_CACHE_PATH, 'ipa')
        if 'tls_ca_cert' not in self:
            self.tls_ca_cert = self._join('confdir', 'ca.crt')
        # having tls_ca_cert an absolute path could help us extending this
        # in the future for different certificate providers simply by adding
        # a prefix to the path
        if not path.isabs(self.tls_ca_cert):
            raise errors.EnvironmentError(
                "tls_ca_cert has to be an absolute path to a CA certificate, "
                "got '{}'".format(self.tls_ca_cert))
        # Set plugins_on_demand (lazy plugin loading; only for the CLI):
        if 'plugins_on_demand' not in self:
            self.plugins_on_demand = (self.context == 'cli')
    def _finalize_core(self, **defaults):
        """
        Complete initialization of standard IPA environment.

        This method will perform the following steps:

            1. Call `Env._bootstrap()` if it hasn't already been called.

            2. Merge-in variables from the configuration file ``self.conf``
               (if it exists) by calling `Env._merge_from_file()`.

            3. Merge-in variables from the defaults configuration file
               ``self.conf_default`` (if it exists) by calling
               `Env._merge_from_file()`.

            4. Intelligently fill-in the *in_server*, *logdir*, *log*, and
               *jsonrpc_uri* variables if they haven't already been set.

            5. Merge-in the variables in ``defaults`` by calling
               `Env._merge()`.  In normal circumstances ``defaults`` will
               simply be those specified in `constants.DEFAULT_CONFIG`.

        After this method is called, all the environment variables used by
        all the built-in plugins will be available.  As such, this method
        should be called *before* any plugins are loaded.

        After this method has finished, the `Env` instance is still writable
        so that 3rd-party plugins can set variables they may require as the
        plugins are registered.

        Also see `Env._finalize()`, the final method in the bootstrap
        sequence.

        :param defaults: Internal defaults for all built-in variables.
        :raises errors.EnvironmentError: on inconsistent TLS version bounds.
        """
        self.__doing('_finalize_core')
        self.__do_if_not_done('_bootstrap')
        # Merge in context config file and then default config file:
        mode = self.__d.get('mode')  # pylint: disable=no-member
        # documented public modes: production, developer
        # internal modes: dummy, unit_test
        if mode != 'dummy':
            self._merge_from_file(self.conf)
            self._merge_from_file(self.conf_default)
        # Determine if in_server:
        if 'in_server' not in self:
            self.in_server = (self.context == 'server')
        # Set logdir:
        if 'logdir' not in self:
            if self.in_tree or not self.in_server:
                self.logdir = self._join('dot_ipa', 'log')
            else:
                self.logdir = path.join('/', 'var', 'log', 'ipa')
        # Set log file:
        if 'log' not in self:
            self.log = self._join('logdir', '%s.log' % self.context)
        # Workaround for ipa-server-install --uninstall. When no config file
        # is available, we set realm, domain, and basedn to RFC 2606 reserved
        # suffix to suppress attribute errors during uninstallation.
        if (self.in_server and self.context == 'installer' and
                not getattr(self, 'config_loaded', False)):
            if 'realm' not in self:
                self.realm = 'UNCONFIGURED.INVALID'
            if 'domain' not in self:
                self.domain = self.realm.lower()
        if 'basedn' not in self and 'domain' in self:
            self.basedn = DN(*(('dc', dc) for dc in self.domain.split('.')))
        # Derive xmlrpc_uri from server
        # (Note that this is done before deriving jsonrpc_uri from xmlrpc_uri
        # and server from jsonrpc_uri so that when only server or xmlrpc_uri
        # is specified, all 3 keys have a value.)
        if 'xmlrpc_uri' not in self and 'server' in self:
            self.xmlrpc_uri = 'https://{}/ipa/xml'.format(self.server)
        # Derive ldap_uri from server
        if 'ldap_uri' not in self and 'server' in self:
            self.ldap_uri = 'ldap://{}'.format(self.server)
        # Derive jsonrpc_uri from xmlrpc_uri
        if 'jsonrpc_uri' not in self:
            if 'xmlrpc_uri' in self:
                xmlrpc_uri = self.xmlrpc_uri
            else:
                xmlrpc_uri = defaults.get('xmlrpc_uri')
            if xmlrpc_uri:
                (scheme, netloc, uripath, params, query, fragment
                 ) = urlparse(xmlrpc_uri)
                # only the first '/xml' path segment is rewritten
                uripath = uripath.replace('/xml', '/json', 1)
                self.jsonrpc_uri = urlunparse((
                    scheme, netloc, uripath, params, query, fragment))
        # Derive server from jsonrpc_uri (or its default) as a last resort:
        if 'server' not in self:
            if 'jsonrpc_uri' in self:
                jsonrpc_uri = self.jsonrpc_uri
            else:
                jsonrpc_uri = defaults.get('jsonrpc_uri')
            if jsonrpc_uri:
                parsed = urlparse(jsonrpc_uri)
                self.server = parsed.netloc
        self._merge(**defaults)
        # set the best known TLS version if min/max versions are not set
        if 'tls_version_min' not in self:
            self.tls_version_min = TLS_VERSION_DEFAULT_MIN
        if (
            self.tls_version_min is not None and
            self.tls_version_min not in TLS_VERSIONS
        ):
            raise errors.EnvironmentError(
                "Unknown TLS version '{ver}' set in tls_version_min."
                .format(ver=self.tls_version_min))
        if 'tls_version_max' not in self:
            self.tls_version_max = TLS_VERSION_DEFAULT_MAX
        if (
            self.tls_version_max is not None and
            self.tls_version_max not in TLS_VERSIONS
        ):
            raise errors.EnvironmentError(
                "Unknown TLS version '{ver}' set in tls_version_max."
                .format(ver=self.tls_version_max))
        if (
            self.tls_version_min is not None and
            self.tls_version_max is not None and
            self.tls_version_max < self.tls_version_min
        ):
            raise errors.EnvironmentError(
                "tls_version_min is set to a higher TLS version than "
                "tls_version_max.")
    def _finalize(self, **lastchance):
        """
        Finalize and lock environment.

        This method will perform the following steps:

            1. Call `Env._finalize_core()` if it hasn't already been called.

            2. Merge-in the variables in ``lastchance`` by calling
               `Env._merge()`.

            3. Lock this `Env` instance, after which no more environment
               variables can be set on this instance.  Aside from unit-tests
               and example code, normally only one `Env` instance is created,
               which means that after this step, no more variables can be set
               during the remaining life of the process.

        This method should be called after all plugins have been loaded and
        after `plugable.API.finalize()` has been called.

        :param lastchance: Any final variables to merge-in before locking.
        """
        self.__doing('_finalize')
        self.__do_if_not_done('_finalize_core')
        self._merge(**lastchance)
        self.__lock__()
| 25,064
|
Python
|
.py
| 568
| 34.651408
| 82
| 0.598523
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,654
|
ipajson.py
|
freeipa_freeipa/ipalib/ipajson.py
|
#
# Copyright (C) 2024 FreeIPA Contributors see COPYING for license
#
import base64
from cryptography import x509 as crypto_x509
import datetime
from decimal import Decimal
import json
import six
from ipalib.constants import LDAP_GENERALIZED_TIME_FORMAT
from ipalib import capabilities
from ipalib.x509 import Encoding as x509_Encoding
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
from ipapython.kerberos import Principal
# Python 3 has no ``unicode`` builtin; alias it to ``str`` so the py2-era
# references in _JSONPrimer's dispatch table keep working.
if six.PY3:
    unicode = str
class _JSONPrimer(dict):
    """Fast JSON primer and pre-converter

    Prepare a data structure for JSON serialization. In an ideal world,
    priming could be handled by the default hook of json.dumps().
    Unfortunately the hook treats Python 2 str as text while IPA considers
    str as bytes.

    The primer uses a couple of tricks to archive maximum performance:

    * O(1) type look instead of O(n) chain of costly isinstance() calls
    * __missing__ and __mro__ with caching to handle subclasses
    * inline code with minor code duplication (func lookup in enc_list/dict)
    * avoid surplus function calls (e.g. func is _identity, obj.__class__
      instead if type(obj))
    * function default arguments to turn global into local lookups
    * avoid re-creation of bound method objects (e.g. result.append)
    * on-demand lookup of client capabilities with cached values

    Depending on the client version number, the primer converts:

    * bytes -> {'__base64__': b64encode}
    * datetime -> {'__datetime__': LDAP_GENERALIZED_TIME}
    * DNSName -> {'__dns_name__': unicode}

    The _ipa_obj_hook() functions unserializes the marked JSON objects to
    bytes, datetime and DNSName.

    :see: _ipa_obj_hook
    """
    __slots__ = ('version', '_cap_datetime', '_cap_dnsname')

    # sentinel meaning "pass the value through unconverted"
    _identity = object()

    def __init__(self, version, _identity=_identity):
        super(_JSONPrimer, self).__init__()
        self.version = version
        # capability flags are looked up lazily and cached (None = unknown)
        self._cap_datetime = None
        self._cap_dnsname = None
        # the dict itself is the type -> converter dispatch table
        self.update({
            unicode: _identity,
            bool: _identity,
            int: _identity,
            type(None): _identity,
            float: _identity,
            Decimal: unicode,
            DN: str,
            Principal: unicode,
            DNSName: self._enc_dnsname,
            datetime.datetime: self._enc_datetime,
            bytes: self._enc_bytes,
            list: self._enc_list,
            tuple: self._enc_list,
            dict: self._enc_dict,
            crypto_x509.Certificate: self._enc_certificate,
            crypto_x509.CertificateSigningRequest: self._enc_certificate,
        })

    def __missing__(self, typ):
        # walk MRO to find best match
        for c in typ.__mro__:
            if c in self:
                self[typ] = self[c]  # cache for next lookup
                return self[c]
        # use issubclass to check for registered ABCs
        for c in self:
            if issubclass(typ, c):
                self[typ] = self[c]
                return self[c]
        raise TypeError(typ)

    def convert(self, obj, _identity=_identity):
        # obj.__class__ is twice as fast as type(obj)
        func = self[obj.__class__]
        return obj if func is _identity else func(obj)

    def _enc_datetime(self, val):
        cap = self._cap_datetime
        if cap is None:
            cap = capabilities.client_has_capability(self.version,
                                                     'datetime_values')
            self._cap_datetime = cap
        if cap:
            return {'__datetime__': val.strftime(LDAP_GENERALIZED_TIME_FORMAT)}
        else:
            # old clients get a plain generalized-time string
            return val.strftime(LDAP_GENERALIZED_TIME_FORMAT)

    def _enc_dnsname(self, val):
        cap = self._cap_dnsname
        if cap is None:
            cap = capabilities.client_has_capability(self.version,
                                                     'dns_name_values')
            self._cap_dnsname = cap
        if cap:
            return {'__dns_name__': unicode(val)}
        else:
            return unicode(val)

    def _enc_bytes(self, val):
        encoded = base64.b64encode(val)
        if not six.PY2:
            encoded = encoded.decode('ascii')
        return {'__base64__': encoded}

    def _enc_list(self, val, _identity=_identity):
        result = []
        append = result.append  # bind once; avoids per-item attr lookup
        for v in val:
            func = self[v.__class__]
            append(v if func is _identity else func(v))
        return result

    def _enc_dict(self, val, _identity=_identity, _iteritems=six.iteritems):
        result = {}
        for k, v in _iteritems(val):
            func = self[v.__class__]
            result[k] = v if func is _identity else func(v)
        return result

    def _enc_certificate(self, val):
        # certificates and CSRs are serialized as base64-wrapped DER
        return self._enc_bytes(val.public_bytes(x509_Encoding.DER))
def json_encode_binary(val, version, pretty_print=False):
    """Serialize a Python object structure to JSON

    :param object val: Python object structure
    :param str version: client version
    :param bool pretty_print: indent and sort JSON (warning: slow!)
    :return: text
    :note: pretty printing triggers a slow path in Python's JSON module.
        Only use pretty_print in debug mode.
    """
    primed = _JSONPrimer(version).convert(val)
    if pretty_print:
        return json.dumps(primed, indent=4, sort_keys=True)
    return json.dumps(primed)
def _ipa_obj_hook(dct, _iteritems=six.iteritems, _list=list):
    """JSON object hook: undo the markers written by _JSONPrimer.

    :see: _JSONPrimer
    """
    if '__base64__' in dct:
        return base64.b64decode(dct['__base64__'])
    if '__datetime__' in dct:
        return datetime.datetime.strptime(dct['__datetime__'],
                                          LDAP_GENERALIZED_TIME_FORMAT)
    if '__dns_name__' in dct:
        return DNSName(dct['__dns_name__'])
    # XXX tests assume tuples. Is this really necessary?
    for key, value in _iteritems(dct):
        if value.__class__ is _list:
            dct[key] = tuple(value)
    return dct
def json_decode_binary(val):
    """Convert serialized JSON string back to Python data structure

    :param val: JSON string
    :type val: str, bytes
    :return: Python data structure
    :see: _ipa_obj_hook, _JSONPrimer
    """
    text = val.decode('utf-8') if isinstance(val, bytes) else val
    return json.loads(text, object_hook=_ipa_obj_hook)
| 6,402
|
Python
|
.py
| 161
| 31.161491
| 79
| 0.614493
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,655
|
parameters.py
|
freeipa_freeipa/ipalib/parameters.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Parameter system for command plugins.
A `Param` instance can be used to describe an argument or option that a command
takes, or an attribute that a command returns. The `Param` base class is not
used directly, but there are many subclasses for specific Python data types
(like `Str` or `Int`) and specific properties (like `Password`).
To create a `Param` instance, you must always provide the parameter *name*,
which should be the LDAP attribute name if the parameter describes the attribute
of an LDAP entry. For example, we could create an `Str` instance describing the user's last-name attribute like this:
>>> from ipalib import Str
>>> sn = Str('sn')
>>> sn.name
'sn'
When creating a `Param`, there are also a number of optional kwargs
which can provide additional meta-data and functionality. For example, every
parameter has a *cli_name*, the name used on the command-line-interface. By
default the *cli_name* is the same as the *name*:
>>> sn.cli_name
'sn'
But often the LDAP attribute name isn't user friendly for the command-line, so
you can override this with the *cli_name* kwarg:
>>> sn = Str('sn', cli_name='last')
>>> sn.name
'sn'
>>> sn.cli_name
'last'
Note that the RPC interfaces (and the internal processing pipeline) always use
the parameter *name*, regardless of what the *cli_name* might be.
A `Param` also has two translatable kwargs: *label* and *doc*. These must both
be `Gettext` instances. They both default to a place-holder `FixMe` instance,
a subclass of `Gettext` used to mark a missing translatable string:
>>> sn.label
FixMe('sn')
>>> sn.doc
FixMe('sn')
The *label* is a short phrase describing the parameter. It's used on the CLI
when interactively prompting for values, and as a label for form inputs in the
web-UI. The *label* should start with an initial capital. For example:
>>> from ipalib import _
>>> sn = Str('sn',
... cli_name='last',
... label=_('Last name'),
... )
>>> sn.label
Gettext('Last name', domain='ipa', localedir=None)
The *doc* is a longer description of the parameter. It's used on the CLI when
displaying the help information for a command, and as extra instruction for a
form input on the web-UI. By default the *doc* is the same as the *label*:
>>> sn.doc
Gettext('Last name', domain='ipa', localedir=None)
But you can override this with the *doc* kwarg. Like the *label*, the *doc*
should also start with an initial capital and should not end with any
punctuation. For example:
>>> sn = Str('sn',
... cli_name='last',
... label=_('Last name'),
... doc=_("The user's last name"),
... )
>>> sn.doc
Gettext("The user's last name", domain='ipa', localedir=None)
Demonstration aside, you should always provide at least the *label* so the
various UIs are translatable. Only provide the *doc* if the parameter needs
a more detailed description for clarity.
"""
import re
import decimal
import base64
import datetime
import inspect
import typing
from xmlrpc.client import MAXINT, MININT
import six
from cryptography import x509 as crypto_x509
import dns.name
from ipalib.text import _ as ugettext
from ipalib.base import check_name
from ipalib.plugable import ReadOnly, lock
from ipalib.errors import ConversionError, RequirementError, ValidationError
from ipalib.errors import (
PasswordMismatch, Base64DecodeError, CertificateFormatError,
CertificateOperationError
)
from ipalib.constants import TYPE_ERROR, CALLABLE_ERROR, LDAP_GENERALIZED_TIME_FORMAT
from ipalib.text import Gettext, FixMe
from ipalib.util import json_serialize, validate_idna_domain
from ipalib.x509 import (
load_der_x509_certificate, IPACertificate, default_backend)
from ipalib.util import strip_csr_header, apirepr
from ipapython import kerberos
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
# Largest value representable in an unsigned 32-bit integer.
MAX_UINT32 = (1 << 32) - 1
# JavaScript Number.MAX_SAFE_INTEGER / Number.MIN_SAFE_INTEGER
# JSON cannot safely encode values outside this range as regular number
MAX_SAFE_INTEGER = (2**53) - 1
MIN_SAFE_INTEGER = -MAX_SAFE_INTEGER
def _is_null(value):
if value:
return False
elif isinstance(value, (int, float, decimal.Decimal)):
# 0 is not NULL
return False
else:
return True
if six.PY3:
    # Python 3 has no ``unicode`` builtin; alias it to ``str`` so the rest
    # of this module can use a single name for text on both versions.
    unicode = str
class DefaultFrom(ReadOnly):
    """
    Derive a default value from other supplied values.

    For example, say you wanted to create a default for the user's login from
    the user's first and last names. It could be implemented like this:

    >>> login = DefaultFrom(lambda first, last: first[0] + last)
    >>> login(first='John', last='Doe')
    'JDoe'

    If you do not explicitly provide keys when you create a `DefaultFrom`
    instance, the keys are implicitly derived from your callback by
    inspecting ``callback.__code__.co_varnames``. The keys are available
    through the ``DefaultFrom.keys`` instance attribute, like this:

    >>> login.keys
    ('first', 'last')

    The callback is available through the ``DefaultFrom.callback`` instance
    attribute, like this:

    >>> login.callback  # doctest:+ELLIPSIS
    <function <lambda> at 0x...>
    >>> login.callback.__code__.co_varnames  # The keys
    ('first', 'last')

    The keys can be explicitly provided as optional positional arguments after
    the callback. For example, this is equivalent to the ``login`` instance
    above:

    >>> login2 = DefaultFrom(lambda a, b: a[0] + b, 'first', 'last')
    >>> login2.keys
    ('first', 'last')
    >>> login2.callback.__code__.co_varnames  # Not the keys
    ('a', 'b')
    >>> login2(first='John', last='Doe')
    'JDoe'

    If any keys are missing when calling your `DefaultFrom` instance, your
    callback is not called and ``None`` is returned. For example:

    >>> login(first='John', lastname='Doe') is None
    True
    >>> login() is None
    True

    Any additional keys are simply ignored, like this:

    >>> login(last='Doe', first='John', middle='Whatever')
    'JDoe'

    As above, because `DefaultFrom.__call__` takes only pure keyword
    arguments, they can be supplied in any order.

    Of course, the callback need not be a ``lambda`` expression. This third
    example is equivalent to both the ``login`` and ``login2`` instances
    above:

    >>> def get_login(first, last):
    ...     return first[0] + last
    ...
    >>> login3 = DefaultFrom(get_login)
    >>> login3.keys
    ('first', 'last')
    >>> login3.callback.__code__.co_varnames
    ('first', 'last')
    >>> login3(first='John', last='Doe')
    'JDoe'
    """

    def __init__(self, callback, *keys):
        """
        :param callback: The callable to call when all keys are present.
        :param keys: Optional keys used for source values.
        """
        if not callable(callback):
            raise TypeError(
                CALLABLE_ERROR % ('callback', callback, type(callback))
            )
        self.callback = callback
        if len(keys) == 0:
            fc = callback.__code__
            # 0x04 is CO_VARARGS and 0x08 is CO_VARKEYWORDS: keys cannot
            # be derived from a callback taking *args or **kwargs.
            if fc.co_flags & 0x0c:
                raise ValueError("callback: variable-length argument list not allowed")
            self.keys = fc.co_varnames[:fc.co_argcount]
        else:
            self.keys = keys
        for key in self.keys:
            if type(key) is not str:
                raise TypeError(
                    TYPE_ERROR % ('keys', str, key, type(key))
                )
        # ReadOnly: no attribute assignment is possible after this point.
        lock(self)

    def __repr__(self):
        # Represent only the keys; the callback has no stable repr.
        args = tuple(repr(k) for k in self.keys)
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(args)
        )

    def __call__(self, **kw):
        """
        Call the callback if all keys are present.

        If all keys are present, the callback is called and its return value is
        returned. If any keys are missing, ``None`` is returned.

        :param kw: The keyword arguments.
        """
        vals = tuple(kw.get(k, None) for k in self.keys)
        if None in vals:
            return None
        try:
            return self.callback(*vals)
        except Exception:
            # A failing callback means "no default", never an error.
            pass
        return None

    def __json__(self):
        # JSON metadata for the API schema: just the source key names.
        return self.keys
def parse_param_spec(spec):
    """
    Parse a shorthand parameter ``spec`` string into ``(name, kw)``.

    The trailing character of ``spec`` encodes whether the parameter is
    required and whether it is multivalue:

    ====== ===== ======== ==========
    Spec   Name  Required Multivalue
    ====== ===== ======== ==========
    'var'  'var' True     False
    'var?' 'var' False    False
    'var*' 'var' False    True
    'var+' 'var' True     True
    ====== ===== ======== ==========

    For example:

    >>> parse_param_spec('login')
    ('login', {'required': True, 'multivalue': False})
    >>> parse_param_spec('gecos?')
    ('gecos', {'required': False, 'multivalue': False})
    >>> parse_param_spec('telephone_numbers*')
    ('telephone_numbers', {'required': False, 'multivalue': True})
    >>> parse_param_spec('group+')
    ('group', {'required': True, 'multivalue': True})

    :param spec: A spec string.
    :raises TypeError: if ``spec`` is not a ``str``.
    """
    if type(spec) is not str:
        raise TypeError(
            TYPE_ERROR % ('spec', str, spec, type(spec))
        )
    suffixes = {
        '?': dict(required=False, multivalue=False),
        '*': dict(required=False, multivalue=True),
        '+': dict(required=True, multivalue=True),
    }
    kw = suffixes.get(spec[-1])
    if kw is not None:
        # Strip the suffix character off the name.
        return (spec[:-1], kw)
    # No recognized suffix: a plain required, single-valued parameter.
    return (spec, dict(required=True, multivalue=False))
# Collects every message literal passed to the module-level ``_`` below so
# i18n tooling can extract them; translation itself happens later via
# ipalib.text (ugettext) when the message is rendered.
__messages = set()
def _(message):
    # Record the literal and return it unchanged (no translation here).
    __messages.add(message)
    return message
class Param(ReadOnly):
    """
    Base class for all parameters.

    Param attributes:
    =================
    The behavior of Param class and subclasses can be controlled using the
    following set of attributes:

      - cli_name: option name in CLI
      - cli_short_name: one character version of cli_name
      - deprecated_cli_aliases: deprecated CLI aliases
      - label: very short description of the parameter. This value is used in
        when the Command output is printed to CLI or in a Command help
      - doc: parameter long description used in help
      - required: the parameter is marked as required for given Command
      - multivalue: indicates if the attribute is multivalued
      - primary_key: Command's parameter primary key is used for unique
        identification of an LDAP object and for sorting
      - normalizer: a custom function for Param value normalization
      - default_from: a custom function for generating default values of
        parameter instance
      - autofill: by default, only `required` parameters get a default value
        from the default_from function. When autofill is enabled, optional
        attributes get the default value filled too
      - query: this attribute is controlled by framework. When the `query`
        is enabled, framework assumes that the value is only queried and not
        inserted in the LDAP. Validation is then relaxed - custom
        parameter validators are skipped and only basic class validators are
        executed to check the parameter value
      - attribute: this attribute is controlled by framework and enabled for
        all LDAP objects parameters (unless parameter has "virtual_attribute"
        flag). All parameters with enabled `attribute` are being encoded and
        placed to an entry passed to LDAP Create/Update calls
      - include: a list of contexts where this parameter should be included.
        `Param.use_in_context()` provides further information.
      - exclude: a list of contexts where this parameter should be excluded.
        `Param.use_in_context()` provides further information.
      - flags: there are several flags that can be used to further tune the
        parameter behavior:
        * no_display (Output parameters only): do not display the parameter
        * no_create: do not include the parameter for crud.Create based
          commands
        * no_update: do not include the parameter for crud.Update based
          commands
        * no_search: do not include the parameter for crud.Search based
          commands
        * no_option: this attribute is not displayed in the CLI, usually
          because there's a better way of setting it (for example, a
          separate command)
        * virtual_attribute: the parameter is not stored physically in the
          LDAP and thus attribute `attribute` is not enabled
        * suppress_empty (Output parameters only): do not display parameter
          value when empty
        * ask_create: CLI asks for parameter value even when the parameter
          is not `required`. Applied for all crud.Create based commands
        * ask_update: CLI asks for parameter value even when the parameter
          is not `required`. Applied for all crud.Update based commands
        * req_update: The parameter is `required` in all crud.Update based
          commands
        * nonempty: This is an internal flag; a required attribute should
          be used instead of it.
          The value of this parameter must not be empty, but it may
          not be given at all. All crud.Update commands automatically
          convert required parameters to `nonempty` ones, so the value
          can be unspecified (unchanged) but cannot be deleted.
        * optional_create: do not require the parameter for crud.Create
          based commands
        * allow_mod_for_managed_permission: permission-mod allows changing
          the parameter for managed permissions
      - hint: this attribute is currently not used
      - alwaysask: when enabled, CLI asks for parameter value even when the
        parameter is not `required`
      - sortorder: used to sort a list of parameters for Command. See
        `Command.finalize()` for further information
      - confirm: if password, ask for confirmation
    """

    # This is a dummy type so that most of the functionality of Param can be
    # unit tested directly without always creating a subclass; however, a real
    # (direct) subclass must *always* override this class attribute.
    # If multiple types are permitted, set `type` to the canonical type and
    # `allowed_types` to a tuple of all allowed types.
    type = type(None)  # Ouch, this won't be very useful in the real world!

    # Subclasses should override this with something more specific:
    type_error = _('incorrect type')

    # _convert_scalar operates only on scalar values
    scalar_error = _('Only one value is allowed')

    # When True, safe_value() masks the value in logs.
    password = False

    # (kwarg name, expected kind, default) triples; subclasses extend this.
    kwargs = (
        ('cli_name', str, None),
        ('cli_short_name', str, None),
        ('deprecated_cli_aliases', frozenset, frozenset()),
        ('label', (str, Gettext), None),
        ('doc', (str, Gettext), None),
        ('required', bool, True),
        ('multivalue', bool, False),
        ('primary_key', bool, False),
        ('normalizer', callable, None),
        ('default_from', DefaultFrom, None),
        ('autofill', bool, False),
        ('query', bool, False),
        ('attribute', bool, False),
        ('include', frozenset, None),
        ('exclude', frozenset, None),
        ('flags', frozenset, frozenset()),
        ('hint', (str, Gettext), None),
        ('alwaysask', bool, False),
        ('sortorder', int, 2),  # see finalize()
        ('option_group', unicode, None),
        ('cli_metavar', str, None),
        ('no_convert', bool, False),
        ('deprecated', bool, False),
        ('confirm', bool, True),
        # The 'default' kwarg gets appended in Param.__init__():
        # ('default', self.type, None),
    )

    @property
    def allowed_types(self):
        """The allowed datatypes for this Param"""
        return (self.type,)

    def __init__(self, name, *rules, **kw):
        """
        :param name: parameter name, optionally carrying a required/multivalue
            suffix (see `parse_param_spec`).
        :param rules: optional custom validation rule callables.
        :param kw: keyword options; must be a subset of ``self.kwargs``.
        """
        # Merge in kw from parse_param_spec():
        (name, kw_from_spec) = parse_param_spec(name)
        check_name(name)
        if 'required' not in kw:
            kw['required'] = kw_from_spec['required']
        if 'multivalue' not in kw:
            kw['multivalue'] = kw_from_spec['multivalue']
        # Add 'default' to self.kwargs.  Note that this rebinds ``kwargs``
        # as an *instance* attribute, shadowing the class attribute.
        if kw.get('multivalue', True):
            self.kwargs += (('default', tuple, None),)
        else:
            self.kwargs += (('default', self.type, None),)
        # Wrap 'default_from' in a DefaultFrom if not already:
        df = kw.get('default_from')
        if callable(df) and not isinstance(df, DefaultFrom):
            kw['default_from'] = DefaultFrom(df)
        # Perform type validation on kw:
        for (key, kind, default) in self.kwargs:
            value = kw.get(key)
            if value is not None:
                # Coerce compatible containers (and bare strings) into the
                # declared container kind before the isinstance check below.
                if kind in (tuple, frozenset):
                    if type(value) in (list, tuple, set, frozenset):
                        value = kind(value)
                    elif type(value) is str:
                        value = kind([value])
                if kind is callable and not callable(value):
                    raise TypeError(
                        CALLABLE_ERROR % (key, value, type(value))
                    )
                elif (isinstance(kind, (type, tuple)) and
                        not isinstance(value, kind)):
                    raise TypeError(
                        TYPE_ERROR % (key, kind, value, type(value))
                    )
                kw[key] = value
            elif key not in ('required', 'multivalue'):
                # Drop explicit None so defaults apply later.
                kw.pop(key, None)
        # We keep these values to use in __repr__():
        if kw['required']:
            if kw['multivalue']:
                self.param_spec = name + '+'
            else:
                self.param_spec = name
        else:
            if kw['multivalue']:
                self.param_spec = name + '*'
            else:
                self.param_spec = name + '?'
        self.__kw = dict(kw)
        del self.__kw['required']
        del self.__kw['multivalue']
        self.name = name
        self.nice = '%s(%r)' % (self.__class__.__name__, self.param_spec)
        # Make sure no unknown kw were given:
        assert all(isinstance(t, type) for t in self.allowed_types)
        if not set(t[0] for t in self.kwargs).issuperset(self.__kw):
            extra = set(kw) - set(t[0] for t in self.kwargs)
            raise TypeError(
                '%s: takes no such kwargs: %s' % (self.nice,
                    ', '.join(repr(k) for k in sorted(extra))
                )
            )
        # We keep this copy with merged values also to use when cloning:
        self.__clonekw = dict(kw)
        # Merge in default for 'cli_name', label, doc if not given:
        if kw.get('cli_name') is None:
            kw['cli_name'] = self.name
        if kw.get('cli_metavar') is None:
            kw['cli_metavar'] = self.__class__.__name__.upper()
        if kw.get('label') is None:
            # FixMe marks a missing translatable string.
            kw['label'] = FixMe(self.name)
        if kw.get('doc') is None:
            kw['doc'] = kw['label']
        # Add in class rules:
        class_rules = []
        for (key, kind, default) in self.kwargs:
            value = kw.get(key, default)
            if hasattr(self, key):
                raise ValueError('kwarg %r conflicts with attribute on %s' % (
                    key, self.__class__.__name__)
                )
            setattr(self, key, value)
            # A kwarg with a matching _rule_<key> method contributes a
            # class-level validation rule when set.
            rule_name = '_rule_%s' % key
            if value is not None and hasattr(self, rule_name):
                class_rules.append(getattr(self, rule_name))
        check_name(self.cli_name)
        # Check that only 'include' or 'exclude' was provided:
        if None not in (self.include, self.exclude):
            raise ValueError(
                '%s: cannot have both %s=%r and %s=%r' % (
                    self.nice,
                    'include', self.include,
                    'exclude', self.exclude,
                )
            )
        # Check that all the rules are callable
        self.class_rules = tuple(class_rules)
        self.rules = rules
        if self.query:  # pylint: disable=using-constant-test
            # by definition a query enforces no class or parameter rules
            self.all_rules = ()
        else:
            self.all_rules = self.class_rules + self.rules
        for rule in self.all_rules:
            if not callable(rule):
                raise TypeError(
                    '%s: rules must be callable; got %r' % (self.nice, rule)
                )
        # Check that cli_short_name is only 1 character long:
        if not (self.cli_short_name is None or len(self.cli_short_name) == 1):
            raise ValueError(
                '%s: cli_short_name can only be a single character: %s' % (
                    self.nice, self.cli_short_name)
            )
        # And we're done; ReadOnly forbids further attribute assignment.
        lock(self)

    def __repr__(self):
        """
        Return an expression that could construct this `Param` instance.
        """
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(self.__repr_iter())
        )

    def __repr_iter(self):
        # Yield the constructor-argument reprs used by __repr__(): the spec
        # string, each rule name, then the kwargs in sorted order.
        yield repr(self.param_spec)
        for rule in self.rules:
            yield rule.__name__
        for key in sorted(self.__kw):
            value = self.__kw[key]
            if callable(value) and hasattr(value, '__name__'):
                value = value.__name__
            elif isinstance(value, int):
                value = str(value)
            elif isinstance(value, (tuple, set, frozenset)):
                value = apirepr(list(value))
            elif key == 'cli_name':
                # always represented as native string
                value = repr(value)
            else:
                value = apirepr(value)
            yield '%s=%s' % (key, value)

    def __call__(self, value, **kw):
        """
        One stop shopping: normalize and convert ``value``, or fall back to
        `get_default()` when ``value`` is null.
        """
        if _is_null(value):
            value = self.get_default(**kw)
        else:
            value = self.convert(self.normalize(value))
        return value

    def get_param_name(self):
        """
        Return the right name of an attribute depending on usage.

        Normally errors should use cli_name, our "friendly" name. When
        using the API directly or *attr return the real name.
        """
        name = self.cli_name
        if not name:
            name = self.name
        return name

    def kw(self):
        """
        Iterate through ``(key,value)`` for all kwargs passed to constructor.
        """
        for key in sorted(self.__kw):
            value = self.__kw[key]
            if callable(value) and hasattr(value, '__name__'):
                value = value.__name__
            yield (key, value)

    def use_in_context(self, env):
        """
        Return ``True`` if this parameter should be used in ``env.context``.

        If a parameter is created with neither the ``include`` nor the
        ``exclude`` kwarg, this method will always return ``True``. For
        example:

        >>> from ipalib.config import Env
        >>> param = Param('my_param')
        >>> param.use_in_context(Env(context='foo'))
        True
        >>> param.use_in_context(Env(context='bar'))
        True

        If a parameter is created with an ``include`` kwarg, this method will
        only return ``True`` if ``env.context`` is in ``include``. For example:

        >>> param = Param('my_param', include=['foo', 'whatever'])
        >>> param.include
        frozenset(['foo', 'whatever'])
        >>> param.use_in_context(Env(context='foo'))
        True
        >>> param.use_in_context(Env(context='bar'))
        False

        If a parameter is created with an ``exclude`` kwarg, this method will
        only return ``True`` if ``env.context`` is not in ``exclude``. For
        example:

        >>> param = Param('my_param', exclude=['foo', 'whatever'])
        >>> param.exclude
        frozenset(['foo', 'whatever'])
        >>> param.use_in_context(Env(context='foo'))
        False
        >>> param.use_in_context(Env(context='bar'))
        True

        Note that the ``include`` and ``exclude`` kwargs are mutually exclusive
        and that at most one can be supplied to `Param.__init__()`. For
        example:

        >>> param = Param('nope', include=['foo'], exclude=['bar'])
        Traceback (most recent call last):
          ...
        ValueError: Param('nope'): cannot have both include=frozenset(['foo']) and exclude=frozenset(['bar'])

        So that subclasses can add additional logic based on other environment
        variables, the entire `config.Env` instance is passed in rather than
        just the value of ``env.context``.
        """
        if self.include is not None:
            return (env.context in self.include)
        if self.exclude is not None:
            return (env.context not in self.exclude)
        return True

    def safe_value(self, value):
        """
        Return a value safe for logging.

        This is used so that sensitive values like passwords don't get logged.
        For example:

        >>> p = Password('my_password')
        >>> p.safe_value(u'This is my password')
        u'********'
        >>> p.safe_value(None) is None
        True

        >>> s = Str('my_str')
        >>> s.safe_value(u'Some arbitrary value')
        u'Some arbitrary value'
        """
        if self.password and value is not None:
            return u'********'
        return value

    def clone(self, **overrides):
        """
        Return a new `Param` instance similar to this one.
        """
        return self.clone_rename(self.name, **overrides)

    def clone_rename(self, name, **overrides):
        """
        Return a new `Param` instance similar to this one, but named differently
        """
        return self.clone_retype(name, self.__class__, **overrides)

    def clone_retype(self, name, klass, **overrides):
        """
        Return a new `Param` instance similar to this one, but of a different type
        """
        # __clonekw holds the merged constructor kwargs saved in __init__().
        kw = dict(self.__clonekw)
        kw.update(overrides)
        return klass(name, *self.rules, **kw)

    def normalize(self, value):
        """
        Normalize ``value`` using normalizer callback.

        For example:

        >>> param = Param('telephone',
        ...     normalizer=lambda value: value.replace('.', '-')
        ... )
        >>> param.normalize(u'800.123.4567')
        u'800-123-4567'

        If this `Param` instance was created with a normalizer callback and
        ``value`` is a unicode instance, the normalizer callback is called and
        *its* return value is returned.

        On the other hand, if this `Param` instance was *not* created with a
        normalizer callback, if ``value`` is *not* a unicode instance, or if an
        exception is caught when calling the normalizer callback, ``value`` is
        returned unchanged.

        :param value: A proposed value for this parameter.
        """
        if self.multivalue:  # pylint: disable=using-constant-test
            if type(value) not in (tuple, list):
                value = (value,)
        if self.multivalue:  # pylint: disable=using-constant-test
            return tuple(
                self._normalize_scalar(v) for v in value
            )
        else:
            return self._normalize_scalar(value)

    def _normalize_scalar(self, value):
        """
        Normalize a scalar value.

        This method is called once for each value in a multivalue.
        """
        if self.normalizer is None:
            return value
        try:
            return self.normalizer(value)
        except Exception:
            # A failing normalizer leaves the value untouched.
            return value

    def convert(self, value):
        """
        Convert ``value`` to the Python type required by this parameter.

        For example:

        >>> scalar = Str('my_scalar')
        >>> scalar.type
        <type 'unicode'>
        >>> scalar.convert(43.2)
        u'43.2'

        (Note that `Str` is a subclass of `Param`.)

        All non-numeric, non-boolean values which evaluate to False will be
        converted to None. For example:

        >>> scalar.convert(u'') is None  # An empty string
        True
        >>> scalar.convert([]) is None  # An empty list
        True

        Likewise, they will be filtered out of a multivalue parameter.
        For example:

        >>> multi = Str('my_multi', multivalue=True)
        >>> multi.convert([1.5, '', 17, None, u'Hello'])
        (u'1.5', u'17', u'Hello')
        >>> multi.convert([None, u'']) is None  # Filters to an empty list
        True

        Lastly, multivalue parameters will always return a ``tuple`` (assuming
        they don't return ``None`` as in the last example above). For example:

        >>> multi.convert(42)  # Called with a scalar value
        (u'42',)
        >>> multi.convert([0, 1])  # Called with a list value
        (u'0', u'1')

        Note that how values are converted (and from what types they will be
        converted) completely depends upon how a subclass implements its
        `Param._convert_scalar()` method. For example, see
        `Str._convert_scalar()`.

        :param value: A proposed value for this parameter.
        """
        if not self.no_convert:
            convert = self._convert_scalar
        else:
            # no_convert: unicode values pass through untouched.
            def convert(value):
                if isinstance(value, unicode):
                    return value
                return self._convert_scalar(value)
        if _is_null(value):
            return
        if self.multivalue:  # pylint: disable=using-constant-test
            if type(value) not in (tuple, list):
                value = (value,)
            values = tuple(
                convert(v) for v in value if not _is_null(v)
            )
            if len(values) == 0:
                return
            return values
        return convert(value)

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.
        """
        for t in self.allowed_types:
            if isinstance(value, t):
                return value
        raise ConversionError(name=self.name, error=ugettext(self.type_error))

    def validate(self, value, supplied=None):
        """
        Check validity of ``value``.

        :param value: A proposed value for this parameter.
        :param supplied: True if this parameter was supplied explicitly.
        :raises RequirementError: if a required value is missing.
        :raises ValidationError: if the value fails a validation rule or the
            parameter is deprecated.
        """
        if value is None:
            if self.required or (supplied and 'nonempty' in self.flags):
                raise RequirementError(name=self.name)
            return
        if self.deprecated:  # pylint: disable=using-constant-test
            raise ValidationError(name=self.get_param_name(),
                                  error=_('this option is deprecated'))
        if self.multivalue:  # pylint: disable=using-constant-test
            if type(value) is not tuple:
                raise TypeError(
                    TYPE_ERROR % ('value', tuple, value, type(value))
                )
            if len(value) < 1:
                raise ValueError('value: empty tuple must be converted to None')
            for v in value:
                self._validate_scalar(v)
        else:
            self._validate_scalar(value)

    def _validate_scalar(self, value, index=None):
        """
        Type-check one scalar value and run all active rules against it.
        """
        for t in self.allowed_types:
            if isinstance(value, t):
                break
        else:
            raise TypeError(
                TYPE_ERROR % (self.name, self.type, value, type(value))
            )
        for rule in self.all_rules:
            error = rule(ugettext, value)
            if error is not None:
                raise ValidationError(name=self.get_param_name(), error=error)

    def get_default(self, **kw):
        """
        Return the static default or construct and return a dynamic default.

        (In these examples, we will use the `Str` and `Bytes` classes, which
        both subclass from `Param`.)

        The *default* static default is ``None``. For example:

        >>> s = Str('my_str')
        >>> s.default is None
        True
        >>> s.get_default() is None
        True

        However, you can provide your own static default via the ``default``
        keyword argument when you create your `Param` instance. For example:

        >>> s = Str('my_str', default=u'My Static Default')
        >>> s.default
        u'My Static Default'
        >>> s.get_default()
        u'My Static Default'

        If you need to generate a dynamic default from other supplied parameter
        values, provide a callback via the ``default_from`` keyword argument.
        This callback will be automatically wrapped in a `DefaultFrom` instance
        if it isn't one already (see the `DefaultFrom` class for all the gory
        details). For example:

        >>> login = Str('login', default=u'my-static-login-default',
        ...     default_from=lambda first, last: (first[0] + last).lower(),
        ... )
        >>> isinstance(login.default_from, DefaultFrom)
        True
        >>> login.default_from.keys
        ('first', 'last')

        Then when all the keys needed by the `DefaultFrom` instance are present,
        the dynamic default is constructed and returned. For example:

        >>> kw = dict(last=u'Doe', first=u'John')
        >>> login.get_default(**kw)
        u'jdoe'

        Or if any keys are missing, your *static* default is returned.
        For example:

        >>> kw = dict(first=u'John', department=u'Engineering')
        >>> login.get_default(**kw)
        u'my-static-login-default'
        """
        if self.default_from is not None:
            default = self.default_from(**kw)
            if default is not None:
                try:
                    return self.convert(self.normalize(default))
                except Exception:
                    # An unconvertible dynamic default falls back to the
                    # static one.
                    pass
        return self.default

    def sort_key(self, value):
        # Identity by default; subclasses may override to customize ordering.
        return value

    def __json__(self):
        # Serialize this parameter's metadata (kwargs, class, name, type)
        # for the API schema; callable/DefaultFrom kwargs are skipped.
        json_dict = {}
        for a, k, _d in self.kwargs:
            if k in (callable, DefaultFrom):
                continue
            if isinstance(getattr(self, a), frozenset):
                json_dict[a] = list(getattr(self, a, []))
            else:
                val = getattr(self, a, '')
                if val is None:
                    # ignore 'not set' because lack of their presence is
                    # the information itself
                    continue
                json_dict[a] = json_serialize(val)
        json_dict['class'] = self.__class__.__name__
        json_dict['name'] = self.name
        json_dict['type'] = self.type.__name__
        return json_dict
class Bool(Param):
    """
    A parameter for boolean values (stored in the ``bool`` type).
    """

    type = bool
    type_error = _('must be True or False')

    # FIXME: This is my quick hack to get some UI stuff working, change
    # these defaults --jderose 2009-08-28
    kwargs = Param.kwargs + (
        ('truths', frozenset, frozenset([1, u'1', True, u'true', u'TRUE'])),
        ('falsehoods', frozenset, frozenset([0, u'0', False, u'false', u'FALSE'])),
    )

    def _convert_scalar(self, value, index=None):
        """
        Convert one scalar value to ``bool``.

        Strings are lower-cased before being matched against the
        ``truths`` and ``falsehoods`` sets; sequences raise the
        single-value error, anything else the type error.
        """
        if type(value) in self.allowed_types:
            return value
        candidate = value.lower() if isinstance(value, str) else value
        if candidate in self.truths:
            return True
        if candidate in self.falsehoods:
            return False
        if type(candidate) in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        raise ConversionError(name=self.name, error=ugettext(self.type_error))
class Flag(Bool):
    """
    A boolean parameter that is always automatically filled in.

    This `Bool` subclass forces ``autofill=True`` and supplies a default of
    ``False`` when none is given.  Unlike `Bool`, the default must be exactly
    ``True`` or ``False``; ``None`` is not permitted.

    For example:

    >>> flag = Flag('my_flag')
    >>> (flag.autofill, flag.default)
    (True, False)

    Pass ``default=True`` for a flag that defaults to on:

    >>> flag = Flag('my_flag', default=True)
    >>> (flag.autofill, flag.default)
    (True, True)

    Note that ``autofill=False`` passed by a caller is silently overridden:

    >>> flag = Flag('my_flag', autofill=False)
    >>> flag.autofill
    True
    """

    def __init__(self, name, *rules, **kw):
        """
        :raises TypeError: if a non-``bool`` ``default`` is supplied.
        """
        # autofill is forced on regardless of what the caller passed.
        kw['autofill'] = True
        kw.setdefault('default', False)
        default = kw['default']
        if type(default) is not bool:
            raise TypeError(
                TYPE_ERROR % ('default', bool, default, type(default))
            )
        super(Flag, self).__init__(name, *rules, **kw)
class Number(Param):
    """
    Base class providing shared conversion for `Int` and `Decimal`.
    """

    def _convert_scalar(self, value, index=None):
        """
        Convert one scalar value to this parameter's numeric type.
        """
        vtype = type(value)
        if vtype in self.allowed_types:
            return value
        if vtype in (unicode, float, int):
            try:
                return self.type(value)
            except ValueError:
                # fall through to the ConversionError below
                pass
        if vtype in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        raise ConversionError(name=self.name, error=ugettext(self.type_error))
class Int(Number):
    """
    A parameter for integer values (stored in the ``int`` type).
    """

    type = int
    allowed_types = (int,)
    type_error = _('must be an integer')

    MININT = MININT
    MAXINT = MAXINT
    MAX_UINT32 = MAX_UINT32
    MAX_SAFE_INTEGER = MAX_SAFE_INTEGER
    MIN_SAFE_INTEGER = MIN_SAFE_INTEGER

    kwargs = Param.kwargs + (
        ('minvalue', int, int(MININT)),
        ('maxvalue', int, int(MAXINT)),
    )

    @staticmethod
    def convert_int(value):
        """
        Convert ``value`` to ``int`` or raise ``ValueError``.

        Accepts ``int`` unchanged, truncates ``float``, and parses unicode
        strings in float notation (truncated), 0-prefixed octal, or any
        base auto-detected by ``int(value, 0)`` (0x/0o/0b prefixes).
        """
        if type(value) in Int.allowed_types:
            return value
        if type(value) is float:
            return int(value)
        if type(value) is unicode:
            if u'.' in value:
                # float notation: truncate like the float branch above
                return int(float(value))
            if six.PY3 and re.match('0[0-9]+', value):
                # 0-prefixed octal format
                return int(value, 8)
            # base 0 auto-detects 0x/0o/0b prefixes
            return int(value, 0)
        raise ValueError(value)

    def __init__(self, name, *rules, **kw):
        """
        :param name: parameter name or shorthand spec (see `parse_param_spec`).
        :param rules: optional custom validation rule callables.
        :param kw: keyword options, including ``minvalue`` / ``maxvalue``.
        :raises ValueError: if ``minvalue > maxvalue`` or either bound falls
            outside the JSON-safe integer range.
        """
        super(Int, self).__init__(name, *rules, **kw)
        # Check for None *before* comparing: on Python 3 comparing None
        # with ``>`` raises TypeError, so the previous trailing None-guard
        # could never take effect.  This also mirrors Decimal.__init__().
        if (self.minvalue is not None and self.maxvalue is not None
                and self.minvalue > self.maxvalue):
            raise ValueError(
                '%s: minvalue > maxvalue (minvalue=%r, maxvalue=%r)' % (
                    self.nice, self.minvalue, self.maxvalue)
            )
        # Keep bounds within what JSON/JavaScript can represent exactly.
        if self.minvalue is not None and self.minvalue < self.MIN_SAFE_INTEGER:
            raise ValueError(
                f"minvalue {self.minvalue} outside range of safe JSON "
                f"integer limit {self.MIN_SAFE_INTEGER}"
            )
        if self.maxvalue is not None and self.maxvalue > self.MAX_SAFE_INTEGER:
            raise ValueError(
                f"maxvalue {self.maxvalue} outside range of safe JSON "
                f"integer limit {self.MAX_SAFE_INTEGER}"
            )

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.
        """
        try:
            return Int.convert_int(value)
        except ValueError:
            raise ConversionError(name=self.get_param_name(),
                                  error=ugettext(self.type_error))

    def _rule_minvalue(self, _, value):
        """
        Check min constraint.
        """
        assert isinstance(value, int)
        if value < self.minvalue:
            return _('must be at least %(minvalue)d') % dict(
                minvalue=self.minvalue,
            )
        else:
            return None

    def _rule_maxvalue(self, _, value):
        """
        Check max constraint.
        """
        assert isinstance(value, int)
        if value > self.maxvalue:
            return _('can be at most %(maxvalue)d') % dict(
                maxvalue=self.maxvalue,
            )
        else:
            return None
class Decimal(Number):
    """
    A parameter for floating-point values (stored in the ``Decimal`` type).

    Python Decimal type helps overcome problems tied to plain "float" type,
    e.g. problem with representation or value comparison. In order to safely
    transfer the value over RPC libraries, it is being converted to string
    which is then converted back to Decimal number.
    """

    type = decimal.Decimal
    type_error = _('must be a decimal number')

    kwargs = Param.kwargs + (
        ('minvalue', decimal.Decimal, None),
        ('maxvalue', decimal.Decimal, None),
        # round Decimal to given precision
        ('precision', int, None),
        # when False, number is normalized to non-exponential form
        ('exponential', bool, False),
        # set of allowed decimal number classes
        ('numberclass', tuple, ('-Normal', '+Zero', '+Normal')),
    )

    def __init__(self, name, *rules, **kw):
        """
        :raises ValueError: if a bound/default kwarg cannot be parsed as
            Decimal, if ``minvalue > maxvalue``, or if ``precision`` < 0.
        """
        # Coerce str/float bound and default kwargs to Decimal up front so
        # all later comparisons happen between Decimal instances only.
        for kwparam in ('minvalue', 'maxvalue', 'default'):
            value = kw.get(kwparam)
            if value is None:
                continue
            if isinstance(value, (str, float)):
                try:
                    value = decimal.Decimal(value)
                except Exception as e:
                    raise ValueError(
                        '%s: cannot parse kwarg %s: %s' % (
                            name, kwparam, str(e)))
            kw[kwparam] = value
        super(Decimal, self).__init__(name, *rules, **kw)
        if (self.minvalue is not None and
                self.maxvalue is not None and
                self.minvalue > self.maxvalue):
            raise ValueError(
                '%s: minvalue > maxvalue (minvalue=%s, maxvalue=%s)' % (
                    self.nice, self.minvalue, self.maxvalue)
            )
        if self.precision is not None and self.precision < 0:
            raise ValueError('%s: precision must be at least 0' % self.nice)

    def _rule_minvalue(self, _, value):
        """
        Check min constraint.
        """
        assert type(value) is decimal.Decimal
        if value < self.minvalue:
            return _('must be at least %(minvalue)s') % dict(
                minvalue=self.minvalue,
            )
        else:
            return None

    def _rule_maxvalue(self, _, value):
        """
        Check max constraint.
        """
        assert type(value) is decimal.Decimal
        if value > self.maxvalue:
            return _('can be at most %(maxvalue)s') % dict(
                maxvalue=self.maxvalue,
            )
        else:
            return None

    def _enforce_numberclass(self, value):
        """
        Raise ValidationError unless the value's Decimal number class
        (e.g. '+Normal', 'NaN', '-Infinity') is in ``self.numberclass``.
        """
        numberclass = value.number_class()
        if numberclass not in self.numberclass:
            raise ValidationError(name=self.get_param_name(),
                error=_("number class '%(cls)s' is not included in a list "
                        "of allowed number classes: %(allowed)s") \
                        % dict(cls=numberclass,
                               allowed=u', '.join(self.numberclass))
            )

    def _enforce_precision(self, value):
        """
        Quantize ``value`` to ``self.precision`` decimal places, if set.
        """
        assert type(value) is decimal.Decimal
        if self.precision is not None:
            quantize_exp = decimal.Decimal(10) ** -int(self.precision)
            try:
                value = value.quantize(quantize_exp)
            except decimal.DecimalException as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))
        return value

    def _remove_exponent(self, value):
        """
        Normalize ``value`` to non-exponential form unless ``exponential``
        was requested.
        """
        assert type(value) is decimal.Decimal
        if not self.exponential:
            try:
                # adopted from http://docs.python.org/library/decimal.html
                value = value.quantize(decimal.Decimal(1)) \
                    if value == value.to_integral() \
                    else value.normalize()
            except decimal.DecimalException as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))
        return value

    def _test_and_normalize(self, value):
        """
        This method is run in conversion and normalization methods to test
        that the Decimal number conforms to Parameter boundaries and then
        normalizes the value.
        """
        # Order matters: class check first, then exponent removal, then
        # precision quantization.
        self._enforce_numberclass(value)
        value = self._remove_exponent(value)
        value = self._enforce_precision(value)
        return value

    def _convert_scalar(self, value, index=None):
        # str/float inputs are parsed into Decimal first; anything else
        # falls through to the generic Number conversion.
        if isinstance(value, (str, float)):
            try:
                value = decimal.Decimal(value)
            except decimal.DecimalException as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))
        if isinstance(value, decimal.Decimal):
            return self._test_and_normalize(value)
        return super(Decimal, self)._convert_scalar(value)

    def _normalize_scalar(self, value):
        # Only Decimal instances are checked/normalized here; other types
        # are left to the base class normalizer.
        if isinstance(value, decimal.Decimal):
            return self._test_and_normalize(value)
        return super(Decimal, self)._normalize_scalar(value)
class Data(Param):
    """
    Base class for the `Bytes` and `Str` parameters.

    Previously `Str` was a subclass of `Bytes`. Now the common functionality
    has been split into this base class so that ``isinstance(foo, Bytes)`` won't
    be ``True`` when ``foo`` is actually an `Str` instance (which is confusing).
    """

    # ``length`` is mutually exclusive with ``minlength``/``maxlength``
    # (enforced in __init__ below).
    kwargs = Param.kwargs + (
        ('minlength', int, None),
        ('maxlength', int, None),
        ('length', int, None),
        ('pattern_errmsg', (str,), None),
    )

    # Subclasses compile their ``pattern`` kwarg into ``re`` and copy
    # ``pattern_errmsg`` into ``re_errmsg``; both are used by _rule_pattern.
    re = None
    re_errmsg = None

    def __init__(self, name, *rules, **kw):
        """Validate the mutual consistency of the length-related kwargs."""
        super(Data, self).__init__(name, *rules, **kw)

        if not (
            self.length is None or
            (self.minlength is None and self.maxlength is None)
        ):
            raise ValueError(
                '%s: cannot mix length with minlength or maxlength' % self.nice
            )

        if self.minlength is not None and self.minlength < 1:
            raise ValueError(
                '%s: minlength must be >= 1; got %r' % (self.nice, self.minlength)
            )

        if self.maxlength is not None and self.maxlength < 1:
            raise ValueError(
                '%s: maxlength must be >= 1; got %r' % (self.nice, self.maxlength)
            )

        if None not in (self.minlength, self.maxlength):
            if self.minlength > self.maxlength:
                raise ValueError(
                    '%s: minlength > maxlength (minlength=%r, maxlength=%r)' % (
                        self.nice, self.minlength, self.maxlength)
                )
            elif self.minlength == self.maxlength:
                raise ValueError(
                    '%s: minlength == maxlength; use length=%d instead' % (
                        self.nice, self.minlength)
                )

    def _rule_pattern(self, _, value):
        """
        Check pattern (regex) constraint.

        Returns an error message string on mismatch, None on success.
        """
        assert type(value) in self.allowed_types
        if self.re.match(value) is None:
            # Prefer a caller-supplied error message when one was given.
            if self.re_errmsg:
                return self.re_errmsg % dict(pattern=self.pattern,)
            else:
                return _('must match pattern "%(pattern)s"') % dict(
                    pattern=self.pattern,
                )
        else:
            return None
class Bytes(Data):
    """
    A parameter for binary data (stored in the ``bytes`` type).

    This class is named *Bytes* instead of *Str* so it's aligned with the
    Python v3 ``(str, unicode) => (bytes, str)`` clean-up. See:

        http://docs.python.org/3.0/whatsnew/3.0.html

    Also see the `Str` parameter.
    """

    type = bytes
    type_error = _('must be binary data')

    kwargs = Data.kwargs + (
        ('pattern', (bytes,), None),
    )

    def __init__(self, name, *rules, **kw):
        """Compile the optional ``pattern`` kwarg for _rule_pattern."""
        if kw.get('pattern', None) is None:
            self.re = None
        else:
            self.re = re.compile(kw['pattern'])
        self.re_errmsg = kw.get('pattern_errmsg', None)
        super(Bytes, self).__init__(name, *rules, **kw)

    def _rule_minlength(self, _, value):
        """
        Check minlength constraint.
        """
        assert type(value) is bytes
        if len(value) < self.minlength:
            return _('must be at least %(minlength)d bytes') % dict(
                minlength=self.minlength,
            )
        else:
            return None

    def _rule_maxlength(self, _, value):
        """
        Check maxlength constraint.
        """
        assert type(value) is bytes
        if len(value) > self.maxlength:
            return _('can be at most %(maxlength)d bytes') % dict(
                maxlength=self.maxlength,
            )
        else:
            return None

    def _rule_length(self, _, value):
        """
        Check length constraint.
        """
        assert type(value) is bytes
        if len(value) != self.length:
            return _('must be exactly %(length)d bytes') % dict(
                length=self.length,
            )
        else:
            return None

    def _convert_scalar(self, value, index=None):
        # Text input is taken to be base64-encoded binary data.
        if isinstance(value, unicode):
            try:
                value = base64.b64decode(value)
            except (TypeError, ValueError) as e:
                raise Base64DecodeError(reason=str(e))
        return super(Bytes, self)._convert_scalar(value)
class Certificate(Param):
    """X.509 certificate parameter.

    Accepts an `IPACertificate` object, DER bytes, or a (possibly
    base64-encoded) text representation.
    """
    type = crypto_x509.Certificate
    type_error = _('must be a certificate')
    allowed_types = (IPACertificate, bytes, unicode)

    def _convert_scalar(self, value, index=None):
        """
        :param value: either DER certificate or base64 encoded certificate
        :returns: an `IPACertificate` object parsed from the DER data
        """
        if isinstance(value, bytes):
            try:
                value = value.decode('ascii')
            except UnicodeDecodeError:
                # value is possibly a DER-encoded certificate
                pass

        if isinstance(value, unicode):
            # if we received unicodes right away or we got them after the
            # decoding, we will now try to receive DER-certificate
            try:
                value = base64.b64decode(value)
            except (TypeError, ValueError) as e:
                raise Base64DecodeError(reason=str(e))

        if isinstance(value, bytes):
            # we now only have either bytes or an IPACertificate object
            # if it's bytes, make it an IPACertificate object
            try:
                value = load_der_x509_certificate(value)
            except ValueError as e:
                raise CertificateFormatError(error=str(e))

        return super(Certificate, self)._convert_scalar(value)
class CertificateSigningRequest(Param):
    """PKCS#10 certificate signing request parameter.

    Accepts a cryptography CSR object, DER bytes, or (base64-encoded)
    text, optionally with PEM-style header/footer lines.
    """
    type = crypto_x509.CertificateSigningRequest
    type_error = _('must be a certificate signing request')
    allowed_types = (crypto_x509.CertificateSigningRequest, bytes, unicode)

    def __extract_der_from_input(self, value):
        """
        Tries to get the DER representation of whatever we receive as an input

        :param value:
            bytes instance containing something we hope is a certificate
            signing request

        :returns:
            base64-decoded representation of whatever we found in case input
            had been something else than DER or something which resembles
            DER, in which case we would just return input
        """
        try:
            value.decode('utf-8')
        except UnicodeDecodeError:
            # possibly DER-encoded CSR or something similar
            return value

        # Text input: drop any PEM header/footer, then base64-decode.
        value = strip_csr_header(value)
        return base64.b64decode(value)

    def _convert_scalar(self, value, index=None):
        """
        :param value:
            either DER csr, base64-encoded csr or an object implementing the
            cryptography.CertificateSigningRequest interface

        :returns:
            an object with the cryptography.CertificateSigningRequest interface
        """
        if isinstance(value, unicode):
            try:
                value = value.encode('ascii')
            except UnicodeDecodeError:
                raise CertificateOperationError('not a valid CSR')

        if isinstance(value, bytes):
            # try to extract DER from whatever we got
            value = self.__extract_der_from_input(value)
            try:
                value = crypto_x509.load_der_x509_csr(
                    value, backend=default_backend())
            except ValueError as e:
                raise CertificateOperationError(
                    error=_("Failure decoding Certificate Signing Request:"
                            " %s") % e)

        return super(CertificateSigningRequest, self)._convert_scalar(value)
class Str(Data):
    """
    A parameter for Unicode text (stored in the ``unicode`` type).

    This class is named *Str* instead of *Unicode* so it's aligned with the
    Python v3 ``(str, unicode) => (bytes, str)`` clean-up. See:

        http://docs.python.org/3.0/whatsnew/3.0.html

    Also see the `Bytes` parameter.
    """
    kwargs = Data.kwargs + (
        ('pattern', (str,), None),
        ('noextrawhitespace', bool, True),
    )

    type = unicode
    type_error = _('must be Unicode text')

    def __init__(self, name, *rules, **kw):
        # Compile the optional ``pattern`` kwarg for _rule_pattern
        # (inherited from Data).
        if kw.get('pattern', None) is None:
            self.re = None
        else:
            self.re = re.compile(kw['pattern'], re.UNICODE)
        self.re_errmsg = kw.get('pattern_errmsg', None)
        super(Str, self).__init__(name, *rules, **kw)

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.

        Numbers are converted to their text representation; sequences are
        rejected as non-scalar.
        """
        if type(value) in self.allowed_types:
            return value
        if type(value) in (int, float, decimal.Decimal):
            return self.type(value)
        if type(value) in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        raise ConversionError(name=self.name, error=ugettext(self.type_error))

    def _rule_noextrawhitespace(self, _, value):
        """
        Do not allow leading/trailing spaces.
        """
        assert type(value) is unicode
        if self.noextrawhitespace is False:
            return None
        if len(value) != len(value.strip()):
            return _('Leading and trailing spaces are not allowed')
        else:
            return None

    def _rule_minlength(self, _, value):
        """
        Check minlength constraint.
        """
        assert type(value) is unicode
        if len(value) < self.minlength:
            return _('must be at least %(minlength)d characters') % dict(
                minlength=self.minlength,
            )
        else:
            return None

    def _rule_maxlength(self, _, value):
        """
        Check maxlength constraint.
        """
        assert type(value) is unicode
        if len(value) > self.maxlength:
            return _('can be at most %(maxlength)d characters') % dict(
                maxlength=self.maxlength,
            )
        else:
            return None

    def _rule_length(self, _, value):
        """
        Check length constraint.
        """
        assert type(value) is unicode
        if len(value) != self.length:
            return _('must be exactly %(length)d characters') % dict(
                length=self.length,
            )
        else:
            return None

    def sort_key(self, value):
        # Sort case-insensitively.
        return value.lower()
class IA5Str(Str):
    """
    An IA5String per RFC 4517: only 7-bit ASCII characters are accepted.
    """

    def __init__(self, name, *rules, **kw):
        super(IA5Str, self).__init__(name, *rules, **kw)

    def _convert_scalar(self, value, index=None):
        """Reject the first character outside the ASCII range, then convert."""
        if isinstance(value, str):
            offender = next((ch for ch in value if ord(ch) > 127), None)
            if offender is not None:
                raise ConversionError(
                    name=self.get_param_name(),
                    error=_('The character %(char)r is not allowed.')
                    % dict(char=offender,)
                )
        return super(IA5Str, self)._convert_scalar(value)
class Password(Str):
    """
    A parameter for passwords (stored in the ``unicode`` type).
    """
    kwargs = Data.kwargs + (
        ('pattern', (str,), None),
        ('noextrawhitespace', bool, False),
    )

    # Marks this parameter as sensitive for the framework.
    password = True

    def _convert_scalar(self, value, index=None):
        """Accept a (password, confirmation) pair; both entries must match."""
        if isinstance(value, (tuple, list)) and len(value) == 2:
            entered, confirmation = value
            if entered != confirmation:
                raise PasswordMismatch(name=self.name)
            value = entered
        return super(Password, self)._convert_scalar(value)
class Enum(Param):
    """
    Base class for parameters whose value must come from a fixed set.
    """
    kwargs = Param.kwargs + (
        ('values', tuple, tuple()),
    )

    def __init__(self, name, *rules, **kw):
        # Surface the allowed values in CLI help output.
        kw['cli_metavar'] = str([str(v) for v in kw.get('values', tuple())])
        super(Enum, self).__init__(name, *rules, **kw)

        # Every declared value must already be of an allowed type.
        for position, choice in enumerate(self.values):
            if type(choice) in self.allowed_types:
                continue
            label = '%s values[%d]' % (self.nice, position)
            raise TypeError(
                TYPE_ERROR % (label, self.type, choice, type(choice))
            )

        if not self.values:
            raise ValueError(
                '%s: list of values must not be empty' % self.nice)

    def _rule_values(self, _, value, **kw):
        """Return an error message unless *value* is one of ``values``."""
        if value in self.values:
            return None
        if len(self.values) == 1:
            return _("must be '%(value)s'") % dict(value=self.values[0])
        quoted = u', '.join("'%s'" % choice for choice in self.values)
        return _('must be one of %(values)s') % dict(values=quoted)
class BytesEnum(Enum):
    """
    Enumerable for binary data (stored in the ``str`` type).

    NOTE(review): despite the name and the description above, ``type`` is
    ``unicode`` here — confirm whether truly binary enum values are used
    anywhere or whether this is a Python-3-port leftover.
    """
    type = unicode
class StrEnum(Enum):
    """
    Enumerable for Unicode text (stored in the ``unicode`` type).

    For example:

    >>> enum = StrEnum('my_enum', values=(u'One', u'Two', u'Three'))
    >>> enum.validate(u'Two', 'cli') is None
    True
    >>> enum.validate(u'Four', 'cli')
    Traceback (most recent call last):
      ...
    ValidationError: invalid 'my_enum': must be one of 'One', 'Two', 'Three'
    """
    type = unicode
class IntEnum(Enum):
    """
    Enumerable for integer data (stored in the ``int`` type).
    """
    type = int
    allowed_types = (int,)
    # Reuse Int's error message for consistency.
    type_error = Int.type_error

    def _convert_scalar(self, value, index=None):
        """Convert a single scalar value via Int's integer parsing."""
        try:
            converted = Int.convert_int(value)
        except ValueError:
            raise ConversionError(name=self.get_param_name(),
                                  error=ugettext(self.type_error))
        return converted
class Any(Param):
    """
    A parameter capable of holding values of any type. For internal use only.
    """
    type = object

    def _convert_scalar(self, value, index=None):
        # No conversion: values pass through untouched.
        return value

    def _validate_scalar(self, value, index=None):
        # Run every configured rule; the first failure aborts validation.
        for check in self.all_rules:
            message = check(ugettext, value)
            if message is not None:
                raise ValidationError(name=self.name, error=message)
class File(Str):
    """Text file parameter type.

    Accepts file names and loads their content into the parameter value.
    """
    # Files are read in text mode.
    open_mode = 'r'
    kwargs = Data.kwargs + (
        # valid for CLI, other backends (e.g. webUI) can ignore this
        ('stdin_if_missing', bool, False),
        ('noextrawhitespace', bool, False),
    )
class BinaryFile(Bytes):
    """Binary file parameter type.

    Like `File`, but the content is read in binary mode.
    """
    open_mode = 'rb'
    kwargs = Data.kwargs + (
        # valid for CLI, other backends (e.g. webUI) can ignore this
        ('stdin_if_missing', bool, False),
        ('noextrawhitespace', bool, False),
    )
class DateTime(Param):
    """
    DateTime parameter type.

    Accepts LDAP generalized time in the following format:
       '%Y%m%d%H%M%SZ'

    Accepts subset of values defined by ISO 8601:
        '%Y-%m-%dT%H:%M:%SZ'
        '%Y-%m-%dT%H:%MZ'
        '%Y-%m-%dZ'

    Also accepts above formats using ' ' (space) as a separator instead of 'T'.

    Refer to the `man strftime` for the explanations for the %Y,%m,%d,%H.%M,%S.
    """
    accepted_formats = [LDAP_GENERALIZED_TIME_FORMAT,  # generalized time
                        '%Y-%m-%dT%H:%M:%SZ',  # ISO 8601, second precision
                        '%Y-%m-%dT%H:%MZ',     # ISO 8601, minute precision
                        '%Y-%m-%dZ',           # ISO 8601, date only
                        '%Y-%m-%d %H:%M:%SZ',  # non-ISO 8601, second precision
                        '%Y-%m-%d %H:%MZ']     # non-ISO 8601, minute precision

    type = datetime.datetime
    type_error = _('must be datetime value')

    def _convert_scalar(self, value, index=None):
        if isinstance(value, str):
            # Special-case 'now': return the current UTC time (tz-aware);
            # values parsed below via strptime are naive.
            if value == u'now':
                time = datetime.datetime.now(tz=datetime.timezone.utc)
                return time
            else:
                # Try each accepted format in order, first match wins.
                for date_format in self.accepted_formats:
                    try:
                        time = datetime.datetime.strptime(value, date_format)
                        return time
                    except ValueError:
                        pass

            # If we get here, the strptime call did not succeed for any
            # the accepted formats, therefore raise error
            error = (_("does not match any of accepted formats: ") +
                     (', '.join(self.accepted_formats)))
            raise ConversionError(name=self.get_param_name(),
                                  error=error)

        return super(DateTime, self)._convert_scalar(value)
class AccessTime(Str):
    """
    Access time parameter type.

    Accepts values conforming to generalizedTime as defined in RFC 4517
    section 3.3.13 without time zone information.

    Two top-level forms are validated by `_check`:

    * ``absolute <generalizedTime> ~ <generalizedTime>``
    * ``periodic (yearly|monthly|weekly|daily) <specifiers> <HHMM-range>``

    Each ``_check_*`` helper raises ValueError on the first violation; the
    public entry point is `_rule_required`, which maps those errors to
    ValidationError.
    """

    def _check_HHMM(self, t):
        # Hour/minute as exactly four digits, 0000..2359.
        if len(t) != 4:
            raise ValueError('HHMM must be exactly 4 characters long')
        if not t.isnumeric():
            raise ValueError('HHMM non-numeric')
        hh = int(t[0:2])
        if hh < 0 or hh > 23:
            raise ValueError('HH out of range')
        mm = int(t[2:4])
        if mm < 0 or mm > 59:
            raise ValueError('MM out of range')

    def _check_dotw(self, t):
        # Day of the week: 1..7 or a three-letter English abbreviation.
        if t.isnumeric():
            value = int(t)
            if value < 1 or value > 7:
                raise ValueError('day of the week out of range')
        elif t not in ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'):
            raise ValueError('invalid day of the week')

    def _check_dotm(self, t, month_num=1, year=4):
        # Day of the month, honoring month length and the Gregorian
        # leap-year rule for February.
        if not t.isnumeric():
            raise ValueError('day of the month non-numeric')
        value = int(t)
        if month_num in (1, 3, 5, 7, 8, 10, 12):
            if value < 1 or value > 31:
                raise ValueError('day of the month out of range')
        elif month_num in (4, 6, 9, 11):
            if value < 1 or value > 30:
                raise ValueError('day of the month out of range')
        elif month_num == 2:
            if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
                if value < 1 or value > 29:
                    raise ValueError('day of the month out of range')
            else:
                if value < 1 or value > 28:
                    raise ValueError('day of the month out of range')

    def _check_wotm(self, t):
        # Week of the month: 1..6.
        if not t.isnumeric():
            raise ValueError('week of the month non-numeric')
        value = int(t)
        if value < 1 or value > 6:
            raise ValueError('week of the month out of range')

    def _check_woty(self, t):
        # Week of the year: 1..52.
        # NOTE(review): ISO 8601 years may have 53 weeks — confirm the
        # upper bound of 52 is intended.
        if not t.isnumeric():
            raise ValueError('week of the year non-numeric')
        value = int(t)
        if value < 1 or value > 52:
            raise ValueError('week of the year out of range')

    def _check_doty(self, t):
        # Day of the year: 1..365.
        # NOTE(review): leap years have 366 days — confirm the upper
        # bound of 365 is intended.
        if not t.isnumeric():
            raise ValueError('day of the year non-numeric')
        value = int(t)
        if value < 1 or value > 365:
            raise ValueError('day of the year out of range')

    def _check_month_num(self, t):
        # Month number: 1..12.
        if not t.isnumeric():
            raise ValueError('month number non-numeric')
        value = int(t)
        if value < 1 or value > 12:
            raise ValueError('month number out of range')

    def _check_interval(self, t, check_func):
        # Comma-separated list of values or "a-b" ranges, each element
        # validated by check_func; range bounds must be ascending.
        intervals = t.split(',')
        for i in intervals:
            if not i:
                raise ValueError('invalid time range')
            values = i.split('-')
            if len(values) > 2:
                raise ValueError('invalid time range')
            for v in values:
                check_func(v)
            if len(values) == 2:
                if int(values[0]) > int(values[1]):
                    raise ValueError('invalid time range')

    def _check_W_spec(self, ts, index):
        # Weekly specifier: 'day' followed by a day-of-week interval.
        # Returns the index of the last token consumed.
        if ts[index] != 'day':
            raise ValueError('invalid week specifier')
        index += 1
        self._check_interval(ts[index], self._check_dotw)
        return index

    def _check_M_spec(self, ts, index):
        # Monthly specifier: 'week <wotm> day <dotw>' or 'day <dotm>'.
        if ts[index] == 'week':
            self._check_interval(ts[index + 1], self._check_wotm)
            index = self._check_W_spec(ts, index + 2)
        elif ts[index] == 'day':
            index += 1
            self._check_interval(ts[index], self._check_dotm)
        else:
            raise ValueError('invalid month specifier')
        return index

    def _check_Y_spec(self, ts, index):
        # Yearly specifier: 'month <num> <M-spec>', 'week <woty> <W-spec>'
        # or 'day <doty>'.
        if ts[index] == 'month':
            index += 1
            self._check_interval(ts[index], self._check_month_num)
            index = self._check_M_spec(ts, index + 1)
        elif ts[index] == 'week':
            self._check_interval(ts[index + 1], self._check_woty)
            index = self._check_W_spec(ts, index + 2)
        elif ts[index] == 'day':
            index += 1
            self._check_interval(ts[index], self._check_doty)
        else:
            raise ValueError('invalid year specifier')
        return index

    def _check_generalized(self, t):
        # generalizedTime without timezone: YYYYMMDDHH[MM[SS]].
        assert type(t) is unicode
        if len(t) not in (10, 12, 14):
            raise ValueError('incomplete generalized time')
        if not t.isnumeric():
            raise ValueError('time non-numeric')
        # don't check year value, with time travel and all :)
        self._check_month_num(t[4:6])
        year_num = int(t[0:4])
        month_num = int(t[4:6])
        self._check_dotm(t[6:8], month_num, year_num)
        if len(t) >= 12:
            self._check_HHMM(t[8:12])
        else:
            self._check_HHMM('%s00' % t[8:10])
        if len(t) == 14:
            s = int(t[12:14])
            # 60 is accepted — presumably to allow leap seconds; confirm.
            if s < 0 or s > 60:
                raise ValueError('seconds out of range')

    def _check(self, time):
        """Validate one access-time value; raise ValueError on failure."""
        ts = time.split()
        if ts[0] == 'absolute':
            if len(ts) != 4:
                raise ValueError('invalid format, must be \'absolute generalizedTime ~ generalizedTime\'')
            self._check_generalized(ts[1])
            if ts[2] != '~':
                raise ValueError('invalid time range separator')
            self._check_generalized(ts[3])
            # NOTE(review): int comparison is only chronologically valid
            # when both timestamps have the same digit count (10/12/14);
            # mixed precisions compare incorrectly — confirm and consider
            # zero-padding both to 14 digits before comparing.
            if int(ts[1]) >= int(ts[3]):
                raise ValueError('invalid time range')
        elif ts[0] == 'periodic':
            index = None
            if ts[1] == 'yearly':
                index = self._check_Y_spec(ts, 2)
            elif ts[1] == 'monthly':
                index = self._check_M_spec(ts, 2)
            elif ts[1] == 'weekly':
                index = self._check_W_spec(ts, 2)
            elif ts[1] == 'daily':
                index = 1
            if index is None:
                # Fixed: message previously read 'yearly, monthy or daily'
                # (typo) and omitted 'weekly', which is accepted above.
                raise ValueError(
                    "period must be yearly, monthly, weekly or daily, "
                    "got '%s'" % ts[1])
            self._check_interval(ts[index + 1], self._check_HHMM)
        else:
            raise ValueError('time neither absolute or periodic')

    def _rule_required(self, _, value):
        """Map ValueError/IndexError from _check to ValidationError."""
        try:
            self._check(value)
        except ValueError as e:
            raise ValidationError(name=self.get_param_name(), error=e.args[0])
        except IndexError:
            raise ValidationError(
                name=self.get_param_name(), error=ugettext('incomplete time value')
            )
class DNParam(Param):
    """Distinguished-name parameter (stored as ``DN``)."""
    type = DN

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value to a DN.
        """
        if type(value) in self.allowed_types:
            return value
        if type(value) in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        try:
            return DN(value)
        except Exception as e:
            raise ConversionError(name=self.get_param_name(),
                                  error=ugettext(e))
def create_param(spec):
    """
    Create an `Str` instance from the shorthand ``spec``.

    A shorthand string such as ``'hometown?'`` defines the parameter name
    plus its required/multivalue flags (see `parse_param_spec()`):

    >>> s = create_param('hometown?')
    >>> s
    Str('hometown?')
    >>> (s.name, s.required, s.multivalue)
    ('hometown', False, False)

    A `Param` instance is passed through unchanged:

    >>> b = Bytes('cert')
    >>> create_param(b) is b
    True

    Plugin authors never call this directly; `frontend.Command` and
    `frontend.Object` apply it to ``takes_args`` / ``takes_options`` /
    ``takes_params`` entries.

    :param spec: A spec string or a `Param` instance.
    """
    if isinstance(spec, Param):
        return spec
    # Exact-type check on purpose: str subclasses are rejected too.
    if type(spec) is not str:
        raise TypeError(
            TYPE_ERROR % ('spec', (str, Param), spec, type(spec))
        )
    return Str(spec)
class DNSNameParam(Param):
    """
    Domain name parameter type.

    :only_absolute a domain name has to be absolute
        (makes it absolute from unicode input)
    :only_relative a domain name has to be relative
    """
    type = DNSName
    type_error = _('must be DNS name')
    kwargs = Param.kwargs + (
        ('only_absolute', bool, False),
        ('only_relative', bool, False),
    )

    def __init__(self, name, *rules, **kw):
        super(DNSNameParam, self).__init__(name, *rules, **kw)
        # The two constraints are mutually exclusive.
        if self.only_absolute and self.only_relative:
            raise ValueError('%s: cannot be both absolute and relative' %
                             self.nice)

    def _convert_scalar(self, value, index=None):
        # Text input is IDNA-validated before being wrapped in DNSName.
        if isinstance(value, unicode):
            try:
                validate_idna_domain(value)
            except ValueError as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))
            value = DNSName(value)

            # Auto-append the root label so only_absolute text input
            # converts cleanly instead of failing _rule_only_absolute.
            if self.only_absolute and not value.is_absolute():
                value = value.make_absolute()

        return super(DNSNameParam, self)._convert_scalar(value)

    def _rule_only_absolute(self, _, value):
        if self.only_absolute and not value.is_absolute():
            return _('must be absolute')
        else:
            return None

    def _rule_only_relative(self, _, value):
        if self.only_relative and value.is_absolute():
            return _('must be relative')
        else:
            return None
class Dict(Param):
    """
    A parameter holding a ``dict`` value.
    """
    type = dict
    type_error = _("must be dictionary")
class Principal(Param):
    """
    Kerberos principal name parameter (stored as ``kerberos.Principal``).
    """
    type = kerberos.Principal
    type_error = _('must be Kerberos principal')
    kwargs = Param.kwargs + (
        ('require_service', bool, False),
    )

    @property
    def allowed_types(self):
        # Accept already-parsed Principal objects as well as text.
        return (self.type, unicode)

    def _convert_scalar(self, value, index=None):
        """Parse text into a kerberos.Principal."""
        if isinstance(value, unicode):
            try:
                value = kerberos.Principal(value)
            except ValueError:
                raise ConversionError(
                    name=self.get_param_name(),
                    error=_("Malformed principal: '%(value)s'") % dict(
                        value=value))

        return super(Principal, self)._convert_scalar(value)

    def _rule_require_service(self, _, value):
        # NOTE(review): other _rule_* methods return the error message;
        # this one raises ValidationError directly — confirm the
        # validation machinery accepts both forms.
        if self.require_service and not value.is_service:
            raise ValidationError(
                name=self.get_param_name(),
                error=_("Service principal is required")
            )
# Mapping from internal parameter value types to the public classes that
# appear in generated signatures (used by create_signature).
_map_types = {
    # map internal certificate subclass to generic cryptography class
    IPACertificate: crypto_x509.Certificate,
    # map internal DNS name class to generic dnspython class
    DNSName: dns.name.Name,
    # DN, Principal have their names mangled in ipaapi.__init__
}
def create_signature(command):
    """Create an inspect.Signature for a command

    :param command: ipa plugin instance (server or client)
    :return: inspect.Signature instance
    """
    signature_params = []
    seen = set()
    # Positional arguments first, then keyword-only options.
    args_options = [
        (command.get_args(), inspect.Parameter.POSITIONAL_OR_KEYWORD),
        (command.get_options(), inspect.Parameter.KEYWORD_ONLY)
    ]
    for ipaparams, kind in args_options:
        for ipaparam in ipaparams:
            # filter out duplicates, for example user_del has a preserve flag
            # and preserve bool.
            if ipaparam.name in seen:
                continue
            seen.add(ipaparam.name)
            # ipalib.plugins.misc.env has wrong type
            if not isinstance(ipaparam, Param):
                continue
            # Required params get no default (Parameter.empty).
            if ipaparam.required:
                default = inspect.Parameter.empty
            else:
                default = ipaparam.default
            # Translate internal value types to their public equivalents.
            allowed_types = tuple(
                _map_types.get(t, t) for t in ipaparam.allowed_types
            )
            # ipalib.parameters.DNSNameParam also handles text
            if isinstance(ipaparam, DNSNameParam):
                allowed_types += (six.text_type,)
            ann = typing.Union[allowed_types]
            if ipaparam.multivalue:
                ann = typing.List[ann]
            signature_params.append(
                inspect.Parameter(
                    ipaparam.name, kind, default=default, annotation=ann
                )
            )
    # cannot describe return parameter with typing yet. TypedDict
    # is only available with mypy_extension.
    signature = inspect.Signature(
        signature_params,
        return_annotation=typing.Dict[typing.Text, typing.Any]
    )
    return signature
class SerialNumber(Str):
    """Certificate serial number parameter type.

    The value is kept as a string. Accepts non-negative integers in
    decimal, or in hexadecimal with an ``0x`` prefix; the value ``0``
    (decimal form) is rejected.
    """
    type = str
    allowed_types = (str,)

    # FIXME: currently unused, perhaps drop it
    MAX_VALUE = 340282366920938463463374607431768211456  # 2^128

    kwargs = Param.kwargs + (
        ('minlength', int, 1),
        ('maxlength', int, 40),  # up to 128-bit values
        ('length', int, None),
    )

    def _validate_scalar(self, value, index=None):
        """Validate decimal/hex syntax on top of the Str length rules.

        :raises ValidationError: negative value, non-numeric value,
            malformed hex digits, or the literal '0'.
        """
        super(SerialNumber, self)._validate_scalar(value)

        if value.startswith('-'):
            raise ValidationError(
                name=self.name, error=_('must be at least 0')
            )

        if not value.isdigit():
            if value.lower().startswith('0x'):
                try:
                    int(value[2:], 16)
                except ValueError:
                    # Fixed: message was double-wrapped in _() and read
                    # 'invalid valid hex'.
                    raise ValidationError(
                        name=self.name, error=_('invalid hexadecimal number')
                    )
            else:
                # Fixed: message was double-wrapped in _().
                raise ValidationError(
                    name=self.name, error=_('must be an integer')
                )

        # NOTE(review): only the decimal literal '0' is rejected here;
        # '0x0' passes — confirm whether hex zero should be rejected too.
        if value == '0':
            raise ValidationError(
                name=self.name, error=_('invalid serial number 0')
            )
| 78,443
|
Python
|
.py
| 1,930
| 30.76943
| 118
| 0.578202
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,656
|
krb_utils.py
|
freeipa_freeipa/ipalib/krb_utils.py
|
# Authors: John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import re
import gssapi
from ipalib import errors
#-------------------------------------------------------------------------------
# Kerberos error codes

KRB5_CC_NOTFOUND = 2529639053 # Matching credential not found
KRB5_FCC_NOFILE = 2529639107 # No credentials cache found
KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN = 2529638918 # client not found in Kerberos db
KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN = 2529638919 # Server not found in Kerberos database
KRB5KRB_AP_ERR_TKT_EXPIRED = 2529638944 # Ticket expired
KRB5_FCC_PERM = 2529639106 # Credentials cache permissions incorrect
KRB5_CC_FORMAT = 2529639111 # Bad format in credentials cache
KRB5_REALM_CANT_RESOLVE = 2529639132 # Cannot resolve network address for KDC in requested realm
# Cannot contact any KDC for requested realm
KRB5_KDC_UNREACH = 2529639068
# A service is not available that is required to process the request
KRB5KDC_ERR_SVC_UNAVAILABLE = 2529638941

# mechglue/gss_plugin.c: #define MAP_ERROR_BASE 0x04200000
GSSPROXY_MAP_ERROR_BASE = 69206016

# GSSProxy error codes
GSSPROXY_KRB5_FCC_NOFILE = GSSPROXY_MAP_ERROR_BASE + KRB5_FCC_NOFILE

# number of seconds of clock skew to accommodate
krb_ticket_expiration_threshold = 60*5

# Timestamp format used by the MIT Kerberos tools.
krb5_time_fmt = '%m/%d/%y %H:%M:%S'

# Optional "SCHEME:" prefix followed by the ccache location (residual).
ccache_name_re = re.compile(r'^((\w+):)?(.+)')

#-------------------------------------------------------------------------------
def krb5_parse_ccache(ccache_name):
    '''
    Split a Kerberos ccache name into its scheme and location components.

    Currently valid values for the scheme are:

    * FILE
    * MEMORY

    The scheme is always returned upper case; when the name carries no
    "SCHEME:" prefix the scheme defaults to FILE.

    :parameters:
      ccache_name
        The name of the Kerberos ccache.
    :returns:
      A two-tuple of (scheme, ccache)
    '''
    # Same pattern as the module-level ccache_name_re: an optional
    # "SCHEME:" prefix followed by the residual.
    match = re.search(r'^((\w+):)?(.+)', ccache_name)
    if not match:
        raise ValueError('Invalid ccache name = "%s"' % ccache_name)
    scheme = match.group(2)
    location = match.group(3)
    return ('FILE' if scheme is None else scheme.upper()), location
def krb5_unparse_ccache(scheme, name):
    '''Join *scheme* (upper-cased) and *name* into "SCHEME:name" form.'''
    return '{}:{}'.format(scheme.upper(), name)
def krb5_format_service_principal_name(service, host, realm):
    '''
    Build a Kerberos V5 service principal name of the form
    "service/host@realm".

    :parameters:
      service
        Service principal name.
      host
        The DNS name of the host where the service is located.
      realm
        The Kerberos realm the service exists in.
    :returns:
      Kerberos V5 service principal name.
    '''
    return '{}/{}@{}'.format(service, host, realm)
def krb5_format_tgt_principal_name(realm):
    '''
    Return the Kerberos V5 TGT principal name for *realm*, i.e.
    "krbtgt/REALM@REALM".

    :parameters:
      realm
        The Kerberos realm the TGT exists in.
    :returns:
      Kerberos V5 TGT name.
    '''
    # Equivalent to krb5_format_service_principal_name('krbtgt', realm, realm).
    return 'krbtgt/%s@%s' % (realm, realm)
def krb5_format_time(timestamp):
'''
Given a UNIX timestamp format it into a string in the same
manner the MIT Kerberos library does. Kerberos timestamps are
always in local time.
:parameters:
timestamp
Unix timestamp
:returns:
formated string
'''
return time.strftime(krb5_time_fmt, time.localtime(timestamp))
def get_credentials(name=None, ccache_name=None):
    '''
    Obtain GSSAPI credentials for *name* from the given ccache. When no
    principal name is specified the default one for the credentials
    cache is used.

    :parameters:
      name
        gssapi.Name object specifying principal or None for the default
      ccache_name
        string specifying Kerberos credentials cache name or None for the
        default
    :returns:
      gssapi.Credentials object
    '''
    # Per RFC 2744 section 5.2, gss_acquire_cred may defer the actual
    # acquisition, so the returned credentials are not guaranteed to be
    # valid (unexpired). Callers must handle GSSAPI exceptions themselves,
    # e.g. ExpiredCredentialsError.
    store = {'ccache': ccache_name} if ccache_name else None
    return gssapi.Credentials(usage="initiate", name=name, store=store)
def get_principal(ccache_name=None):
    '''
    Get the default principal name from the given credentials cache.

    :parameters:
      ccache_name
        string specifying Kerberos credentials cache name or None for the
        default
    :returns:
      Default principal name as string
    :raises:
      errors.CCacheError if the principal cannot be retrieved from given
      ccache
    '''
    try:
        return str(get_credentials(ccache_name=ccache_name).name)
    except gssapi.exceptions.GSSError as e:
        raise errors.CCacheError(message=str(e))
def get_credentials_if_valid(name=None, ccache_name=None):
    '''
    Obtain GSSAPI credentials, filtering out unusable results.

    Like `get_credentials`, but returns the credentials only when they could
    be acquired and still have a positive remaining lifetime; otherwise
    returns None instead of raising.

    :parameters:
      name
        gssapi.Name object specifying principal or None for the default
      ccache_name
        string specifying Kerberos credentials cache name or None for the
        default
    :returns:
      gssapi.Credentials object or None if valid credentials weren't found
    '''
    try:
        creds = get_credentials(name=name, ccache_name=ccache_name)
        # Accessing .lifetime can itself raise (e.g. on expired creds),
        # which is why it must stay inside the try block.
        return creds if creds.lifetime > 0 else None
    except gssapi.exceptions.GSSError:
        return None
| 7,263
|
Python
|
.py
| 183
| 34.650273
| 104
| 0.697971
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,657
|
constants.py
|
freeipa_freeipa/ipalib/constants.py
|
# Authors:
# Martin Nagy <mnagy@redhat.com>
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
All constants centralised in one file.
"""
import os
import string
import uuid
import warnings
warnings.filterwarnings(
"ignore",
"TripleDES has been moved to "
"cryptography.hazmat.decrepit.ciphers.algorithms.TripleDES and "
"will be removed from this module in 48.0.0",
category=UserWarning)
from ipaplatform.constants import constants as _constants
from ipapython.dn import DN
from ipapython.fqdn import gethostfqdn
from ipapython.version import VERSION, API_VERSION
from cryptography.hazmat.primitives.ciphers import algorithms, modes
from cryptography.hazmat.backends.openssl.backend import backend
FQDN = gethostfqdn()
# TLS related constants
# * SSL2 and SSL3 are broken.
# * TLS1.0 and TLS1.1 are no longer state of the art.
# * TLS1.2 and 1.3 are secure and working properly
# * Crypto policies restrict TLS range to 1.2 and 1.3. Python 3.6 cannot
# override the crypto policy.
TLS_VERSIONS = [
"ssl2",
"ssl3",
"tls1.0",
"tls1.1",
"tls1.2",
"tls1.3",
]
TLS_VERSION_MINIMAL = "tls1.2"
TLS_VERSION_MAXIMAL = "tls1.3"
TLS_VERSION_DEFAULT_MIN = None
TLS_VERSION_DEFAULT_MAX = None
SD_IPA_API_MESSAGE_ID = uuid.uuid3(uuid.NAMESPACE_DNS, 'IPA.API')
# regular expression NameSpace member names must match:
NAME_REGEX = r'^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'
# Format for ValueError raised when name does not match above regex:
NAME_ERROR = "name must match '%s'; got '%s'"
# Standard format for TypeError message:
TYPE_ERROR = '%s: need a %r; got %r (a %r)'
# Stardard format for TypeError message when a callable is expected:
CALLABLE_ERROR = '%s: need a callable; got %r (which is a %r)'
# Standard format for Exception message when overriding an attribute:
OVERRIDE_ERROR = 'cannot override %s.%s value %r with %r'
# Standard format for AttributeError message when a read-only attribute is
# already locked:
SET_ERROR = 'locked: cannot set %s.%s to %r'
DEL_ERROR = 'locked: cannot delete %s.%s'
# Used for a tab (or indentation level) when formatting for CLI:
CLI_TAB = ' ' # Two spaces
# The section to read in the config files, i.e. [global]
CONFIG_SECTION = 'global'
# The default configuration for api.env
# This is a tuple instead of a dict so that it is immutable.
# To create a dict with this config, just "d = dict(DEFAULT_CONFIG)".
DEFAULT_CONFIG = (
    # (key, value) pairs merged into api.env; keep entries grouped by topic.
    ('api_version', API_VERSION),
    ('version', VERSION),
    # Domain, realm, basedn:
    # Following values do not have any reasonable default.
    # Do not initialize them so the code which depends on them blows up early
    # and does not do crazy stuff with default values instead of real ones.
    # ('domain', 'example.com'),
    # ('realm', 'EXAMPLE.COM'),
    # ('basedn', DN(('dc', 'example'), ('dc', 'com'))),
    # LDAP containers:
    ('container_accounts', DN(('cn', 'accounts'))),
    ('container_user', DN(('cn', 'users'), ('cn', 'accounts'))),
    ('container_deleteuser', DN(('cn', 'deleted users'), ('cn', 'accounts'), ('cn', 'provisioning'))),
    ('container_stageuser', DN(('cn', 'staged users'), ('cn', 'accounts'), ('cn', 'provisioning'))),
    ('container_group', DN(('cn', 'groups'), ('cn', 'accounts'))),
    ('container_service', DN(('cn', 'services'), ('cn', 'accounts'))),
    ('container_host', DN(('cn', 'computers'), ('cn', 'accounts'))),
    ('container_hostgroup', DN(('cn', 'hostgroups'), ('cn', 'accounts'))),
    ('container_rolegroup', DN(('cn', 'roles'), ('cn', 'accounts'))),
    ('container_permission', DN(('cn', 'permissions'), ('cn', 'pbac'))),
    ('container_privilege', DN(('cn', 'privileges'), ('cn', 'pbac'))),
    ('container_automount', DN(('cn', 'automount'))),
    ('container_policies', DN(('cn', 'policies'))),
    ('container_configs', DN(('cn', 'configs'), ('cn', 'policies'))),
    ('container_roles', DN(('cn', 'roles'), ('cn', 'policies'))),
    ('container_applications', DN(('cn', 'applications'), ('cn', 'configs'), ('cn', 'policies'))),
    ('container_policygroups', DN(('cn', 'policygroups'), ('cn', 'configs'), ('cn', 'policies'))),
    ('container_policylinks', DN(('cn', 'policylinks'), ('cn', 'configs'), ('cn', 'policies'))),
    ('container_netgroup', DN(('cn', 'ng'), ('cn', 'alt'))),
    ('container_hbac', DN(('cn', 'hbac'))),
    ('container_hbacservice', DN(('cn', 'hbacservices'), ('cn', 'hbac'))),
    ('container_hbacservicegroup', DN(('cn', 'hbacservicegroups'), ('cn', 'hbac'))),
    ('container_dns', DN(('cn', 'dns'))),
    ('container_vault', DN(('cn', 'vaults'), ('cn', 'kra'))),
    ('container_virtual', DN(('cn', 'virtual operations'), ('cn', 'etc'))),
    ('container_sudorule', DN(('cn', 'sudorules'), ('cn', 'sudo'))),
    ('container_sudocmd', DN(('cn', 'sudocmds'), ('cn', 'sudo'))),
    ('container_sudocmdgroup', DN(('cn', 'sudocmdgroups'), ('cn', 'sudo'))),
    ('container_automember', DN(('cn', 'automember'), ('cn', 'etc'))),
    ('container_selinux', DN(('cn', 'usermap'), ('cn', 'selinux'))),
    ('container_s4u2proxy', DN(('cn', 's4u2proxy'), ('cn', 'etc'))),
    ('container_cifsdomains', DN(('cn', 'ad'), ('cn', 'etc'))),
    ('container_trusts', DN(('cn', 'trusts'))),
    ('container_adtrusts', DN(('cn', 'ad'), ('cn', 'trusts'))),
    ('container_ranges', DN(('cn', 'ranges'), ('cn', 'etc'))),
    ('container_dna', DN(('cn', 'dna'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_dna_posix_ids', DN(('cn', 'posix-ids'), ('cn', 'dna'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_dna_subordinate_ids', DN(
        ('cn', 'subordinate-ids'), ('cn', 'dna'), ('cn', 'ipa'), ('cn', 'etc')
    )),
    ('container_realm_domains', DN(('cn', 'Realm Domains'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_otp', DN(('cn', 'otp'))),
    ('container_radiusproxy', DN(('cn', 'radiusproxy'))),
    ('container_views', DN(('cn', 'views'), ('cn', 'accounts'))),
    ('container_masters', DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_certprofile', DN(('cn', 'certprofiles'), ('cn', 'ca'))),
    ('container_topology', DN(('cn', 'topology'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_caacl', DN(('cn', 'caacls'), ('cn', 'ca'))),
    ('container_locations', DN(('cn', 'locations'), ('cn', 'etc'))),
    ('container_ca', DN(('cn', 'cas'), ('cn', 'ca'))),
    ('container_dnsservers', DN(('cn', 'servers'), ('cn', 'dns'))),
    ('container_custodia', DN(('cn', 'custodia'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_sysaccounts', DN(('cn', 'sysaccounts'), ('cn', 'etc'))),
    ('container_certmap', DN(('cn', 'certmap'))),
    ('container_certmaprules', DN(('cn', 'certmaprules'), ('cn', 'certmap'))),
    ('container_ca_renewal',
        DN(('cn', 'ca_renewal'), ('cn', 'ipa'), ('cn', 'etc'))),
    ('container_subids', DN(('cn', 'subids'), ('cn', 'accounts'))),
    ('container_idp', DN(('cn', 'idp'))),
    ('container_passkey', DN(('cn', 'passkeyconfig'), ('cn', 'etc'))),
    # Ports, hosts, and URIs:
    # Following values do not have any reasonable default.
    # Do not initialize them so the code which depends on them blows up early
    # and does not do crazy stuff with default values instead of real ones.
    # ('server', 'localhost'),
    # ('xmlrpc_uri', 'http://localhost:8888/ipa/xml'),
    # ('jsonrpc_uri', 'http://localhost:8888/ipa/json'),
    # ('ldap_uri', 'ldap://localhost:389'),
    ('rpc_protocol', 'jsonrpc'),
    ('ldap_cache', True),
    ('ldap_cache_size', 100),
    ('ldap_cache_debug', False),
    # Define an inclusive range of SSL/TLS version support
    ('tls_version_min', TLS_VERSION_DEFAULT_MIN),
    ('tls_version_max', TLS_VERSION_DEFAULT_MAX),
    # Time to wait for a service to start, in seconds.
    # Note that systemd has a DefaultTimeoutStartSec of 90 seconds. Higher
    # values are not effective unless systemd is reconfigured, too.
    ('startup_timeout', 120),
    # How long http connection should wait for reply [seconds].
    ('http_timeout', 30),
    # How long to wait for an entry to appear on a replica
    ('replication_wait_timeout', 300),
    # How long to wait for a certmonger request to finish
    ('certmonger_wait_timeout', 300),
    # Number of seconds before client should check for schema update.
    ('schema_ttl', 3600),
    # Web Application mount points
    ('mount_ipa', '/ipa/'),
    # WebUI stuff:
    ('webui_prod', True),
    # Session stuff:
    ('kinit_lifetime', None),
    # Debugging:
    ('verbose', 0),
    ('debug', False),
    ('startup_traceback', False),
    ('mode', 'production'),
    ('wait_for_dns', 0),
    # CA plugin:
    ('ca_host', FQDN),  # Set in Env._finalize_core()
    ('ca_port', 80),
    ('ca_agent_port', 443),
    # For the following ports, None means a default specific to the installed
    # Dogtag version.
    ('ca_install_port', None),
    # Topology plugin
    ('recommended_max_agmts', 4),  # Recommended maximum number of replication
                                   # agreements
    # Special CLI:
    ('prompt_all', False),
    ('interactive', True),
    ('fallback', True),
    ('delegate', False),
    # Enable certain optional plugins:
    ('enable_ra', False),
    ('ra_plugin', 'selfsign'),
    ('dogtag_version', 9),
    # Used when verifying that the API hasn't changed. Not for production.
    ('validate_api', False),
    # Skip client vs. server API version checking. Can lead to errors/strange
    # behavior when newer clients talk to older servers. Use with caution.
    ('skip_version_check', False),
    # Ignore TTL. Perform schema call and download schema if not in cache.
    ('force_schema_check', False),
    # ********************************************************
    #  The remaining keys are never set from the values here!
    # ********************************************************
    #
    # Env._bootstrap() or Env._finalize_core() will have filled in all the keys
    # below by the time DEFAULT_CONFIG is merged in, so the values below are
    # never actually used. They are listed both to provide a big picture and
    # also so DEFAULT_CONFIG contains at least all the keys that should be
    # present after Env._finalize_core() is called.
    #
    # Each environment variable below is sent to ``object``, which just happens
    # to be an invalid value for an environment variable, so if for some reason
    # any of these keys were set from the values here, an exception will be
    # raised.
    # Non-overridable vars set in Env._bootstrap():
    ('host', FQDN),
    ('ipalib', object),  # The directory containing ipalib/__init__.py
    ('site_packages', object),  # The directory contaning ipalib
    ('script', object),  # sys.argv[0]
    ('bin', object),  # The directory containing the script
    ('home', object),  # os.path.expanduser('~')
    # Vars set in Env._bootstrap():
    ('in_tree', object),  # Whether or not running in-tree (bool)
    ('dot_ipa', object),  # ~/.ipa directory
    ('context', object),  # Name of context, default is 'default'
    ('confdir', object),  # Directory containing config files
    ('env_confdir', None),  # conf dir specified by IPA_CONFDIR env variable
    ('conf', object),  # File containing context specific config
    ('conf_default', object),  # File containing context independent config
    ('plugins_on_demand', object),  # Whether to finalize plugins on-demand (bool)
    ('nss_dir', object),  # Path to nssdb, default {confdir}/nssdb
    ('cache_dir', object),  # ~/.cache/ipa directory, may use XDG_CACHE_HOME env
    ('tls_ca_cert', object),  # Path to CA cert file
    # Set in Env._finalize_core():
    ('in_server', object),  # Whether or not running in-server (bool)
    ('logdir', object),  # Directory containing log files
    ('log', object),  # Path to context specific log file
)
# strftime/strptime format for LDAP generalized time values.
LDAP_GENERALIZED_TIME_FORMAT = "%Y%m%d%H%M%SZ"
# Prefix tags distinguishing anchor value kinds; presumably used when
# encoding user/ID references — confirm against the ID-views plugins.
IPA_ANCHOR_PREFIX = ':IPA:'
SID_ANCHOR_PREFIX = ':SID:'
# domains levels
DOMAIN_LEVEL_0 = 0  # compat
DOMAIN_LEVEL_1 = 1  # replica promotion, topology plugin
MIN_DOMAIN_LEVEL = DOMAIN_LEVEL_1
MAX_DOMAIN_LEVEL = DOMAIN_LEVEL_1
DOMAIN_SUFFIX_NAME = 'domain'
CA_SUFFIX_NAME = 'ca'
PKI_GSSAPI_SERVICE_NAME = 'dogtag'
IPA_CA_CN = u'ipa'
IPA_CA_RECORD = "ipa-ca"
IPA_CA_NICKNAME = 'caSigningCert cert-pki-ca'
RENEWAL_CA_NAME = 'dogtag-ipa-ca-renew-agent'
RENEWAL_REUSE_CA_NAME = 'dogtag-ipa-ca-renew-agent-reuse'
RENEWAL_SELFSIGNED_CA_NAME = 'dogtag-ipa-ca-renew-agent-selfsigned'
# The RA agent cert is used for client cert authentication. In the past IPA
# used caServerCert profile, which adds clientAuth and serverAuth EKU. The
# serverAuth EKU caused trouble with NamedConstraints, see RHBZ#1670239.
RA_AGENT_PROFILE = 'caSubsystemCert'
# How long dbus clients should wait for CA certificate RPCs [seconds]
CA_DBUS_TIMEOUT = 120
# Maximum hostname length in Linux
# It's the max length of uname's nodename and return value of gethostname().
MAXHOSTNAMELEN = 64
# DNS name is 255 octets, effectively 253 ASCII characters.
MAXHOSTFQDNLEN = 253
# regexp definitions
PATTERN_GROUPUSER_NAME = (
    '(?!^[0-9]+$)^[a-zA-Z0-9_.][a-zA-Z0-9_.-]*[a-zA-Z0-9_.$-]?$'
)
ERRMSG_GROUPUSER_NAME = (
    'may only include letters, numbers, _, -, . and $'
    ', refer to \'ipa help {}\' for complete format '
    'description'
)
# Kerberos Anonymous principal name
ANON_USER = 'WELLKNOWN/ANONYMOUS'
# IPA API Framework user
IPAAPI_USER = _constants.IPAAPI_USER
IPAAPI_GROUP = _constants.IPAAPI_GROUP
# User cache path; honors XDG_CACHE_HOME, falls back to ~/.cache.
USER_CACHE_PATH = (
    os.environ.get('XDG_CACHE_HOME') or
    os.path.expanduser('~/.cache')
)
SOFTHSM_DNSSEC_TOKEN_LABEL = u'ipaDNSSEC'
# Apache's mod_ssl SSLVerifyDepth value (Maximum depth of CA
# Certificates in Client Certificate verification)
MOD_SSL_VERIFY_DEPTH = '5'
# subuid / subgid counts are hard-coded
# An interval of 65536 uids/gids is required to map nobody (65534).
SUBID_COUNT = 65536
# upper half of uid_t (uint32_t)
SUBID_RANGE_START = 2 ** 31
# theoretical max limit is UINT32_MAX-1 ((2 ** 32) - 2)
# We use a smaller value to keep the topmost subid interval unused.
SUBID_RANGE_MAX = (2 ** 32) - (2 * SUBID_COUNT)
SUBID_RANGE_SIZE = SUBID_RANGE_MAX - SUBID_RANGE_START
# threshold before DNA plugin requests a new range
SUBID_DNA_THRESHOLD = 500
# moved from ipaserver/install/krainstance.py::KRAInstance to avoid duplication
# as per https://pagure.io/freeipa/issue/8795
# maps certmonger tracking nickname -> Dogtag profile name
KRA_TRACKING_REQS = {
    'auditSigningCert cert-pki-kra': 'caAuditSigningCert',
    'transportCert cert-pki-kra': 'caTransportCert',
    'storageCert cert-pki-kra': 'caStorageCert',
}
ALLOWED_NETBIOS_CHARS = string.ascii_uppercase + string.digits + '-'
# vault data wrapping algorithms
VAULT_WRAPPING_3DES = 'des-ede3-cbc'
VAULT_WRAPPING_AES128_CBC = 'aes-128-cbc'
VAULT_WRAPPING_SUPPORTED_ALGOS = (
    # new default and supported since pki-kra >= 10.4
    VAULT_WRAPPING_AES128_CBC,
)
VAULT_WRAPPING_DEFAULT_ALGO = VAULT_WRAPPING_AES128_CBC
# Add 3DES for backwards compatibility if supported
# (newer cryptography releases drop TripleDES from this namespace, hence
# the getattr guard; the filterwarnings at the top silences the move notice)
if getattr(algorithms, 'TripleDES', None):
    if backend.cipher_supported(algorithms.TripleDES(
            b"\x00" * 8), modes.CBC(b"\x00" * 8)):
        VAULT_WRAPPING_SUPPORTED_ALGOS += (VAULT_WRAPPING_3DES,)
| 15,916
|
Python
|
.py
| 336
| 43.636905
| 102
| 0.652406
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,658
|
backend.py
|
freeipa_freeipa/ipalib/backend.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base classes for all backed-end plugins.
"""
import logging
import threading
import os
from ipalib import plugable
from ipalib.errors import PublicError, InternalError, CommandError
from ipalib.request import context, Connection, destroy_context
logger = logging.getLogger(__name__)
class Backend(plugable.Plugin):
    """
    Base class for all backend plugins.

    A backend provides functionality (LDAP access, RPC transport, ...) that
    frontend plugins call into; it carries no frontend-visible commands of
    its own.
    """
class Connectible(Backend):
    """
    Base class for backend plugins that create connections.
    In addition to the nicety of providing a standard connection API, all
    backend plugins that create connections should use this base class so that
    `request.destroy_context()` can properly close all open connections.
    """
    def __init__(self, api, shared_instance=False):
        # ``self.id`` is the attribute name under which this backend's
        # connection is stored on the thread-local ``context``.  A shared
        # instance uses the plugin name alone; otherwise id(self) makes the
        # slot unique per instance.
        Backend.__init__(self, api)
        if shared_instance:
            self.id = self.name
        else:
            self.id = '%s_%s' % (self.name, str(id(self)))
    def connect(self, *args, **kw):
        """
        Create thread-local connection.

        Raises a plain ``Exception`` when this backend is already connected
        in the current thread.  The connection object produced by
        `create_connection()` is wrapped in a `request.Connection` so that
        `request.destroy_context()` can later tear it down via
        `disconnect()`.
        """
        if hasattr(context, self.id):
            raise Exception(
                "{0} is already connected ({1} in {2})".format(
                    self.name,
                    self.id,
                    threading.current_thread().name,
                )
            )
        conn = self.create_connection(*args, **kw)
        setattr(context, self.id, Connection(conn, self.disconnect))
        # Sanity check: the ``conn`` property must now resolve to this object.
        assert self.conn is conn
        logger.debug('Created connection context.%s', self.id)
    def create_connection(self, *args, **kw):
        # Subclasses must return the raw connection object.
        raise NotImplementedError('%s.create_connection()' % self.id)
    def disconnect(self):
        """
        Close the thread-local connection; raises when not connected.
        """
        if not hasattr(context, self.id):
            raise Exception(
                "{0} is not connected ({1} in {2})".format(
                    self.name,
                    self.id,
                    threading.current_thread().name,
                )
            )
        self.destroy_connection()
        delattr(context, self.id)
        logger.debug('Destroyed connection context.%s', self.id)
    def destroy_connection(self):
        # Subclasses must close/release the underlying connection.
        raise NotImplementedError('%s.destroy_connection()' % self.id)
    def isconnected(self):
        """
        Return ``True`` if thread-local connection on `request.context` exists.
        """
        return hasattr(context, self.id)
    def __get_conn(self):
        """
        Return thread-local connection.
        """
        if not hasattr(context, self.id):
            raise AttributeError(
                "{0} is not connected ({1} in {2})".format(
                    self.name,
                    self.id,
                    threading.current_thread().name,
                )
            )
        return getattr(context, self.id).conn
    conn = property(__get_conn)
class Executioner(Backend):
    """Backend that sets up a request context and executes commands in it."""
    def create_context(self, ccache=None, client_ip=None):
        """
        Prepare the per-request context: connect either the server-side
        LDAP backend or the client-side RPC backend.

        client_ip: The IP address of the remote client.

        NOTE(review): KRB5CCNAME is set process-wide here and never
        restored — confirm callers expect that side effect.
        """
        if ccache is not None:
            os.environ["KRB5CCNAME"] = ccache
        if self.env.in_server:
            self.Backend.ldap2.connect(ccache=ccache,
                                       size_limit=None,
                                       time_limit=None)
        else:
            self.Backend.rpcclient.connect()
        if client_ip is not None:
            setattr(context, "client_ip", client_ip)
    def destroy_context(self):
        # Thin wrapper over request.destroy_context() (closes connections).
        destroy_context()
    def execute(self, _name, *args, **options):
        """
        Run command ``_name`` with the given arguments, always tearing the
        request context down afterwards.

        PublicError subclasses propagate to the caller unchanged; any other
        exception is logged with traceback and masked as InternalError so
        details never leak to remote clients.
        """
        try:
            if _name not in self.Command:
                raise CommandError(name=_name)
            return self.Command[_name](*args, **options)
        except PublicError:
            raise
        except Exception as e:
            logger.exception(
                'non-public: %s: %s', e.__class__.__name__, str(e)
            )
            raise InternalError()
        finally:
            destroy_context()
| 4,716
|
Python
|
.py
| 127
| 28.094488
| 79
| 0.600088
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,659
|
cli.py
|
freeipa_freeipa/ipalib/cli.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Functionality for Command Line Interface.
"""
from __future__ import print_function
import atexit
import builtins
import importlib
import logging
import textwrap
import sys
import getpass
import code
import optparse # pylint: disable=deprecated-module
import os
import pprint
import fcntl
import termios
import struct
import base64
import traceback
try:
import readline
import rlcompleter
except ImportError:
readline = rlcompleter = None
import six
from six.moves import input
from ipalib.util import (
check_client_configuration, get_pager, get_terminal_height, open_in_pager
)
if six.PY3:
unicode = str
if six.PY2:
reload(sys) # pylint: disable=reload-builtin, undefined-variable
sys.setdefaultencoding('utf-8') # pylint: disable=no-member
from ipalib import frontend
from ipalib import backend
from ipalib import plugable
from ipalib.errors import (PublicError, CommandError, HelpError, InternalError,
NoSuchNamespaceError, ValidationError, NotFound,
NotConfiguredError, PromptFailed)
from ipalib.constants import CLI_TAB, LDAP_GENERALIZED_TIME_FORMAT
from ipalib.parameters import File, BinaryFile, Str, Enum, Any, Flag
from ipalib.text import _
from ipalib import api
from ipapython.dnsutil import DNSName
from ipapython.admintool import ScriptError
import datetime
logger = logging.getLogger(__name__)
def to_cli(name):
    """
    Convert a Python identifier into its Command Line Interface form.

    Underscores become hyphens, e.g. ``user_add`` -> ``user-add``.
    """
    assert isinstance(name, str)
    return '-'.join(name.split('_'))
def from_cli(cli_name):
    """
    Convert a Command Line Interface name back into a Python identifier.

    Hyphens become underscores, e.g. ``user-add`` -> ``user_add``.
    """
    return '_'.join(str(cli_name).split('-'))
class textui(backend.Backend):
"""
Backend plugin to nicely format output to stdout.
"""
def get_tty_width(self):
"""
Return the width (in characters) of output tty.
If stdout is not a tty, this method will return ``None``.
"""
# /usr/include/asm/termios.h says that struct winsize has four
# unsigned shorts, hence the HHHH
if sys.stdout.isatty():
try:
winsize = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0))
return struct.unpack('HHHH', winsize)[1]
except IOError:
pass
return None
def max_col_width(self, rows, col=None):
"""
Return the max width (in characters) of a specified column.
For example:
>>> ui = textui(api)
>>> rows = [
... ('a', 'package'),
... ('an', 'egg'),
... ]
>>> ui.max_col_width(rows, col=0) # len('an')
2
>>> ui.max_col_width(rows, col=1) # len('package')
7
>>> ui.max_col_width(['a', 'cherry', 'py']) # len('cherry')
6
"""
if type(rows) not in (list, tuple):
raise TypeError(
'rows: need %r or %r; got %r' % (list, tuple, rows)
)
if len(rows) == 0:
return 0
if col is None:
return max(len(row) for row in rows)
return max(len(row[col]) for row in rows)
    def __get_encoding(self, stream):
        # Only stdin/stdout are meaningful here.  Fall back to UTF-8 when
        # the stream does not advertise an encoding (e.g. when redirected).
        assert stream in (sys.stdin, sys.stdout)
        if getattr(stream, 'encoding', None) is None:
            return 'UTF-8'
        return stream.encoding
    # Python 2 needs real byte<->text conversion at the terminal boundary;
    # on Python 3 the standard streams are already text, so encode/decode
    # collapse to identity functions and callers stay version-agnostic.
    if six.PY2:
        def decode(self, value):
            """
            Decode text from stdin.
            """
            if type(value) is bytes:
                encoding = self.__get_encoding(sys.stdin)
                return value.decode(encoding)
            elif type(value) in (list, tuple):
                # Recursively decode members; note the result is always a
                # tuple, even when a list was passed in.
                return tuple(self.decode(v) for v in value)
            return value
        def encode(self, unicode_text):
            """
            Encode text for output to stdout.
            """
            assert type(unicode_text) is unicode
            encoding = self.__get_encoding(sys.stdout)
            return unicode_text.encode(encoding)
    else:
        def decode(self, value):
            return value
        def encode(self, value):
            return value
def choose_number(self, n, singular, plural=None):
if n == 1 or plural is None:
return singular % n
return plural % n
def encode_binary(self, value):
"""
Convert a binary value to base64. We know a value is binary
if it is a python bytes type, otherwise it is a plain string.
This function also converts datetime and DNSName values to string.
"""
if type(value) is bytes:
return base64.b64encode(value).decode('ascii')
elif type(value) is datetime.datetime:
return value.strftime(LDAP_GENERALIZED_TIME_FORMAT)
elif isinstance(value, DNSName):
return unicode(value)
else:
return value
def print_plain(self, string):
"""
Print exactly like ``print`` statement would.
"""
print(unicode(string))
def print_line(self, text, width=None):
"""
Force printing on a single line, using ellipsis if needed.
For example:
>>> ui = textui(api)
>>> ui.print_line('This line can fit!', width=18)
This line can fit!
>>> ui.print_line('This line wont quite fit!', width=18)
This line wont ...
The above example aside, you normally should not specify the
``width``. When you don't, it is automatically determined by calling
`textui.get_tty_width()`.
"""
if width is None:
width = self.get_tty_width()
if width is not None and width < len(text):
text = text[:width - 3] + '...'
print(unicode(text))
def print_paragraph(self, text, width=None):
"""
Print a paragraph, automatically word-wrapping to tty width.
For example:
>>> text = '''
... Python is a dynamic object-oriented programming language that can
... be used for many kinds of software development.
... '''
>>> ui = textui(api)
>>> ui.print_paragraph(text, width=45)
Python is a dynamic object-oriented
programming language that can be used for
many kinds of software development.
The above example aside, you normally should not specify the
``width``. When you don't, it is automatically determined by calling
`textui.get_tty_width()`.
The word-wrapping is done using the Python ``textwrap`` module. See:
http://docs.python.org/library/textwrap.html
"""
if width is None:
width = self.get_tty_width()
for line in textwrap.wrap(text.strip(), width):
print(line)
def print_indented(self, text, indent=1):
"""
Print at specified indentation level.
For example:
>>> ui = textui(api)
>>> ui.print_indented('One indentation level.')
One indentation level.
>>> ui.print_indented('Two indentation levels.', indent=2)
Two indentation levels.
>>> ui.print_indented('No indentation.', indent=0)
No indentation.
"""
print((CLI_TAB * indent + text))
def print_keyval(self, rows, indent=1):
"""
Print (key = value) pairs, one pair per line.
For example:
>>> items = [
... ('in_server', True),
... ('mode', u'production'),
... ]
>>> ui = textui(api)
>>> ui.print_keyval(items)
in_server = True
mode = u'production'
>>> ui.print_keyval(items, indent=0)
in_server = True
mode = u'production'
Also see `textui.print_indented`.
"""
for (key, value) in rows:
self.print_indented('%s = %r' % (key, self.encode_binary(value)), indent)
    def print_attribute(self, attr, value, format='%s: %s', indent=1, one_value_per_line=True):
        """
        Print an ldap attribute.
        For example:
        >>> attr = 'dn'
        >>> ui = textui(api)
        >>> ui.print_attribute(attr, u'dc=example,dc=com')
          dn: dc=example,dc=com
        >>> attr = 'objectClass'
        >>> ui.print_attribute(attr, [u'top', u'someClass'], one_value_per_line=False)
          objectClass: top, someClass
        >>> ui.print_attribute(attr, [u'top', u'someClass'])
          objectClass: top
          objectClass: someClass
        """
        assert isinstance(attr, str)
        if not isinstance(value, (list, tuple)):
            # single-value attribute
            self.print_indented(format % (attr, self.encode_binary(value)), indent)
        else:
            # multi-value attribute
            if one_value_per_line:
                for v in value:
                    self.print_indented(format % (attr, self.encode_binary(v)), indent)
            else:
                value = [self.encode_binary(v) for v in value]
                if len(value) > 0 and type(value[0]) in (list, tuple):
                    # This is where we print failed add/remove members
                    for l in value:
                        text = ': '.join(l)
                        self.print_indented(format % (attr, self.encode_binary(text)), indent)
                    return
                else:
                    if len(value) > 0:
                        # comma-join all values onto one logical line
                        text = ', '.join(str(v) for v in value)
                    else:
                        return
                # Wrap the joined values to the tty width; continuation
                # lines are indented to align under the first value
                # (len(attr) + 2 accounts for the ': ' separator).
                line_len = self.get_tty_width()
                if line_len and text:
                    s_indent = '%s%s' % (
                        CLI_TAB * indent, ' ' * (len(attr) + 2)
                    )
                    line_len -= len(s_indent)
                    text = textwrap.wrap(
                        text, line_len, break_long_words=False
                    )
                    if len(text) == 0:
                        text = [u'']
                else:
                    # no tty width available: emit everything on one line
                    s_indent = u''
                    text = [text]
                self.print_indented(format % (attr, text[0]), indent)
                for line in text[1:]:
                    self.print_plain('%s%s' % (s_indent, line))
def print_entry1(self, entry, indent=1, attr_map={}, attr_order=['dn'],
one_value_per_line=True):
"""
Print an ldap entry dict.
"""
assert isinstance(entry, dict)
assert isinstance(attr_map, dict)
assert isinstance(attr_order, (list, tuple))
def print_attr(a):
if attr in attr_map:
self.print_attribute(
attr_map[attr], entry[attr], indent=indent, one_value_per_line=one_value_per_line
)
else:
self.print_attribute(
attr, entry[attr], indent=indent, one_value_per_line=one_value_per_line
)
for attr in attr_order:
if attr in entry:
print_attr(attr)
del entry[attr]
for attr in sorted(entry):
print_attr(attr)
def print_entries(self, entries, order=None, labels=None, flags=None, print_all=True, format='%s: %s', indent=1):
assert isinstance(entries, (list, tuple))
first = True
for entry in entries:
if not first:
print('')
first = False
self.print_entry(entry, order, labels, flags, print_all, format, indent)
    def print_entry(self, entry, order=None, labels=None, flags=None, print_all=True, format='%s: %s', indent=1):
        """
        Print an ldap entry dict, optionally ordered/labelled/filtered.

        Keys listed in *order* are printed first with their *labels* and
        *flags* applied (and removed from *entry* as a side effect); with
        ``print_all`` the remaining keys follow in sorted order.  Nested
        dicts and lists of dicts are printed recursively one indent deeper.

        NOTE(review): when *order* is given but *flags* is None,
        ``flags.get(...)`` below would raise AttributeError — presumably
        callers always pass flags together with order; confirm.
        """
        if isinstance(entry, (list, tuple)):
            entry = dict(entry)
        assert isinstance(entry, dict)
        if labels is None:
            labels = dict()
            one_value_per_line = True
        else:
            one_value_per_line = False
        if order is not None:
            for key in order:
                if key not in entry:
                    continue
                label = labels.get(key, key)
                flag = flags.get(key, [])
                value = entry[key]
                # 'suppress_empty' flag: skip attributes with empty values
                if ('suppress_empty' in flag and
                        value in [u'', '', (), [], None]):
                    continue
                if isinstance(value, dict):
                    if frontend.entry_count(value) == 0:
                        continue
                    self.print_indented(format % (label, ''), indent)
                    self.print_entry(
                        value, order, labels, flags, print_all, format,
                        indent=indent+1
                    )
                else:
                    if isinstance(value, (list, tuple)) and \
                            all(isinstance(val, dict) for val in value):
                        # this is a list of entries (dicts), not values
                        self.print_attribute(label, u'', format, indent)
                        self.print_entries(value, order, labels, flags, print_all,
                                           format, indent+1)
                    else:
                        self.print_attribute(
                            label, value, format, indent, one_value_per_line
                        )
                # NOTE: mutates the caller's dict so the print_all pass
                # below does not repeat already-printed keys.
                del entry[key]
        if print_all:
            for key in sorted(entry):
                label = labels.get(key, key)
                self.print_attribute(
                    key, entry[key], format, indent, one_value_per_line
                )
def print_dashed(self, string, above=True, below=True, indent=0, dash='-'):
"""
Print a string with a dashed line above and/or below.
For example:
>>> ui = textui(api)
>>> ui.print_dashed('Dashed above and below.')
-----------------------
Dashed above and below.
-----------------------
>>> ui.print_dashed('Only dashed below.', above=False)
Only dashed below.
------------------
>>> ui.print_dashed('Only dashed above.', below=False)
------------------
Only dashed above.
"""
assert isinstance(dash, str)
assert len(dash) == 1
dashes = dash * len(string)
if above:
self.print_indented(dashes, indent)
self.print_indented(string, indent)
if below:
self.print_indented(dashes, indent)
def print_h1(self, text):
"""
Print a primary header at indentation level 0.
For example:
>>> ui = textui(api)
>>> ui.print_h1('A primary header')
================
A primary header
================
"""
self.print_dashed(text, indent=0, dash='=')
def print_h2(self, text):
"""
Print a secondary header at indentation level 1.
For example:
>>> ui = textui(api)
>>> ui.print_h2('A secondary header')
------------------
A secondary header
------------------
"""
self.print_dashed(text, indent=1, dash='-')
def print_name(self, name):
"""
Print a command name.
The typical use for this is to mark the start of output from a
command. For example, a hypothetical ``show_status`` command would
output something like this:
>>> ui = textui(api)
>>> ui.print_name('show_status')
------------
show-status:
------------
"""
self.print_dashed('%s:' % to_cli(name))
    def print_header(self, msg, output):
        # Interpolate ``output`` into the ``msg`` format string and print
        # the result framed by dashed lines.
        self.print_dashed(msg % output)
    def print_summary(self, msg):
        """
        Print a summary at the end of a command's output.
        For example:
        >>> ui = textui(api)
        >>> ui.print_summary('Added user "jdoe"')
        -----------------
        Added user "jdoe"
        -----------------
        """
        self.print_dashed(msg)
def print_count(self, count, singular, plural=None):
"""
Print a summary count.
The typical use for this is to print the number of items returned
by a command, especially when this return count can vary. This
preferably should be used as a summary and should be the final text
a command outputs.
For example:
>>> ui = textui(api)
>>> ui.print_count(1, '%d goose', '%d geese')
-------
1 goose
-------
>>> ui.print_count(['Don', 'Sue'], 'Found %d user', 'Found %d users')
-------------
Found 2 users
-------------
If ``count`` is not an integer, it must be a list or tuple, and then
``len(count)`` is used as the count.
"""
if type(count) is not int:
assert type(count) in (list, tuple, dict)
count = len(count)
self.print_dashed(
self.choose_number(count, singular, plural)
)
    def print_error(self, text):
        # Render an error message, visually set off with ** markers.
        print(' ** %s **' % unicode(text))
def prompt_helper(self, prompt, label, prompt_func=input):
"""Prompt user for input
Handles encoding the prompt and decoding the input.
On end of stream or ctrl+c, raise PromptFailed.
"""
try:
return self.decode(prompt_func(self.encode(prompt)))
except (KeyboardInterrupt, EOFError):
print()
raise PromptFailed(name=label)
    def print_prompt_attribute_error(self, attribute, error):
        # Show a per-attribute validation error during interactive prompting.
        self.print_plain('>>> %s: %s' % (attribute, error))
def prompt(self, label, default=None, get_values=None, optional=False):
"""
Prompt user for input.
"""
# TODO: Add tab completion using readline
if optional:
prompt = u'[%s]' % label
else:
prompt = u'%s' % label
if default is None:
prompt = u'%s: ' % prompt
else:
prompt = u'%s [%s]: ' % (prompt, default)
return self.prompt_helper(prompt, label)
    def prompt_yesno(self, label, default=None):
        """
        Prompt user for yes/no input. This method returns True/False according
        to user response.
        Parameter "default" should be True, False or None
        If Default parameter is not None, user can enter an empty input instead
        of Yes/No answer. Value passed to Default is returned in that case.
        If Default parameter is None, user is asked for Yes/No answer until
        a correct answer is provided. Answer is then returned.
        """
        # Human-readable default shown in the prompt, when one exists.
        default_prompt = None
        if default is not None:
            if default:
                default_prompt = "Yes"
            else:
                default_prompt = "No"
        if default_prompt:
            prompt = u'%s Yes/No (default %s): ' % (label, default_prompt)
        else:
            prompt = u'%s Yes/No: ' % label
        # Re-prompt until the user gives a recognized answer (or an empty
        # answer when a default is available).
        while True:
            data = self.prompt_helper(prompt, label).lower()
            if data in (u'yes', u'y'):
                return True
            elif data in ( u'n', u'no'):
                return False
            elif default is not None and data == u'':
                return default
        return default  # pylint consistent return statements (unreachable)
def prompt_password(self, label, confirm=True):
"""
Prompt user for a password or read it in via stdin depending
on whether there is a tty or not.
"""
if sys.stdin.isatty():
prompt = u'%s: ' % unicode(label)
repeat_prompt = unicode(_('Enter %(label)s again to verify: ') % dict(label=label))
while True:
pw1 = self.prompt_helper(prompt, label, prompt_func=getpass.getpass)
if not confirm:
return pw1
pw2 = self.prompt_helper(repeat_prompt, label, prompt_func=getpass.getpass)
if pw1 == pw2:
return pw1
else:
self.print_error(_('Passwords do not match!'))
else:
return self.decode(sys.stdin.readline().strip())
    def select_entry(self, entries, format, attrs, display_count=True):
        """
        Display a list of lines with formatting defined in ``format``.
        ``attrs`` is a list of attributes in the format.
        Prompt user for a selection and return the value (index of
        ``entries`` -1).
        If only one entry is provided then always return 0.
        Return: 0..n for the index of the selected entry
                -1 if all entries should be displayed
                -2 to quit, no entries to be displayed
        """
        # Non-interactive sessions cannot be prompted: show everything.
        if not self.env.interactive or not sys.stdout.isatty():
            return -1
        counter = len(entries)
        if counter == 0:
            raise NotFound(reason=_("No matching entries found"))
        i = 1
        for e in entries:
            # There is no guarantee that all attrs are in any given
            # entry
            d = {}
            for a in attrs:
                d[a] = e.get(a, '')
            self.print_line("%d: %s" % (i, format % d))
            i = i + 1
        if display_count:
            self.print_count(entries, 'Found %d match', 'Found %d matches')
        while True:
            try:
                resp = self.prompt("Choose one: (1 - %s), a for all, q to quit" % counter)
            # NOTE(review): prompt_helper converts EOF into PromptFailed, so
            # this EOFError handler looks unreachable — confirm.
            except EOFError:
                return -2
            if resp.lower() == "q":
                return -2
            if resp.lower() == "a":
                return -1
            try:
                # Convert the 1-based answer to a 0-based index.
                selection = int(resp) - 1
                if (counter > selection >= 0):
                    break
            except Exception:
                # fall through to the error msg
                pass
            self.print_line("Please enter a number between 1 and %s" % counter)
        self.print_line('')
        return selection
class help(frontend.Local):
    """
    Display help for a command or topic.
    """
    class Writer:
        """
        Writer abstraction: buffers output lines, then either pages them
        or prints them to the target file on write().
        """
        def __init__(self, outfile):
            self.outfile = outfile
            self.buffer = []
        @property
        def buffer_length(self):
            # Number of terminal rows buffered (embedded newlines count).
            length = 0
            for line in self.buffer:
                length += len(line.split("\n"))
            return length
        def append(self, string=u""):
            self.buffer.append(unicode(string))
        def write(self):
            # Page the output when it would scroll past the terminal height.
            pager = get_pager()
            if pager and self.buffer_length > get_terminal_height():
                data = "\n".join(self.buffer).encode("utf-8")
                open_in_pager(data, pager)
            else:
                try:
                    for line in self.buffer:
                        print(line, file=self.outfile)
                except IOError:
                    pass
    takes_args = (
        Str('command?', cli_name='topic', label=_('Topic or Command'),
            doc=_('The topic or command name.')),
    )
    takes_options = (
        Any('outfile?', flags=['no_option']),
    )
    has_output = tuple()
    topic = None
    def _get_topic(self, topic):
        # Return (first-paragraph doc, parent topic) for the topic module,
        # searching every registered plugin package for '<package>.<topic>'.
        doc = u''
        parent_topic = None
        for package in self.api.packages:
            module_name = '{0}.{1}'.format(package.__name__, topic)
            try:
                module = sys.modules[module_name]
            except KeyError:
                try:
                    module = importlib.import_module(module_name)
                except ImportError:
                    continue
            if module.__doc__ is not None:
                doc = unicode(module.__doc__ or '').strip()
            try:
                parent_topic = module.topic
            except AttributeError:
                pass
        return doc, parent_topic
    def _count_topic_mcl(self, topic_name, mod_name):
        # Track the maximum command-label length (mcl) for column alignment.
        mcl = max((self._topics[topic_name][1], len(mod_name)))
        self._topics[topic_name][1] = mcl
    def _on_finalize(self):
        # {topic: ["description", mcl, {
        # "subtopic": ["description", mcl, [commands]]}]}
        # {topic: ["description", mcl, [commands]]}
        self._topics = {}
        # [builtin_commands]
        self._builtins = []
        # build help topics
        for c in self.api.Command:
            if c is not self.api.Command.get_plugin(c.name):
                continue
            if c.NO_CLI:
                continue
            if c.topic is not None:
                doc, topic_name = self._get_topic(c.topic)
                doc = doc.split('\n', 1)[0]
                if topic_name is None:  # a module without grouping
                    topic_name = c.topic
                    if topic_name in self._topics:
                        self._topics[topic_name][2].append(c)
                    else:
                        self._topics[topic_name] = [doc, 0, [c]]
                    mcl = max((self._topics[topic_name][1], len(c.name)))
                    self._topics[topic_name][1] = mcl
                else:  # a module grouped in a topic
                    topic = self._get_topic(topic_name)
                    mod_name = c.topic
                    if topic_name in self._topics:
                        if mod_name in self._topics[topic_name][2]:
                            self._topics[topic_name][2][mod_name][2].append(c)
                        else:
                            self._topics[topic_name][2][mod_name] = [
                                doc, 0, [c]]
                            self._count_topic_mcl(topic_name, mod_name)
                        # count mcl for the subtopic
                        mcl = max((
                            self._topics[topic_name][2][mod_name][1],
                            len(c.name)))
                        self._topics[topic_name][2][mod_name][1] = mcl
                    else:
                        self._topics[topic_name] = [
                            topic[0].split('\n', 1)[0],
                            0,
                            {mod_name: [doc, 0, [c]]}]
                        self._count_topic_mcl(topic_name, mod_name)
            else:
                self._builtins.append(c)
        # compute maximum topic length
        topics = list(self._topics) + [c.name for c in self._builtins]
        self._mtl = max(len(s) for s in topics)
        super(help, self)._on_finalize()
    def run(self, key=None, outfile=None, **options):
        # Dispatch on what ``key`` names: nothing (global help), the special
        # words "topics"/"commands", a topic, a subtopic, or a command.
        if outfile is None:
            outfile = sys.stdout
        writer = self.Writer(outfile)
        name = from_cli(key)
        if key is None:
            self.api.parser.print_help(outfile)
            return
        if name == "topics":
            self.print_topics(outfile)
            return
        if name in self._topics:
            self.print_commands(name, outfile)
        elif name in self.Command:
            cmd = self.Command[name]
            if cmd.NO_CLI:
                raise HelpError(topic=name)
            self.Backend.cli.build_parser(cmd).print_help(outfile)
        elif any(name in t[2] for t in self._topics.values()
                 if type(t[2]) is dict):
            self.print_commands(name, outfile)
        elif name == "commands":
            mcl = 0
            for cmd_plugin in self.Command:
                if cmd_plugin is not self.Command.get_plugin(cmd_plugin.name):
                    continue
                if cmd_plugin.NO_CLI:
                    continue
                mcl = max(mcl, len(cmd_plugin.name))
                writer.append('{0} {1}'.format(
                    to_cli(cmd_plugin.name).ljust(mcl), cmd_plugin.summary))
        else:
            raise HelpError(topic=name)
        writer.write()
    def print_topics(self, outfile):
        # One aligned "<topic>  <description>" row per known topic.
        writer = self.Writer(outfile)
        for t, topic in sorted(self._topics.items()):
            writer.append('{0} {1}'.format(
                to_cli(t).ljust(self._mtl), topic[0]))
        writer.write()
    def print_commands(self, topic, outfile):
        """Print the subtopics of ``topic``, or its command list."""
        writer = self.Writer(outfile)
        if topic in self._topics and type(self._topics[topic][2]) is dict:
            # we want to display topic which has subtopics
            for subtopic in self._topics[topic][2]:
                doc = self._topics[topic][2][subtopic][0]
                mcl = self._topics[topic][1]
                writer.append('  {0} {1}'.format(
                    to_cli(subtopic).ljust(mcl), doc))
        else:
            # we want to display subtopic or a topic which has no subtopics
            if topic in self._topics:
                mcl = self._topics[topic][1]
                commands = self._topics[topic][2]
            else:
                commands = []
                for v in self._topics.values():
                    if not isinstance(v[2], dict):
                        continue
                    if topic not in v[2]:
                        continue
                    mcl = v[2][topic][1]
                    commands = v[2][topic][2]
                    break
            doc, _topic = self._get_topic(topic)
            if topic not in self.Command and len(commands) == 0:
                raise HelpError(topic=topic)
            writer.append(doc)
            if commands:
                writer.append()
                writer.append(_('Topic commands:'))
                for c in commands:
                    writer.append(
                        '  {0} {1}'.format(
                            to_cli(c.name).ljust(mcl), c.summary))
                writer.append()
                writer.append(_('To get command help, use:'))
                writer.append(_('  ipa <command> --help'))
                writer.append()
        writer.write()
class show_mappings(frontend.Command):
    """
    Show mapping of LDAP attributes to command-line option.
    """
    takes_args = (
        Str('command_name',
            label=_('Command name'),
        ),
    )
    has_output = tuple()
    topic = None

    def run(self, command_name, **options):
        """Print a two-column table mapping CLI parameters to LDAP attributes.

        :raises CommandError: when ``command_name`` is not a known command.
        """
        command_name = from_cli(command_name)
        if command_name not in self.Command:
            raise CommandError(name=command_name)
        command_options = self.Command[command_name].options
        rows = [('Parameter','LDAP attribute'),
                ('=========','==============')]
        width = len(rows[0][0])
        for param in command_options():
            # Options hidden from the web UI have no LDAP mapping worth showing.
            if param.exclude and 'webui' in param.exclude:
                continue
            rows.append((param.cli_name, param.param_spec))
            width = max(width, len(param.cli_name))
        for cli_name, attribute in rows:
            print(to_cli(cli_name).ljust(width) + ' : ' + attribute)
class IPACompleter(rlcompleter.Completer):
    """Readline completer that never appends '(' to IPA API objects."""
    def _callable_postfix(self, val, word):
        if not isinstance(val, (plugable.APINameSpace, plugable.API)):
            return super()._callable_postfix(val, word)
        # API namespaces are callable but are browsed, not invoked.
        return word
class InteractiveConsole(code.InteractiveConsole):
    """Console that renders IPA public errors as a one-line message."""
    def showtraceback(self):
        exc = sys.exc_info()[1]
        if not isinstance(exc, PublicError):
            super().showtraceback()
            return
        self.write('IPA public error exception: %s: %s\n' %
                   (exc.__class__.__name__, str(exc)))
class console(frontend.Command):
    """Start the IPA interactive Python console, or run a script.
    An IPA API object is initialized and made available
    in the `api` global variable.
    """
    takes_args = ('filename?',)
    has_output = tuple()
    topic = None
    def _setup_tab_completion(self, local):
        # Enable readline tab completion over the console's local namespace
        # and persist command history in ~/.ipa/console.history.
        readline.parse_and_bind("tab: complete")
        # completer with custom locals
        readline.set_completer(IPACompleter(local).complete)
        # load history
        history = os.path.join(api.env.dot_ipa, "console.history")
        try:
            readline.read_history_file(history)
        except OSError:
            pass
        def save_history():
            # Store the last 50 lines when the interpreter exits.
            directory = os.path.dirname(history)
            if not os.path.isdir(directory):
                os.makedirs(directory)
            readline.set_history_length(50)
            try:
                readline.write_history_file(history)
            except OSError:
                logger.exception("Unable to store history %s", history)
        atexit.register(save_history)
    def run(self, filename=None, **options):
        """Execute ``filename`` if given, otherwise start an interactive console."""
        local = dict(
            api=self.api,
            pp=pprint.pprint,  # just too convenient
            __builtins__=builtins,
        )
        if filename:
            try:
                with open(filename) as f:
                    source = f.read()
            except IOError as e:
                sys.exit("%s: %s" % (e.filename, e.strerror))
            try:
                compiled = compile(
                    source,
                    filename,
                    'exec',
                    flags=print_function.compiler_flag
                )
                exec(compiled, globals(), local)
            except Exception:
                traceback.print_exc()
                sys.exit(1)
        else:
            if readline is not None:
                self._setup_tab_completion(local)
            cons = InteractiveConsole(local)
            cons.interact(
                "\n".join((
                    "(Custom IPA interactive Python console)",
                    " api: IPA API object",
                    " pp: pretty printer",
                )),
                exitmsg=None
            )
class show_api(frontend.Command):
    'Show attributes on dynamic API object'
    takes_args = ('namespaces*',)
    topic = None
    def run(self, namespaces=None):
        """Print a tree of API namespaces and their members.

        :param namespaces: optional iterable of namespace names to show;
            all namespaces on ``self.api`` are shown when omitted.
        :raises NoSuchNamespaceError: if a requested namespace is unknown.
        """
        if namespaces is None:
            names = tuple(self.api)
        else:
            for name in namespaces:
                if name not in self.api:
                    raise NoSuchNamespaceError(name=name)
            names = namespaces
        lines = self.__traverse(names)
        # Widest member name, used to align the repr column.
        ml = max(len(line[1]) for line in lines)
        self.Backend.textui.print_name('run')
        first = True
        for line in lines:
            # Blank line between successive top-level namespaces.
            if line[0] == 0 and not first:
                print('')
            if first:
                first = False
            print('%s%s %r' % (
                ' ' * line[0],
                line[1].ljust(ml),
                line[2],
            ))
        # Fix: plural summary used to read "attributes show." which was
        # inconsistent with the singular "1 attribute shown." branch.
        if len(lines) == 1:
            s = '1 attribute shown.'
        else:
            s = '%d attributes shown.' % len(lines)
        self.Backend.textui.print_dashed(s)
    def __traverse(self, names):
        # Collect (indent, name, object) rows for each requested namespace.
        lines = []
        for name in names:
            namespace = self.api[name]
            self.__traverse_namespace('%s' % name, namespace, lines)
        return lines
    def __traverse_namespace(self, name, namespace, lines, tab=0):
        """Recursively append namespace members as (indent, name, obj) rows."""
        lines.append((tab, name, namespace))
        for member_name in namespace:
            member = namespace[member_name]
            lines.append((tab + 1, member_name, member))
            if not hasattr(member, '__iter__'):
                continue
            for n in member:
                attr = member[n]
                if isinstance(attr, plugable.APINameSpace) and len(attr) > 0:
                    self.__traverse_namespace(n, attr, lines, tab + 2)
# Commands that only make sense in the local CLI application (they are not
# exposed over RPC); registered by scripts embedding the CLI.
cli_application_commands = (
    help,
    console,
    show_api,
)
class Collector:
    """Attribute sink that accumulates repeated assignments into tuples.

    The first assignment of a name stores the value as-is; each further
    assignment of the same name collects all values into a tuple.  The
    option parser uses this so repeatable CLI options gather every
    occurrence.  ``__todict__()`` returns a snapshot dict of the state.
    """
    def __init__(self):
        # Bypass our own __setattr__ while creating the backing dict.
        object.__setattr__(self, '_Collector__options', {})

    def __setattr__(self, name, value):
        options = self.__options
        if name in options:
            previous = options[name]
            if type(previous) is tuple:
                value = previous + (value,)
            else:
                value = (previous, value)
        options[name] = value
        object.__setattr__(self, name, value)

    def __todict__(self):
        return dict(self.__options)
class CLIOptionParserFormatter(optparse.IndentedHelpFormatter):
    # Formats positional arguments the same way optparse formats options.
    def format_argument(self, name, help_string):
        """Return aligned, wrapped help text for one positional argument."""
        result = []
        # Column width available for the argument name itself.
        opt_width = self.help_position - self.current_indent - 2
        if len(name) > opt_width:
            # Name too wide: start the help text on the next line.
            name = "%*s%s\n" % (self.current_indent, "", name)
            indent_first = self.help_position
        else: # start help on same line as name
            name = "%*s%-*s " % (self.current_indent, "", opt_width, name)
            indent_first = 0
        result.append(name)
        if help_string:
            help_lines = textwrap.wrap(help_string, self.help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line)
                           for line in help_lines[1:]])
        elif name[-1] != "\n":
            result.append("\n")
        return "".join(result)
class CLIOptionParser(optparse.OptionParser):
    """
    This OptionParser subclass adds an ability to print positional
    arguments in CLI help. Custom formatter is used to format the argument
    list in the same way as OptionParser formats options.
    """
    def __init__(self, *args, **kwargs):
        # (name, help_string) pairs registered via add_argument().
        self._arguments = []
        if 'formatter' not in kwargs:
            kwargs['formatter'] = CLIOptionParserFormatter()
        optparse.OptionParser.__init__(self, *args, **kwargs)
    def format_option_help(self, formatter=None):
        """
        Prepend argument help to standard OptionParser's option help
        """
        option_help = optparse.OptionParser.format_option_help(self, formatter)
        if isinstance(formatter, CLIOptionParserFormatter):
            heading = unicode(_("Positional arguments"))
            arguments = [formatter.format_heading(heading)]
            formatter.indent()
            for (name, help_string) in self._arguments:
                arguments.append(formatter.format_argument(name, help_string))
            formatter.dedent()
            if len(arguments) > 1:
                # there is more than just the heading
                arguments.append(u"\n")
            else:
                # No arguments registered: omit the heading entirely.
                arguments = []
            option_help = "".join(arguments) + option_help
        return option_help
    def add_argument(self, name, help_string):
        # Register a positional argument for display in help output.
        self._arguments.append((name, help_string))
class cli(backend.Executioner):
    """
    Backend plugin for executing from command line interface.
    """
    def get_command(self, argv):
        """Given CLI arguments, return the Command to use
        On incorrect invocation, prints out a help message and returns None
        """
        if len(argv) == 0:
            self.Command.help(outfile=sys.stderr)
            print(file=sys.stderr)
            print('Error: Command not specified', file=sys.stderr)
            sys.exit(2)
        (key, argv) = (argv[0], argv[1:])
        name = from_cli(key)
        if name not in self.Command and len(argv) == 0:
            # A lone unknown word may be a help topic: show it before failing.
            try:
                self.Command.help(unicode(key), outfile=sys.stderr)
            except HelpError:
                pass
        if name not in self.Command or self.Command[name].NO_CLI:
            raise CommandError(name=key)
        cmd = self.Command[name]
        return cmd
    def process_keyword_arguments(self, cmd, kw):
        """Get the keyword arguments for a Command"""
        if self.env.interactive:
            self.prompt_interactively(cmd, kw)
            # Let the command run its own interactive_prompt callbacks after
            # the generic prompting pass.
            try:
                callbacks = cmd.get_callbacks('interactive_prompt')
            except AttributeError:
                pass
            else:
                for callback in callbacks:
                    callback(cmd, kw)
        self.load_files(cmd, kw)
        return kw
    def run(self, argv):
        """Parse ``argv``, execute the selected command, render its output.

        Returns the command's CLI exit status (0 on success) or whatever
        its ``output_for_cli`` hook returns when truthy.
        """
        cmd = self.get_command(argv)
        if cmd is None:
            return None
        name = cmd.full_name
        kw = self.parse(cmd, argv[1:])
        if not isinstance(cmd, frontend.Local):
            # Only remote commands need a connection/context.
            self.create_context()
        try:
            kw = self.process_keyword_arguments(cmd, kw)
            result = self.execute(name, **kw)
            if callable(cmd.output_for_cli):
                for param in cmd.params():
                    # Never echo password values back in rendered output.
                    if param.password and param.name in kw:
                        del kw[param.name]
                (args, options) = cmd.params_2_args_options(**kw)
                rv = cmd.output_for_cli(self.api.Backend.textui, result, *args, **options)
                if rv:
                    return rv
                else:
                    return 0
        finally:
            self.destroy_context()
        return None
    def parse(self, cmd, argv):
        """Parse ``argv`` into a decoded params dict for ``cmd``."""
        parser = self.build_parser(cmd)
        (collector, args) = parser.parse_args(argv, Collector())
        options = collector.__todict__()
        kw = cmd.args_options_2_params(*args, **options)
        return dict(self.parse_iter(cmd, kw))
    # FIXME: Probably move decoding to Command, use same method regardless of
    # request source:
    def parse_iter(self, cmd, kw):
        """
        Decode param values if appropriate.
        """
        for (key, value) in kw.items():
            yield (key, self.Backend.textui.decode(value))
    def build_parser(self, cmd):
        """Build an optparse parser mirroring ``cmd``'s args and options."""
        parser = CLIOptionParser(
            usage=' '.join(self.usage_iter(cmd)),
            description=unicode(cmd.doc),
            formatter=IPAHelpFormatter(),
        )
        option_groups = {}
        def _get_option_group(group_name):
            """Get or create an option group for the given name"""
            option_group = option_groups.get(group_name)
            if option_group is None:
                option_group = optparse.OptionGroup(parser, group_name)
                parser.add_option_group(option_group)
                option_groups[group_name] = option_group
            return option_group
        for option in cmd.options():
            kw = dict(
                dest=option.name,
                help=unicode(option.doc),
            )
            if 'no_option' in option.flags:
                continue
            if option.password and self.env.interactive:
                # Interactive mode prompts for the password separately; the
                # CLI flag then only triggers that prompt.
                kw['action'] = 'store_true'
            elif isinstance(option, Flag):
                if option.default is True:
                    kw['action'] = 'store_false'
                else:
                    kw['action'] = 'store_true'
            else:
                kw['metavar'] = option.cli_metavar
            cli_name = to_cli(option.cli_name)
            option_names = ['--%s' % cli_name]
            if option.cli_short_name:
                option_names.append('-%s' % option.cli_short_name)
            opt = optparse.make_option(*option_names, **kw)
            if option.option_group is None:
                parser.add_option(opt)
            else:
                _get_option_group(option.option_group).add_option(opt)
            if option.deprecated_cli_aliases:
                # Old option spellings still parse but are grouped under a
                # "Deprecated options" heading in --help.
                new_kw = dict(kw)
                new_kw['help'] = _('Same as --%s') % cli_name
                if isinstance(option, Enum):
                    new_kw['metavar'] = 'VAL'
                group = _get_option_group(unicode(_('Deprecated options')))
                for alias in option.deprecated_cli_aliases:
                    name = '--%s' % alias
                    group.add_option(optparse.make_option(name, **new_kw))
        for arg in cmd.args():
            name = self.__get_arg_name(arg, format_name=False)
            if 'no_option' in arg.flags or name is None:
                continue
            doc = unicode(arg.doc)
            parser.add_argument(name, doc)
        return parser
    def __get_arg_name(self, arg, format_name=True):
        # Password args are prompted for, never taken positionally.
        if arg.password:
            return None
        name = to_cli(arg.cli_name).upper()
        if not format_name:
            return name
        # Usage-line decoration: '...' for multivalue, brackets for optional.
        if arg.multivalue:
            name = '%s...' % name
        if arg.required:
            return name
        else:
            return '[%s]' % name
    def usage_iter(self, cmd):
        """Yield the pieces of the usage line for ``cmd``."""
        yield 'Usage: %%prog [global-options] %s' % to_cli(cmd.name)
        for arg in cmd.args():
            name = self.__get_arg_name(arg)
            if name is None:
                continue
            yield name
        yield '[options]'
    def prompt_interactively(self, cmd, kw):
        """
        Interactively prompt for missing or invalid values.
        By default this method will only prompt for *required* Param that
        have a missing or invalid value. However, if
        ``self.env.prompt_all`` is ``True``, this method will prompt for any
        params that have a missing values, even if the param is optional.
        """
        # If any alwaysask param was already supplied, suppress alwaysask
        # prompting for the whole command.
        honor_alwaysask = True
        for param in cmd.params():
            if param.alwaysask and param.name in kw:
                honor_alwaysask = False
                break
        for param in cmd.params():
            if (param.required and param.name not in kw) or \
                    (param.alwaysask and honor_alwaysask) or self.env.prompt_all:
                if param.autofill:
                    kw[param.name] = cmd.get_default_of(param.name, **kw)
                if param.name in kw and kw[param.name] is not None:
                    if param.autofill:
                        del kw[param.name]
                    continue
                if param.password:
                    kw[param.name] = self.Backend.textui.prompt_password(
                        param.label, param.confirm
                    )
                else:
                    default = cmd.get_default_of(param.name, **kw)
                    optional = param.alwaysask or not param.required
                    value = cmd.prompt_param(param,
                                             default=default,
                                             optional=optional,
                                             kw=kw)
                    if value is not None:
                        kw[param.name] = value
            elif param.password and kw.get(param.name, False) is True:
                # ``--password`` given as a bare flag: prompt for the value.
                kw[param.name] = self.Backend.textui.prompt_password(
                    param.label, param.confirm
                )
    def load_files(self, cmd, kw):
        """
        Load files from File parameters.
        This has to be done after all required parameters have been read
        (i.e. after prompt_interactively has or would have been called)
        AND before they are passed to the command. This is because:
        1) we need to be sure no more files are going to be added
        2) we load files from the machine where the command was executed
        3) the webUI will use a different way of loading files
        """
        for p in cmd.params():
            if isinstance(p, (File, BinaryFile)):
                # FIXME: this only reads the first file
                raw = None
                if p.name in kw:
                    if type(kw[p.name]) in (tuple, list):
                        fname = kw[p.name][0]
                    else:
                        fname = kw[p.name]
                    try:
                        with open(fname, p.open_mode) as f:
                            raw = f.read()
                    except IOError as e:
                        raise ValidationError(
                            name=to_cli(p.cli_name),
                            error='%s: %s:' % (fname, e.args[1])
                        )
                elif p.stdin_if_missing:
                    try:
                        if six.PY3 and p.type is bytes:
                            raw = sys.stdin.buffer.read()
                        else:
                            raw = sys.stdin.read()
                    except IOError as e:
                        raise ValidationError(
                            name=to_cli(p.cli_name), error=e.args[1]
                        )
                if raw:
                    if p.type is bytes:
                        kw[p.name] = raw
                    else:
                        kw[p.name] = self.Backend.textui.decode(raw)
                elif p.required:
                    # NOTE: empty file content falls through to here and is
                    # treated the same as a missing file.
                    raise ValidationError(
                        name=to_cli(p.cli_name), error=_('No file to read')
                    )
class IPAHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that keeps IPA's pre-formatted line breaks intact.

    optparse's default formatter reflows descriptions to the terminal
    width, destroying deliberate line and paragraph breaks.  IPA command
    descriptions already contain correct breaks, so this formatter only
    trims surrounding whitespace.
    """
    def format_description(self, description):
        return description.strip() if description else ""
# Backend and frontend plugins registered for the command-line application.
cli_plugins = (
    cli,
    textui,
    console,
    help,
    show_mappings,
)
def run(api):
    """CLI entry point: bootstrap ``api``, run the requested command, exit.

    Public IPA errors are logged and mapped to their exit codes; any other
    exception is logged with traceback and reported as an InternalError.
    """
    error = None
    try:
        (_options, argv) = api.bootstrap_with_global_options(context='cli')
        try:
            check_client_configuration(env=api.env)
        except ScriptError as e:
            sys.exit(e)
        for klass in cli_plugins:
            api.add_plugin(klass)
        api.finalize()
        # Refuse to run real commands without client configuration; plain
        # 'help' is still allowed.
        if 'config_loaded' not in api.env and 'help' not in argv:
            raise NotConfiguredError()
        sys.exit(api.Backend.cli.run(argv))
    except KeyboardInterrupt:
        print('')
        logger.info('operation aborted')
    except PublicError as e:
        error = e
    except Exception as e:
        logger.exception('%s: %s', e.__class__.__name__, str(e))
        error = InternalError()
    if error is not None:
        assert isinstance(error, PublicError)
        logger.error(error.strerror)
        sys.exit(error.rval)
| 50,875
|
Python
|
.py
| 1,295
| 27.310425
| 117
| 0.529537
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,660
|
output.py
|
freeipa_freeipa/ipalib/output.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Simple description of return values.
"""
import six
from ipalib.plugable import ReadOnly, lock
from ipalib.capabilities import client_has_capability
from ipalib.text import _
from ipalib.util import apirepr
if six.PY3:
    # Python 2/3 compatibility: ``unicode`` means ``str`` on Python 3.
    unicode = str
class Output(ReadOnly):
    """
    Simple description of a member in the return value ``dict``.

    This class controls both the type of object being returned by
    a command as well as how the output will be displayed.

    For example, this class defines two return results: an entry
    and a value.

    >>> from ipalib import crud, output
    >>> class user(crud.Update):
    ...
    ...     has_output = (
    ...         output.Entry('result'),
    ...         output.value,
    ...     )

    The order of the values in has_output controls the order of output.
    If you have values that you don't want to be printed then add
    ``'no_display'`` to flags.

    The difference between ``'no_display'`` and ``'no_output'`` is
    that ``'no_output'`` will prevent a Param value from being returned
    at all. ``'no_display'`` will cause the API to return a value, it
    simply won't be displayed to the user. This is so some things may
    be returned that while not interesting to us, but may be to others.

    >>> from ipalib import crud, output
    >>> myvalue = output.Output('myvalue', unicode,
    ...     'Do not print this value', flags=['no_display'],
    ... )
    >>> class user(crud.Update):
    ...
    ...     has_output = (
    ...         output.Entry('result'),
    ...         myvalue,
    ...     )
    """

    type = None
    validate = None
    doc = None
    flags = []

    def __init__(self, name, type=None, doc=None, flags=None):
        """Initialize and lock the output description.

        :param name: key under which this member appears in the result dict
        :param type: allowed type (or tuple of types) for the value
        :param doc: human-readable description of the member
        :param flags: optional list of flags, e.g. ``['no_display']``
        """
        self.name = name
        if type is not None:
            if not isinstance(type, tuple):
                type = (type,)
            self.type = type
        if doc is not None:
            self.doc = doc
        # Fix: the default used to be the shared mutable ``flags=[]``,
        # aliased onto every instance; use a fresh list per instance.
        self.flags = [] if flags is None else flags
        lock(self)

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(self.__repr_iter())
        )

    def __repr_iter(self):
        # Yield repr fragments for the name and any non-empty attributes.
        yield repr(self.name)
        for key in ('type', 'doc', 'flags'):
            value = self.__dict__.get(key)
            if not value:
                continue
            if isinstance(value, tuple):
                value = apirepr(list(value))
            else:
                value = repr(value)
            yield '%s=%s' % (key, value)
class Entry(Output):
    # An Output member whose value is a single LDAP entry (a dict).
    type = dict
    doc = _('A dictionary representing an LDAP entry')
# Shared error template for list-member type failures; interpolates
# (command name, Output class, member name, index, expected type,
# actual type, actual value).
emsg = """%s.validate_output() => %s.validate():
  output[%r][%d]: need a %r; got a %r: %r"""
class ListOfEntries(Output):
    # An Output member whose value is a sequence of LDAP entries (dicts).
    type = (list, tuple)
    doc = _('A list of LDAP entries')
    def validate(self, cmd, entries, version):
        """Check that every item of ``entries`` is a dict.

        :raises TypeError: (via the module-level ``emsg`` template) for the
            first non-dict item found.
        """
        assert isinstance(entries, self.type)
        for (i, entry) in enumerate(entries):
            if not isinstance(entry, dict):
                raise TypeError(emsg % (cmd.name, self.__class__.__name__,
                                        self.name, i, dict, type(entry), entry)
                )
class PrimaryKey(Output):
    # An Output member holding an entry's primary key (e.g. a user login).
    def validate(self, cmd, value, version):
        """Type-check ``value`` against the command's primary-key types.

        Clients with the ``primary_key_types`` capability may receive any
        of the key's allowed types or None; older clients always get
        unicode.  Raises TypeError on mismatch.
        """
        if client_has_capability(version, 'primary_key_types'):
            if hasattr(cmd, 'obj') and cmd.obj and cmd.obj.primary_key:
                types = cmd.obj.primary_key.allowed_types
            else:
                types = (unicode,)
            types = types + (type(None),)
        else:
            types = (unicode,)
        if not isinstance(value, types):
            raise TypeError(
                "%s.validate_output() => %s.validate():\n"
                "  output[%r]: need %r; got %r: %r" % (
                    cmd.name, self.__class__.__name__, self.name,
                    types[0], type(value), value))
class ListOfPrimaryKeys(Output):
    """Output member holding the primary keys of all affected entries.

    With the ``primary_key_types`` client capability the value must be a
    list/tuple whose items match the command object's primary-key types;
    older clients always receive a single unicode value.
    """
    def validate(self, cmd, values, version):
        """Validate ``values`` for ``cmd`` against the client ``version``.

        :raises TypeError: when the container or any item has a wrong type.
        """
        if client_has_capability(version, 'primary_key_types'):
            types = (tuple, list)
        else:
            types = (unicode,)
        if not isinstance(values, types):
            raise TypeError(
                "%s.validate_output() => %s.validate():\n"
                "  output[%r]: need %r; got %r: %r" % (
                    cmd.name, self.__class__.__name__, self.name,
                    types[0], type(values), values))
        if client_has_capability(version, 'primary_key_types'):
            if hasattr(cmd, 'obj') and cmd.obj and cmd.obj.primary_key:
                types = cmd.obj.primary_key.allowed_types
            else:
                types = (unicode,)
            for (i, value) in enumerate(values):
                if not isinstance(value, types):
                    # Fix: the member name must precede the index so the
                    # arguments line up with emsg's "output[%r][%d]" —
                    # they were swapped, making %d choke on the name string.
                    raise TypeError(emsg % (
                        cmd.name, self.__class__.__name__, self.name, i,
                        types[0], type(value), value))
# Common Output building blocks reused across commands' has_output specs.
result = Output('result', doc=_('All commands should at least have a result'))
summary = Output('summary', (unicode, type(None)),
    _('User-friendly description of action performed')
)
value = PrimaryKey('value', None,
    _("The primary_key value of the entry, e.g. 'jdoe' for a user"),
    flags=['no_display'],
)
# Pre-assembled has_output tuples for the usual command shapes.
standard = (summary, result)
standard_entry = (
    summary,
    Entry('result'),
    value,
)
standard_list_of_entries = (
    summary,
    ListOfEntries('result'),
    Output('count', int, _('Number of entries returned')),
    Output('truncated', bool, _('True if not all results were returned')),
)
standard_delete = (
    summary,
    Output('result', dict, _('List of deletions that failed')),
    value,
)
standard_multi_delete = (
    summary,
    Output('result', dict, _('List of deletions that failed')),
    ListOfPrimaryKeys('value', flags=['no_display']),
)
standard_boolean = (
    summary,
    Output('result', bool, _('True means the operation was successful')),
    value,
)
# Alias kept for backward compatibility with existing plugin code.
standard_value = standard_boolean
simple_value = (
    summary,
    Output('result', bool, _('True means the operation was successful')),
    Output('value', unicode, flags=['no_display']),
)
# custom shim for commands like `trustconfig-show`,
# `automember-default-group-*` which put stuff into output['value'] despite not
# having primary key themselves. Designing commands like this is not a very
# good practice, so please do not use this for new code.
simple_entry = (
    summary,
    Entry('result'),
    Output('value', unicode, flags=['no_display']),
)
| 7,231
|
Python
|
.py
| 194
| 30.386598
| 79
| 0.604057
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,661
|
pkcs10.py
|
freeipa_freeipa/ipalib/pkcs10.py
|
from __future__ import print_function
import sys
# Deprecation shim: importing this module only emits a warning on stderr.
print(
    "ipalib.pkcs10 module is deprecated and will be removed in IPA 4.6. "
    "To load CSRs, please, use python-cryptography instead.",
    file=sys.stderr
)
| 215
|
Python
|
.py
| 7
| 27.857143
| 73
| 0.73913
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,662
|
kinit.py
|
freeipa_freeipa/ipalib/kinit.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import logging
import os
import re
import time
import gssapi
from ipapython.kerberos import Principal
from ipaplatform.paths import paths
from ipapython.ipautil import run
from ipalib.constants import PATTERN_GROUPUSER_NAME
from ipalib import krb_utils
from ipalib.util import validate_hostname
logger = logging.getLogger(__name__)
# Regex building blocks: an optional @REALM suffix; user principals reuse
# the group/user name pattern; service principals are service/hostname.
# NOTE(review): PATTERN_GROUPUSER_NAME[:-1] drops its final character so
# the realm pattern can follow — presumably a trailing '$'; confirm.
PATTERN_REALM = '@?([a-zA-Z0-9.-]*)$'
PATTERN_PRINCIPAL = '(' + PATTERN_GROUPUSER_NAME[:-1] + ')' + PATTERN_REALM
PATTERN_SERVICE = '([a-zA-Z0-9.-]+)/([a-zA-Z0-9.-]+)' + PATTERN_REALM
user_pattern = re.compile(PATTERN_PRINCIPAL)
service_pattern = re.compile(PATTERN_SERVICE)
def validate_principal(principal):
    """Validate a Kerberos principal string before passing it to kinit.

    Accepts either a `Principal` instance (converted to str) or a plain
    string.  The value must match either the user principal pattern or the
    service principal pattern; for service principals the hostname part is
    additionally validated.

    :param principal: principal as `Principal` or str
    :returns: the validated principal as a string
    :raises RuntimeError: if the value is not a string, contains bad
        spacing, fails to parse, or has an invalid hostname
    """
    # TODO: use Principal() to verify value?
    if isinstance(principal, Principal):
        principal = str(principal)
    elif not isinstance(principal, str):
        raise RuntimeError('Invalid principal: not a string')
    # A '/' together with a space suggests a malformed service principal
    # (spaces are never valid there).
    if ('/' in principal) and (' ' in principal):
        raise RuntimeError('Invalid principal: bad spacing')
    else:
        # For a user match in the regex
        # username = match[1]
        # realm = match[2]
        # Try the user form first, then fall back to service form.
        match = user_pattern.match(principal)
        if match is None:
            match = service_pattern.match(principal)
            if match is None:
                raise RuntimeError('Invalid principal: cannot parse')
            else:
                # service = match[1]
                hostname = match[2]
                # realm = match[3]
                try:
                    validate_hostname(hostname)
                except ValueError as e:
                    # normalize to RuntimeError to match the other failures
                    raise RuntimeError(str(e))
    return principal
def kinit_keytab(principal, keytab, ccache_name=None, config=None, attempts=1):
    """
    Given a ccache_path, keytab file and a principal kinit as that user.
    The optional parameter 'attempts' specifies how many times the credential
    initialization should be attempted in case of non-responsive KDC.

    :param principal: principal as `Principal` or str
    :param keytab: path to the client keytab file
    :param ccache_name: ccache to store credentials in (default: gssapi default)
    :param config: path to krb5.conf to use via KRB5_CONFIG (default: system)
    :param attempts: number of tries when the KDC is unreachable/unavailable
    :returns: acquired ``gssapi.Credentials``
    :raises gssapi.exceptions.GSSError: on non-retryable errors or when all
        attempts are exhausted
    """
    validate_principal(principal)
    # only these minor codes indicate a transient KDC problem worth retrying
    errors_to_retry = {
        krb_utils.KRB5KDC_ERR_SVC_UNAVAILABLE, krb_utils.KRB5_KDC_UNREACH
    }
    logger.debug("Initializing principal %s using keytab %s",
                 principal, keytab)
    store = {'client_keytab': keytab}
    if ccache_name is not None:
        logger.debug("using ccache %s", ccache_name)
        store['ccache'] = ccache_name
    for attempt in range(1, attempts + 1):
        # temporarily point libkrb5 at the requested config; the previous
        # value is restored in the finally block below
        old_config = os.environ.get('KRB5_CONFIG')
        if config is not None:
            os.environ['KRB5_CONFIG'] = config
        else:
            os.environ.pop('KRB5_CONFIG', None)
        try:
            name = gssapi.Name(
                str(principal), gssapi.NameType.kerberos_principal
            )
            cred = gssapi.Credentials(name=name, store=store, usage='initiate')
            logger.debug("Attempt %d/%d: success", attempt, attempts)
            return cred
        except gssapi.exceptions.GSSError as e:
            if e.min_code not in errors_to_retry:  # pylint: disable=no-member
                raise
            logger.debug("Attempt %d/%d: failed: %s", attempt, attempts, e)
            if attempt == attempts:
                logger.debug("Maximum number of attempts (%d) reached",
                             attempts)
                raise
            logger.debug("Waiting 5 seconds before next retry")
            time.sleep(5)
        finally:
            # restore the caller's KRB5_CONFIG (or unset it again)
            if old_config is not None:
                os.environ['KRB5_CONFIG'] = old_config
            else:
                os.environ.pop('KRB5_CONFIG', None)
    # unreachable in practice (loop either returns or raises); keeps the
    # function's return type explicit for static analysis
    return None
def _run_env(config=None):
    """Build the environment dict used when spawning kinit.

    Propagates all ``KRB5*`` and ``GSS*`` variables from the current
    process environment (e.g. KRB5_TRACE), forces ``LC_ALL=C`` and,
    when *config* is given, points ``KRB5_CONFIG`` at it.
    """
    env = {
        name: val
        for name, val in os.environ.items()
        if name.startswith(("KRB5", "GSS"))
    }
    env["LC_ALL"] = "C"
    if config is not None:
        env["KRB5_CONFIG"] = config
    return env
def kinit_password(principal, password, ccache_name=None, config=None,
                   armor_ccache_name=None, canonicalize=False,
                   enterprise=False, lifetime=None):
    """Run interactive kinit for *principal* using *password*.

    For FAST-armored web authentication, pass the http service ccache
    via *armor_ccache_name*.

    :param principal: principal name
    :param password: user password
    :param ccache_name: location of ccache (default: default location)
    :param config: path to krb5.conf (default: default location)
    :param armor_ccache_name: armor ccache for FAST (-T)
    :param canonicalize: request principal canonicalization (-C)
    :param enterprise: use enterprise principal (-E)
    :param lifetime: request TGT lifetime (-l)
    :raises RuntimeError: with kinit's stderr when authentication fails
    """
    validate_principal(principal)
    logger.debug("Initializing principal %s using password", principal)
    cmd = [paths.KINIT]
    if ccache_name is not None:
        cmd += ['-c', ccache_name]
    if armor_ccache_name is not None:
        logger.debug("Using armor ccache %s for FAST webauth",
                     armor_ccache_name)
        cmd += ['-T', armor_ccache_name]
    if lifetime:
        cmd += ['-l', lifetime]
    if canonicalize:
        logger.debug("Requesting principal canonicalization")
        cmd.append('-C')
    if enterprise:
        logger.debug("Using enterprise principal")
        cmd.append('-E')
    cmd += ['--', str(principal)]
    # stderr is captured so it can be attached to the exception raised
    # on unsuccessful authentication
    outcome = run(cmd, stdin=password, env=_run_env(config),
                  raiseonerr=False, capture_error=True)
    if outcome.returncode:
        raise RuntimeError(outcome.error_output)
    return outcome
def kinit_armor(ccache_name, pkinit_anchors=None):
    """Obtain an anonymous PKINIT ticket to serve as a FAST armor ccache.

    :param ccache_name: location of the armor ccache (required)
    :param pkinit_anchors: optional list of PKINIT anchor files; when None,
        the anchors from the Kerberos client library configuration are used
    :raises: CalledProcessError if the anonymous PKINIT fails
    """
    logger.debug("Initializing anonymous ccache")
    cmd = [paths.KINIT, '-n', '-c', ccache_name]
    for anchor in (pkinit_anchors or []):
        cmd.append('-X')
        cmd.append(f'X509_anchors=FILE:{anchor}')
    # capture stderr so it ends up in the raised exception on failure
    return run(cmd, env=_run_env(), raiseonerr=True, capture_error=True)
def kinit_pkinit(
    principal,
    user_identity,
    ccache_name=None,
    config=None,
    pkinit_anchors=None,
):
    """Perform kinit with X.509 identity (PKINIT)
    :param principal: principal name
    :param user_identity: X509_user_identity paramemter
    :param ccache_name: location of ccache (default: default location)
    :param config: path to krb5.conf (default: default location)
    :param pkinit_anchor: if not None, the PKINIT anchors to use. Otherwise
       the value from Kerberos client library configuration is used. Entries
       must be prefixed with FILE: or DIR:
    user identity example:
       FILE:filename[,keyfilename]
       PKCS12:filename
       PKCS11:...
       DIR:directoryname
    :raises: CalledProcessError if PKINIT fails
    :returns: the completed run() result on success
    """
    validate_principal(principal)
    logger.debug(
        "Initializing principal %s using PKINIT %s", principal, user_identity
    )
    args = [paths.KINIT]
    if ccache_name is not None:
        args.extend(['-c', ccache_name])
    if pkinit_anchors is not None:
        for pkinit_anchor in pkinit_anchors:
            # anchors must already carry a krb5-recognized prefix
            assert pkinit_anchor.startswith(("FILE:", "DIR:", "ENV:"))
            args.extend(["-X", f"X509_anchors={pkinit_anchor}"])
    args.extend(["-X", f"X509_user_identity={user_identity}"])
    args.extend(['--', str(principal)])
    # this workaround enables us to capture stderr and put it
    # into the raised exception in case of unsuccessful authentication
    # Unsuccessful pkinit can lead to a password prompt. Send \n to skip
    # prompt.
    env = _run_env(config)
    return run(args, env=env, stdin="\n", raiseonerr=True, capture_error=True)
| 8,443
|
Python
|
.py
| 204
| 33.691176
| 79
| 0.653785
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,663
|
request.py
|
freeipa_freeipa/ipalib/request.py
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty contextrmation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Per-request thread-local data.
"""
import contextlib
import threading
from ipalib.base import ReadOnly, lock
from ipalib.constants import CALLABLE_ERROR
# Thread-local storage of most per-request information
# (connections, language settings, the current frame, ...).  Each server
# thread sees its own independent set of attributes.
context = threading.local()
class _FrameContext:
    """Empty attribute container used as one frame on `context.current_frame`."""
    pass
@contextlib.contextmanager
def context_frame():
    """Push a fresh frame onto ``context.current_frame`` for the duration
    of the ``with`` block, restoring the previous frame (or removing the
    attribute entirely) on exit.
    """
    try:
        frame_back = context.current_frame
    except AttributeError:
        # no current frame yet; frame_back stays unbound on purpose
        pass
    context.current_frame = _FrameContext()
    try:
        yield
    finally:
        try:
            context.current_frame = frame_back
        except UnboundLocalError:
            # frame_back was never assigned above, i.e. there was no frame
            # before we entered -- remove ours instead of restoring
            del context.current_frame
class Connection(ReadOnly):
    """
    Base class for connection objects stored on `request.context`.
    """
    def __init__(self, conn, disconnect):
        """
        :param conn: the underlying connection object (opaque to this class)
        :param disconnect: callable invoked by `destroy_context()` to tear
            the connection down; must be callable
        :raises TypeError: if *disconnect* is not callable
        """
        self.conn = conn
        if not callable(disconnect):
            raise TypeError(
                CALLABLE_ERROR % ('disconnect', disconnect, type(disconnect))
            )
        self.disconnect = disconnect
        # freeze the instance (ReadOnly): attributes cannot change afterwards
        lock(self)
def destroy_context():
    """Tear down the thread-local `request.context`.

    Disconnects every `Connection` stored on the context, then removes all
    attributes from it.
    """
    # take a snapshot first: value.disconnect() mutates context.__dict__
    # while we are iterating over it
    snapshot = list(context.__dict__.values())
    for item in snapshot:
        if isinstance(item, Connection):
            item.disconnect()
    context.__dict__.clear()
| 2,177
|
Python
|
.py
| 65
| 28.907692
| 76
| 0.70734
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,664
|
text.py
|
freeipa_freeipa/ipalib/text.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty contextrmation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Defers gettext translation till request time.
IPA presents some tricky gettext challenges. On the one hand, most translatable
message are defined as class attributes on the plugins, which means these get
evaluated at module-load time. But on the other hand, each request to the
server can be in a different locale, so the actual translation must not occur
till request time.
The `text` module provides a mechanism for for deferred gettext translation. It
was designed to:
1. Allow translatable strings to be marked with the usual ``_()`` and
``ngettext()`` functions so that standard tools like xgettext can still
be used
2. Allow programmers to mark strings in a natural way without burdening them
with details of the deferred translation mechanism
A typical plugin will use the deferred translation like this:
>>> from ipalib import Command, _, ngettext
>>> class my_plugin(Command):
... my_string = _('Hello, %(name)s.')
... my_plural = ngettext('%(count)d goose', '%(count)d geese', 0)
...
With normal gettext usage, the *my_string* and *my_plural* message would be
translated at module-load-time when your ``my_plugin`` class is defined. This
would mean that all message are translated in the locale of the server rather
than the locale of the request.
However, the ``_()`` function above is actually a `GettextFactory` instance,
which when called returns a `Gettext` instance. A `Gettext` instance stores the
message to be translated, and the gettext domain and localedir, but it doesn't
perform the translation till `Gettext.__unicode__()` is called. For example:
>>> my_plugin.my_string
Gettext('Hello, %(name)s.', domain='ipa', localedir=None)
>>> unicode(my_plugin.my_string)
u'Hello, %(name)s.'
Translation can also be performed via the `Gettext.__mod__()` convenience
method. For example, these two are equivalent:
>>> my_plugin.my_string % dict(name='Joe')
u'Hello, Joe.'
>>> unicode(my_plugin.my_string) % dict(name='Joe') # Long form
u'Hello, Joe.'
Translation can also be performed via the `Gettext.format()` convenience
method. For example, these two are equivalent:
>>> my_plugin.my_string = _('Hello, {name}.')
>>> my_plugin.my_string.format(name='Joe')
u'Hello, Joe.'
>>> my_plugin.my_string = _('Hello, {0}.')
>>> my_plugin.my_string.format('Joe')
u'Hello, Joe.'
Similar to ``_()``, the ``ngettext()`` function above is actually an
`NGettextFactory` instance, which when called returns an `NGettext` instance.
An `NGettext` instance stores the singular and plural messages, and the gettext
domain and localedir, but it doesn't perform the translation till
`NGettext.__call__()` is called. For example:
>>> my_plugin.my_plural
NGettext('%(count)d goose', '%(count)d geese', domain='ipa', localedir=None)
>>> my_plugin.my_plural(1)
u'%(count)d goose'
>>> my_plugin.my_plural(2)
u'%(count)d geese'
Translation can also be performed via the `NGettext.__mod__()` convenience
method. For example, these two are equivalent:
>>> my_plugin.my_plural % dict(count=1)
u'1 goose'
>>> my_plugin.my_plural(1) % dict(count=1) # Long form
u'1 goose'
Translation can also be performed via the `NGettext.format()` convenience
method. For example:
>>> my_plugin.my_plural = ngettext('{count} goose', '{count} geese', 0)
>>> my_plugin.my_plural.format(count=1)
u'1 goose'
>>> my_plugin.my_plural.format(count=2)
u'2 geese'
Lastly, 3rd-party plugins can create factories bound to a different gettext
domain. The default domain is ``'ipa'``, which is also the domain of the
standard ``ipalib._()`` and ``ipalib.ngettext()`` factories. But 3rd-party
plugins can create their own factories like this:
>>> from ipalib import GettextFactory, NGettextFactory
>>> _ = GettextFactory(domain='ipa_foo')
>>> ngettext = NGettextFactory(domain='ipa_foo')
>>> class foo(Command):
... msg1 = _('Foo!')
... msg2 = ngettext('%(count)d bar', '%(count)d bars', 0)
...
Notice that these messages are bound to the ``'ipa_foo'`` domain:
>>> foo.msg1
Gettext('Foo!', domain='ipa_foo', localedir=None)
>>> foo.msg2
NGettext('%(count)d bar', '%(count)d bars', domain='ipa_foo', localedir=None)
For additional details, see `GettextFactory` and `Gettext`, and for plural
forms, see `NGettextFactory` and `NGettext`.
"""
import gettext
import six
from ipalib.request import context
# On Python 3 there is no separate unicode type; alias it to str so the
# rest of this module can use `unicode` uniformly on both major versions.
if six.PY3:
    unicode = str
def create_translation(key):
    """Create and cache a gettext translation for the current request.

    :param key: ``(domain, localedir)`` tuple identifying the catalog
    :returns: the ``gettext`` translation object, also stored on the
        thread-local ``context`` under *key* for reuse within the request
    """
    # caller is expected to check the cache first
    assert key not in context.__dict__
    (domain, localedir) = key
    translation = gettext.translation(domain,
        localedir=localedir,
        # per-request language negotiation; falls back to untranslated
        # messages when no catalog matches
        languages=getattr(context, 'languages', None),
        fallback=True,
    )
    context.__dict__[key] = translation
    return translation
class LazyText:
    """
    Base class for deferred translation.
    This class is not used directly. See the `Gettext` and `NGettext`
    subclasses.
    Concatenating LazyText objects with the + operator gives
    ConcatenatedLazyText objects.
    """
    __slots__ = ('domain', 'localedir', 'key', 'args')
    # instances compare by value (see __eq__) and are mutable in spirit,
    # so they are explicitly unhashable
    __hash__ = None
    def __init__(self, domain=None, localedir=None):
        """
        Initialize.
        :param domain: The gettext domain in which this message will be
            translated, e.g. ``'ipa'`` or ``'ipa_3rd_party'``; default is
            ``None``
        :param localedir: The directory containing the gettext translations,
            e.g. ``'/usr/share/locale/'``; default is ``None``, in which case
            gettext will use the default system locale directory.
        """
        self.domain = domain
        self.localedir = localedir
        # cache key into context.__dict__ for the translation object
        self.key = (domain, localedir)
        # subclasses must overwrite this with their comparison tuple
        self.args = None
    def __eq__(self, other):
        """
        Return ``True`` if this instances is equal to *other*.
        Note that this method cannot be used on the `LazyText` base class itself
        as subclasses must define an *args* instance attribute.
        """
        # strict type match: a Gettext never equals an NGettext
        if type(other) is not self.__class__:
            return False
        return self.args == other.args
    def __ne__(self, other):
        """
        Return ``True`` if this instances is not equal to *other*.
        Note that this method cannot be used on the `LazyText` base class itself
        as subclasses must define an *args* instance attribute.
        """
        return not self.__eq__(other)
    def __add__(self, other):
        return ConcatenatedLazyText(self) + other
    def __radd__(self, other):
        return other + ConcatenatedLazyText(self)
@six.python_2_unicode_compatible
class Gettext(LazyText):
    """
    Deferred translation using ``gettext.ugettext()``.
    Normally the `Gettext` class isn't used directly and instead is created via
    a `GettextFactory` instance. However, for illustration, we can create one
    like this:
    >>> msg = Gettext('Hello, %(name)s.')
    When you create a `Gettext` instance, the message is stored on the *msg*
    attribute:
    >>> msg.msg
    'Hello, %(name)s.'
    No translation is performed till `Gettext.__unicode__()` is called. This
    will translate *msg* using ``gettext.ugettext()``, which will return the
    translated string as a Python ``unicode`` instance. For example:
    >>> unicode(msg)
    u'Hello, %(name)s.'
    `Gettext.__unicode__()` should be called at request time, which in a
    nutshell means it should be called from within your plugin's
    ``Command.execute()`` method. `Gettext.__unicode__()` will perform the
    translation based on the locale of the current request.
    `Gettext.__mod__()` is a convenience method for Python "percent" string
    formatting. It will translate your message using `Gettext.__unicode__()`
    and then perform the string substitution on the translated message. For
    example, these two are equivalent:
    >>> msg % dict(name='Joe')
    u'Hello, Joe.'
    >>> unicode(msg) % dict(name='Joe') # Long form
    u'Hello, Joe.'
    `Gettext.format()` is a convenience method for Python string formatting.
    It will translate your message using `Gettext.__unicode__()` and then
    perform the string substitution on the translated message. For example,
    these two are equivalent:
    >>> msg = Gettext('Hello, {name}.')
    >>> msg.format(name='Joe')
    u'Hello, Joe.'
    >>> msg = Gettext('Hello, {0}.')
    >>> msg.format('Joe')
    u'Hello, Joe.'
    See `GettextFactory` for additional details. If you need to pick between
    singular and plural form, use `NGettext` instances via the
    `NGettextFactory`.
    """
    # Fix: was `('msg')` -- a bare string, not a tuple.  A string happens to
    # work for a single slot, but the tuple form matches LazyText/NGettext
    # and avoids accidental per-character slots if a name is ever appended.
    __slots__ = ('msg',)
    def __init__(self, msg, domain=None, localedir=None):
        super(Gettext, self).__init__(domain, localedir)
        self.msg = msg
        # comparison tuple used by LazyText.__eq__
        self.args = (msg, domain, localedir)
    def __repr__(self):
        return '%s(%r, domain=%r, localedir=%r)' % (self.__class__.__name__,
            self.msg, self.domain, self.localedir)
    def as_unicode(self):
        """
        Translate this message and return as a ``unicode`` instance.
        """
        # reuse the per-request cached translation object when available
        if self.key in context.__dict__:
            t = context.__dict__[self.key]
        else:
            t = create_translation(self.key)
        if six.PY2:
            return t.ugettext(self.msg)
        else:
            return t.gettext(self.msg)
    def __str__(self):
        return unicode(self.as_unicode())
    def __json__(self):
        # JSON serialization uses the translated text
        return unicode(self)
    def __mod__(self, kw):
        return unicode(self) % kw
    def format(self, *args, **kwargs):
        return unicode(self).format(*args, **kwargs)
    def expandtabs(self, tabsize=8):
        """Compatibility for sphinx prepare_docstring()"""
        return str(self).expandtabs(tabsize)
@six.python_2_unicode_compatible
class FixMe(Gettext):
    """
    Non-translated place-holder for UI labels.
    `FixMe` is a subclass of `Gettext` and is used for automatically created
    place-holder labels. It generally behaves exactly like `Gettext` except no
    translation is ever performed.
    `FixMe` allows programmers to get plugins working without first filling in
    all the labels that will ultimately be required, while at the same time it
    creates conspicuous looking UI labels that remind the programmer to
    "fix-me!". For example, the typical usage would be something like this:
    >>> class Plugin:
    ...     label = None
    ...     def __init__(self):
    ...         self.name = self.__class__.__name__
    ...         if self.label is None:
    ...             self.label = FixMe(self.name + '.label')
    ...         assert isinstance(self.label, Gettext)
    ...
    >>> class user(Plugin):
    ...     pass # Oops, we didn't set user.label yet
    ...
    >>> u = user()
    >>> u.label
    FixMe('user.label')
    Note that as `FixMe` is a subclass of `Gettext`, is passes the above type
    check using ``isinstance()``.
    Calling `FixMe.__unicode__()` performs no translation, but instead returns
    said conspicuous looking label:
    >>> unicode(u.label)
    u'<user.label>'
    For more examples of how `FixMe` is used, see `ipalib.parameters`.
    """
    # no additional state beyond Gettext.msg
    __slots__ = tuple()
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.msg)
    def __str__(self):
        # deliberately untranslated: the angle brackets flag the label as
        # a placeholder in the UI
        return u'<%s>' % self.msg
class NGettext(LazyText):
    """
    Deferred translation for plural forms using ``gettext.ungettext()``.
    Normally the `NGettext` class isn't used directly and instead is created via
    a `NGettextFactory` instance. However, for illustration, we can create one
    like this:
    >>> msg = NGettext('%(count)d goose', '%(count)d geese')
    When you create an `NGettext` instance, the singular and plural forms of
    your message are stored on the *singular* and *plural* instance attributes:
    >>> msg.singular
    '%(count)d goose'
    >>> msg.plural
    '%(count)d geese'
    The translation and number selection isn't performed till
    `NGettext.__call__()` is called. This will translate and pick the correct
    number using ``gettext.ungettext()``. As a callable, an `NGettext` instance
    takes a single argument, an integer specifying the count. For example:
    >>> msg(0)
    u'%(count)d geese'
    >>> msg(1)
    u'%(count)d goose'
    >>> msg(2)
    u'%(count)d geese'
    `NGettext.__mod__()` is a convenience method for Python "percent" string
    formatting. It can only be used if your substitution ``dict`` contains the
    count in a ``'count'`` item. For example:
    >>> msg % dict(count=0)
    u'0 geese'
    >>> msg % dict(count=1)
    u'1 goose'
    >>> msg % dict(count=2)
    u'2 geese'
    Alternatively, these longer forms have the same effect as the three examples
    above:
    >>> msg(0) % dict(count=0)
    u'0 geese'
    >>> msg(1) % dict(count=1)
    u'1 goose'
    >>> msg(2) % dict(count=2)
    u'2 geese'
    A ``KeyError`` is raised if your substitution ``dict`` doesn't have a
    ``'count'`` item. For example:
    >>> msg2 = NGettext('%(num)d goose', '%(num)d geese')
    >>> msg2 % dict(num=0)
    Traceback (most recent call last):
      ...
    KeyError: 'count'
    However, in this case you can still use the longer, explicit form for string
    substitution:
    >>> msg2(0) % dict(num=0)
    u'0 geese'
    `NGettext.format()` is a convenience method for Python string formatting.
    It can only be used if your substitution ``dict`` contains the count in a
    ``'count'`` item. For example:
    >>> msg = NGettext('{count} goose', '{count} geese')
    >>> msg.format(count=0)
    u'0 geese'
    >>> msg.format(count=1)
    u'1 goose'
    >>> msg.format(count=2)
    u'2 geese'
    A ``KeyError`` is raised if your substitution ``dict`` doesn't have a
    ``'count'`` item. For example:
    >>> msg2 = NGettext('{num} goose', '{num} geese')
    >>> msg2.format(num=0)
    Traceback (most recent call last):
      ...
    KeyError: 'count'
    However, in this case you can still use the longer, explicit form for
    string substitution:
    >>> msg2(0).format(num=0)
    u'0 geese'
    See `NGettextFactory` for additional details.
    """
    __slots__ = ('singular', 'plural')
    def __init__(self, singular, plural, domain=None, localedir=None):
        super(NGettext, self).__init__(domain, localedir)
        self.singular = singular
        self.plural = plural
        # comparison tuple used by LazyText.__eq__
        self.args = (singular, plural, domain, localedir)
    def __repr__(self):
        return '%s(%r, %r, domain=%r, localedir=%r)' % (self.__class__.__name__,
            self.singular, self.plural, self.domain, self.localedir)
    def __mod__(self, kw):
        # raises KeyError when 'count' is missing (documented above)
        count = kw['count']
        return self(count) % kw
    def format(self, *args, **kwargs):
        # raises KeyError when 'count' is missing (documented above)
        count = kwargs['count']
        return self(count).format(*args, **kwargs)
    def __call__(self, count):
        # reuse the per-request cached translation object when available
        if self.key in context.__dict__:
            t = context.__dict__[self.key]
        else:
            t = create_translation(self.key)
        if six.PY2:
            return t.ungettext(self.singular, self.plural, count)
        else:
            return t.ngettext(self.singular, self.plural, count)
@six.python_2_unicode_compatible
class ConcatenatedLazyText:
    """A lazy concatenation of strings and/or unicode-convertible objects.

    Joins several LazyTexts (or plain strings) without translating them
    until the combined value is actually rendered.  Splitting large texts
    such as help strings this way means translators only redo the piece
    that changed.  More parts can be appended with ``+`` or ``+=``.
    """
    def __init__(self, *components):
        self.components = list(components)
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.components)
    def __str__(self):
        # translation happens here, piece by piece
        rendered = [unicode(part) for part in self.components]
        return u''.join(rendered)
    def __json__(self):
        return unicode(self)
    def __mod__(self, kw):
        return unicode(self) % kw
    def format(self, *args, **kwargs):
        return unicode(self).format(*args, **kwargs)
    def __add__(self, other):
        if isinstance(other, ConcatenatedLazyText):
            tail = other.components
        else:
            tail = [other]
        return ConcatenatedLazyText(*(self.components + tail))
    def __radd__(self, other):
        if isinstance(other, ConcatenatedLazyText):
            head = other.components
        else:
            head = [other]
        return ConcatenatedLazyText(*(head + self.components))
    def expandtabs(self, tabsize=8):
        """Compatibility for sphinx prepare_docstring()"""
        return str(self).expandtabs(tabsize)
class GettextFactory:
    """
    Factory for creating ``_()`` functions.
    A `GettextFactory` allows you to mark translatable messages that are
    evaluated at initialization time, but deferred their actual translation till
    request time.
    When you create a `GettextFactory` you can provide a specific gettext
    *domain* and *localedir*. By default the *domain* will be ``'ipa'`` and
    the *localedir* will be ``None``. Both are available via instance
    attributes of the same name. For example:
    >>> _ = GettextFactory()
    >>> _.domain
    'ipa'
    >>> _.localedir is None
    True
    When the *localedir* is ``None``, gettext will use the default system
    localedir (typically ``'/usr/share/locale/'``). In general, you should
    **not** provide a *localedir*... it is intended only to support in-tree
    testing.
    Third party plugins will most likely want to use a different gettext
    *domain*. For example:
    >>> _ = GettextFactory(domain='ipa_3rd_party')
    >>> _.domain
    'ipa_3rd_party'
    When you call your `GettextFactory` instance, it will return a `Gettext`
    instance associated with the same *domain* and *localedir*. For example:
    >>> my_msg = _('Hello world')
    >>> my_msg.domain
    'ipa_3rd_party'
    >>> my_msg.localedir is None
    True
    The message isn't translated till `Gettext.__unicode__()` is called, which
    should be done during each request. See the `Gettext` class for additional
    details.
    """
    def __init__(self, domain='ipa', localedir=None):
        """
        Initialize.
        :param domain: The gettext domain in which this message will be
            translated, e.g. ``'ipa'`` or ``'ipa_3rd_party'``; default is
            ``'ipa'``
        :param localedir: The directory containing the gettext translations,
            e.g. ``'/usr/share/locale/'``; default is ``None``, in which case
            gettext will use the default system locale directory.
        """
        self.domain = domain
        self.localedir = localedir
    def __repr__(self):
        return '%s(domain=%r, localedir=%r)' % (self.__class__.__name__,
            self.domain, self.localedir)
    def __call__(self, msg):
        # defer translation: just capture msg with this factory's settings
        return Gettext(msg, self.domain, self.localedir)
class NGettextFactory(GettextFactory):
    """
    Factory for creating ``ngettext()`` functions.
    `NGettextFactory` is similar to `GettextFactory`, except `NGettextFactory`
    is for plural forms.
    So that standard tools like xgettext can find your plural forms, you should
    reference your `NGettextFactory` instance using a variable named
    *ngettext*. For example:
    >>> ngettext = NGettextFactory()
    >>> ngettext
    NGettextFactory(domain='ipa', localedir=None)
    When you call your `NGettextFactory` instance to create a deferred
    translation, you provide the *singular* message, the *plural* message, and
    a dummy *count*. An `NGettext` instance will be returned. For example:
    >>> my_msg = ngettext('%(count)d goose', '%(count)d geese', 0)
    >>> my_msg
    NGettext('%(count)d goose', '%(count)d geese', domain='ipa', localedir=None)
    The *count* is ignored (because the translation is deferred), but you should
    still provide it so parsing tools aren't confused. For consistency, it is
    recommended to always provide ``0`` for the *count*.
    See `NGettext` for details on how the deferred translation is later
    performed. See `GettextFactory` for details on setting a different gettext
    *domain* (likely needed for 3rd-party plugins).
    """
    def __call__(self, singular, plural, count):
        # count is intentionally unused -- it exists only so xgettext can
        # recognize the call as a plural form (see the class docstring)
        return NGettext(singular, plural, self.domain, self.localedir)
# Process wide factories:
_ = GettextFactory()
ngettext = NGettextFactory()
# legacy alias kept for callers that still use the Python 2 era name
ugettext = _
| 21,213
|
Python
|
.py
| 491
| 37.808554
| 80
| 0.665435
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,665
|
__init__.py
|
freeipa_freeipa/ipalib/__init__.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Package containing the core library.
=============================
Tutorial for Plugin Authors
=============================
This tutorial will introduce you to writing plugins for freeIPA v2. It does
not cover every detail, but it provides enough to get you started and is
heavily cross-referenced with further documentation that (hopefully) fills
in the missing details.
In addition to this tutorial, the many built-in plugins in `ipalib.plugins`
and `ipaserver.plugins` provide real-life examples of how to write good
plugins.
----------------------------
How this tutorial is written
----------------------------
The code examples in this tutorial are presented as if entered into a Python
interactive interpreter session. As such, when you create a real plugin in
a source file, a few details will be different (in addition to the fact that
you will never include the ``>>>`` nor ``...`` that the interpreter places at
the beginning of each line of code).
The tutorial examples all have this pattern:
::
>>> from ipalib import Command, create_api
>>> api = create_api()
>>> class my_command(Command):
... pass
...
>>> api.add_plugin(my_command)
>>> api.finalize()
In the tutorial we call `create_api()` to create an *example* instance
of `plugable.API` to work with. But a real plugin will simply use
``ipalib.api``, the standard run-time instance of `plugable.API`.
A real plugin will have this pattern:
::
from ipalib import Command, Registry, api
register = Registry()
@register()
class my_command(Command):
pass
As seen above, also note that in a real plugin you will *not* call
`plugable.API.finalize()`. When in doubt, look at some of the built-in
plugins for guidance, like those in `ipalib.plugins`.
If you don't know what the Python *interactive interpreter* is, or are
confused about what this *Python* is in the first place, then you probably
should start with the Python tutorial:
http://docs.python.org/tutorial/index.html
------------------------------------
First steps: A simple command plugin
------------------------------------
Our first example will create the most basic command plugin possible. This
command will be seen in the list of command plugins, but it won't be capable
of actually doing anything yet.
A command plugin simultaneously adds a new command that can be called through
the command-line ``ipa`` script *and* adds a new XML-RPC method... the two are
one and the same, simply invoked in different ways.
A freeIPA plugin is a Python class, and when you create a plugin, you register
this class itself (instead of an instance of the class). To be a command
plugin, your plugin must subclass from `frontend.Command` (or from a subclass
thereof). Here is our first example:
>>> from ipalib import Command, create_api
>>> api = create_api()
>>> class my_command(Command): # Step 1, define class
... """My example plugin."""
...
>>> api.add_plugin(my_command) # Step 2, register class
Notice that we are registering the ``my_command`` class itself, not an
instance of ``my_command``.
Until `plugable.API.finalize()` is called, your plugin class has not been
instantiated nor does the ``Command`` namespace yet exist. For example:
>>> hasattr(api, 'Command')
False
>>> api.finalize() # plugable.API.finalize()
>>> hasattr(api.Command, 'my_command')
True
>>> api.Command.my_command.doc
Gettext('My example plugin.', domain='ipa', localedir=None)
Notice that your plugin instance is accessed through an attribute named
``my_command``, the same name as your plugin class name.
------------------------------
Make your command do something
------------------------------
This simplest way to make your example command plugin do something is to
implement a ``run()`` method, like this:
>>> class my_command(Command):
... """My example plugin with run()."""
...
... def run(self, **options):
... return dict(result='My run() method was called!')
...
>>> api = create_api()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command
{'result': 'My run() method was called!'}
When `frontend.Command.__call__()` is called, it first validates any arguments
and options your command plugin takes (if any) and then calls its ``run()``
method.
------------------------
Forwarding vs. execution
------------------------
However, unlike the example above, a typical command plugin will implement an
``execute()`` method instead of a ``run()`` method. Your command plugin can
be loaded in two distinct contexts:
1. In a *client* context - Your command plugin is only used to validate
any arguments and options it takes, and then ``self.forward()`` is
called, which forwards the call over XML-RPC to an IPA server where
the actual work is done.
2. In a *server* context - Your same command plugin validates any
arguments and options it takes, and then ``self.execute()`` is called,
which you should implement to perform whatever work your plugin does.
The base `frontend.Command.run()` method simply dispatches the call to
``self.execute()`` if ``self.env.in_server`` is True, or otherwise
dispatches the call to ``self.forward()``.
For example, say you have a command plugin like this:
>>> class my_command(Command):
... """Forwarding vs. execution."""
...
... def forward(self, **options):
... return dict(
... result='forward(): in_server=%r' % self.env.in_server
... )
...
... def execute(self, **options):
... return dict(
... result='execute(): in_server=%r' % self.env.in_server
... )
...
The ``options`` will contain a dict of command options. One option is added
automatically: ``version``. It contains the API version of the client.
In order to maintain forward compatibility, you should always specify the
API version current at the time you're writing your client.
If ``my_command`` is loaded in a *client* context, ``forward()`` will be
called:
>>> api = create_api()
>>> api.env.in_server = False # run() will dispatch to forward()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command plugin
{'result': 'forward(): in_server=False'}
On the other hand, if ``my_command`` is loaded in a *server* context,
``execute()`` will be called:
>>> api = create_api()
>>> api.env.in_server = True # run() will dispatch to execute()
>>> api.add_plugin(my_command)
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47') # Call your command plugin
{'result': 'execute(): in_server=True'}
Normally there should be no reason to override `frontend.Command.forward()`,
but, as above, it can be done for demonstration purposes. In contrast, there
*is* a reason you might want to override `frontend.Command.run()`: if it only
makes sense to execute your command locally, if it should never be forwarded
to the server. In this case, you should implement your *do-stuff* in the
``run()`` method instead of in the ``execute()`` method.
For example, the ``ipa`` command line script has a ``help`` command
(`ipalib.cli.help`) that is specific to the command-line-interface and should
never be forwarded to the server.
---------------
Backend plugins
---------------
There are two types of plugins:
1. *Frontend plugins* - These are loaded in both the *client* and *server*
contexts. These need to be installed with any application built atop
the `ipalib` library. The built-in frontend plugins can be found in
`ipalib.plugins`. The ``my_command`` example above is a frontend
plugin.
2. *Backend plugins* - These are only loaded in a *server* context and
only need to be installed on the IPA server. The built-in backend
plugins can be found in `ipaserver.plugins`.
Backend plugins should provide a set of methods that standardize how IPA
interacts with some external system or library. For example, all interaction
with LDAP is done through the ``ldap`` backend plugin defined in
`ipaserver.plugins.b_ldap`. As a good rule of thumb, anytime you need to
import some package that is not part of the Python standard library, you
should probably interact with that package via a corresponding backend
plugin you implement.
Backend plugins are much more free-form than command plugins. Aside from a
few reserved attribute names, you can define arbitrary public methods on your
backend plugin.
Here is a simple example:
>>> from ipalib import Backend
>>> class my_backend(Backend):
... """My example backend plugin."""
...
... def do_stuff(self):
... """Part of your API."""
... return 'Stuff got done.'
...
>>> api = create_api()
>>> api.add_plugin(my_backend)
>>> api.finalize()
>>> api.Backend.my_backend.do_stuff()
'Stuff got done.'
-------------------------------
How your command should do work
-------------------------------
We now return to our ``my_command`` plugin example.
Plugins are separated into frontend and backend plugins so that there are not
unnecessary dependencies required by an application that only uses `ipalib` and
its built-in frontend plugins (and then forwards over XML-RPC for execution).
But how do we avoid introducing additional dependencies? For example, the
``user_add`` command needs to talk to LDAP to add the user, yet we want to
somehow load the ``user_add`` plugin on client machines without requiring the
``python-ldap`` package (Python bindings to openldap) to be installed. To
answer that, we consult our golden rule:
**The golden rule:** A command plugin should implement its ``execute()``
method strictly via calls to methods on one or more backend plugins.
So the module containing the ``user_add`` command does not itself import the
Python LDAP bindings, only the module containing the ``ldap`` backend plugin
does that, and the backend plugins are only installed on the server. The
``user_add.execute()`` method, which is only called when in a server context,
is implemented as a series of calls to methods on the ``ldap`` backend plugin.
When `plugable.Plugin.__init__()` is called, each plugin stores a reference to
the `plugable.API` instance it has been loaded into. So your plugin can
access the ``my_backend`` plugin as ``self.api.Backend.my_backend``.
Additionally, convenience attributes are set for each namespace, so your
plugin can also access the ``my_backend`` plugin as simply
``self.Backend.my_backend``.
This next example will tie everything together. First we create our backend
plugin:
>>> api = create_api()
>>> api.env.in_server = True # We want to execute, not forward
>>> class my_backend(Backend):
... """My example backend plugin."""
...
... def do_stuff(self):
... """my_command.execute() calls this."""
... return 'my_backend.do_stuff() indeed did do stuff!'
...
>>> api.add_plugin(my_backend)
Second, we have our frontend plugin, the command:
>>> class my_command(Command):
... """My example command plugin."""
...
... def execute(self, **options):
... """Implemented against Backend.my_backend"""
... return dict(result=self.Backend.my_backend.do_stuff())
...
>>> api.add_plugin(my_command)
Lastly, we call ``api.finalize()`` and see what happens when we call
``my_command()``:
>>> api.finalize()
>>> api.Command.my_command(version=u'2.47')
{'result': 'my_backend.do_stuff() indeed did do stuff!'}
When not in a server context, ``my_command.execute()`` never gets called, so
it never tries to access the non-existent backend plugin at
``self.Backend.my_backend.`` To emphasize this point, here is one last
example:
>>> api = create_api()
>>> api.env.in_server = False # We want to forward, not execute
>>> class my_command(Command):
... """My example command plugin."""
...
... def execute(self, **options):
... """Same as above."""
... return dict(result=self.Backend.my_backend.do_stuff())
...
... def forward(self, **options):
... return dict(result='Just my_command.forward() getting called here.')
...
>>> api.add_plugin(my_command)
>>> api.finalize()
Notice that the ``my_backend`` plugin has certainly not been registered:
>>> hasattr(api.Backend, 'my_backend')
False
And yet we can call ``my_command()``:
>>> api.Command.my_command(version=u'2.47')
{'result': 'Just my_command.forward() getting called here.'}
----------------------------------------
Calling other commands from your command
----------------------------------------
It can be useful to have your ``execute()`` method call other command plugins.
Among other things, this allows for meta-commands that conveniently call
several other commands in a single operation. For example:
>>> api = create_api()
>>> api.env.in_server = True # We want to execute, not forward
>>> class meta_command(Command):
... """My meta-command plugin."""
...
... def execute(self, **options):
... """Calls command_1(), command_2()"""
... msg = '%s; %s.' % (
... self.Command.command_1()['result'],
... self.Command.command_2()['result'],
... )
... return dict(result=msg)
>>> class command_1(Command):
... def execute(self, **options):
... return dict(result='command_1.execute() called')
...
>>> class command_2(Command):
... def execute(self, **options):
... return dict(result='command_2.execute() called')
...
>>> api.add_plugin(meta_command)
>>> api.add_plugin(command_1)
>>> api.add_plugin(command_2)
>>> api.finalize()
>>> api.Command.meta_command(version=u'2.47')
{'result': 'command_1.execute() called; command_2.execute() called.'}
Because this is quite useful, we are going to revise our golden rule somewhat:
**The revised golden rule:** A command plugin should implement its
``execute()`` method strictly via what it can access through ``self.api``,
most likely via the backend plugins in ``self.api.Backend`` (which can also
be conveniently accessed as ``self.Backend``).
-----------------------------------------------
Defining arguments and options for your command
-----------------------------------------------
You can define a command that will accept specific arguments and options.
For example:
>>> from ipalib import Str
>>> class nudge(Command):
... """Takes one argument, one option"""
...
... takes_args = ('programmer',)
...
... takes_options = (Str('stuff', default=u'documentation'))
...
... def execute(self, programmer, **kw):
... return dict(
... result='%s, go write more %s!' % (programmer, kw['stuff'])
... )
...
>>> api = create_api()
>>> api.env.in_server = True
>>> api.add_plugin(nudge)
>>> api.finalize()
>>> api.Command.nudge(u'Jason', version=u'2.47')
{'result': u'Jason, go write more documentation!'}
>>> api.Command.nudge(u'Jason', stuff=u'unit tests', version=u'2.47')
{'result': u'Jason, go write more unit tests!'}
The ``args`` and ``options`` attributes are `plugable.NameSpace` instances
containing a command's arguments and options, respectively, as you can see:
>>> list(api.Command.nudge.args) # Iterates through argument names
['programmer']
>>> api.Command.nudge.args.programmer
Str('programmer')
>>> list(api.Command.nudge.options) # Iterates through option names
['stuff', 'version']
>>> api.Command.nudge.options.stuff
Str('stuff', default=u'documentation')
>>> api.Command.nudge.options.stuff.default
u'documentation'
The 'version' option is added to commands automatically.
The arguments and options must not contain colliding names. They are both
merged together into the ``params`` attribute, another `plugable.NameSpace`
instance, as you can see:
>>> api.Command.nudge.params
NameSpace(<3 members>, sort=False)
>>> list(api.Command.nudge.params) # Iterates through the param names
['programmer', 'stuff', 'version']
When calling a command, its positional arguments can also be provided as
keyword arguments, and in any order. For example:
>>> api.Command.nudge(stuff=u'lines of code', programmer=u'Jason', version=u'2.47')
{'result': u'Jason, go write more lines of code!'}
When a command plugin is called, the values supplied for its parameters are
put through a sophisticated processing pipeline that includes steps for
normalization, type conversion, validation, and dynamically constructing
the defaults for missing values. The details won't be covered here; however,
here is a quick teaser:
>>> from ipalib import Int
>>> class create_player(Command):
... takes_options = (
... 'first',
... 'last',
... Str('nick',
... normalizer=lambda value: value.lower(),
... default_from=lambda first, last: first[0] + last,
... ),
... Int('points', default=0),
... )
...
>>> cp = create_player()
>>> cp.finalize()
>>> cp.convert(points=u' 1000 ')
{'points': 1000}
>>> cp.normalize(nick=u'NickName')
{'nick': u'nickname'}
>>> cp.get_default(first=u'Jason', last=u'DeRose')
{'nick': u'jderose', 'points': 0}
For the full details on the parameter system, see the
`frontend.parse_param_spec()` function, and the `frontend.Param` and
`frontend.Command` classes.
---------------------------------------
Allowed return values from your command
---------------------------------------
The return values from your command can be rendered by different user
interfaces (CLI, web-UI); furthermore, a call to your command can be
transparently forwarded over the network (XML-RPC, JSON). As such, the return
values from your command must be usable by the least common denominator.
Your command should return only simple data types and simple data structures,
the kinds that can be represented in an XML-RPC request or in the JSON format.
The return values from your command's ``execute()`` method can include only
the following:
Simple scalar values:
These can be ``str``, ``unicode``, ``int``, and ``float`` instances,
plus the ``True``, ``False``, and ``None`` constants.
Simple compound values:
These can be ``dict``, ``list``, and ``tuple`` instances. These
compound values must contain only the simple scalar values above or
other simple compound values. These compound values can also be empty.
For our purposes here, the ``list`` and ``tuple`` types are equivalent
and can be used interchangeably.
Also note that your ``execute()`` method should not contain any ``print``
statements or otherwise cause any output on ``sys.stdout``. Your command can
(and should) produce log messages by using a module-level logger (see below).
To learn more about XML-RPC (XML Remote Procedure Call), see:
http://docs.python.org/library/xmlrpclib.html
http://en.wikipedia.org/wiki/XML-RPC
To learn more about JSON (Java Script Object Notation), see:
http://docs.python.org/library/json.html
http://www.json.org/
---------------------------------------
How your command should print to stdout
---------------------------------------
As noted above, your command should not print anything while in its
``execute()`` method. So how does your command format its output when
called from the ``ipa`` script?
After the `cli.CLI.run_cmd()` method calls your command, it will call your
command's ``output_for_cli()`` method (if you have implemented one).
If you implement an ``output_for_cli()`` method, it must have the following
signature:
::
output_for_cli(textui, result, *args, **options)
textui
An object implementing methods for outputting to the console.
Currently the `ipalib.cli.textui` plugin is passed, which your method
can also access as ``self.Backend.textui``. However, in case this
changes in the future, your method should use the instance passed to
it in this first argument.
result
This is the return value from calling your command plugin. Depending
upon how your command is implemented, this is probably the return
value from your ``execute()`` method.
args
The arguments your command was called with. If your command takes no
arguments, you can omit this. You can also explicitly list your
arguments rather than using the generic ``*args`` form.
options
The options your command was called with. If your command takes no
options, you can omit this. If your command takes any options, you
must use the ``**options`` form as they will be provided strictly as
keyword arguments.
For example, say we setup a command like this:
>>> class show_items(Command):
...
... takes_args = ('key?',)
...
... takes_options = (Flag('reverse'),)
...
... def execute(self, key, **options):
... items = dict(
... fruit=u'apple',
... pet=u'dog',
... city=u'Berlin',
... )
... if key in items:
... return dict(result=items[key])
... items = [
... (k, items[k]) for k in sorted(items, reverse=options['reverse'])
... ]
... return dict(result=items)
...
... def output_for_cli(self, textui, result, key, **options):
... result = result['result']
... if key is not None:
... textui.print_plain('%s = %r' % (key, result))
... else:
... textui.print_name(self.name)
... textui.print_keyval(result)
... format = '%d items'
... if options['reverse']:
... format += ' (in reverse order)'
... textui.print_count(result, format)
...
>>> api = create_api()
>>> api.bootstrap(in_server=True) # We want to execute, not forward
>>> api.add_plugin(show_items)
>>> api.finalize()
Normally when you invoke the ``ipa`` script, `cli.CLI.load_plugins()` will
register the `cli.textui` backend plugin, but for the sake of our example,
we will just create an instance here:
>>> from ipalib import cli
>>> textui = cli.textui() # We'll pass this to output_for_cli()
Now for what we are concerned with in this example, calling your command
through the ``ipa`` script basically will do the following:
>>> result = api.Command.show_items()
>>> api.Command.show_items.output_for_cli(textui, result, None, reverse=False)
-----------
show-items:
-----------
city = u'Berlin'
fruit = u'apple'
pet = u'dog'
-------
3 items
-------
Similarly, calling it with ``reverse=True`` would result in the following:
>>> result = api.Command.show_items(reverse=True)
>>> api.Command.show_items.output_for_cli(textui, result, None, reverse=True)
-----------
show-items:
-----------
pet = u'dog'
fruit = u'apple'
city = u'Berlin'
--------------------------
3 items (in reverse order)
--------------------------
Lastly, providing a ``key`` would result in the following:
>>> result = api.Command.show_items(u'city')
>>> api.Command.show_items.output_for_cli(textui, result, 'city', reverse=False)
city = u'Berlin'
See the `ipalib.cli.textui` plugin for a description of its methods.
------------------------
Logging from your plugin
------------------------
Plugins should log through a module-level logger.
For example:
>>> import logging
>>> logger = logging.getLogger(__name__)
>>> class paint_house(Command):
...
... takes_args = 'color'
...
... def execute(self, color, **options):
... """Uses logger.error()"""
... if color not in ('red', 'blue', 'green'):
... logger.error("I don't have %s paint!", color) # Log error
... return
... return 'I painted the house %s.' % color
...
Some basic knowledge of the Python ``logging`` module might be helpful. See:
http://docs.python.org/library/logging.html
The important thing to remember is that your plugin should not configure
logging itself, but should instead simply use the module-level logger.
Also see the `plugable.API.bootstrap()` method for details on how the logging
is configured.
---------------------
Environment variables
---------------------
Plugins access configuration variables and run-time information through
``self.api.env`` (or for convenience, ``self.env`` is equivalent). This
attribute is a reference to the `ipalib.config.Env` instance created in
`plugable.API.__init__()`.
After `API.bootstrap()` has been called, the `Env` instance will be populated
with all the environment information used by the built-in plugins.
This will be called before any plugins are registered, so plugin authors can
assume these variables will all exist by the time the module containing their
plugin (or plugins) is imported.
`Env._bootstrap()`, which is called by `API.bootstrap()`, will create several
run-time variables that cannot be overridden in configuration files or through
command-line options. Here is an overview of this run-time information:
============= ============================= =======================
Key Example value Description
============= ============================= =======================
bin '/usr/bin' Dir. containing script
dot_ipa '/home/jderose/.ipa' User config directory
home os.path.expanduser('~') User home dir.
ipalib '.../site-packages/ipalib' Dir. of ipalib package
mode 'unit_test' The mode ipalib is in
script sys.argv[0] Path of script
site_packages '.../python2.5/site-packages' Dir. containing ipalib/
============= ============================= =======================
If your plugin requires new environment variables *and* will be included in
the freeIPA built-in plugins, you should add the defaults for your variables
in `ipalib.constants.DEFAULT_CONFIG`. Also, you should consider whether your
new environment variables should have any auto-magic logic to determine their
values if they haven't already been set by the time `config.Env._bootstrap()`,
`config.Env._finalize_core()`, or `config.Env._finalize()` is called.
On the other hand, if your plugin requires new environment variables and will
be installed in a 3rd-party package, your plugin should set these variables
in the module it is defined in.
`config.Env` values work on a first-one-wins basis... after a value has been
set, it can not be overridden with a new value. As any variables can be set
using the command-line ``-e`` global option or set in a configuration file,
your module must check whether a variable has already been set before
setting its default value. For example:
>>> if 'message_of_the_day' not in api.env:
... api.env.message_of_the_day = 'Hello, world!'
...
Your plugin can access any environment variables via ``self.env``.
For example:
>>> class motd(Command):
... """Print message of the day."""
...
... def execute(self, **options):
... return dict(result=self.env.message)
...
>>> api = create_api()
>>> api.bootstrap(in_server=True, message='Hello, world!')
>>> api.add_plugin(motd)
>>> api.finalize()
>>> api.Command.motd(version=u'2.47')
{'result': u'Hello, world!'}
Also see the `plugable.API.bootstrap_with_global_options()` method.
---------------------------------------------
Indispensable ipa script commands and options
---------------------------------------------
The ``console`` command will launch a custom interactive Python interpreter
session. The global environment will have an ``api`` variable, which is the
standard `plugable.API` instance found at ``ipalib.api``. All plugins will
have been loaded (well, except the backend plugins if ``in_server`` is False)
and ``api`` will be fully initialized. To launch the console from within the
top-level directory in the source tree, just run ``ipa console`` from a
terminal, like this:
::
$ ./ipa console
By default, ``in_server`` is False. If you want to start the console in a
server context (so that all the backend plugins are loaded), you can use the
``-e`` option to set the ``in_server`` environment variable, like this:
::
$ ./ipa -e in_server=True console
You can specify multiple environment variables by including the ``-e`` option
multiple times, like this:
::
$ ./ipa -e in_server=True -e mode=dummy console
The space after the ``-e`` is optional. This is equivalent to the above command:
::
$ ./ipa -ein_server=True -emode=dummy console
The ``env`` command will print out the full environment in key=value pairs,
like this:
::
$ ./ipa env
If you use the ``--server`` option, it will forward the call to the server
over XML-RPC and print out what the environment is on the server, like this:
::
$ ./ipa env --server
The ``plugins`` command will show details of all the plugin that are loaded,
like this:
::
$ ./ipa plugins
-----------------------------------
Learning more about freeIPA plugins
-----------------------------------
To learn more about writing freeIPA plugins, you should:
1. Look at some of the built-in plugins, like the frontend plugins in
`ipalib.plugins.f_user` and the backend plugins in
`ipaserver.plugins.b_ldap`.
2. Learn about the base classes for frontend plugins in `ipalib.frontend`.
3. Learn about the core plugin framework in `ipalib.plugable`.
Furthermore, the freeIPA plugin architecture was inspired by the Bazaar plugin
architecture. Although the two are different enough that learning how to
write plugins for Bazaar will not particularly help you write plugins for
freeIPA, some might be interested in the documentation on writing plugins for
Bazaar, available here:
http://bazaar-vcs.org/WritingPlugins
If nothing else, we just want to give credit where credit is deserved!
However, freeIPA does not use any *code* from Bazaar... it merely borrows a
little inspiration.
--------------------------
A note on docstring markup
--------------------------
Lastly, a quick note on markup: All the Python docstrings in freeIPA v2
(including this tutorial) use the *reStructuredText* markup language. For
information on reStructuredText, see:
http://docutils.sourceforge.net/rst.html
For information on using reStructuredText markup with epydoc, see:
http://epydoc.sourceforge.net/manual-othermarkup.html
--------------------------------------------------
Next steps: get involved with freeIPA development!
--------------------------------------------------
The freeIPA team is always interested in feedback and contribution from the
community. To get involved with freeIPA, see the *Contribute* page on
freeIPA.org:
http://freeipa.org/page/Contribute
'''
from ipapython.version import VERSION as __version__
def _enable_warnings(error=False):
"""Enable additional warnings during development
"""
import ctypes
import warnings
# get reference to Py_BytesWarningFlag from Python CAPI
byteswarnings = ctypes.c_int.in_dll(
ctypes.pythonapi, 'Py_BytesWarningFlag')
if byteswarnings.value >= 2:
# bytes warnings flag already set to error
return
# default warning mode for all modules: warn once per location
warnings.simplefilter('default', BytesWarning)
if error:
byteswarnings.value = 2
action = 'error'
else:
byteswarnings.value = 1
action = 'default'
module = '(ipa.*|__main__)'
warnings.filterwarnings(action, category=BytesWarning, module=module)
warnings.filterwarnings(action, category=DeprecationWarning,
module=module)
# call this as early as possible
# Development (git snapshot) builds get the extra warning reporting;
# released versions are left with the default warning configuration.
if 'git' in __version__:
    _enable_warnings(False)
# noqa: E402
from ipalib import plugable
from ipalib.backend import Backend
from ipalib.frontend import Command, LocalOrRemote, Updater
from ipalib.frontend import Object, Method
from ipalib.crud import Create, Retrieve, Update, Delete, Search
from ipalib.parameters import (
DefaultFrom, Bool, Flag, Int, Decimal, Bytes, Str, IA5Str,
Password, DNParam, SerialNumber
)
from ipalib.parameters import (BytesEnum, StrEnum, IntEnum, AccessTime, File,
DateTime, DNSNameParam)
from ipalib.errors import SkipPluginModule
from ipalib.text import _, ngettext, GettextFactory, NGettextFactory
Registry = plugable.Registry
class API(plugable.API):
    """IPA API object with context-manager support.

    Extends `plugable.API` with the concrete plugin base classes and a
    context manager that connects/disconnects the proper backend.
    """

    # Plugin base classes this API accepts (namespaces api.Command,
    # api.Object, api.Method, api.Backend, api.Updater).
    bases = (Command, Object, Method, Backend, Updater)

    def _connection(self):
        """Return the connection backend for the current context.

        Server processes talk to LDAP directly, clients use JSON-RPC
        over HTTPS, as selected by the ``in_server`` environment flag.
        """
        if self.env.in_server:
            return self.Backend.ldap2
        return self.Backend.rpcclient

    def __enter__(self):
        """Context manager for IPA API

        The context manager connects the backend on enter and
        disconnects on exit. The process must have access to a valid Kerberos
        ticket or have automatic authentication with a keytab or gssproxy
        set up. The connection type depends on ``in_server`` and ``context``
        options. Server connections use LDAP while clients use JSON-RPC over
        HTTPS.

        The context manager also finalizes the API object, in case it hasn't
        been finalized yet. It is possible to use a custom API object. In
        that case, the global API object must be finalized, first. Some
        options like logging only apply to global ``ipalib.api`` object.

        Usage with global api object::

            import os
            import ipalib

            try:
                from ipalib import kinit
            except ImportError:
                from ipalib.install import kinit

            # set a custom ccache to isolate from the environment
            ccache_name = "FILE:/path/to/tmp/service.ccache"
            os.environ["KRB5CCNAME"] = ccache_name

            # optional: automatic authentication with a KRB5 keytab
            os.environ["KRB5_CLIENT_KTNAME"] = "/path/to/service.keytab"
            # or with password
            kinit.kinit_passwd("username", "password", ccache_name)

            # optional: override settings (once per process)
            overrides = {}
            ipalib.api.bootstrap(**overrides)

            with ipalib.api as api:
                host = api.Command.host_show(api.env.host)
                user = api.Command.user_show("admin")
        """
        # Several IPA modules require api.env at import time, some even
        # a fully finalized ipalib.api, e.g. register() with MethodOverride.
        if self is not api and not api.isdone("finalize"):
            raise RuntimeError("global ipalib.api must be finalized first.")

        # initialize this api
        if not self.isdone("finalize"):
            self.finalize()

        # connect backend; server and client use different backends.
        conn = self._connection()
        if conn.isconnected():
            raise RuntimeError("API is already connected")
        conn.connect()

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Disconnect backend on exit"""
        conn = self._connection()
        if conn.isconnected():
            conn.disconnect()

    @property
    def packages(self):
        """Return the tuple of plugin packages to load for this context."""
        if self.env.in_server:
            # server packages are not published on pypi.org
            # pylint: disable=useless-suppression
            # pylint: disable=import-error,ipa-forbidden-import
            import ipaserver.plugins
            # pylint: enable=import-error,ipa-forbidden-import
            # pylint: enable=useless-suppression
            result = (
                ipaserver.plugins,
            )
        else:
            import ipaclient.remote_plugins  # pylint: disable=cyclic-import
            import ipaclient.plugins
            result = (
                ipaclient.remote_plugins.get_package(self),
                ipaclient.plugins,
            )

        if self.env.context in ('installer', 'updates'):
            # install/update tooling additionally loads server-side
            # update plugins; these are not published on pypi.org either.
            # pylint: disable=useless-suppression
            # pylint: disable=import-error,ipa-forbidden-import
            import ipaserver.install.plugins
            # pylint: enable=import-error,ipa-forbidden-import
            # pylint: enable=useless-suppression
            result += (ipaserver.install.plugins,)

        return result
def create_api(mode='dummy'):
    """
    Return standard `plugable.API` instance.

    This standard instance allows plugins that subclass from the following
    base classes:

        - `frontend.Command`
        - `frontend.Object`
        - `frontend.Method`
        - `backend.Backend`
    """
    instance = API()
    if mode is None:
        return instance
    instance.env.mode = mode
    assert mode != 'production'
    return instance
# Global module-level API instance; mode=None leaves api.env.mode to be
# picked up from configuration rather than forcing the 'dummy' default.
api = create_api(mode=None)
| 37,675
|
Python
|
.py
| 830
| 42.012048
| 83
| 0.672448
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,666
|
rpc.py
|
freeipa_freeipa/ipalib/rpc.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
RPC client and shared RPC client/server functionality.
This module adds some additional functionality on top of the ``xmlrpc.client``
module in the Python standard library (``xmlrpclib`` in Python 2).
For documentation on the ``xmlrpclib`` module, see:
http://docs.python.org/2/library/xmlrpclib.html
Also see the `ipaserver.rpcserver` module.
"""
from __future__ import absolute_import
from decimal import Decimal
import datetime
import logging
import os
import locale
import base64
import json
import re
import socket
import gzip
import urllib
from ssl import SSLError
from cryptography import x509 as crypto_x509
import gssapi
from dns.exception import DNSException
import six
from ipalib.backend import Connectible
from ipalib.constants import LDAP_GENERALIZED_TIME_FORMAT
from ipalib.errors import (errors_by_code, UnknownError, NetworkError,
XMLRPCMarshallError, JSONError)
from ipalib import errors, capabilities
from ipalib.request import context, Connection
from ipalib.x509 import Encoding as x509_Encoding
from ipapython import ipautil
from ipapython import session_storage
from ipapython.cookie import Cookie
from ipapython.dnsutil import DNSName, query_srv
from ipalib.text import _
from ipalib.util import create_https_connection
from ipalib.krb_utils import (
KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN,
KRB5KRB_AP_ERR_TKT_EXPIRED,
KRB5_FCC_PERM,
KRB5_FCC_NOFILE,
KRB5_CC_FORMAT,
KRB5_REALM_CANT_RESOLVE,
KRB5_CC_NOTFOUND,
get_principal,
)
from ipapython.dn import DN
from ipapython.kerberos import Principal
from ipalib.capabilities import VERSION_WITHOUT_CAPABILITIES
from ipalib import api
from ipalib.ipajson import json_encode_binary, json_decode_binary
# The XMLRPC client is in "six.moves.xmlrpc_client", but pylint
# cannot handle that
try:
from xmlrpclib import (
Binary,
Fault,
DateTime,
dumps,
loads,
ServerProxy,
Transport,
ProtocolError,
MININT,
MAXINT,
)
except ImportError:
from xmlrpc.client import (
Binary,
Fault,
DateTime,
dumps,
loads,
ServerProxy,
Transport,
ProtocolError,
MININT,
MAXINT,
)
# pylint: disable=import-error
if six.PY3:
from http.client import RemoteDisconnected
else:
from httplib import BadStatusLine as RemoteDisconnected
# pylint: enable=import-error
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
COOKIE_NAME = 'ipa_session'
CCACHE_COOKIE_KEY = 'X-IPA-Session-Cookie'
def update_persistent_client_session_data(principal, data):
    '''
    Given a principal create or update the session data for that
    principal in the persistent secure storage.

    :param principal: principal the session data belongs to
    :param data: opaque session blob stored under the session cookie key
    :raises ValueError: if unable to perform the action for any reason
    '''

    try:
        session_storage.store_data(principal, CCACHE_COOKIE_KEY, data)
    except Exception as e:
        # Chain the original exception so the underlying storage failure
        # is preserved in the traceback instead of being discarded.
        raise ValueError(str(e)) from e
def read_persistent_client_session_data(principal):
    '''
    Given a principal return the stored session data for that
    principal from the persistent secure storage.

    :param principal: principal whose session data is requested
    :return: the stored session blob
    :raises ValueError: if unable to perform the action for any reason
    '''

    try:
        return session_storage.get_data(principal, CCACHE_COOKIE_KEY)
    except Exception as e:
        # Chain the original exception so the underlying storage failure
        # is preserved in the traceback instead of being discarded.
        raise ValueError(str(e)) from e
def delete_persistent_client_session_data(principal):
    '''
    Given a principal remove the session data for that
    principal from the persistent secure storage.

    :param principal: principal whose session data is removed
    :raises ValueError: if unable to perform the action for any reason
    '''

    try:
        session_storage.remove_data(principal, CCACHE_COOKIE_KEY)
    except Exception as e:
        # Chain the original exception so the underlying storage failure
        # is preserved in the traceback instead of being discarded.
        raise ValueError(str(e)) from e
def xml_wrap(value, version):
    """
    Wrap all ``str`` in ``xmlrpc.client.Binary``.

    Because ``xmlrpc.client.dumps()`` will itself convert all ``unicode``
    instances into UTF-8 encoded ``str`` instances, we don't do it here.

    So in total, when encoding data for an XML-RPC packet, the following
    transformations occur:

        * All ``str`` instances are treated as binary data and are wrapped in
          an ``xmlrpc.client.Binary()`` instance.

        * Only ``unicode`` instances are treated as character data. They get
          converted to UTF-8 encoded ``str`` instances (although as mentioned,
          not by this function).

    Also see `xml_unwrap()`.

    :param value: The simple scalar or simple compound value to wrap.
    :param version: Client API version string; gates use of extended value
        encodings via `capabilities.client_has_capability`.
    """
    # NOTE: ordering of the checks below is significant; exact type() checks
    # come before isinstance() checks so subclasses are not misclassified.
    if type(value) in (list, tuple):
        return tuple(xml_wrap(v, version) for v in value)
    if isinstance(value, dict):
        return dict(
            (k, xml_wrap(v, version)) for (k, v) in value.items()
        )
    if type(value) is bytes:
        return Binary(value)
    if type(value) is Decimal:
        # transfer Decimal as a string
        return unicode(value)
    if isinstance(value, int) and (value < MININT or value > MAXINT):
        # XML-RPC ints are limited to 32-bit; larger values go as strings
        return unicode(value)
    if isinstance(value, DN):
        return str(value)

    # Encode datetime.datetime objects as xmlrpc.client.DateTime objects
    if isinstance(value, datetime.datetime):
        if capabilities.client_has_capability(version, 'datetime_values'):
            return DateTime(value)
        else:
            return value.strftime(LDAP_GENERALIZED_TIME_FORMAT)

    if isinstance(value, DNSName):
        if capabilities.client_has_capability(version, 'dns_name_values'):
            return {'__dns_name__': unicode(value)}
        else:
            return unicode(value)

    if isinstance(value, Principal):
        return unicode(value)

    if isinstance(value, crypto_x509.Certificate):
        return base64.b64encode(
            value.public_bytes(x509_Encoding.DER)).decode('ascii')

    if isinstance(value, crypto_x509.CertificateSigningRequest):
        return base64.b64encode(
            value.public_bytes(x509_Encoding.DER)).decode('ascii')

    # Anything else must be a plain scalar that XML-RPC handles natively.
    assert type(value) in (unicode, float, int, bool, type(None))
    return value
def xml_unwrap(value, encoding='UTF-8'):
    """
    Unwrap all ``xmlrpc.Binary``, decode all ``str`` into ``unicode``.

    When decoding data from an XML-RPC packet, the following transformations
    occur:

        * The binary payloads of all ``xmlrpc.client.Binary`` instances are
          returned as ``str`` instances.

        * All ``str`` instances are treated as UTF-8 encoded Unicode strings.
          They are decoded and the resulting ``unicode`` instance is returned.

    Also see `xml_wrap()`.

    :param value: The value to unwrap.
    :param encoding: The Unicode encoding to use (defaults to ``'UTF-8'``).
    :raises TypeError: if ``value`` is not of a recognized wire type.
    """
    if isinstance(value, (unicode, int, float, bool)):
        # most common first
        return value
    elif value is None:
        return value
    elif isinstance(value, bytes):
        return value.decode(encoding)
    elif isinstance(value, (list, tuple)):
        return tuple(xml_unwrap(v, encoding) for v in value)
    elif isinstance(value, dict):
        # '__dns_name__' marks a DNSName encoded by xml_wrap()
        if '__dns_name__' in value:
            return DNSName(value['__dns_name__'])
        else:
            return dict(
                (k, xml_unwrap(v, encoding)) for (k, v) in value.items()
            )
    elif isinstance(value, Binary):
        assert type(value.data) is bytes
        return value.data
    elif isinstance(value, DateTime):
        # xmlprc DateTime is converted to string of %Y%m%dT%H:%M:%S format
        return datetime.datetime.strptime(str(value), "%Y%m%dT%H:%M:%S")
    raise TypeError(value)
def xml_dumps(params, version, methodname=None, methodresponse=False,
              encoding='UTF-8'):
    """
    Encode an XML-RPC data packet, transparently wrapping ``params``.

    ``params`` is first wrapped with `xml_wrap()` (unless it is a
    ``Fault``, which is passed through as-is) and then serialized via
    ``xmlrpc.client.dumps()`` from the Python standard library.

    Also see `xml_loads()`.

    :param params: A ``tuple`` or an ``xmlrpc.client.Fault`` instance.
    :param version: Client API version, forwarded to `xml_wrap()`.
    :param methodname: The name of the method to call if this is a request.
    :param methodresponse: Set this to ``True`` if this is a response.
    :param encoding: The Unicode encoding to use (defaults to ``'UTF-8'``).
    """
    if isinstance(params, Fault):
        payload = params
    else:
        assert type(params) is tuple
        payload = xml_wrap(params, version)
    return dumps(
        payload,
        methodname=methodname,
        methodresponse=methodresponse,
        encoding=encoding,
        allow_none=True,
    )
def decode_fault(e, encoding='UTF-8'):
    """Return *e* with its ``faultString`` decoded to text.

    A ``Fault`` whose ``faultString`` is already text is returned
    unchanged (same object); a bytes ``faultString`` produces a new
    ``Fault`` with the string decoded using *encoding*.
    """
    assert isinstance(e, Fault)
    if not isinstance(e.faultString, bytes):
        return e
    return Fault(e.faultCode, e.faultString.decode(encoding))
def xml_loads(data, encoding='UTF-8'):
    """
    Decode the XML-RPC packet in ``data``, transparently unwrapping its params.

    The packet is decoded with ``xmlrpc.client.loads()`` from the Python
    standard library. If ``data`` contains a fault, ``loads()`` raises an
    ``xmlrpc.client.Fault``, which is re-raised here with its fault string
    decoded (see `decode_fault()`).

    On success a ``(params, methodname)`` tuple is returned, where
    ``params`` has been unwrapped with `xml_unwrap()` and ``methodname``
    is ``None`` when the packet carries no method name.

    Also see `xml_dumps()`.

    :param data: The XML-RPC packet to decode.
    :param encoding: Unused; kept for interface compatibility.
    """
    try:
        params, method = loads(data)
    except Fault as fault:
        raise decode_fault(fault)
    return (xml_unwrap(params), method)
class DummyParser:
    """Parser stand-in that buffers raw bytes instead of parsing XML.

    The transport's parser interface (``feed()``/``close()``) is used for
    the JSON protocol, where the response body must be passed through
    untouched rather than fed to an XML parser.
    """

    def __init__(self):
        self.data = []

    def feed(self, data):
        # Accumulate raw chunks; they are joined on close().
        self.data.append(data)

    def close(self):
        return b''.join(self.data)
class MultiProtocolTransport(Transport):
    """Transport that handles both XML-RPC and JSON"""
    def __init__(self, *args, **kwargs):
        Transport.__init__(self)
        # 'json' selects pass-through parsing and the JSON content type;
        # any other value (including None) behaves as plain XML-RPC.
        self.protocol = kwargs.get('protocol', None)

    def getparser(self):
        """Return a ``(parser, unmarshaller)`` pair for the response body.

        For JSON the raw body is buffered as-is via `DummyParser`; for
        XML the standard Transport parser is used.
        """
        if self.protocol == 'json':
            parser = DummyParser()
            return parser, parser
        else:
            return Transport.getparser(self)

    def send_content(self, connection, request_body):
        """Send Content-Type/Content-Length headers and the request body."""
        if self.protocol == 'json':
            connection.putheader("Content-Type", "application/json")
        else:
            connection.putheader("Content-Type", "text/xml")

        # gzip compression would be set up here, but we have it turned off
        # (encode_threshold is None)
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders(request_body)
class LanguageAwareTransport(MultiProtocolTransport):
    """Transport sending Accept-Language header"""
    def get_host_info(self, host):
        """Extend the base host info with Accept-Language and Referer headers.

        The language is derived from the process locale (LC_MESSAGES),
        falling back to ``en-us`` when the locale cannot be determined.
        """
        host, extra_headers, x509 = MultiProtocolTransport.get_host_info(
            self, host)

        try:
            # e.g. 'en_US.UTF-8' -> 'en_us'
            lang = locale.setlocale(
                locale.LC_MESSAGES, ''
            ).split('.', maxsplit=1)[0].lower()
        except locale.Error:
            # fallback to default locale
            lang = 'en_us'

        if not isinstance(extra_headers, list):
            extra_headers = []

        extra_headers.append(
            ('Accept-Language', lang.replace('_', '-'))
        )
        extra_headers.append(
            ('Referer', 'https://%s/ipa/xml' % str(host))
        )

        return (host, extra_headers, x509)
class SSLTransport(LanguageAwareTransport):
    """Handles an HTTPS transaction to an XML-RPC server."""

    def make_connection(self, host):
        """Return an HTTPS connection to *host*, reusing a cached one.

        The CA certificate file is taken from the per-thread request
        context (``context.ca_certfile``) and the TLS version bounds
        from the global ``api.env``.
        """
        host, self._extra_headers, _x509 = self.get_host_info(host)

        # Reuse the keep-alive connection if it targets the same host.
        if self._connection and host == self._connection[0]:
            logger.debug("HTTP connection keep-alive (%s)", host)
            return self._connection[1]

        conn = create_https_connection(
            host, 443,
            getattr(context, 'ca_certfile', None),
            tls_version_min=api.env.tls_version_min,
            tls_version_max=api.env.tls_version_max)

        conn.connect()
        logger.debug("New HTTP connection (%s)", host)
        self._connection = host, conn

        return self._connection[1]
class KerbTransport(SSLTransport):
    """
    Handles Kerberos Negotiation authentication to an XML-RPC server.
    """
    # GSSAPI flags requested for the security context; subclasses may
    # extend this (see DelegatedKerbTransport).
    flags = [gssapi.RequirementFlag.mutual_authentication,
             gssapi.RequirementFlag.out_of_sequence_detection]

    def __init__(self, *args, **kwargs):
        SSLTransport.__init__(self, *args, **kwargs)
        self._sec_context = None
        # Kerberos service name (default "HTTP") and optional explicit
        # credential cache to initiate from.
        self.service = kwargs.pop("service", "HTTP")
        self.ccache = kwargs.pop("ccache", None)

    def _handle_exception(self, e, service=None):
        """Translate a GSSError minor code into the matching IPA error.

        Always raises; never returns normally.
        """
        minor = e.min_code
        if minor == KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN:
            raise errors.ServiceError(service=service)
        elif minor == KRB5_FCC_NOFILE:
            raise errors.NoCCacheError()
        elif minor == KRB5KRB_AP_ERR_TKT_EXPIRED:
            raise errors.TicketExpired()
        elif minor == KRB5_FCC_PERM:
            raise errors.BadCCachePerms()
        elif minor == KRB5_CC_FORMAT:
            raise errors.BadCCacheFormat()
        elif minor == KRB5_REALM_CANT_RESOLVE:
            raise errors.CannotResolveKDC()
        elif minor == KRB5_CC_NOTFOUND:
            raise errors.CCacheError()
        else:
            raise errors.KerberosError(message=unicode(e))

    def _get_host(self):
        """Return the host of the currently cached connection."""
        return self._connection[0]

    def _remove_extra_header(self, name):
        """Remove the first extra header named *name*, if present."""
        for (h, v) in self._extra_headers:
            if h == name:
                self._extra_headers.remove((h, v))
                break

    def get_auth_info(self, use_cookie=True):
        """
        Two things can happen here. If we have a session we will add
        a cookie for that. If not we will set an Authorization header.
        """
        if not isinstance(self._extra_headers, list):
            self._extra_headers = []

        # Remove any existing Cookie first
        self._remove_extra_header('Cookie')

        if use_cookie:
            session_cookie = getattr(context, 'session_cookie', None)
            if session_cookie:
                self._extra_headers.append(('Cookie', session_cookie))
                return

        # Set the remote host principal
        host = self._get_host()
        service = self.service + "@" + host.split(':')[0]

        try:
            creds = None
            if self.ccache:
                creds = gssapi.Credentials(usage='initiate',
                                           store={'ccache': self.ccache})
            name = gssapi.Name(service, gssapi.NameType.hostbased_service)
            self._sec_context = gssapi.SecurityContext(creds=creds, name=name,
                                                       flags=self.flags)
            response = self._sec_context.step()
        except gssapi.exceptions.GSSError as e:
            self._handle_exception(e, service=service)

        self._set_auth_header(response)

    def _set_auth_header(self, token):
        """Replace the Authorization header with a Negotiate *token*."""
        # Remove any existing authorization header first
        self._remove_extra_header('Authorization')

        if token:
            self._extra_headers.append(
                (
                    "Authorization",
                    "negotiate %s" % base64.b64encode(token).decode("ascii"),
                )
            )

    def _auth_complete(self, response):
        """Return True when GSSAPI negotiation has finished.

        Feeds the server's WWW-Authenticate Negotiate token back into the
        security context. Returns False when another HTTP round trip is
        required (token set for the next request).
        """
        if self._sec_context:
            header = response.getheader('www-authenticate', '')
            token = None
            for field in header.split(','):
                k, _dummy, v = field.strip().partition(' ')
                if k.lower() == 'negotiate':
                    try:
                        token = base64.b64decode(v.encode('ascii'))
                        break
                    # b64decode raises TypeError on invalid input
                    except (TypeError, UnicodeError):
                        pass
            if not token:
                raise errors.KerberosError(
                    message=u"No valid Negotiate header in server response")
            token = self._sec_context.step(token=token)
            if self._sec_context.complete:
                self._sec_context = None
                return True
            self._set_auth_header(token)
            return False
        elif response.status == 401:
            self.get_auth_info(use_cookie=False)
            return False
        return True

    # pylint: disable=inconsistent-return-statements
    # pylint does not properly manage the _handle_exception call
    # that is always raising an exception
    def single_request(self, host, handler, request_body, verbose=0):
        # Based on Python 2.7's xmllib.Transport.single_request
        try:
            h = self.make_connection(host)

            if verbose:
                h.set_debuglevel(1)

            self.get_auth_info()

            while True:
                if six.PY2:
                    # pylint: disable=no-value-for-parameter
                    self.send_request(h, handler, request_body)
                    # pylint: enable=no-value-for-parameter
                    self.send_host(h, host)
                    self.send_user_agent(h)
                    self.send_content(h, request_body)
                    response = h.getresponse(buffering=True)
                else:
                    self.__send_request(h, host, handler,
                                        request_body, verbose)
                    response = h.getresponse()

                if response.status != 200:
                    # Must read response (even if it is empty)
                    # before sending another request.
                    #
                    # https://docs.python.org/3/library/http.client.html
                    # #http.client.HTTPConnection.getresponse
                    #
                    # https://pagure.io/freeipa/issue/7752
                    #
                    response.read()

                    if response.status == 401:
                        if not self._auth_complete(response):
                            # negotiation continues: retry with new token
                            continue

                    raise ProtocolError(
                        host + handler,
                        response.status, response.reason,
                        response.msg)

                self.verbose = verbose
                if not self._auth_complete(response):
                    continue

                return self.parse_response(response)
        except gssapi.exceptions.GSSError as e:
            self._handle_exception(e)
        except RemoteDisconnected:
            # keep-alive connection was terminated by remote peer, close
            # connection and let transport handle reconnect for us.
            self.close()
            logger.debug("HTTP server has closed connection (%s)", host)
            raise
        except BaseException:
            # Unexpected exception may leave connections in a bad state.
            self.close()
            logger.debug("HTTP connection destroyed (%s)",
                         host, exc_info=True)
            raise
    # pylint: enable=inconsistent-return-statements

    if six.PY3:
        def __send_request(self, connection, host, handler,
                           request_body, debug):
            # Based on xmlrpc.client.Transport.send_request
            headers = self._extra_headers[:]
            if debug:
                connection.set_debuglevel(1)
            if self.accept_gzip_encoding and gzip:
                connection.putrequest("POST", handler,
                                      skip_accept_encoding=True)
                connection.putheader("Accept-Encoding", "gzip")
                headers.append(("Accept-Encoding", "gzip"))
            else:
                connection.putrequest("POST", handler)
            headers.append(("User-Agent", self.user_agent))
            self.send_headers(connection, headers)
            self.send_content(connection, request_body)
            return connection

    # Find all occurrences of the expiry component
    expiry_re = re.compile(r'.*?(&expiry=\d+).*?')

    def _slice_session_cookie(self, session_cookie):
        # Keep only the cookie value and strip away all other info.
        # This is to reduce the churn on FILE ccaches which grow every time we
        # set new data. The expiration time for the cookie is set in the
        # encrypted data anyway and will be enforced by the server
        http_cookie = session_cookie.http_cookie()
        # We also remove the "expiry" part from the data which is not required
        for exp in self.expiry_re.findall(http_cookie):
            http_cookie = http_cookie.replace(exp, '')
        return http_cookie

    def store_session_cookie(self, cookie_header):
        '''
        Given the contents of a Set-Cookie header scan the header and
        extract each cookie contained within until the session cookie
        is located. Examine the session cookie if the domain and path
        are specified, if not update the cookie with those values from
        the request URL. Then write the session cookie into the key
        store for the principal. If the cookie header is None or the
        session cookie is not present in the header no action is
        taken.

        Context Dependencies:

        The per thread context is expected to contain:
            principal
                The current principal the HTTP request was issued for.
            request_url
                The URL of the HTTP request.

        '''

        if cookie_header is None:
            return

        principal = getattr(context, 'principal', None)
        request_url = getattr(context, 'request_url', None)
        logger.debug("received Set-Cookie (%s)'%s'", type(cookie_header),
                     cookie_header)

        if not isinstance(cookie_header, list):
            cookie_header = [cookie_header]

        # Search for the session cookie
        session_cookie = None
        try:
            for cookie in cookie_header:
                session_cookie = (
                    Cookie.get_named_cookie_from_string(
                        cookie, COOKIE_NAME, request_url,
                        timestamp=datetime.datetime.now(
                            tz=datetime.timezone.utc))
                )
                if session_cookie is not None:
                    break
        except Exception as e:
            logger.error("unable to parse cookie header '%s': %s",
                         cookie_header, e)
            return

        if session_cookie is None:
            return

        cookie_string = self._slice_session_cookie(session_cookie)
        logger.debug("storing cookie '%s' for principal %s",
                     cookie_string, principal)
        try:
            update_persistent_client_session_data(principal, cookie_string)
        except Exception:
            # Not fatal, we just can't use the session cookie we were sent.
            pass

    def parse_response(self, response):
        """Capture any session cookie from the response, then parse it."""
        if six.PY2:
            header = response.msg.getheaders('Set-Cookie')
        else:
            header = response.msg.get_all('Set-Cookie')
        self.store_session_cookie(header)
        return SSLTransport.parse_response(self, response)
class DelegatedKerbTransport(KerbTransport):
    """
    Handles Kerberos Negotiation authentication and TGT delegation to an
    XML-RPC server.
    """
    # Same flags as KerbTransport plus delegate_to_peer, which forwards
    # the client's TGT to the server.
    flags = [gssapi.RequirementFlag.delegate_to_peer,
             gssapi.RequirementFlag.mutual_authentication,
             gssapi.RequirementFlag.out_of_sequence_detection]
class RPCClient(Connectible):
    """
    Forwarding backend plugin for XML-RPC client.

    Also see the `ipaserver.rpcserver.xmlserver` plugin.
    """

    # Values to set on subclasses:
    session_path = None            # URL path used once a session cookie exists
    server_proxy_class = ServerProxy
    protocol = None                # wire protocol name ('xml' or 'json')
    env_rpc_uri_key = None         # env key holding the configured RPC URI

    def get_url_list(self, rpc_uri):
        """
        Create a list of urls consisting of the available IPA servers.

        Servers are discovered via the ``_ldap._tcp`` SRV record of the
        domain; the configured ``rpc_uri`` is always first in the list.
        """
        # the configured URL defines what we use for the discovered servers
        (_scheme, _netloc, path, _params,
         _query, _fragment) = urllib.parse.urlparse(rpc_uri)
        servers = []
        name = '_ldap._tcp.%s.' % self.env.domain

        try:
            answers = query_srv(name)
        except DNSException:
            answers = []

        for answer in answers:
            server = str(answer.target).rstrip(".")
            servers.append('https://%s%s' % (
                ipautil.format_netloc(server), path))

        # make sure the configured master server is there just once and
        # it is the first one.
        if rpc_uri in servers:
            servers.remove(rpc_uri)
        servers.insert(0, rpc_uri)

        return servers

    def get_session_cookie_from_persistent_storage(self, principal):
        '''
        Retrieves the session cookie for the given principal from the
        persistent secure storage. Returns None if not found or unable
        to retrieve the session cookie for any reason, otherwise
        returns a Cookie object containing the session cookie.
        '''

        # Get the session data, it should contain a cookie string
        # (possibly with more than one cookie).
        try:
            cookie_string = read_persistent_client_session_data(principal)
            if cookie_string is None:
                return None
            cookie_string = cookie_string.decode('utf-8')
        except Exception as e:
            logger.debug('Error reading client session data: %s', e)
            return None

        # Search for the session cookie within the cookie string
        try:
            session_cookie = Cookie.get_named_cookie_from_string(
                cookie_string, COOKIE_NAME,
                timestamp=datetime.datetime.now(tz=datetime.timezone.utc))
        except Exception as e:
            logger.debug(
                'Error retrieving cookie from the persistent storage: %s',
                e)
            return None

        return session_cookie

    def apply_session_cookie(self, url):
        '''
        Attempt to load a session cookie for the current principal
        from the persistent secure storage. If the cookie is
        successfully loaded adjust the input url's to point to the
        session path and insert the session cookie into the per thread
        context for later insertion into the HTTP request. If the
        cookie is not successfully loaded then the original url is
        returned and the per thread context is not modified.

        Context Dependencies:

        The per thread context is expected to contain:
            principal
                The current principal the HTTP request was issued for.

        The per thread context will be updated with:
            session_cookie
                A cookie string to be inserted into the Cookie header
                of the HTTP request.

        '''
        original_url = url
        principal = getattr(context, 'principal', None)

        session_cookie = self.get_session_cookie_from_persistent_storage(
            principal)
        if session_cookie is None:
            logger.debug("failed to find session_cookie in persistent storage "
                         "for principal '%s'",
                         principal)
            return original_url
        else:
            logger.debug("found session_cookie in persistent storage for "
                         "principal '%s', cookie: '%s'",
                         principal, session_cookie)

        # Decide if we should send the cookie to the server
        try:
            session_cookie.http_return_ok(original_url)
        except Cookie.Expired as e:
            logger.debug("deleting session data for principal '%s': %s",
                         principal, e)
            try:
                delete_persistent_client_session_data(principal)
            except Exception:
                pass
            return original_url
        except Cookie.URLMismatch as e:
            logger.debug("not sending session cookie, URL mismatch: %s", e)
            return original_url
        except Exception as e:
            logger.error("not sending session cookie, unknown error: %s", e)
            return original_url

        # O.K. session_cookie is valid to be returned, stash it away where it
        # will get included in a HTTP Cookie headed sent to the server.
        logger.debug("setting session_cookie into context '%s'",
                     session_cookie.http_cookie())
        setattr(context, 'session_cookie', session_cookie.http_cookie())

        # Form the session URL by substituting the session path
        # into the original URL
        scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(
            original_url)
        path = self.session_path
        session_url = urllib.parse.urlunparse((scheme, netloc, path,
                                               params, query, fragment))

        return session_url

    def create_connection(self, ccache=None, verbose=None, fallback=None,
                          delegate=None, ca_certfile=None):
        """Create a server proxy, trying the configured and discovered URLs.

        When more than one server URL is available each one is pinged
        first; 401 responses trigger removal of the stale session cookie
        and a retry (up to 5 times per URL). Raises NetworkError when no
        server could be reached.
        """
        if verbose is None:
            verbose = self.api.env.verbose
        if fallback is None:
            fallback = self.api.env.fallback
        if delegate is None:
            delegate = self.api.env.delegate
        if ca_certfile is None:
            ca_certfile = self.api.env.tls_ca_cert
        context.ca_certfile = ca_certfile

        rpc_uri = self.env[self.env_rpc_uri_key]
        try:
            principal = get_principal(ccache_name=ccache)
            stored_principal = getattr(context, 'principal', None)
            if principal != stored_principal:
                try:
                    delattr(context, 'session_cookie')
                except AttributeError:
                    pass
            setattr(context, 'principal', principal)
            # We have a session cookie, try using the session URI to see if it
            # is still valid
            if not delegate:
                rpc_uri = self.apply_session_cookie(rpc_uri)
        except (errors.CCacheError, ValueError):
            # No session key, do full Kerberos auth
            pass
        urls = self.get_url_list(rpc_uri)

        proxy_kw = {
            'allow_none': True,
            'encoding': 'UTF-8',
            'verbose': verbose
        }

        for url in urls:
            # should we get ProtocolError (=> error in HTTP response) and
            # 401 (=> Unauthorized), we'll be re-trying with new session
            # cookies several times
            for _try_num in range(0, 5):
                if url.startswith('https://'):
                    if delegate:
                        transport_class = DelegatedKerbTransport
                    else:
                        transport_class = KerbTransport
                else:
                    transport_class = LanguageAwareTransport
                proxy_kw['transport'] = transport_class(
                    protocol=self.protocol, service='HTTP', ccache=ccache)
                logger.debug('trying %s', url)
                setattr(context, 'request_url', url)
                serverproxy = self.server_proxy_class(url, **proxy_kw)
                if len(urls) == 1:
                    # if we have only 1 server and then let the
                    # main requester handle any errors. This also means it
                    # must handle a 401 but we save a ping.
                    return serverproxy
                try:
                    command = getattr(serverproxy, 'ping')
                    try:
                        command([], {})
                    except Fault as e:
                        e = decode_fault(e)
                        if e.faultCode in errors_by_code:
                            error = errors_by_code[e.faultCode]
                            raise error(message=e.faultString)
                        else:
                            raise UnknownError(
                                code=e.faultCode,
                                error=e.faultString,
                                server=url,
                            )
                    # We don't care about the response, just that we got one
                    return serverproxy
                except errors.KerberosError:
                    # kerberos error on one server is likely on all
                    raise
                except ProtocolError as e:
                    if hasattr(context, 'session_cookie') and e.errcode == 401:
                        # Unauthorized. Remove the session and try again.
                        delattr(context, 'session_cookie')
                        try:
                            delete_persistent_client_session_data(principal)
                        except Exception:
                            # This shouldn't happen if we have a session but
                            # it isn't fatal.
                            pass
                        # try the same url once more with a new session cookie
                        continue
                    if not fallback:
                        raise
                    else:
                        logger.info(
                            'Connection to %s failed with %s', url, e)
                    # try the next url
                    break
                except Exception as e:
                    if not fallback:
                        raise
                    else:
                        logger.info(
                            'Connection to %s failed with %s', url, e)
                    # try the next url
                    break
        # finished all tries but no serverproxy was found
        raise NetworkError(uri=_('any of the configured servers'),
                           error=', '.join(urls))

    def destroy_connection(self):
        """Close the transport of the per-thread connection, if any."""
        conn = getattr(context, self.id, None)
        if conn is not None:
            conn = conn.conn._ServerProxy__transport
            conn.close()

    def _call_command(self, command, params):
        """Call the command with given params"""
        # For XML, this method will wrap/unwrap binary values
        # For JSON we do that in the proxy
        return command(*params)

    def forward(self, name, *args, **kw):
        """
        Forward call to command named ``name`` over XML-RPC.

        This method will encode and forward an XML-RPC request, and will then
        decode and return the corresponding XML-RPC response.

        :param name: The name of the command being forwarded.
        :param args: Positional arguments to pass to remote command.
        :param kw: Keyword arguments to pass to remote command.
        """
        server = getattr(context, 'request_url', None)
        command = getattr(self.conn, name)
        params = [args, kw]

        # we'll be trying to connect multiple times with a new session cookie
        # each time should we be getting UNAUTHORIZED error from the server
        max_tries = 5
        for try_num in range(0, max_tries):
            logger.debug("[try %d]: Forwarding '%s' to %s server '%s'",
                         try_num + 1, name, self.protocol, server)
            try:
                return self._call_command(command, params)
            except Fault as e:
                e = decode_fault(e)
                logger.debug('Caught fault %d from server %s: %s', e.faultCode,
                             server, e.faultString)
                if e.faultCode in errors_by_code:
                    error = errors_by_code[e.faultCode]
                    raise error(message=e.faultString)
                raise UnknownError(
                    code=e.faultCode,
                    error=e.faultString,
                    server=server,
                )
            except ProtocolError as e:
                # By catching a 401 here we can detect the case where we have
                # a single IPA server and the session is invalid. Otherwise
                # we always have to do a ping().
                session_cookie = getattr(context, 'session_cookie', None)
                if session_cookie and e.errcode == 401:
                    # Unauthorized. Remove the session and try again.
                    delattr(context, 'session_cookie')
                    try:
                        principal = getattr(context, 'principal', None)
                        delete_persistent_client_session_data(principal)
                    except Exception as e2:
                        # This shouldn't happen if we have a session
                        # but it isn't fatal.
                        logger.debug("Error trying to remove persisent "
                                     "session data: %s", e2)
                    # Create a new serverproxy with the non-session URI
                    serverproxy = self.create_connection(
                        os.environ.get('KRB5CCNAME'), self.env.verbose,
                        self.env.fallback, self.env.delegate)

                    setattr(context, self.id,
                            Connection(serverproxy, self.disconnect))
                    # try to connect again with the new session cookie
                    continue
                raise NetworkError(uri=server, error=e.errmsg)
            except (SSLError, socket.error) as e:
                raise NetworkError(uri=server, error=str(e))
            except (OverflowError, TypeError) as e:
                raise XMLRPCMarshallError(error=str(e))
        raise NetworkError(
            uri=server,
            error=_("Exceeded number of tries to forward a request."))
class xmlclient(RPCClient):
    """XML-RPC flavor of `RPCClient`."""
    session_path = '/ipa/session/xml'
    server_proxy_class = ServerProxy
    protocol = 'xml'
    env_rpc_uri_key = 'xmlrpc_uri'

    def _call_command(self, command, params):
        """Wrap params for the XML wire format and unwrap the result."""
        version = params[1].get('version', VERSION_WITHOUT_CAPABILITIES)
        params = xml_wrap(params, version)
        result = command(*params)
        return xml_unwrap(result)
class JSONServerProxy:
    """Minimal JSON-RPC counterpart of ``xmlrpc.client.ServerProxy``."""

    def __init__(self, uri, transport, encoding, verbose, allow_none):
        split_uri = urllib.parse.urlsplit(uri)
        if split_uri.scheme not in ("http", "https"):
            raise IOError("unsupported XML-RPC protocol")
        self.__host = split_uri.netloc
        self.__handler = split_uri.path
        self.__transport = transport

        # Only the defaults used by RPCClient.create_connection are
        # supported; the parameters exist for ServerProxy compatibility.
        assert encoding == 'UTF-8'
        assert allow_none
        self.__verbose = verbose

        # FIXME: Some of our code requires ServerProxy internals.
        # But, xmlrpc.client.ServerProxy's _ServerProxy__transport can be
        # accessed by calling serverproxy('transport')
        self._ServerProxy__transport = transport

    def __request(self, name, args):
        """Encode and send one JSON-RPC request; decode and return 'result'.

        Server-side errors are translated to the matching IPA exception
        via ``errors_by_code`` (or `UnknownError` for unknown codes).
        """
        print_json = self.__verbose >= 2
        payload = {'method': unicode(name), 'params': args, 'id': 0}
        version = args[1].get('version', VERSION_WITHOUT_CAPABILITIES)
        payload = json_encode_binary(
            payload, version, pretty_print=print_json)

        if print_json:
            logger.info(
                'Request: %s',
                payload
            )

        response = self.__transport.request(
            self.__host,
            self.__handler,
            payload.encode('utf-8'),
            verbose=self.__verbose >= 3,
        )

        if print_json:
            logger.info(
                'Response: %s',
                json.dumps(json.loads(response), sort_keys=True, indent=4)
            )

        try:
            response = json_decode_binary(response)
        except ValueError as e:
            raise JSONError(error=str(e))

        error = response.get('error')
        if error:
            try:
                error_class = errors_by_code[error['code']]
            except KeyError:
                raise UnknownError(
                    code=error.get('code'),
                    error=error.get('message'),
                    server=self.__host,
                )
            else:
                kw = error.get('data', {})
                kw['message'] = error['message']
                raise error_class(**kw)

        return response['result']

    def __getattr__(self, name):
        """Return a callable that forwards *name* as a remote method."""
        def _call(*args):
            return self.__request(name, args)
        return _call
class jsonclient(RPCClient):
    """JSON-RPC flavour of `RPCClient`.

    Unlike `xmlclient` it needs no wrapping hook: `JSONServerProxy`
    handles encoding/decoding itself.
    """
    session_path = '/ipa/session/json'
    server_proxy_class = JSONServerProxy
    protocol = 'json'
    env_rpc_uri_key = 'jsonrpc_uri'
| 41,978
|
Python
|
.py
| 978
| 31.587935
| 79
| 0.593851
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,667
|
plugable.py
|
freeipa_freeipa/ipalib/plugable.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Plugin framework.
The classes in this module make heavy use of Python container emulation. If
you are unfamiliar with this Python feature, see
http://docs.python.org/ref/sequence-types.html
"""
from collections.abc import Mapping
import logging
import operator
import re
import sys
import threading
import os
from os import path
import optparse # pylint: disable=deprecated-module
import textwrap
import collections
import importlib
import six
from ipalib import errors
from ipalib.config import Env
from ipalib.text import _
from ipalib.util import classproperty
from ipalib.base import ReadOnly, lock, islocked
from ipalib.constants import DEFAULT_CONFIG
from ipapython import ipa_log_manager, ipautil
from ipapython.ipa_log_manager import (
LOGGING_FORMAT_FILE,
LOGGING_FORMAT_STDERR)
from ipapython.version import VERSION, API_VERSION, DEFAULT_PLUGINS
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# FIXME: Updated constants.TYPE_ERROR to use this clearer format from wehjit:
TYPE_ERROR = '%s: need a %r; got a %r: %r'
# FIXME: This function has no unit test
def find_modules_in_dir(src_dir):
    """
    Yield (in sorted order) the importable module names found in ``src_dir``.

    Only regular ``*.py`` files count; ``__init__.py`` is excluded. Nothing
    is yielded unless ``src_dir`` is an absolute, normalized path to an
    existing directory that is not a symlink.
    """
    suffix = '.py'
    # Reject relative/unnormalized paths, non-directories and symlinked dirs.
    if os.path.abspath(src_dir) != src_dir or not os.path.isdir(src_dir):
        return
    if os.path.islink(src_dir):
        return
    for entry in sorted(os.listdir(src_dir)):
        if not entry.endswith(suffix):
            continue
        if not os.path.isfile(os.path.join(src_dir, entry)):
            continue
        stem = entry[:-len(suffix)]
        if stem != '__init__':
            yield stem
class Registry:
    """A decorator factory that collects plugin classes for the API.

    Usage::

        register = Registry()

        @register()
        class obj_mod(...):
            ...

    For forward compatibility, make sure that the module-level instance of
    this object is named "register".
    """

    def __init__(self):
        # Insertion-ordered: plugins iterate in registration order.
        self.__registry = collections.OrderedDict()

    def __call__(self, **kwargs):
        def decorate(plugin):
            """Record ``plugin`` (with the outer kwargs) and return it."""
            if not callable(plugin):
                raise TypeError('plugin must be callable; got %r' % plugin)
            # Registering the exact same class twice is a programming error.
            if plugin in self.__registry:
                raise errors.PluginDuplicateError(plugin=plugin)
            self.__registry[plugin] = dict(kwargs, plugin=plugin)
            return plugin
        return decorate

    def __iter__(self):
        # Yield the stored kwargs dicts, each carrying its 'plugin' key.
        return iter(self.__registry.values())
class Plugin(ReadOnly):
    """
    Base class for all plugins.

    A plugin is constructed with the `API` instance it belongs to and is
    locked read-only once `finalize()` has run (except in production mode).
    """

    # Plugin version; combined with the class name to form `full_name`.
    version = '1'

    def __init__(self, api):
        assert api is not None
        self.__api = api
        self.__finalize_called = False
        self.__finalized = False
        # Reentrant so finalize() can be re-entered from _on_finalize().
        self.__finalize_lock = threading.RLock()

    @classmethod
    def __name_getter(cls):
        return cls.__name__

    # you know nothing, pylint
    name = classproperty(__name_getter)

    @classmethod
    def __full_name_getter(cls):
        # e.g. 'user_add/1'
        return '{}/{}'.format(cls.name, cls.version)

    full_name = classproperty(__full_name_getter)

    @classmethod
    def __bases_getter(cls):
        return cls.__bases__

    bases = classproperty(__bases_getter)

    @classmethod
    def __doc_getter(cls):
        return cls.__doc__

    doc = classproperty(__doc_getter)

    @classmethod
    def __summary_getter(cls):
        # First paragraph of the docstring, or a placeholder if it is empty.
        doc = cls.doc
        if not _(doc).msg:
            return '<%s.%s>' % (cls.__module__, cls.__name__)
        else:
            return unicode(doc).split('\n\n', 1)[0].strip()

    summary = classproperty(__summary_getter)

    @property
    def api(self):
        """
        Return `API` instance passed to `__init__()`.
        """
        return self.__api

    # FIXME: for backward compatibility only
    @property
    def env(self):
        return self.__api.env

    # FIXME: for backward compatibility only
    @property
    def Backend(self):
        return self.__api.Backend

    # FIXME: for backward compatibility only
    @property
    def Command(self):
        return self.__api.Command

    def finalize(self):
        """
        Finalize plugin initialization.

        This method calls `_on_finalize()` and locks the plugin object.

        Subclasses should not override this method. Custom finalization is
        done in `_on_finalize()`.
        """
        with self.__finalize_lock:
            assert self.__finalized is False
            if self.__finalize_called:
                # No recursive calls!
                return
            self.__finalize_called = True
            self._on_finalize()
            self.__finalized = True
            if not self.__api.is_production_mode():
                lock(self)

    def _on_finalize(self):
        """
        Do custom finalization.

        This method is called from `finalize()`. Subclasses can override this
        method in order to add custom finalization.
        """

    def ensure_finalized(self):
        """
        Finalize plugin initialization if it has not yet been finalized.
        """
        with self.__finalize_lock:
            if not self.__finalized:
                self.finalize()

    class finalize_attr:
        """
        Create a stub object for a plugin attribute that isn't set until the
        finalization of the plugin initialization.

        When the stub object is accessed, it calls `ensure_finalized()` to
        make sure the plugin initialization is finalized. The stub object is
        expected to be replaced with the actual attribute value during the
        finalization (preferably in `_on_finalize()`), otherwise an
        `AttributeError` is raised.

        This is used to implement on-demand finalization of plugin
        initialization.
        """
        __slots__ = ('name', 'value')

        def __init__(self, name, value=None):
            self.name = name
            self.value = value

        def __get__(self, obj, cls):
            if obj is None or obj.api is None:
                return self.value
            obj.ensure_finalized()
            try:
                return getattr(obj, self.name)
            except RuntimeError:
                # If the actual attribute value is not set in _on_finalize(),
                # getattr() calls __get__() again, which leads to infinite
                # recursion. This can happen only if the plugin is written
                # badly, so advise the developer about that instead of giving
                # them a generic "maximum recursion depth exceeded" error.
                raise AttributeError(
                    "attribute '%s' of plugin '%s' was not set in finalize()" % (self.name, obj.name)
                )

    def __repr__(self):
        """
        Return 'module_name.class_name()' representation.

        This representation could be used to instantiate this Plugin
        instance given the appropriate environment.
        """
        return '%s.%s()' % (
            self.__class__.__module__,
            self.__class__.__name__
        )
class APINameSpace(Mapping):
    """Lazy, read-only mapping of the plugins under one base class.

    Instances are attached to `API` as e.g. ``api.Command``; plugin
    enumeration is deferred until first access. Lookup keys are the plugin
    class itself, ``(name, version)`` tuples, ``full_name`` strings and
    (for the default version) the bare plugin name.
    """

    def __init__(self, api, base):
        self.__api = api
        self.__base = base
        # Both populated lazily by __enumerate().
        self.__plugins = None
        self.__plugins_by_key = None

    def __enumerate(self):
        """Build the plugin list/index on first use; later calls are no-ops."""
        if self.__plugins is not None and self.__plugins_by_key is not None:
            return
        # Deliberately reaches into API's name-mangled private state.
        default_map = self.__api._API__default_map
        plugins = set()
        key_dict = self.__plugins_by_key = {}
        for plugin in self.__api._API__plugins:
            if not any(issubclass(b, self.__base) for b in plugin.bases):
                continue
            plugins.add(plugin)
            key_dict[plugin] = plugin
            key_dict[plugin.name, plugin.version] = plugin
            key_dict[plugin.full_name] = plugin
            # Bare name resolves to the default version of the plugin.
            if plugin.version == default_map.get(plugin.name, '1'):
                key_dict[plugin.name] = plugin

        self.__plugins = sorted(plugins, key=operator.attrgetter('full_name'))

    def __len__(self):
        self.__enumerate()
        return len(self.__plugins)

    def __contains__(self, key):
        self.__enumerate()
        return key in self.__plugins_by_key

    def __iter__(self):
        self.__enumerate()
        return iter(self.__plugins)

    def __dir__(self):
        # include plugins for readline tab completion and in dir()
        self.__enumerate()
        names = super().__dir__()
        names.extend(p.name for p in self)
        names.sort()
        return names

    def get_plugin(self, key):
        """Return the plugin *class* for ``key`` (no instantiation)."""
        self.__enumerate()
        return self.__plugins_by_key[key]

    def __getitem__(self, key):
        # Mapping access returns the (lazily created) plugin *instance*.
        plugin = self.get_plugin(key)
        return self.__api._get(plugin)

    def __call__(self):
        return six.itervalues(self)

    def __getattr__(self, key):
        # Allow attribute-style access, e.g. api.Command.user_add.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)
class API(ReadOnly):
    """
    Dynamic API object through which `Plugin` instances are accessed.

    Lifecycle: `bootstrap()` (env + logging) -> `load_plugins()` ->
    `finalize()` (instantiate plugins, attach `APINameSpace` attributes,
    lock). Each stage runs at most once, enforced by `__doing()`.
    """

    def __init__(self):
        super(API, self).__init__()
        self.__plugins = set()          # registered plugin classes
        self.__plugins_by_key = {}      # full_name -> plugin class
        self.__default_map = {}         # plugin name -> default version
        self.__instances = {}           # plugin class -> instance
        self.__next = {}                # override plugin -> overridden plugin
        self.__done = set()             # lifecycle stages already run
        self.env = Env()

    @property
    def bases(self):
        # Abstract: subclasses define the plugin base classes they manage.
        raise NotImplementedError

    @property
    def packages(self):
        # Abstract: subclasses define the packages to load plugins from.
        raise NotImplementedError

    def __len__(self):
        """
        Return the number of plugin namespaces in this API object.
        """
        return len(self.bases)

    def __iter__(self):
        """
        Iterate (in ascending order) through plugin namespace names.
        """
        return (base.__name__ for base in self.bases)

    def __contains__(self, name):
        """
        Return True if this API object contains plugin namespace ``name``.

        :param name: The plugin namespace name to test for membership.
        """
        return name in set(self)

    def __getitem__(self, name):
        """
        Return the plugin namespace corresponding to ``name``.

        :param name: The name of the plugin namespace you wish to retrieve.
        """
        if name in self:
            try:
                return getattr(self, name)
            except AttributeError:
                pass
        raise KeyError(name)

    def __call__(self):
        """
        Iterate (in ascending order by name) through plugin namespaces.
        """
        for name in self:
            try:
                yield getattr(self, name)
            except AttributeError:
                raise KeyError(name)

    def is_production_mode(self):
        """
        If the object has self.env.mode defined and that mode is
        production return True, otherwise return False.
        """
        return getattr(self.env, 'mode', None) == 'production'

    def __doing(self, name):
        # Guard: each lifecycle stage may run only once.
        if name in self.__done:
            raise Exception(
                '%s.%s() already called' % (self.__class__.__name__, name)
            )
        self.__done.add(name)

    def __do_if_not_done(self, name):
        # Run the lifecycle method ``name`` unless it already ran.
        if name not in self.__done:
            getattr(self, name)()

    def isdone(self, name):
        """Return True if lifecycle stage ``name`` has already run."""
        return name in self.__done

    def bootstrap(self, parser=None, **overrides):
        """
        Initialize environment variables and logging.

        :param parser: optional pre-built option parser; a default one is
            created via `build_global_parser()` when omitted.
        :param overrides: environment values applied via ``Env._bootstrap``.
        """
        self.__doing('bootstrap')
        self.env._bootstrap(**overrides)
        self.env._finalize_core(**dict(DEFAULT_CONFIG))

        # Add the argument parser
        if not parser:
            parser = self.build_global_parser()
        self.parser = parser

        root_logger = logging.getLogger()

        # If logging has already been configured somewhere else (like in the
        # installer), don't add handlers or change levels:
        if root_logger.handlers or self.env.validate_api:
            return

        if self.env.debug:
            level = logging.DEBUG
        else:
            level = logging.INFO
        root_logger.setLevel(level)

        # Per-logger level overrides come from env keys of the form
        # log_logger_level_<LEVEL> whose value is a comma-separated list of
        # logger-name regexps.
        for attr in self.env:
            match = re.match(r'^log_logger_level_'
                             r'(debug|info|warn|warning|error|critical|\d+)$',
                             attr)
            if not match:
                continue
            level = ipa_log_manager.convert_log_level(match.group(1))
            value = getattr(self.env, attr)
            regexps = re.split(r'\s*,\s*', value)
            # Add the regexp, it maps to the configured level
            for regexp in regexps:
                root_logger.addFilter(ipa_log_manager.Filter(regexp, level))

        # Add stderr handler:
        level = logging.INFO
        if self.env.debug:
            level = logging.DEBUG
        else:
            if self.env.context == 'cli':
                if self.env.verbose > 0:
                    level = logging.INFO
                else:
                    level = logging.WARNING

        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(ipa_log_manager.Formatter(LOGGING_FORMAT_STDERR))
        root_logger.addHandler(handler)

        # check after logging is set up but before we create files.
        fse = sys.getfilesystemencoding()
        if fse.lower() not in {'utf-8', 'utf8'}:
            raise errors.SystemEncodingError(encoding=fse)

        # Add file handler:
        if self.env.mode in ('dummy', 'unit_test'):
            return  # But not if in unit-test mode
        if self.env.log is None:
            return
        log_dir = path.dirname(self.env.log)
        if not path.isdir(log_dir):
            try:
                os.makedirs(log_dir)
            except OSError:
                logger.error('Could not create log_dir %r', log_dir)
                return

        level = logging.INFO
        if self.env.debug:
            level = logging.DEBUG
        if self.env.log is not None:
            try:
                handler = logging.FileHandler(self.env.log)
            except IOError as e:
                logger.error('Cannot open log file %r: %s', self.env.log, e)
            else:
                handler.setLevel(level)
                handler.setFormatter(
                    ipa_log_manager.Formatter(LOGGING_FORMAT_FILE)
                )
                root_logger.addHandler(handler)

    def build_global_parser(self, parser=None, context=None):
        """
        Add global options to an optparse.OptionParser instance.

        :param parser: existing parser to extend, or None to create one.
        :param context: when ``'cli'``, CLI-only options are also added.
        """
        def config_file_callback(option, opt, value, parser):
            # Validate -c FILE exists before accepting it.
            if not os.path.isfile(value):
                parser.error(
                    _("%(filename)s: file not found") % dict(filename=value))
            parser.values.conf = value

        if parser is None:
            parser = optparse.OptionParser(
                add_help_option=False,
                formatter=IPAHelpFormatter(),
                usage='%prog [global-options] COMMAND [command-options]',
                description='Manage an IPA domain',
                version=('VERSION: %s, API_VERSION: %s' %
                         (VERSION, API_VERSION)),
                epilog='\n'.join([
                    'See "ipa help topics" for available help topics.',
                    'See "ipa help <TOPIC>" for more information on '
                    + 'a specific topic.',
                    'See "ipa help commands" for the full list of commands.',
                    'See "ipa <COMMAND> --help" for more information on '
                    + 'a specific command.',
                ]))
            parser.disable_interspersed_args()
            parser.add_option("-h", "--help", action="help",
                              help='Show this help message and exit')

        parser.add_option('-e', dest='env', metavar='KEY=VAL', action='append',
                          help='Set environment variable KEY to VAL',
                          )
        parser.add_option('-c', dest='conf', metavar='FILE', action='callback',
                          callback=config_file_callback, type='string',
                          help='Load configuration from FILE.',
                          )
        parser.add_option('-d', '--debug', action='store_true',
                          help='Produce full debuging output',
                          )
        parser.add_option('--delegate', action='store_true',
                          help='Delegate the TGT to the IPA server',
                          )
        parser.add_option('-v', '--verbose', action='count',
                          help='Produce more verbose output. A second -v displays the XML-RPC request',
                          )
        if context == 'cli':
            parser.add_option('-a', '--prompt-all', action='store_true',
                              help='Prompt for ALL values (even if optional)'
                              )
            parser.add_option('-n', '--no-prompt', action='store_false',
                              dest='interactive',
                              help='Prompt for NO values (even if required)'
                              )
            parser.add_option('-f', '--no-fallback', action='store_false',
                              dest='fallback',
                              help='Only use the server configured in /etc/ipa/default.conf'
                              )

        return parser

    def bootstrap_with_global_options(self, parser=None, context=None):
        """Parse command-line global options, then `bootstrap()` with them.

        :return: ``(options, args)`` as produced by the option parser.
        """
        parser = self.build_global_parser(parser, context)
        (options, args) = parser.parse_args()
        overrides = {}
        if options.env is not None:
            assert type(options.env) is list
            for item in options.env:
                # NOTE(review): str.split() with a maxsplit never raises
                # ValueError, so this except branch appears unreachable; the
                # len(values) check below is what handles items lacking '='.
                try:
                    values = item.split('=', 1)
                except ValueError:
                    # FIXME: this should raise an IPA exception with an
                    # error code.
                    # --Jason, 2008-10-31
                    pass
                if len(values) == 2:
                    (key, value) = values
                    overrides[str(key.strip())] = value.strip()
                else:
                    raise errors.OptionError(_('Unable to parse option {item}'
                                               .format(item=item)))
        for key in ('conf', 'debug', 'verbose', 'prompt_all', 'interactive',
                    'fallback', 'delegate'):
            value = getattr(options, key, None)
            if value is not None:
                overrides[key] = value
        if hasattr(options, 'prod'):
            overrides['webui_prod'] = options.prod
        if context is not None:
            overrides['context'] = context
        self.bootstrap(parser, **overrides)
        return (options, args)

    def load_plugins(self):
        """
        Load plugins from all standard locations.

        `API.bootstrap` will automatically be called if it hasn't been
        already.
        """
        self.__doing('load_plugins')
        self.__do_if_not_done('bootstrap')
        if self.env.mode in ('dummy', 'unit_test'):
            return
        for package in self.packages:
            self.add_package(package)

    # FIXME: This method has no unit test
    def add_package(self, package):
        """
        Add plugin modules from the ``package``.

        :param package: A package from which to add modules.
        """
        package_name = package.__name__
        package_file = package.__file__
        package_dir = path.dirname(path.abspath(package_file))

        parent = sys.modules[package_name.rpartition('.')[0]]
        parent_dir = path.dirname(path.abspath(parent.__file__))
        # A plugin package must live in its own subdirectory, not alongside
        # its parent's __init__.
        if parent_dir == package_dir:
            raise errors.PluginsPackageError(
                name=package_name, file=package_file
            )

        logger.debug("importing all plugin modules in %s...", package_name)
        # An explicit 'modules' attribute on the package wins over a
        # directory scan.
        modules = getattr(package, 'modules', find_modules_in_dir(package_dir))
        modules = ['.'.join((package_name, mname)) for mname in modules]

        for name in modules:
            logger.debug("importing plugin module %s", name)
            try:
                module = importlib.import_module(name)
            except errors.SkipPluginModule as e:
                logger.debug("skipping plugin module %s: %s", name, e.reason)
                continue
            except Exception:
                tb = self.env.startup_traceback
                if tb:
                    logger.exception("could not load plugin module %s", name)
                raise

            try:
                self.add_module(module)
            except errors.PluginModuleError as e:
                logger.debug("%s", e)

    def add_module(self, module):
        """
        Add plugins from the ``module``.

        :param module: A module from which to add plugins.
        :raises errors.PluginModuleError: if the module has no usable
            module-level ``register`` `Registry` instance.
        """
        try:
            register = module.register
        except AttributeError:
            pass
        else:
            if isinstance(register, Registry):
                for kwargs in register:
                    self.add_plugin(**kwargs)
                return

        raise errors.PluginModuleError(name=module.__name__)

    def add_plugin(self, plugin, override=False, no_fail=False):
        """
        Add the plugin ``plugin``.

        :param plugin: A subclass of `Plugin` to attempt to add.
        :param override: If true, override an already added plugin.
        :param no_fail: If true, silently return instead of raising on an
            override mismatch.
        """
        if not callable(plugin):
            raise TypeError('plugin must be callable; got %r' % plugin)

        # Find the base class or raise SubclassError:
        for base in plugin.bases:
            if issubclass(base, self.bases):
                break
        else:
            raise errors.PluginSubclassError(
                plugin=plugin,
                bases=self.bases,
            )

        # Check override:
        prev = self.__plugins_by_key.get(plugin.full_name)
        if prev:
            if not override:
                if no_fail:
                    return
                else:
                    # Must use override=True to override:
                    raise errors.PluginOverrideError(
                        base=base.__name__,
                        name=plugin.name,
                        plugin=plugin,
                    )

            self.__plugins.remove(prev)
            # Remember the chain so the overridden plugin stays reachable
            # via get_plugin_next().
            self.__next[plugin] = prev
        else:
            if override:
                if no_fail:
                    return
                else:
                    # There was nothing already registered to override:
                    raise errors.PluginMissingOverrideError(
                        base=base.__name__,
                        name=plugin.name,
                        plugin=plugin,
                    )

        # The plugin is okay, add to sub_d:
        self.__plugins.add(plugin)
        self.__plugins_by_key[plugin.full_name] = plugin

    def finalize(self):
        """
        Finalize the registration, instantiate the plugins.

        `API.bootstrap` will automatically be called if it hasn't been
        already.
        """
        self.__doing('finalize')
        self.__do_if_not_done('load_plugins')

        if self.env.env_confdir is not None:
            if self.env.env_confdir == self.env.confdir:
                logger.info(
                    "IPA_CONFDIR env sets confdir to '%s'.", self.env.confdir)

        # Compute the default version for each plugin name.
        for plugin in self.__plugins:
            if not self.env.validate_api:
                if plugin.full_name not in DEFAULT_PLUGINS:
                    continue
            else:
                try:
                    default_version = self.__default_map[plugin.name]
                except KeyError:
                    pass
                else:
                    # Technically plugin.version is not an API version. The
                    # APIVersion class can handle plugin versions. It's more
                    # lean than pkg_resource.parse_version().
                    version = ipautil.APIVersion(plugin.version)
                    default_version = ipautil.APIVersion(default_version)
                    if version < default_version:
                        continue
            self.__default_map[plugin.name] = plugin.version

        production_mode = self.is_production_mode()

        for base in self.bases:
            for plugin in self.__plugins:
                if not any(issubclass(b, base) for b in plugin.bases):
                    continue
                if not self.env.plugins_on_demand:
                    self._get(plugin)

            name = base.__name__
            if not production_mode:
                assert not hasattr(self, name)
            setattr(self, name, APINameSpace(self, base))

        for instance in six.itervalues(self.__instances):
            if not production_mode:
                assert instance.api is self
            if not self.env.plugins_on_demand:
                instance.ensure_finalized()
                if not production_mode:
                    assert islocked(instance)

        if not production_mode:
            lock(self)

    def _get(self, plugin):
        """Return the (lazily created, cached) instance of ``plugin``."""
        if not callable(plugin):
            raise TypeError('plugin must be callable; got %r' % plugin)
        if plugin not in self.__plugins:
            raise KeyError(plugin)
        try:
            instance = self.__instances[plugin]
        except KeyError:
            instance = self.__instances[plugin] = plugin(self)
        return instance

    def get_plugin_next(self, plugin):
        """Return the plugin that ``plugin`` overrode (see `add_plugin`)."""
        if not callable(plugin):
            raise TypeError('plugin must be callable; got %r' % plugin)

        return self.__next[plugin]
class IPAHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that wraps each epilog line to the available width."""

    def format_epilog(self, epilog):
        """Wrap every line of ``epilog`` individually, preserving line
        breaks, and surround the result with blank lines."""
        pad = " " * self.current_indent
        usable_width = self.width - self.current_indent
        wrapped = [
            textwrap.fill(line, usable_width,
                          initial_indent=pad, subsequent_indent=pad)
            for line in epilog.splitlines()
        ]
        return '\n%s\n' % '\n'.join(wrapped)
| 27,012
|
Python
|
.py
| 696
| 28.054598
| 101
| 0.567898
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,668
|
frontend.py
|
freeipa_freeipa/ipalib/frontend.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base classes for all front-end plugins.
"""
import logging
import six
from ipapython.version import API_VERSION
from ipapython.ipautil import APIVersion
from ipalib.base import NameSpace
from ipalib.plugable import Plugin, APINameSpace
from ipalib.parameters import create_param, Param, Str, Flag
from ipalib.parameters import create_signature
from ipalib.output import Output, Entry, ListOfEntries
from ipalib.text import _
from ipalib.errors import (
ZeroArgumentError,
MaxArgumentError,
OverlapError,
VersionError,
OptionError,
ValidationError,
ConversionError,
)
from ipalib import errors, messages
from ipalib.request import context, context_frame
from ipalib.util import classproperty, classobjectproperty, json_serialize
from ipalib.constants import SD_IPA_API_MESSAGE_ID
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# Attribute name used to flag callables as validation rules.
RULE_FLAG = 'validation_rule'


def rule(obj):
    """Decorator: mark ``obj`` as a validation rule and return it.

    A callable may be flagged only once (asserted).
    """
    assert not hasattr(obj, RULE_FLAG)
    setattr(obj, RULE_FLAG, True)
    return obj


def is_rule(obj):
    """Return True if ``obj`` is a callable that was flagged by `rule`."""
    if not callable(obj):
        return False
    return getattr(obj, RULE_FLAG, False) is True
def entry_count(entry):
    """
    Return the total number of leaf entries in ``entry`` (a dict).

    Nested dicts are counted recursively; every other value contributes
    its ``len()``. This is primarily for the failed output parameter so we
    don't print empty values, and to determine whether a non-zero return
    value is needed.
    """
    # Exact type check (not isinstance) mirrors what callers pass in.
    return sum(
        entry_count(value) if type(value) is dict else len(value)
        for value in entry.values()
    )
class HasParam(Plugin):
"""
Base class for plugins that have `Param` `NameSpace` attributes.
Subclasses of `HasParam` will on one or more attributes store `NameSpace`
instances containing zero or more `Param` instances. These parameters might
describe, for example, the arguments and options a command takes, or the
attributes an LDAP entry can include, or whatever else the subclass sees
fit.
Although the interface a subclass must implement is very simple, it must
conform to a specific naming convention: if you want a namespace
``SubClass.foo``, you must define a ``Subclass.takes_foo`` attribute and a
``SubCLass.get_foo()`` method, and you may optionally define a
``SubClass.check_foo()`` method.
A quick big-picture example
===========================
Say you want the ``options`` instance attribute on your subclass to be a
`Param` `NameSpace`... then according to the enforced naming convention,
your subclass must define a ``takes_options`` attribute and a
``get_options()`` method. For example:
>>> from ipalib import Str, Int
>>> class Example(HasParam):
...
... options = None # This will be replaced with your namespace
...
... takes_options = (Str('one'), Int('two'))
...
... def get_options(self):
... return self._get_param_iterable('options')
...
>>> eg = Example()
The ``Example.takes_options`` attribute is a ``tuple`` defining the
parameters you want your ``Example.options`` namespace to contain. Your
``Example.takes_options`` attribute will be accessed via
`HasParam._get_param_iterable()`, which, among other things, enforces the
``('takes_' + name)`` naming convention. For example:
>>> eg._get_param_iterable('options')
(Str('one'), Int('two'))
The ``Example.get_options()`` method simply returns
``Example.takes_options`` by calling `HasParam._get_param_iterable()`. Your
``Example.get_options()`` method will be called via
`HasParam._filter_param_by_context()`, which, among other things, enforces
the ``('get_' + name)`` naming convention. For example:
>>> list(eg._filter_param_by_context('options'))
[Str('one'), Int('two')]
At this point, the ``eg.options`` instance attribute is still ``None``:
>>> eg.options is None
True
`HasParam._create_param_namespace()` will create the ``eg.options``
namespace from the parameters yielded by
`HasParam._filter_param_by_context()`. For example:
>>> eg._create_param_namespace('options')
>>> eg.options
NameSpace(<2 members>, sort=False)
>>> list(eg.options) # Like dict.__iter__()
['one', 'two']
Your subclass can optionally define a ``check_options()`` method to perform
sanity checks. If it exists, the ``check_options()`` method is called by
`HasParam._create_param_namespace()` with a single value, the `NameSpace`
instance it created. For example:
>>> class Example2(Example):
...
... def check_options(self, namespace):
... for param in namespace(): # Like dict.itervalues()
... if param.name == 'three':
... raise ValueError("I dislike the param 'three'")
... print ' ** Looks good! **' # Note output below
...
>>> eg = Example2()
>>> eg._create_param_namespace('options')
** Looks good! **
>>> eg.options
NameSpace(<2 members>, sort=False)
However, if we subclass again and add a `Param` named ``'three'``:
>>> class Example3(Example2):
...
... takes_options = (Str('one'), Int('two'), Str('three'))
...
>>> eg = Example3()
>>> eg._create_param_namespace('options')
Traceback (most recent call last):
...
ValueError: I dislike the param 'three'
>>> eg.options is None # eg.options was not set
True
The Devil and the details
=========================
In the above example, ``takes_options`` is a ``tuple``, but it can also be
a param spec (see `create_param()`), or a callable that returns an iterable
containing one or more param spec. Regardless of how ``takes_options`` is
defined, `HasParam._get_param_iterable()` will return a uniform iterable,
conveniently hiding the details.
The above example uses the simplest ``get_options()`` method possible, but
you could instead implement a ``get_options()`` method that would, for
example, produce (or withhold) certain parameters based on the whether
certain plugins are loaded.
Think of ``takes_options`` as declarative, a simple definition of *what*
parameters should be included in the namespace. You should only implement
a ``takes_options()`` method if a `Param` must reference attributes on your
plugin instance (for example, for validation rules); you should not use a
``takes_options()`` method to filter the parameters or add any other
procedural behaviour.
On the other hand, think of the ``get_options()`` method as imperative, a
procedure for *how* the parameters should be created and filtered. In the
example above the *how* just returns the *what* unchanged, but arbitrary
logic can be implemented in the ``get_options()`` method. For example, you
might filter certain parameters from ``takes_options`` base on some
criteria, or you might insert additional parameters provided by other
plugins.
The typical use case for using ``get_options()`` this way is to procedurally
generate the arguments and options for all the CRUD commands operating on a
specific LDAP object: the `Object` plugin defines the possible LDAP entry
attributes (as `Param`), and then the CRUD commands intelligently build
their ``args`` and ``options`` namespaces based on which attribute is the
primary key. In this way new LDAP attributes (aka parameters) can be added
to the single point of definition (the `Object` plugin), and all the
corresponding CRUD commands pick up these new parameters without requiring
modification. For an example of how this is done, see the
`ipalib.crud.Create` base class.
However, there is one type of filtering you should not implement in your
``get_options()`` method, because it's already provided at a higher level:
you should not filter parameters based on the value of ``api.env.context``
nor (preferably) on any values in ``api.env``.
`HasParam._filter_param_by_context()` already does this by calling
`Param.use_in_context()` for each parameter. Although the base
`Param.use_in_context()` implementation makes a decision solely on the value
of ``api.env.context``, subclasses can override this with implementations
that consider arbitrary ``api.env`` values.
"""
# HasParam is the base class for most frontend plugins, that make it to users
# This flag indicates that the command should not be available in the cli
NO_CLI = False
def _get_param_iterable(self, name, verb='takes'):
"""
Return an iterable of params defined by the attribute named ``name``.
A sequence of params can be defined one of three ways: as a ``tuple``;
as a callable that returns an iterable; or as a param spec (a `Param` or
``str`` instance). This method returns a uniform iterable regardless of
how the param sequence was defined.
For example, when defined with a tuple:
>>> class ByTuple(HasParam):
... takes_args = (Param('foo'), Param('bar'))
...
>>> by_tuple = ByTuple()
>>> list(by_tuple._get_param_iterable('args'))
[Param('foo'), Param('bar')]
Or you can define your param sequence with a callable when you need to
reference attributes on your plugin instance (for validation rules,
etc.). For example:
>>> class ByCallable(HasParam):
... def takes_args(self):
... yield Param('foo', self.validate_foo)
... yield Param('bar', self.validate_bar)
...
... def validate_foo(self, _, value, **kw):
... if value != 'Foo':
... return _("must be 'Foo'")
...
... def validate_bar(self, _, value, **kw):
... if value != 'Bar':
... return _("must be 'Bar'")
...
>>> by_callable = ByCallable()
>>> list(by_callable._get_param_iterable('args'))
[Param('foo', validate_foo), Param('bar', validate_bar)]
Lastly, as a convenience for when a param sequence contains a single
param, your defining attribute may a param spec (either a `Param`
or an ``str`` instance). For example:
>>> class BySpec(HasParam):
... takes_args = Param('foo')
... takes_options = 'bar?'
...
>>> by_spec = BySpec()
>>> list(by_spec._get_param_iterable('args'))
[Param('foo')]
>>> list(by_spec._get_param_iterable('options'))
['bar?']
For information on how an ``str`` param spec is interpreted, see the
`create_param()` and `parse_param_spec()` functions in the
`ipalib.parameters` module.
Also see `HasParam._filter_param_by_context()`.
"""
src_name = verb + '_' + name
src = getattr(self, src_name, None)
if type(src) is tuple:
return src
if isinstance(src, (Param, str)):
return (src,)
if callable(src):
return src()
if src is None:
return tuple()
raise TypeError(
'%s.%s must be a tuple, callable, or spec; got %r' % (
self.name, src_name, src
)
)
def _filter_param_by_context(self, name, env=None):
"""
Filter params on attribute named ``name`` by environment ``env``.
For example:
>>> from ipalib.config import Env
>>> class Example(HasParam):
...
... takes_args = (
... Str('foo_only', include=['foo']),
... Str('not_bar', exclude=['bar']),
... 'both',
... )
...
... def get_args(self):
... return self._get_param_iterable('args')
...
...
>>> eg = Example()
>>> foo = Env(context='foo')
>>> bar = Env(context='bar')
>>> another = Env(context='another')
>>> (foo.context, bar.context, another.context)
(u'foo', u'bar', u'another')
>>> list(eg._filter_param_by_context('args', foo))
[Str('foo_only', include=['foo']), Str('not_bar', exclude=['bar']), Str('both')]
>>> list(eg._filter_param_by_context('args', bar))
[Str('both')]
>>> list(eg._filter_param_by_context('args', another))
[Str('not_bar', exclude=['bar']), Str('both')]
"""
env = getattr(self, 'env', env)
get_name = 'get_' + name
if not hasattr(self, get_name):
raise NotImplementedError(
'%s.%s()' % (self.name, get_name)
)
get = getattr(self, get_name)
if not callable(get):
raise TypeError(
'%s.%s must be a callable; got %r' % (self.name, get_name, get)
)
for spec in get():
param = create_param(spec)
if env is None or param.use_in_context(env):
yield param
    def _create_param_namespace(self, name, env=None):
        # Build a NameSpace from the context-filtered params and store it
        # as the attribute named ``name`` (e.g. self.args, self.options).
        namespace = NameSpace(
            self._filter_param_by_context(name, env),
            sort=False
        )
        if not self.api.is_production_mode():
            # Outside production mode, run the optional sanity-check hook
            # (e.g. check_args) if the subclass defines one.
            check = getattr(self, 'check_' + name, None)
            if callable(check):
                check(namespace)
        setattr(self, name, namespace)
    @property
    def context(self):
        # Current frame of the module-level ``context`` object
        # (per-request/thread-local state).
        return context.current_frame
_callback_registry = {}
class Command(HasParam):
    """
    A public IPA atomic operation.

    All plugins that subclass from `Command` will be automatically available
    as a CLI command and as an XML-RPC method.

    Plugins that subclass from Command are registered in the ``api.Command``
    namespace. For example:

    >>> from ipalib import create_api
    >>> api = create_api()
    >>> class my_command(Command):
    ...     pass
    ...
    >>> api.add_plugin(my_command)
    >>> api.finalize()
    >>> list(api.Command)
    [<class '__main__.my_command'>]
    >>> api.Command.my_command # doctest:+ELLIPSIS
    ipalib.frontend.my_command()

    This class's subclasses allow different types of callbacks to be added and
    removed to them.
    Registering a callback is done either by ``register_callback``, or by
    defining a ``<type>_callback`` method.

    Subclasses should define the `callback_types` attribute as a tuple of
    allowed callback types.
    """
    # Param specs that subclasses override (tuple, callable, or single spec).
    takes_options = tuple()
    takes_args = tuple()
    # Create stubs for attributes that are set in _on_finalize()
    args = Plugin.finalize_attr('args')
    options = Plugin.finalize_attr('options')
    params = Plugin.finalize_attr('params')
    params_by_default = Plugin.finalize_attr('params_by_default')
    # Associated Object plugin, if any (set by Method subclass machinery).
    obj = None
    # When False, validate_output() is skipped in __do_call().
    use_output_validation = True
    output = Plugin.finalize_attr('output')
    # Names (or Output instances) describing the dict this command returns.
    has_output = ('result',)
    output_params = Plugin.finalize_attr('output_params')
    has_output_params = tuple()
    # Option names accepted but not declared as params (see
    # __options_2_params()).
    internal_options = tuple()
    msg_summary = None
    msg_truncated = _('Results are truncated, try a more specific search')
    callback_types = ('interactive_prompt',)
    api_version = API_VERSION
    @classmethod
    def __topic_getter(cls):
        # Topic defaults to the last component of the defining module's
        # dotted name (e.g. 'ipalib.plugins.user' -> 'user').
        return cls.__module__.rpartition('.')[2]
    topic = classproperty(__topic_getter)
    @classobjectproperty
    @classmethod
    def __signature__(cls, obj):
        # Expose an inspect-compatible signature so help()/pydoc show the
        # command's real parameters.
        # signature is cached on the class object
        if hasattr(cls, "_signature"):
            return cls._signature
        # can only create signature for 'final' classes
        # help(api.Command.user_show) breaks because pydoc inspects parent
        # classes and baseuser plugin is not a registered object.
        if cls.__subclasses__():
            cls._signature = None
            return None
        # special, rare case: user calls help() on a plugin class instead of
        # an instance
        if obj is None:
            from ipalib import api  # pylint: disable=cyclic-import
            obj = cls(api=api)
        cls._signature = signature = create_signature(obj)
        return signature
    @property
    def forwarded_name(self):
        # Name under which this command is invoked on the remote server
        # (see forward()).
        return self.full_name
    def __call__(self, *args, **options):
        """
        Perform validation and then execute the command.

        If not in a server context, the call will be forwarded over
        XML-RPC and executed on the nearest IPA server.
        """
        self.ensure_finalized()
        with context_frame():
            # Capture the caller's principal in the new frame before
            # dispatching, so audit logging can report the actor.
            self.context.principal = getattr(context, 'principal', None)
            return self.__do_call(*args, **options)
    def __audit_to_journal(self, func, params, result):
        """
        Emit a structured audit record for command ``func`` to the systemd
        journal, but only if this command is the one that claimed the
        per-request ``audit_action`` slot (set in __do_call()); the slot is
        cleared so nested command calls are not double-audited.
        """
        if getattr(context, 'audit_action', None) != func:
            return
        setattr(context, 'audit_action', None)
        # Imported lazily: only needed server-side when auditing fires.
        from systemd import journal
        from ipalib.ipajson import json_encode_binary
        # Password-safe (masked) view of the call's args/options.
        args_opts = dict([*self._safe_args_and_params(**params)])
        json_encoded = json_encode_binary(args_opts, API_VERSION,
                                          pretty_print=False)
        actor = self.context.principal or "[autobind]"
        conn = getattr(self.api.Backend, 'ldap2', None)
        if conn is not None:
            conn_id = conn.id
        else:
            conn_id = '[no_connection_id]'
        journal.send(
            "[%s] %s: %s: %s [%s] %s"
            % (
                "IPA.API",
                actor,
                func,
                result,
                conn_id,
                json_encoded
            ),
            PRIORITY=journal.LOG_NOTICE,
            SYSLOG_IDENTIFIER=self.api.env.script,
            MESSAGE_ID=SD_IPA_API_MESSAGE_ID,
            IPA_API_COMMAND=func,
            IPA_API_PARAMS=json_encoded,
            IPA_API_RESULT=result,
            IPA_API_ACTOR=actor
        )
    def __do_call(self, *args, **options):
        """
        Full request pipeline: version check, merge args/options into
        params, fill defaults, normalize, convert, validate, run, then
        attach messages/summary and validate the output dict.
        """
        # Name-mangled to _Command__messages: per-call message buffer on
        # the context frame, appended to by add_message().
        self.context.__messages = []
        if 'version' in options:
            self.verify_client_version(unicode(options['version']))
        elif self.api.env.skip_version_check and not self.api.env.in_server:
            # Client explicitly skipping the check: pin lowest 2.x version.
            options['version'] = u'2.0'
        else:
            options['version'] = self.api_version
            if self.api.env.in_server:
                # add message only on server side
                self.add_message(
                    messages.VersionMissing(server_version=self.api_version))
        params = self.args_options_2_params(*args, **options)
        logger.debug(
            'raw: %s(%s)', self.name, ', '.join(self._repr_iter(**params))
        )
        if self.api.env.in_server:
            params.update(self.get_default(**params))
        params = self.normalize(**params)
        params = self.convert(**params)
        logger.debug(
            '%s(%s)', self.name, ', '.join(self._repr_iter(**params))
        )
        if self.api.env.in_server:
            self.validate(**params)
        # Claim the audit slot for this (outermost) command, except for
        # the interactive console.
        if all([self.name != "console",
                not getattr(context, "audit_action", None)]):
            setattr(context, "audit_action", self.name)
        (args, options) = self.params_2_args_options(**params)
        try:
            ret = self.run(*args, **options)
        except Exception as e:
            if self.api.env.in_server:
                # Audit the failure with the exception class name.
                self.__audit_to_journal(self.name, params, type(e).__name__)
            raise
        if isinstance(ret, dict):
            for message in self.context.__messages:
                messages.add_message(options['version'], ret, message)
        if (
            isinstance(ret, dict)
            and 'summary' in self.output
            and 'summary' not in ret
        ):
            ret['summary'] = self.get_summary_default(ret)
        if self.use_output_validation and (self.output or ret is not None):
            self.validate_output(ret, options['version'])
        if self.api.env.in_server:
            self.__audit_to_journal(self.name, params, 'SUCCESS')
        return ret
    def add_message(self, message):
        # Queue a message for inclusion in this call's result dict
        # (buffer is created at the start of __do_call()).
        self.context.__messages.append(message)
def _safe_args_and_params(self, **params):
"""
Iterate through *safe* values of args and options
This method uses `parameters.Param.safe_value()` to mask
passwords when logging. It yields tuples of (name, value)
of the arguments and options.
"""
for arg in self.args():
value = params.get(arg.name, None)
yield (arg.name, arg.safe_value(value))
for option in self.options():
if option.name not in params:
continue
value = params[option.name]
yield (option.name, option.safe_value(value))
def _repr_iter(self, **params):
"""
Iterate through ``repr()`` of *safe* values of args and options.
This method uses `parameters.Param.safe_value()` to mask passwords when
logging. Logging the exact call is extremely useful, but we obviously
don't want to log the cleartext password.
For example:
>>> class my_cmd(Command):
... takes_args = ('login',)
... takes_options=(Password('passwd'),)
...
>>> c = my_cmd()
>>> c.finalize()
>>> list(c._repr_iter(login=u'Okay.', passwd=u'Private!'))
["u'Okay.'", "passwd=u'********'"]
"""
for arg in self.args():
value = params.get(arg.name, None)
yield repr(arg.safe_value(value))
for option in self.options():
if option.name not in params:
continue
value = params[option.name]
yield '%s=%r' % (option.name, option.safe_value(value))
    def args_options_2_params(self, *args, **options):
        """
        Merge (args, options) into params.

        :raises ZeroArgumentError: positional args given to a command
            that takes none.
        :raises MaxArgumentError: more positional args than declared.
        :raises OverlapError: a value supplied both positionally and by
            keyword.
        """
        if self.max_args is not None and len(args) > self.max_args:
            if self.max_args == 0:
                raise ZeroArgumentError(name=self.name)
            raise MaxArgumentError(name=self.name, count=self.max_args)
        params = dict(self.__options_2_params(options))
        if len(args) > 0:
            arg_kw = dict(self.__args_2_params(args))
            intersection = set(arg_kw).intersection(params)
            if len(intersection) > 0:
                raise OverlapError(names=sorted(intersection))
            params.update(arg_kw)
        return params
    def __args_2_params(self, values):
        """
        Map the positional ``values`` onto the declared args, yielding
        (name, value) pairs.  A multivalue arg (only the last may be one,
        enforced by check_args) absorbs all remaining values; a single
        trailing list/tuple is passed through as-is rather than re-wrapped.
        """
        multivalue = False
        for (i, arg) in enumerate(self.args()):
            assert not multivalue
            if len(values) > i:
                if arg.multivalue:
                    multivalue = True
                    if len(values) == i + 1 and type(values[i]) in (list, tuple):
                        yield (arg.name, values[i])
                    else:
                        yield (arg.name, values[i:])
                else:
                    yield (arg.name, values[i])
            else:
                break
def __options_2_params(self, options):
for name in self.params:
if name in options:
yield (name, options.pop(name))
# If any options remain, they are either internal or unknown
unused_keys = set(options).difference(self.internal_options)
if unused_keys:
raise OptionError(_('Unknown option: %(option)s'),
option=unused_keys.pop())
    def args_options_2_entry(self, *args, **options):
        """
        Create an LDAP entry dict from the attribute params found in
        args and options (non-attribute params are dropped).
        """
        kw = self.args_options_2_params(*args, **options)
        return dict(self.__attributes_2_entry(kw))
def __attributes_2_entry(self, kw):
for name in self.params:
if self.params[name].attribute and name in kw:
value = kw[name]
if isinstance(value, tuple):
yield (name, list(value))
else:
yield (name, kw[name])
    def params_2_args_options(self, **params):
        """
        Split params into (args, options).

        Positional values are collected in declared-arg order; as soon as
        one declared arg is missing, all later args are passed as keyword
        options instead, so no positional gap is created.
        """
        args = tuple()
        options = dict(self.__params_2_options(params))
        is_arg = True
        for name in self.args:
            try:
                value = params[name]
            except KeyError:
                # First missing arg: everything after goes to options.
                is_arg = False
                continue
            if is_arg:
                args += (value,)
            else:
                options[name] = value
        return (args, options)
def __params_2_options(self, params):
for name in self.options:
if name in params:
yield(name, params[name])
def prompt_param(self, param, default=None, optional=False, kw=dict(),
label=None):
"""
Prompts the user for the value of given parameter.
Returns the parameter instance.
"""
if label is None:
label = param.label
while True:
raw = self.Backend.textui.prompt(label, default, optional=optional)
# Backend.textui.prompt does not fill in the default value,
# we have to do it ourselves
if not raw.strip():
return None
try:
return param(raw, **kw)
except (ValidationError, ConversionError) as e:
# Display error and prompt again
self.Backend.textui.print_prompt_attribute_error(unicode(label),
unicode(e.error))
def normalize(self, **kw):
"""
Return a dictionary of normalized values.
For example:
>>> class my_command(Command):
... takes_options = (
... Param('first', normalizer=lambda value: value.lower()),
... Param('last'),
... )
...
>>> c = my_command()
>>> c.finalize()
>>> c.normalize(first=u'JOHN', last=u'DOE')
{'last': u'DOE', 'first': u'john'}
"""
return dict(
(k, self.params[k].normalize(v)) for (k, v) in kw.items()
)
def convert(self, **kw):
"""
Return a dictionary of values converted to correct type.
>>> from ipalib import Int
>>> class my_command(Command):
... takes_args = (
... Int('one'),
... 'two',
... )
...
>>> c = my_command()
>>> c.finalize()
>>> c.convert(one=1, two=2)
{'two': u'2', 'one': 1}
"""
return dict(
(k, self.params[k].convert(v)) for (k, v) in kw.items()
)
    def get_default(self, _params=None, **kw):
        """
        Return a dictionary of defaults for all missing required values.

        For example:

        >>> from ipalib import Str
        >>> class my_command(Command):
        ...     takes_args = Str('color', default=u'Red')
        ...
        >>> c = my_command()
        >>> c.finalize()
        >>> c.get_default()
        {'color': u'Red'}
        >>> c.get_default(color=u'Yellow')
        {}
        """
        if _params is None:
            # Only required/autofill params that the caller did not supply.
            _params = [p.name for p in self.params()
                       if p.name not in kw and (p.required or p.autofill)]
        return dict(self.__get_default_iter(_params, kw))
def get_default_of(self, _name, **kw):
"""
Return default value for parameter `_name`.
"""
default = dict(self.__get_default_iter([_name], kw))
return default.get(_name)
    def __get_default_iter(self, params, kw):
        """
        Generator method used by `Command.get_default` and `Command.get_default_of`.

        ``params`` is the list of names whose defaults are wanted; ``kw``
        is mutated in place with converted/defaulted dependency values so
        that ``default_from`` callbacks see them.
        """
        # Find out what additional parameters are needed to dynamically create
        # the default values with default_from.
        dep = set()
        for param in reversed(self.params_by_default):
            if param.name in params or param.name in dep:
                if param.default_from is None:
                    continue
                for name in param.default_from.keys:
                    dep.add(name)
        for param in self.params_by_default():
            default = None
            hasdefault = False
            if param.name in dep:
                if param.name in kw:
                    # Parameter is specified, convert and validate the value.
                    value = param(kw[param.name], **kw)
                    if self.api.env.in_server:
                        param.validate(value, supplied=True)
                    kw[param.name] = value
                else:
                    # Parameter is not specified, use default value. Convert
                    # and validate the value, it might not be returned so
                    # there's no guarantee it will be converted and validated
                    # later.
                    default = param(None, **kw)
                    if self.api.env.in_server:
                        param.validate(default)
                    if default is not None:
                        kw[param.name] = default
                    hasdefault = True
            if param.name in params:
                if not hasdefault:
                    # Default value is not available from the previous step,
                    # get it now. At this point it is certain that the value
                    # will be returned, so let the caller care about conversion
                    # and validation.
                    default = param.get_default(**kw)
                if default is not None:
                    yield (param.name, default)
def validate(self, **kw):
"""
Validate all values.
If any value fails the validation, `ipalib.errors.ValidationError`
(or a subclass thereof) will be raised.
"""
for param in self.params():
value = kw.get(param.name, None)
param.validate(value, supplied=param.name in kw)
    def verify_client_version(self, client_version):
        """
        Compare the version the client provided to the version of the
        server.

        If the client major version does not match then return an error.
        If the client minor version is less than or equal to the server
        then let the request proceed.

        :raises VersionError: on an unparseable client version or a
            major-version mismatch.
        """
        server_apiver = APIVersion(self.api_version)
        try:
            client_apiver = APIVersion(client_version)
        except ValueError:
            raise VersionError(cver=client_version,
                               sver=self.api_version,
                               server=self.env.xmlrpc_uri)
        if client_apiver.major != server_apiver.major:
            raise VersionError(cver=client_version,
                               sver=self.api_version,
                               server=self.env.xmlrpc_uri)
def run(self, *args, **options):
"""
Dispatch to `Command.execute` or `Command.forward`.
If running in a server context, `Command.execute` is called and the
actually work this command performs is executed locally.
If running in a non-server context, `Command.forward` is called,
which forwards this call over RPC to the exact same command
on the nearest IPA server and the actual work this command
performs is executed remotely.
"""
if self.api.env.in_server:
return self.execute(*args, **options)
return self.forward(*args, **options)
    def execute(self, *args, **kw):
        """
        Perform the actual work this command does.

        This method should be implemented only against functionality
        in self.api.Backend.  For example, a hypothetical
        user_add.execute() might be implemented like this:

        >>> class user_add(Command):
        ...     def execute(self, **kw):
        ...         return self.api.Backend.ldap.add(**kw)
        ...
        """
        # Abstract: concrete command plugins must override.
        raise NotImplementedError('%s.execute()' % self.name)
    def forward(self, *args, **kw):
        """
        Forward call over RPC to this same command on server.

        In the CLI context a server-side RequirementError is re-raised
        with the param's cli_name so the user sees the option name they
        actually typed.
        """
        try:
            return self.Backend.rpcclient.forward(self.forwarded_name,
                                                  *args, **kw)
        except errors.RequirementError as e:
            if self.api.env.context != 'cli':
                raise
            name = getattr(e, 'name', None)
            if name is None or name not in self.params:
                raise
            raise errors.RequirementError(name=self.params[name].cli_name)
    def _on_finalize(self):
        """
        Finalize plugin initialization.

        This method creates the ``args``, ``options``, and ``params``
        namespaces.  This is not done in `Command.__init__` because
        subclasses (like `crud.Add`) might need to access other plugins
        loaded in self.api to determine what their custom `Command.get_args`
        and `Command.get_options` methods should yield.
        """
        self._create_param_namespace('args')
        # max_args is None (unlimited) only when the final arg is multivalue.
        if len(self.args) == 0 or not self.args[-1].multivalue:
            self.max_args = len(self.args)
        else:
            self.max_args = None
        self._create_param_namespace('options')
        params_nosort = tuple(self.args()) + tuple(self.options())
        def get_key(p):
            # Ordering: negative sortorder first, then plain required,
            # then required with default_from, then optional.
            if p.required:
                if p.sortorder < 0:
                    return p.sortorder
                if p.default_from is None:
                    return 0
                return 1
            return 2
        self.params = NameSpace(
            sorted(params_nosort, key=get_key),
            sort=False
        )
        # Sort params so that the ones with default_from come after the ones
        # that the default_from might depend on and save the result in
        # params_by_default namespace.
        params = []
        for i in params_nosort:
            pos = len(params)
            for j in params_nosort:
                if j.default_from is None:
                    continue
                if i.name not in j.default_from.keys:
                    continue
                try:
                    # Insert i before any param that depends on it.
                    pos = min(pos, params.index(j))
                except ValueError:
                    pass
            params.insert(pos, i)
        self.params_by_default = NameSpace(params, sort=False)
        self.output = NameSpace(self._iter_output(), sort=False)
        self._create_param_namespace('output_params')
        super(Command, self)._on_finalize()
def _iter_output(self):
if type(self.has_output) is not tuple:
raise TypeError('%s.has_output: need a %r; got a %r: %r' % (
self.name, tuple, type(self.has_output), self.has_output)
)
for (i, o) in enumerate(self.has_output):
if isinstance(o, str):
o = Output(o)
if not isinstance(o, Output):
raise TypeError('%s.has_output[%d]: need a %r; got a %r: %r' % (
self.name, i, (str, Output), type(o), o)
)
yield o
    def get_args(self):
        """
        Iterate through parameters for ``Command.args`` namespace.

        This method gets called by `HasParam._create_param_namespace()`.
        Subclasses can override this to customize how the arguments are
        determined.  For an example of why this can be useful, see the
        `ipalib.crud.Create` subclass.
        """
        yield from self._get_param_iterable('args')
def check_args(self, args):
"""
Sanity test for args namespace.
This method gets called by `HasParam._create_param_namespace()`.
"""
optional = False
multivalue = False
for arg in args():
if optional and arg.required:
raise ValueError(
'%s: required argument after optional in %s arguments %s' % (arg.name,
self.name, [x.param_spec for x in args()])
)
if multivalue:
raise ValueError(
'%s: only final argument can be multivalue' % arg.name
)
if not arg.required:
optional = True
if arg.multivalue:
multivalue = True
    def get_options(self):
        """
        Iterate through parameters for ``Command.options`` namespace.

        This method gets called by `HasParam._create_param_namespace()`.

        For commands that return entries two special options are generated:
        --all   makes the command retrieve/display all attributes
        --raw   makes the command display attributes as they are stored

        Subclasses can override this to customize how the arguments are
        determined.  For an example of why this can be useful, see the
        `ipalib.crud.Create` subclass.
        """
        yield from self._get_param_iterable('options')
        # --all/--raw only make sense when the command outputs entries.
        for o in self.has_output:
            if isinstance(o, (Entry, ListOfEntries)):
                yield Flag('all',
                    cli_name='all',
                    doc=_('Retrieve and print all attributes from the server. Affects command output.'),
                    exclude='webui',
                    flags=['no_output'],
                )
                yield Flag('raw',
                    cli_name='raw',
                    doc=_('Print entries as stored on the server. Only affects output format.'),
                    exclude='webui',
                    flags=['no_output'],
                )
                break
        # Every command accepts the optional client API version.
        yield Str('version?',
                  doc=_('Client version. Used to determine if server will accept request.'),
                  exclude='webui',
                  flags=['no_option', 'no_output'],
                  )
    def validate_output(self, output, version=API_VERSION):
        """
        Validate the return value to make sure it meets the interface contract.

        ``output`` must be a dict whose keys exactly match ``self.output``
        (the 'messages' key is exempt); each value must satisfy its
        `Output` type and custom ``validate`` hook.
        """
        nice = '%s.validate_output()' % self.name
        if not isinstance(output, dict):
            raise TypeError('%s: need a %r; got a %r: %r' % (
                nice, dict, type(output), output)
            )
        expected_set = set(self.output)
        actual_set = set(output) - set(['messages'])
        if expected_set != actual_set:
            missing = expected_set - actual_set
            if missing:
                raise ValueError('%s: missing keys %r in %r' % (
                    nice, sorted(missing), output)
                )
            extra = actual_set - expected_set
            if extra:
                raise ValueError('%s: unexpected keys %r in %r' % (
                    nice, sorted(extra), output)
                )
        for o in self.output():
            value = output[o.name]
            if not (o.type is None or isinstance(value, o.type)):
                raise TypeError('%s:\n  output[%r]: need %r; got %r: %r' % (
                    nice, o.name, o.type, type(value), value)
                )
            if callable(o.validate):
                o.validate(self, value, version)
    def get_output_params(self):
        # Output params come from ``has_output_params`` (note the 'has'
        # verb, unlike takes_args/takes_options).
        yield from self._get_param_iterable('output_params', verb='has')
def get_summary_default(self, output):
if self.msg_summary:
return self.msg_summary % output
else:
return None
def log_messages(self, output):
logger_functions = dict(
debug=logger.debug,
info=logger.info,
warning=logger.warning,
error=logger.error,
)
for message in output.get('messages', ()):
try:
function = logger_functions[message['type']]
except KeyError:
logger.error('Server sent a message with a wrong type')
function = logger.error
function(message.get('message'))
    def output_for_cli(self, textui, output, *args, **options):
        """
        Generic output method.  Prints values in the output argument
        according to their type and self.output.

        Entry attributes are labeled and printed in the order specified in
        self.output_params.  Attributes that aren't present in
        self.output_params are not printed unless the command was invoked
        with the --all option.  Attribute labelling is disabled if the --raw
        option was given.

        Subclasses can override this method, if custom output is needed.

        Returns 0 on success; 1 signals a shell-level error (zero count
        or a non-empty 'failed' list).
        """
        if not isinstance(output, dict):
            return None
        rv = 0
        self.log_messages(output)
        order = []
        labels = {}
        flags = {}
        for p in self.output_params():
            order.append(p.name)
            labels[p.name] = unicode(p.label)
            flags[p.name] = p.flags
        if options.get('all', False):
            # With --all, show the DN first.
            order.insert(0, 'dn')
            print_all = True
        else:
            print_all = False
        if options.get('raw', False):
            # --raw: no labelling, attributes shown as stored.
            labels = None
        for o in self.output:
            outp = self.output[o]
            if 'no_display' in outp.flags:
                continue
            result = output.get(o)
            if o == 'value':
                continue
            if o.lower() == 'count' and result == 0:
                rv = 1
            elif o.lower() == 'failed':
                if entry_count(result) == 0:
                    # Don't display an empty failed list
                    continue
                # Return an error to the shell
                rv = 1
            if isinstance(outp, ListOfEntries):
                textui.print_entries(result, order, labels, flags, print_all)
            elif isinstance(result, (tuple, list)):
                textui.print_entries(result, order, labels, flags, print_all)
            elif isinstance(outp, Entry):
                textui.print_entry(result, order, labels, flags, print_all)
            elif isinstance(result, dict):
                textui.print_entry(result, order, labels, flags, print_all)
            elif isinstance(result, unicode):
                if o == 'summary':
                    textui.print_summary(result)
                else:
                    textui.print_indented(result)
            elif isinstance(result, bool):
                # the Delete commands return a boolean indicating
                # success or failure. Ignore these.
                pass
            elif isinstance(result, int):
                textui.print_count(result, '%s %%d' % unicode(self.output[o].doc))
        return rv
    # list of attributes we want exported to JSON
    json_friendly_attributes = (
        'name', 'doc', 'NO_CLI'
    )
    def __json__(self):
        # Serializable description of this command: the whitelisted
        # attributes plus its takes_args/takes_options specs.
        json_dict = dict(
            (a, getattr(self, a)) for a in self.json_friendly_attributes
        )
        json_dict['takes_args'] = list(self.get_args())
        json_dict['takes_options'] = list(self.get_options())
        return json_dict
    @classmethod
    def get_callbacks(cls, callback_type):
        """Yield callbacks of the given type"""
        # Use one shared callback registry, keyed on class, to avoid problems
        # with missing attributes being looked up in superclasses
        callbacks = _callback_registry.get(callback_type, {}).get(cls, [None])
        for callback in callbacks:
            if callback is None:
                # None is the placeholder for the class's own
                # <type>_callback method, if it defines one.
                try:
                    yield getattr(cls, '%s_callback' % callback_type)
                except AttributeError:
                    pass
            else:
                yield callback
    @classmethod
    def register_callback(cls, callback_type, callback, first=False):
        """Register a callback

        :param callback_type: The callback type (e.g. 'pre', 'post')
        :param callback: The callable added
        :param first: If true, the new callback will be added before all
            existing callbacks; otherwise it's added after them

        Note that callbacks registered this way will be attached to this class
        only, not to its subclasses.
        """
        assert callback_type in cls.callback_types
        assert callable(callback)
        _callback_registry.setdefault(callback_type, {})
        try:
            callbacks = _callback_registry[callback_type][cls]
        except KeyError:
            # Seed with None: the slot for the class's own
            # <type>_callback method (see get_callbacks()).
            callbacks = _callback_registry[callback_type][cls] = [None]
        if first:
            callbacks.insert(0, callback)
        else:
            callbacks.append(callback)
    @classmethod
    def register_interactive_prompt_callback(cls, callback, first=False):
        """Shortcut for register_callback('interactive_prompt', ...)"""
        cls.register_callback('interactive_prompt', callback, first)
    def interactive_prompt_callback(self, kw):
        # Default interactive-prompt hook: does nothing.  Subclasses or
        # registered callbacks may override/extend it.
        pass
class LocalOrRemote(Command):
    """
    A command that is explicitly executed locally or remotely.

    This is for commands that makes sense to execute either locally or
    remotely to return a perhaps different result.  The best example of
    this is the `ipalib.plugins.f_misc.env` plugin which returns the
    key/value pairs describing the configuration state: it can be
    run locally (client environment) or forwarded (server environment).
    """
    takes_options = (
        Flag('server?',
            doc=_('Forward to server instead of running locally'),
        ),
    )
    def run(self, *args, **options):
        """
        Dispatch to forward() or execute() based on ``server`` option.

        When running in a client context, this command is executed remotely if
        ``options['server']`` is true; otherwise it is executed locally.

        When running in a server context, this command is always executed
        locally and the value of ``options['server']`` is ignored.
        """
        if options.get('server', False) and not self.env.in_server:
            return self.forward(*args, **options)
        return self.execute(*args, **options)
class Local(Command):
    """
    A command that is explicitly executed locally.

    This is for commands that make sense to execute only locally,
    such as the help command.
    """
    def run(self, *args, **options):
        """
        Dispatch to forward() only.
        """
        # (Fixed docstring typo: "onlly" -> "only", and grammar above.)
        return self.forward(*args, **options)
    def forward(self, *args, **options):
        # "Forwarding" a Local command simply executes it in-process.
        return self.execute(*args, **options)
class Object(HasParam):
    """
    Base class for plugins that describe one IPA object type.

    An Object declares the shared parameters (``takes_params``) for a
    family of `Method` plugins and exposes those methods through its
    ``methods`` namespace.  At most one param may be flagged as the
    primary key.
    """
    # Stubs replaced with real values in _on_finalize():
    backend = Plugin.finalize_attr('backend')
    methods = Plugin.finalize_attr('methods')
    params = Plugin.finalize_attr('params')
    primary_key = Plugin.finalize_attr('primary_key')
    params_minus_pk = Plugin.finalize_attr('params_minus_pk')
    # Can override in subclasses:
    backend_name = None
    takes_params = tuple()
    def _on_finalize(self):
        # Gather the Method plugins whose obj_name matches this object.
        self.methods = NameSpace(
            self.__get_attrs('Method'), sort=False, name_attr='attr_name'
        )
        self._create_param_namespace('params')
        primary = [param for param in self.params() if param.primary_key]
        if len(primary) > 1:
            raise ValueError(
                '%s (Object) has multiple primary keys: %s' % (
                    self.name,
                    ', '.join(p.name for p in primary),
                )
            )
        if len(primary) == 1:
            self.primary_key = primary[0]
            self.params_minus_pk = NameSpace(
                [p for p in self.params() if not p.primary_key], sort=False
            )
        else:
            self.primary_key = None
            self.params_minus_pk = self.params
        if 'Backend' in self.api and self.backend_name in self.api.Backend:
            self.backend = self.api.Backend[self.backend_name]
        super(Object, self)._on_finalize()
    def params_minus(self, *names):
        """
        Yield all Param whose name is not in ``names``.
        """
        # A single non-Param/str argument is treated as an iterable of names.
        if len(names) == 1 and not isinstance(names[0], (Param, str)):
            names = names[0]
        excluded = frozenset(names)
        for param in self.params():
            if param.name not in excluded and param not in excluded:
                yield param
    def get_dn(self, *args, **kwargs):
        """
        Construct an LDAP DN.  Subclasses must override.
        """
        raise NotImplementedError('%s.get_dn()' % self.name)
    def __get_attrs(self, name):
        # Yield each registered plugin from api[name] (e.g. api.Method)
        # that belongs to this object, skipping aliased duplicates.
        if name not in self.api:
            return
        namespace = self.api[name]
        assert type(namespace) is APINameSpace
        for plugin in namespace():  # Equivalent to dict.itervalues()
            if plugin is not namespace[plugin.name]:
                continue
            if plugin.obj_name == self.name:
                yield plugin
    def get_params(self):
        """
        This method gets called by `HasParam._create_param_namespace()`.
        """
        for spec in self._get_param_iterable('params'):
            assert isinstance(spec, (str, Param))
            yield create_param(spec)
    # Attributes exported by __json__().
    json_friendly_attributes = (
        'name', 'takes_params',
    )
    def __json__(self):
        serialized = dict(
            (attr, json_serialize(getattr(self, attr)))
            for attr in self.json_friendly_attributes
        )
        if self.primary_key:
            serialized['primary_key'] = self.primary_key.name
        serialized['methods'] = list(self.methods)
        return serialized
class Attribute(Plugin):
    """
    Base class implementing the attribute-to-object association.

    The association is purely a naming convention: everything before the
    first underscore in the plugin class name is the object name and the
    rest is the attribute name.  For example ``user_add`` maps to object
    ``user`` / attribute ``add``, and ``user_first_name`` to ``user`` /
    ``first_name``.

    In practice `Attribute` is not used directly; it is only the base
    class of `Method`.  Also see the `Object` class.
    """
    obj_version = '1'
    @property
    def obj_name(self):
        # Everything before the first underscore.
        return self.name.partition('_')[0]
    @property
    def obj_full_name(self):
        obj = self.obj
        return obj.full_name if obj is not None else None
    @property
    def attr_name(self):
        # Everything after '<obj_name>_'.
        prefix = self.obj_name + '_'
        assert self.name.startswith(prefix)
        return self.name[len(prefix):]
    @property
    def obj(self):
        # The associated Object plugin, or None when unresolvable.
        if self.obj_name is None or self.obj_version is None:
            return None
        return self.api.Object[self.obj_name, self.obj_version]
class Method(Attribute, Command):
"""
A command with an associated object.
A `Method` plugin must have a corresponding `Object` plugin. The
association between object and method is done through a simple naming
convention: the first part of the method name (up to the first under
score) is the object name, as the examples in this table show:
============= =========== ==============
Method name Object name Attribute name
============= =========== ==============
user_add user add
noun_verb noun verb
door_open_now door open_now
============= =========== ==============
There are three different places a method can be accessed. For example,
say you created a `Method` plugin and its corresponding `Object` plugin
like this:
>>> from ipalib import create_api
>>> api = create_api()
>>> class user_add(Method):
... def run(self, **options):
... return dict(result='Added the user!')
...
>>> class user(Object):
... pass
...
>>> api.add_plugin(user_add)
>>> api.add_plugin(user)
>>> api.finalize()
First, the ``user_add`` plugin can be accessed through the ``api.Method``
namespace:
>>> list(api.Method)
[<class '__main__.user_add'>]
>>> api.Method.user_add(version=u'2.88') # Will call user_add.run()
{'result': 'Added the user!'}
(The "version" argument is the API version to use.
The current API version can be found in ipalib.version.API_VERSION.)
Second, because `Method` is a subclass of `Command`, the ``user_add``
plugin can also be accessed through the ``api.Command`` namespace:
>>> list(api.Command)
[<class '__main__.user_add'>]
>>> api.Command.user_add(version=u'2.88') # Will call user_add.run()
{'result': 'Added the user!'}
And third, ``user_add`` can be accessed as an attribute on the ``user``
`Object`:
>>> list(api.Object)
[<class '__main__.user'>]
>>> list(api.Object.user.methods)
['add']
>>> api.Object.user.methods.add(version=u'2.88') # Will call user_add.run()
{'result': 'Added the user!'}
The `Attribute` base class implements the naming convention for the
attribute-to-object association. Also see the `Object` class.
"""
extra_options_first = False
extra_args_first = False
def get_output_params(self):
    """
    Yield output params: the associated object's params first (skipping
    any flagged ``no_output``), then whatever the parent class yields.
    """
    if self.obj is not None:
        for param in self.obj.params():
            if 'no_output' not in param.flags:
                yield param
    yield from super(Method, self).get_output_params()
class Updater(Plugin):
    """
    Base class for server-side LDAP update plugins (always applied).

    Subclasses implement :meth:`execute` with the actual update logic.
    Every subclass registered via ``api.add_plugin()`` becomes available
    in the ``api.Updater`` namespace and is invoked by the framework
    through :meth:`__call__`, e.g.::

        >>> from ipalib import create_api
        >>> api = create_api()
        >>> class my(Object):
        ...     pass
        ...
        >>> api.add_plugin(my)
        >>> class my_update(Updater):
        ...     pass
        ...
        >>> api.add_plugin(my_update)
        >>> api.finalize()
        >>> list(api.Updater)
        [<class '__main__.my_update'>]
    """

    def execute(self, **options):
        # Subclasses must override this with the actual update logic.
        raise NotImplementedError('%s.execute()' % self.name)

    def __call__(self, **options):
        # Trace the invocation, then delegate to the subclass implementation.
        logger.debug('raw: %s', self.name)
        return self.execute(**options)
| 55,791
|
Python
|
.py
| 1,339
| 31.986557
| 104
| 0.581512
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,669
|
base.py
|
freeipa_freeipa/ipalib/base.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Foundational classes and functions.
"""
import re
from ipalib.constants import NAME_REGEX, NAME_ERROR
from ipalib.constants import TYPE_ERROR, SET_ERROR, DEL_ERROR, OVERRIDE_ERROR
class ReadOnly:
    """
    Base class for objects that can be locked into a read-only state.

    Before ``__lock__()`` is called, attributes can be set and deleted
    freely; afterwards, any attempt to set or delete an attribute raises
    ``AttributeError``:

    >>> p = ReadOnly()
    >>> p.name = 'John Doe'   # fine, not locked yet
    >>> del p.name            # also fine
    >>> p.__lock__()
    >>> p.__islocked__()
    True

    Be forewarned that Python offers no true read-only attributes for
    user-defined classes: ``object.__setattr__()`` and
    ``object.__delattr__()`` can still mutate a locked instance, so do
    *not* rely on this class for security.  Its purpose is to prevent
    *accidental* mutation (e.g. of shared, thread-unsafe state), not to
    make mutation impossible.

    Prefer the module-level `lock()` and `islocked()` helpers over calling
    ``__lock__()`` / ``__islocked__()`` directly.
    """

    # Class-level default; flipped to True per-instance by __lock__().
    __locked = False

    def __lock__(self):
        """
        Put this instance into a read-only state.

        May be called at most once; afterwards setting or deleting any
        attribute raises ``AttributeError``.
        """
        assert self.__locked is False, '__lock__() can only be called once'
        self.__locked = True

    def __islocked__(self):
        """
        Return True if this instance is locked, otherwise False.
        """
        return self.__locked

    def __setattr__(self, name, value):
        """
        Set attribute named ``name`` to ``value``, unless locked.

        :param name: Name of attribute to set.
        :param value: Value to assign to attribute.
        """
        if not self.__locked:
            return object.__setattr__(self, name, value)
        raise AttributeError(
            SET_ERROR % (self.__class__.__name__, name, value)
        )

    def __delattr__(self, name):
        """
        Delete attribute named ``name``, unless locked.

        :param name: Name of attribute to delete.
        """
        if not self.__locked:
            return object.__delattr__(self, name)
        raise AttributeError(
            DEL_ERROR % (self.__class__.__name__, name)
        )
def lock(instance):
    """
    Lock ``instance`` and return it.

    ``instance`` may be any object implementing the `ReadOnly` locking
    protocol (``__lock__()`` / ``__islocked__()``), for example
    `config.Env`.  The instance is returned so the call composes with an
    assignment:

    >>> readonly = lock(ReadOnly())
    >>> islocked(readonly)
    True

    Also see the `islocked()` function.

    :param instance: The instance of `ReadOnly` (or similar) to lock.
    """
    # Sanity-check both the pre-condition (not yet locked) and the
    # post-condition (actually locked) of the lock operation.
    assert instance.__islocked__() is False, 'already locked: %r' % instance
    instance.__lock__()
    assert instance.__islocked__() is True, 'failed to lock: %r' % instance
    return instance
def islocked(instance):
    """
    Return ``True`` if ``instance`` is locked, ``False`` otherwise.

    ``instance`` may be an instance of `ReadOnly` or of any other class
    implementing the same locking protocol:

    >>> readonly = ReadOnly()
    >>> islocked(readonly)
    False
    >>> readonly.__lock__()
    >>> islocked(readonly)
    True

    Also see the `lock()` function.

    :param instance: The instance of `ReadOnly` (or similar) to interrogate.
    """
    # Refuse objects that do not implement the locking protocol at all.
    lockable = hasattr(instance, '__lock__') and callable(instance.__lock__)
    assert lockable, 'no __lock__() method: %r' % instance
    return instance.__islocked__()
def check_name(name):
    """
    Validate ``name`` for use as a `NameSpace` member name and return it.

    ``name`` must be a ``str`` matching `constants.NAME_REGEX`: a
    lower-case Python identifier that neither starts nor ends with an
    underscore.  A ``TypeError`` is raised for non-``str`` values and a
    ``ValueError`` for strings that fail the regular expression:

    >>> check_name('MyName')
    Traceback (most recent call last):
        ...
    ValueError: name must match '^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'; got 'MyName'

    The value is returned unchanged so the check composes with an
    assignment:

    >>> check_name('my_name')
    'my_name'

    :param name: Identifier to test.
    """
    # Exact type check on purpose: str subclasses are rejected too.
    if type(name) is not str:
        raise TypeError(TYPE_ERROR % ('name', str, name, type(name)))
    if re.match(NAME_REGEX, name) is None:
        raise ValueError(NAME_ERROR % (NAME_REGEX, name))
    return name
class NameSpace(ReadOnly):
    """
    A read-only name-space with handy container behaviours.

    A `NameSpace` instance is an ordered, immutable mapping object whose
    values can also be accessed as attributes.  A `NameSpace` instance is
    constructed from an iterable providing its *members*, which are simply
    arbitrary objects with a ``name`` attribute whose value:

        1. Is unique among the members

        2. Passes the `check_name()` function

    Beyond that, no restrictions are placed on the members: they can be
    classes or instances, and of any type.

    The members can be accessed as attributes on the `NameSpace` instance or
    through a dictionary interface.  For example, say we create a `NameSpace`
    instance from a list containing a single member, like this:

    >>> class my_member:
    ...     name = 'my_name'
    ...
    >>> namespace = NameSpace([my_member])
    >>> namespace
    NameSpace(<1 member>, sort=True)

    We can then access ``my_member`` both as an attribute and as a dictionary
    item:

    >>> my_member is namespace.my_name  # As an attribute
    True
    >>> my_member is namespace['my_name']  # As dictionary item
    True

    For a more detailed example, say we create a `NameSpace` instance from a
    generator like this:

    >>> class Member:
    ...     def __init__(self, i):
    ...         self.i = i
    ...         self.name = self.__name__ = 'member%d' % i
    ...     def __repr__(self):
    ...         return 'Member(%d)' % self.i
    ...
    >>> ns = NameSpace(Member(i) for i in range(3))
    >>> ns
    NameSpace(<3 members>, sort=True)

    As above, the members can be accessed as attributes and as dictionary
    items:

    >>> ns.member0 is ns['member0']
    True
    >>> ns.member1 is ns['member1']
    True
    >>> ns.member2 is ns['member2']
    True

    Members can also be accessed by index and by slice.  For example:

    >>> ns[0]
    Member(0)
    >>> ns[-1]
    Member(2)
    >>> ns[1:]
    (Member(1), Member(2))

    (Note that slicing a `NameSpace` returns a ``tuple``.)

    `NameSpace` instances provide standard container emulation for
    membership testing, counting, and iteration.  For example:

    >>> 'member3' in ns  # Is there a member named 'member3'?
    False
    >>> 'member2' in ns  # But there is a member named 'member2'
    True
    >>> len(ns)  # The number of members
    3
    >>> list(ns)  # Iterate through the member names
    ['member0', 'member1', 'member2']

    Although not a standard container feature, the `NameSpace.__call__()`
    method provides a convenient (and efficient) way to iterate through the
    *members* (as opposed to the member names).  Think of it like an ordered
    version of the ``dict.itervalues()`` method.  For example:

    >>> list(ns[name] for name in ns)  # One way to do it
    [Member(0), Member(1), Member(2)]
    >>> list(ns())  # A more efficient, simpler way to do it
    [Member(0), Member(1), Member(2)]

    Another convenience method is `NameSpace.__todict__()`, which will return
    a copy of the ``dict`` mapping the member names to the members.
    For example:

    >>> ns.__todict__()
    {'member0': Member(0), 'member1': Member(1), 'member2': Member(2)}

    As `NameSpace.__init__()` locks the instance, `NameSpace` instances are
    read-only from the get-go.  An ``AttributeError`` is raised if you try to
    set *any* attribute on a `NameSpace` instance.  For example:

    >>> ns.member3 = Member(3)  # Let's add that missing 'member3'
    Traceback (most recent call last):
        ...
    AttributeError: locked: cannot set NameSpace.member3 to Member(3)

    (For information on the locking protocol, see the `ReadOnly` class, of
    which `NameSpace` is a subclass.)

    By default the members will be sorted alphabetically by the member name.
    For example:

    >>> sorted_ns = NameSpace([Member(7), Member(3), Member(5)])
    >>> sorted_ns
    NameSpace(<3 members>, sort=True)
    >>> list(sorted_ns)
    ['member3', 'member5', 'member7']
    >>> sorted_ns[0]
    Member(3)

    But if the instance is created with the ``sort=False`` keyword argument,
    the original order of the members is preserved.  For example:

    >>> unsorted_ns = NameSpace([Member(7), Member(3), Member(5)], sort=False)
    >>> unsorted_ns
    NameSpace(<3 members>, sort=False)
    >>> list(unsorted_ns)
    ['member7', 'member3', 'member5']
    >>> unsorted_ns[0]
    Member(7)

    As a special extension, NameSpace objects can be indexed by objects that
    have a "__name__" attribute (e.g. classes).  These lookups are converted
    to lookups on the name:

    >>> class_ns = NameSpace([Member(7), Member(3), Member(5)], sort=False)
    >>> class_ns[Member(3)]
    Member(3)

    The `NameSpace` class is used in many places throughout freeIPA.  For a
    few examples, see the `plugable.API` and the `frontend.Command` classes.
    """

    def __init__(self, members, sort=True, name_attr='name'):
        """
        :param members: An iterable providing the members.
        :param sort: Whether to sort the members by member name.
        :param name_attr: Name of the attribute on each member that holds
            its namespace name (default ``'name'``).
        """
        if type(sort) is not bool:
            raise TypeError(
                TYPE_ERROR % ('sort', bool, sort, type(sort))
            )
        self.__sort = sort
        if sort:
            self.__members = tuple(
                sorted(members, key=lambda m: getattr(m, name_attr))
            )
        else:
            self.__members = tuple(members)
        self.__names = tuple(getattr(m, name_attr) for m in self.__members)
        self.__map = dict()
        # Validate each member name, reject duplicates, and expose every
        # member both via the private map and as an instance attribute.
        for member in self.__members:
            name = check_name(getattr(member, name_attr))
            if name in self.__map:
                raise AttributeError(OVERRIDE_ERROR %
                    (self.__class__.__name__, name, self.__map[name], member)
                )
            assert not hasattr(self, name), 'Ouch! Has attribute %r' % name
            self.__map[name] = member
            setattr(self, name, member)
        # Lock last: after this no attribute can be set or deleted (ReadOnly).
        lock(self)

    def __len__(self):
        """
        Return the number of members.
        """
        return len(self.__members)

    def __iter__(self):
        """
        Iterate through the member names.

        If this instance was created with ``sort=False``, the names will be
        in the same order as the members were passed to the constructor;
        otherwise the names will be in alphabetical order (which is the
        default).

        This method is like an ordered version of ``dict.iterkeys()``.
        """
        for name in self.__names:
            yield name

    def __call__(self):
        """
        Iterate through the members.

        If this instance was created with ``sort=False``, the members will
        be in the same order as they were passed to the constructor;
        otherwise the members will be in alphabetical order by name (which
        is the default).

        This method is like an ordered version of ``dict.itervalues()``.
        """
        for member in self.__members:
            yield member

    def __contains__(self, name):
        """
        Return ``True`` if namespace has a member named ``name``.
        """
        # Accept objects with a __name__ attribute (e.g. classes) as keys.
        name = getattr(name, '__name__', name)
        return name in self.__map

    def __getitem__(self, key):
        """
        Return a member by name or index, or return a slice of members.

        :param key: The name or index of a member, or a slice object.
        """
        # As with __contains__, allow lookup by an object's __name__.
        key = getattr(key, '__name__', key)
        if isinstance(key, str):
            return self.__map[key]
        if type(key) in (int, slice):
            return self.__members[key]
        raise TypeError(
            TYPE_ERROR % ('key', (str, int, slice, 'object with __name__'),
                          key, type(key))
        )

    def __repr__(self):
        """
        Return a pseudo-valid expression that could create this instance.
        """
        cnt = len(self)
        if cnt == 1:
            m = 'member'
        else:
            m = 'members'
        return '%s(<%d %s>, sort=%r)' % (
            self.__class__.__name__,
            cnt,
            m,
            self.__sort,
        )

    def __todict__(self):
        """
        Return a copy of the private dict mapping member name to member.
        """
        return dict(self.__map)
| 16,114
|
Python
|
.py
| 400
| 33.57
| 81
| 0.628138
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,670
|
capabilities.py
|
freeipa_freeipa/ipalib/capabilities.py
|
# Authors:
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""List of, and utilities for working with, client capabilities by API version
The API version is given in ipapython.version.API_VERSION.
This module defines a dict, ``capabilities``, that maps feature names to API
versions they were introduced in.
"""
from ipapython.ipautil import APIVersion
# API version of clients that predate the capabilities mechanism entirely.
VERSION_WITHOUT_CAPABILITIES = u'2.51'

# Maps each capability name to the API version that introduced it.
capabilities = dict(
    # messages: Server output may include an extra key, "messages", that
    # contains a list of warnings and other messages.
    # http://freeipa.org/page/V3/Messages
    messages=u'2.52',

    # optional_uid_params: Before this version, UID & GID parameter defaults
    # were 999, which meant "assign dynamically", so was not possible to get
    # a user with UID=999. With the capability, these parameters are optional
    # and 999 really means 999.
    # https://fedorahosted.org/freeipa/ticket/2886
    optional_uid_params=u'2.54',

    # permissions2: Reworked permission system
    # http://www.freeipa.org/page/V3/Permissions_V2
    permissions2=u'2.69',

    # primary_key_types: Non-unicode primary keys in command output
    primary_key_types=u'2.83',

    # datetime_values: support for datetime values on the client
    datetime_values=u'2.84',

    # dns_name_values: dnsnames as objects
    dns_name_values=u'2.88',

    # vault_aes_keywrap: vault supports aes key wrapping
    # (u'' prefix added for consistency with the other entries; no
    # behavioural change on Python 3)
    vault_aes_keywrap=u'2.246',
)
def client_has_capability(client_version, capability):
    """Determine whether the client has the given capability

    :param capability: Name of the capability to test
    :param client_version: The API version string reported by the client
    """
    # A client has a capability iff its API version is at least the
    # version in which that capability was introduced.
    introduced_in = APIVersion(capabilities[capability])
    return APIVersion(client_version) >= introduced_in
| 2,478
|
Python
|
.py
| 55
| 41.763636
| 78
| 0.751767
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,671
|
x509.py
|
freeipa_freeipa/ipalib/x509.py
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Certificates should be stored internally DER-encoded. We can be passed
# a certificate several ways: read if from LDAP, read it from a 3rd party
# app (dogtag, candlepin, etc) or as user input.
# Conventions
#
# Where possible the following naming conventions are used:
#
# cert: the certificate is a PEM-encoded certificate
# dercert: the certificate is DER-encoded
# rawcert: the cert is in an unknown format
from __future__ import print_function
import os
import binascii
import datetime
import enum
import ipaddress
import base64
import re
from cryptography import x509 as crypto_x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import (
Encoding, PublicFormat, PrivateFormat, load_pem_private_key
)
import pyasn1
import pyasn1.error
from pyasn1.type import univ, char, namedtype, tag
from pyasn1.codec.der import decoder, encoder
from pyasn1_modules import rfc2315, rfc2459
import six
try:
from urllib3.util import ssl_match_hostname
except ImportError:
from urllib3.packages import ssl_match_hostname
from ipalib import errors
from ipapython.dnsutil import DNSName
# Encoding selector values accepted by functions in this module.
PEM = 0
DER = 1

# The first group is the whole PEM datum and the second group is
# the base64 content (with newlines). For findall() the result is
# a list of 2-tuples of the PEM and base64 data.
PEM_CERT_REGEX = re.compile(
    b'(-----BEGIN CERTIFICATE-----(.*?)-----END CERTIFICATE-----)',
    re.DOTALL)

# Matches one PEM private-key block, encrypted or not, covering both the
# legacy per-algorithm headers (RSA/DSA/DH/EC) and plain "PRIVATE KEY"
# (PKCS#8) framing.
PEM_PRIV_REGEX = re.compile(
    b'-----BEGIN(?: ENCRYPTED)?(?: (?:RSA|DSA|DH|EC))? PRIVATE KEY-----.*?'
    b'-----END(?: ENCRYPTED)?(?: (?:RSA|DSA|DH|EC))? PRIVATE KEY-----',
    re.DOTALL)

# Extended Key Usage OIDs (RFC 5280 id-kp arc, RFC 4556 PKINIT arc).
EKU_SERVER_AUTH = '1.3.6.1.5.5.7.3.1'        # id-kp-serverAuth
EKU_CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'        # id-kp-clientAuth
EKU_CODE_SIGNING = '1.3.6.1.5.5.7.3.3'       # id-kp-codeSigning
EKU_EMAIL_PROTECTION = '1.3.6.1.5.5.7.3.4'   # id-kp-emailProtection
EKU_PKINIT_CLIENT_AUTH = '1.3.6.1.5.2.3.4'   # id-pkinit-KPClientAuth
EKU_PKINIT_KDC = '1.3.6.1.5.2.3.5'           # id-pkinit-KPKdc
EKU_ANY = '2.5.29.37.0'                      # anyExtendedKeyUsage
# NOTE(review): presumably a private placeholder EKU used where an EKU
# value is syntactically required but none applies — confirm with callers.
EKU_PLACEHOLDER = '1.3.6.1.4.1.3319.6.10.16'

# SubjectAltName otherName type OIDs.
SAN_UPN = '1.3.6.1.4.1.311.20.2.3'      # Microsoft User Principal Name
SAN_KRB5PRINCIPALNAME = '1.3.6.1.5.2.2'  # id-pkinit-san (KRB5PrincipalName)
class IPACertificate(crypto_x509.Certificate):
    """
    A proxy class wrapping a python-cryptography certificate representation
    for IPA purposes.

    The subject, issuer and serial number fields are pre-extracted as DER
    bytes at construction time via pyasn1, because encode/decode of some
    field types is not strongly defined on all systems.
    """
    def __init__(self, cert, backend=None):
        """
        :param cert: A python-cryptography Certificate object
        :param backend: A python-cryptography Backend object
        """
        self._cert = cert
        self.backend = default_backend() if backend is None else backend()

        # initialize the certificate fields
        # we have to do it this way so that some systems don't explode since
        # some field types encode-decoding is not strongly defined
        self._subject = self.__get_der_field('subject')
        self._issuer = self.__get_der_field('issuer')
        self._serial_number = self.__get_der_field('serialNumber')

        if self.version.name != 'v3':
            raise ValueError('X.509 %s is not supported' %
                             self.version.name)

    def __getstate__(self):
        # Pickle only primitive bytes; the wrapped certificate object is
        # re-created from DER in __setstate__.
        state = {
            '_cert': self.public_bytes(Encoding.DER),
            '_subject': self.subject_bytes,
            '_issuer': self.issuer_bytes,
            '_serial_number': self._serial_number,
        }
        return state

    def __setstate__(self, state):
        self._subject = state['_subject']
        self._issuer = state['_issuer']
        # BUG FIX: this previously assigned the serial-number bytes to
        # ``self._issuer``, clobbering the issuer restored above and
        # leaving ``self._serial_number`` unset after unpickling.
        self._serial_number = state['_serial_number']
        self._cert = crypto_x509.load_der_x509_certificate(
            state['_cert'], backend=default_backend())

    def __eq__(self, other):
        """
        Checks equality.

        :param other: either cryptography.Certificate or IPACertificate or
                      bytes representing a DER-formatted certificate
        """
        if (isinstance(other, (crypto_x509.Certificate, IPACertificate))):
            return (self.public_bytes(Encoding.DER) ==
                    other.public_bytes(Encoding.DER))
        elif isinstance(other, bytes):
            return self.public_bytes(Encoding.DER) == other
        else:
            return False

    def __ne__(self, other):
        """
        Checks not equal.
        """
        return not self.__eq__(other)

    def __hash__(self):
        """
        Computes a hash of the wrapped cryptography.Certificate.
        """
        return hash(self._cert)

    def __encode_extension(self, oid, critical, value):
        # TODO: have another proxy for crypto_x509.Extension which would
        # provide public_bytes on the top of what python-cryptography has
        ext = rfc2459.Extension()
        # TODO: this does not have to be so weird, pyasn1 now has codecs
        # which are capable of providing python-native types
        ext['extnID'] = univ.ObjectIdentifier(oid)
        ext['critical'] = univ.Boolean(critical)
        if pyasn1.__version__.startswith('0.3'):
            # pyasn1 <= 0.3.7 needs explicit encoding
            # see https://pagure.io/freeipa/issue/7685
            value = encoder.encode(univ.OctetString(value))
        ext['extnValue'] = univ.Any(value)
        ext = encoder.encode(ext)
        return ext

    def __get_pyasn1_field(self, field):
        """
        :returns: a field of the certificate in pyasn1 representation
        """
        cert_bytes = self.tbs_certificate_bytes
        cert = decoder.decode(cert_bytes, rfc2459.TBSCertificate())[0]
        field = cert[field]
        return field

    def __get_der_field(self, field):
        """
        :field: the name of the field of the certificate
        :returns: bytes representing the value of a certificate field
        """
        return encoder.encode(self.__get_pyasn1_field(field))

    def public_bytes(self, encoding):
        """
        Serializes the certificate to PEM or DER format.
        """
        return self._cert.public_bytes(encoding)

    def is_self_signed(self):
        """
        :returns: True if this certificate is self-signed, False otherwise
        """
        return self._cert.issuer == self._cert.subject

    def fingerprint(self, algorithm):
        """
        Counts fingerprint of the wrapped cryptography.Certificate
        """
        return self._cert.fingerprint(algorithm)

    @property
    def serial_number(self):
        return self._cert.serial_number

    @property
    def serial_number_bytes(self):
        return self._serial_number

    @property
    def version(self):
        return self._cert.version

    @property
    def subject(self):
        return self._cert.subject

    @property
    def subject_bytes(self):
        return self._subject

    @property
    def signature_hash_algorithm(self):
        """
        Returns a HashAlgorithm corresponding to the type of the digest signed
        in the certificate.
        """
        return self._cert.signature_hash_algorithm

    @property
    def signature_algorithm_oid(self):
        """
        Returns the ObjectIdentifier of the signature algorithm.
        """
        return self._cert.signature_algorithm_oid

    if hasattr(crypto_x509.Certificate, "signature_algorithm_parameters"):
        # added in python-cryptography 41.0
        @property
        def signature_algorithm_parameters(self):
            return self._cert.signature_algorithm_parameters

    @property
    def signature(self):
        """
        Returns the signature bytes.
        """
        return self._cert.signature

    @property
    def issuer(self):
        return self._cert.issuer

    @property
    def issuer_bytes(self):
        return self._issuer

    @property
    def not_valid_before(self):
        # Normalize to timezone-aware UTC datetimes.
        return self._cert.not_valid_before.replace(tzinfo=datetime.timezone.utc)

    @property
    def not_valid_after(self):
        return self._cert.not_valid_after.replace(tzinfo=datetime.timezone.utc)

    if hasattr(crypto_x509.Certificate, "not_valid_before_utc"):
        # added in python-cryptography 42.0.0
        @property
        def not_valid_before_utc(self):
            return self._cert.not_valid_before_utc

        @property
        def not_valid_after_utc(self):
            return self._cert.not_valid_after_utc
    else:
        @property
        def not_valid_before_utc(self):
            return self._cert.not_valid_before.replace(
                tzinfo=datetime.timezone.utc
            )

        @property
        def not_valid_after_utc(self):
            return self._cert.not_valid_after.replace(
                tzinfo=datetime.timezone.utc
            )

    if hasattr(crypto_x509.Certificate, "public_key_algorithm_oid"):
        # added in python-cryptography 43.0.0
        @property
        def public_key_algorithm_oid(self):
            """
            Returns the ObjectIdentifier of the public key.
            """
            return self._cert.public_key_algorithm_oid

    @property
    def tbs_certificate_bytes(self):
        return self._cert.tbs_certificate_bytes

    @property
    def extensions(self):
        # TODO: own Extension and Extensions classes proxying
        # python-cryptography
        return self._cert.extensions

    def public_key(self):
        return self._cert.public_key()

    @property
    def public_key_info_bytes(self):
        return self._cert.public_key().public_bytes(
            encoding=Encoding.DER, format=PublicFormat.SubjectPublicKeyInfo)

    @property
    def extended_key_usage(self):
        """
        :returns: set of EKU OID strings, or None when the certificate
            carries no Extended Key Usage extension
        """
        try:
            ext_key_usage = self._cert.extensions.get_extension_for_oid(
                crypto_x509.oid.ExtensionOID.EXTENDED_KEY_USAGE).value
        except crypto_x509.ExtensionNotFound:
            return None

        return set(oid.dotted_string for oid in ext_key_usage)

    @property
    def extended_key_usage_bytes(self):
        eku = self.extended_key_usage
        if eku is None:
            return None

        ekurfc = rfc2459.ExtKeyUsageSyntax()
        for i, oid in enumerate(sorted(eku)):
            ekurfc[i] = univ.ObjectIdentifier(oid)
        ekurfc = encoder.encode(ekurfc)
        # Mark the extension critical unless anyExtendedKeyUsage is present.
        return self.__encode_extension('2.5.29.37', EKU_ANY not in eku, ekurfc)

    @property
    def san_general_names(self):
        """
        Return SAN general names from a python-cryptography
        certificate object.  If the SAN extension is not present,
        return an empty sequence.

        Because python-cryptography does not yet provide a way to
        handle unrecognised critical extensions (which may occur),
        we must parse the certificate and extract the General Names.
        For uniformity with other code, we manually construct values
        of python-crytography GeneralName subtypes.

        python-cryptography does not yet provide types for
        ediPartyName or x400Address, so we drop these name types.

        otherNames are NOT instantiated to more specific types where
        the type is known.  Use ``process_othernames`` to do that.

        When python-cryptography can handle certs with unrecognised
        critical extensions and implements ediPartyName and
        x400Address, this function (and helpers) will be redundant
        and should go away.
        """
        gns = self.__pyasn1_get_san_general_names()

        GENERAL_NAME_CONSTRUCTORS = {
            'rfc822Name': lambda x: crypto_x509.RFC822Name(str(x)),
            'dNSName': lambda x: crypto_x509.DNSName(str(x)),
            'directoryName': _pyasn1_to_cryptography_directoryname,
            'registeredID': _pyasn1_to_cryptography_registeredid,
            'iPAddress': _pyasn1_to_cryptography_ipaddress,
            'uniformResourceIdentifier':
                lambda x: crypto_x509.UniformResourceIdentifier(
                    str(x)),
            'otherName': _pyasn1_to_cryptography_othername,
        }

        result = []
        for gn in gns:
            gn_type = gn.getName()
            if gn_type in GENERAL_NAME_CONSTRUCTORS:
                result.append(
                    GENERAL_NAME_CONSTRUCTORS[gn_type](gn.getComponent()))

        return result

    def __pyasn1_get_san_general_names(self):
        # pyasn1 returns None when the key is not present in the certificate
        # but we need an iterable
        extensions = self.__get_pyasn1_field('extensions') or []
        OID_SAN = univ.ObjectIdentifier('2.5.29.17')
        gns = []
        for ext in extensions:
            if ext['extnID'] == OID_SAN:
                der = ext['extnValue']
                if pyasn1.__version__.startswith('0.3'):
                    # pyasn1 <= 0.3.7 needs explicit unwrap of ANY container
                    # see https://pagure.io/freeipa/issue/7685
                    der = decoder.decode(der, asn1Spec=univ.OctetString())[0]
                gns = decoder.decode(der, asn1Spec=rfc2459.SubjectAltName())[0]
                break
        return gns

    @property
    def san_a_label_dns_names(self):
        gns = self.__pyasn1_get_san_general_names()
        result = []
        for gn in gns:
            if gn.getName() == 'dNSName':
                result.append(str(gn.getComponent()))
        return result

    def match_hostname(self, hostname):
        # The caller is expected to catch any exceptions
        match_cert = {}

        # Build the minimal dict shape expected by
        # ssl_match_hostname.match_hostname(): commonName AVAs ...
        match_cert['subject'] = match_subject = []
        for rdn in self._cert.subject.rdns:
            match_rdn = []
            for ava in rdn:
                if ava.oid == crypto_x509.oid.NameOID.COMMON_NAME:
                    match_rdn.append(('commonName', ava.value))
            match_subject.append(match_rdn)

        # ... plus the SAN dNSName values, when present.
        values = self.san_a_label_dns_names
        if values:
            match_cert['subjectAltName'] = match_san = []
            for value in values:
                match_san.append(('DNS', value))

        ssl_match_hostname.match_hostname(
            match_cert, DNSName(hostname).ToASCII()
        )

    # added in python-cryptography 38.0
    @property
    def tbs_precertificate_bytes(self):
        return self._cert.tbs_precertificate_bytes

    if hasattr(crypto_x509.Certificate, "verify_directly_issued_by"):
        # added in python-cryptography 40.0
        def verify_directly_issued_by(self, issuer):
            return self._cert.verify_directly_issued_by(issuer)
def load_pem_x509_certificate(data):
    """
    Load an X.509 certificate in PEM format.

    :returns: a ``IPACertificate`` object.
    :raises: ``ValueError`` if unable to load the certificate.
    """
    cert = crypto_x509.load_pem_x509_certificate(
        data, backend=default_backend())
    return IPACertificate(cert)
def load_der_x509_certificate(data):
    """
    Load an X.509 certificate in DER format.

    :returns: a ``IPACertificate`` object.
    :raises: ``ValueError`` if unable to load the certificate.
    """
    cert = crypto_x509.load_der_x509_certificate(
        data, backend=default_backend())
    return IPACertificate(cert)
def load_unknown_x509_certificate(data):
    """
    Load a certificate whose encoding (PEM or DER) is not known up front.

    Only use this function when you can't be sure what kind of format does
    your certificate have, e.g. input certificate files in installers

    :returns: a ``IPACertificate`` object.
    :raises: ``ValueError`` if unable to load the certificate.
    """
    # Try the textual PEM framing first; fall back to raw DER.
    try:
        return load_pem_x509_certificate(data)
    except ValueError:
        return load_der_x509_certificate(data)
def load_certificate_from_file(filename):
    """
    Load a certificate from a PEM file.

    Returns a python-cryptography ``Certificate`` object.
    """
    with open(filename, mode='rb') as pem_file:
        return load_pem_x509_certificate(pem_file.read())
def load_certificate_list(data):
    """
    Load a certificate list from a sequence of concatenated PEMs.

    Return a list of python-cryptography ``Certificate`` objects.
    """
    # findall() yields (whole PEM, base64 body) tuples; only the whole
    # PEM datum is needed for loading.
    return [
        load_pem_x509_certificate(pem)
        for pem, _b64 in PEM_CERT_REGEX.findall(data)
    ]
def load_certificate_list_from_file(filename):
    """
    Load a certificate list from a PEM file.

    Return a list of python-cryptography ``Certificate`` objects.
    """
    with open(filename, 'rb') as pem_file:
        return load_certificate_list(pem_file.read())
def load_private_key_list(data, password=None):
    """
    Load a private key list from a sequence of concatenated PEMs.

    :param data: bytes containing the private keys
    :param password: bytes, the password to encrypted keys in the bundle
    :returns: List of python-cryptography ``PrivateKey`` objects
    :raises RuntimeError: if an encrypted key is found but no password
        was supplied
    """
    backend = default_backend()
    keys = []
    for match in re.finditer(PEM_PRIV_REGEX, data):
        blob = match.group()
        if re.search(b"ENCRYPTED", blob) is not None:
            if password is None:
                raise RuntimeError("Password is required for the encrypted "
                                   "keys in the bundle.")
            # Load private key as encrypted
            keys.append(
                load_pem_private_key(blob, password, backend=backend))
        else:
            keys.append(
                load_pem_private_key(blob, None, backend=backend))
    return keys
def pkcs7_to_certs(data, datatype=PEM):
    """
    Extract certificates from a PKCS #7 object.

    :param data: the PKCS #7 blob, PEM (default) or DER encoded
    :param datatype: ``PEM`` or ``DER``
    :returns: a ``list`` of ``IPACertificate`` objects.
    :raises ValueError: if the input is not a well-formed PKCS#7
        signed-data message
    """
    if datatype == PEM:
        # Strip the PEM armor and recover the raw DER bytes.
        match = re.match(
            br'-----BEGIN PKCS7-----(.*?)-----END PKCS7-----',
            data,
            re.DOTALL)
        if not match:
            raise ValueError("not a valid PKCS#7 PEM")
        data = base64.b64decode(match.group(1))

    content_info, tail = decoder.decode(data, rfc2315.ContentInfo())
    if tail:
        raise ValueError("not a valid PKCS#7 message")
    if content_info['contentType'] != rfc2315.signedData:
        raise ValueError("not a PKCS#7 signed data message")

    signed_data, tail = decoder.decode(bytes(content_info['content']),
                                       rfc2315.SignedData())
    if tail:
        raise ValueError("not a valid PKCS#7 signed data message")

    # Re-encode each embedded certificate to DER and wrap it.
    return [
        load_der_x509_certificate(encoder.encode(certificate))
        for certificate in signed_data['certificates']
    ]
def validate_pem_x509_certificate(cert):
    """
    Check that *cert* parses as a PEM certificate.

    :raises: ``errors.CertificateFormatError`` if loading fails
    """
    try:
        load_pem_x509_certificate(cert)
    except ValueError as exc:
        raise errors.CertificateFormatError(error=str(exc))
def validate_der_x509_certificate(cert):
    """
    Check that *cert* parses as a DER certificate.

    :raises: ``errors.CertificateFormatError`` if loading fails
    """
    try:
        load_der_x509_certificate(cert)
    except ValueError as exc:
        raise errors.CertificateFormatError(error=str(exc))
def write_certificate(cert, filename):
    """
    Write the certificate to a file in PEM format.

    :param cert: cryptograpy ``Certificate`` object
    :param filename: path of the destination file
    :raises: ``errors.FileError`` if the file cannot be written
    """
    try:
        with open(filename, 'wb') as dest:
            dest.write(cert.public_bytes(Encoding.PEM))
    except (IOError, OSError) as exc:
        raise errors.FileError(reason=str(exc))
def write_certificate_list(certs, filename, mode=None):
    """
    Write a list of certificates to a file in PEM format.

    :param certs: a list of IPACertificate objects to be written to a file
    :param filename: a path to the file the certificates should be written into
    :param mode: optional numeric permissions applied to the open file
    :raises: ``errors.FileError`` on I/O failure
    """
    try:
        with open(filename, 'wb') as dest:
            if mode is not None:
                # Set permissions before any data lands on disk.
                os.fchmod(dest.fileno(), mode)
            for cert in certs:
                dest.write(cert.public_bytes(Encoding.PEM))
    except (IOError, OSError) as exc:
        raise errors.FileError(reason=str(exc))
def write_pem_private_key(priv_key, filename, passwd=None):
    """
    Write a private key to a file in PEM (PKCS#8) format.  Will force
    0x600 permissions on file.

    :param priv_key: cryptography ``PrivateKey`` object
    :param filename: path of the destination file
    :param passwd: ``bytes`` representing the password to store the
                   private key with; ``None`` writes it unencrypted
    :raises: ``errors.FileError`` on I/O failure
    """
    if passwd is None:
        enc_alg = serialization.NoEncryption()
    else:
        enc_alg = serialization.BestAvailableEncryption(passwd)
    try:
        with open(filename, 'wb') as key_file:
            # Restrict permissions before any key material is written.
            os.fchmod(key_file.fileno(), 0o600)
            key_file.write(priv_key.private_bytes(
                Encoding.PEM,
                PrivateFormat.PKCS8,
                encryption_algorithm=enc_alg))
    except (IOError, OSError) as exc:
        raise errors.FileError(reason=str(exc))
class _PrincipalName(univ.Sequence):
    # ASN.1 PrincipalName as used in Kerberos: an integer name-type plus
    # a sequence of GeneralString name components, both context-tagged.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('name-type', univ.Integer().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        ),
        namedtype.NamedType('name-string', univ.SequenceOf(char.GeneralString()).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
        ),
    )
class _KRB5PrincipalName(univ.Sequence):
    # ASN.1 KRB5PrincipalName: realm string plus the _PrincipalName
    # structure above, both context-tagged.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('realm', char.GeneralString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
        ),
        namedtype.NamedType('principalName', _PrincipalName().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
        ),
    )
def _decode_krb5principalname(data):
    """Decode a DER KRB5PrincipalName into ``comp1/comp2@REALM`` form.

    Backslash, slash and at-sign characters inside components are
    escaped with a backslash (backslash first, so escapes compose).
    """
    principal = decoder.decode(data, asn1Spec=_KRB5PrincipalName())[0]
    realm = (str(principal['realm'])
             .replace('\\', '\\\\')
             .replace('@', '\\@'))
    components = []
    for part in principal['principalName']['name-string']:
        components.append(str(part)
                          .replace('\\', '\\\\')
                          .replace('/', '\\/')
                          .replace('@', '\\@'))
    return u'%s@%s' % (u'/'.join(components), realm)
class KRB5PrincipalName(crypto_x509.general_name.OtherName):
    """SAN otherName carrying a Kerberos principal.

    The decoded principal string is exposed as ``self.name``.
    """
    def __init__(self, type_id, value):
        super(KRB5PrincipalName, self).__init__(type_id, value)
        # Human-readable principal decoded from the DER value.
        self.name = _decode_krb5principalname(value)
class UPN(crypto_x509.general_name.OtherName):
    """SAN otherName carrying a Microsoft User Principal Name.

    The decoded UPN string is exposed as ``self.name``.
    """
    def __init__(self, type_id, value):
        super(UPN, self).__init__(type_id, value)
        # The value is a DER-encoded UTF8String.
        self.name = str(
            decoder.decode(value, asn1Spec=char.UTF8String())[0])
# Map SAN otherName type-id (dotted OID string) to the richer OtherName
# subclass defined above; used by process_othernames().
OTHERNAME_CLASS_MAP = {
    SAN_KRB5PRINCIPALNAME: KRB5PrincipalName,
    SAN_UPN: UPN,
}
def process_othernames(gns):
    """
    Process python-cryptography GeneralName values, yielding
    OtherName values of more specific type if type is known.
    """
    for general_name in gns:
        if not isinstance(general_name, crypto_x509.general_name.OtherName):
            yield general_name
            continue
        # Substitute a richer subclass when the type-id is recognised;
        # otherwise keep the generic OtherName.
        cls = OTHERNAME_CLASS_MAP.get(
            general_name.type_id.dotted_string,
            crypto_x509.general_name.OtherName)
        yield cls(general_name.type_id, general_name.value)
def _pyasn1_to_cryptography_directoryname(dn):
    """Convert a pyasn1 Name value into a cryptography DirectoryName."""
    # Name is CHOICE { RDNSequence } (only one possibility)
    attributes = [
        crypto_x509.NameAttribute(
            _pyasn1_to_cryptography_oid(ava['type']),
            str(decoder.decode(ava['value'])[0])
        )
        for rdn in dn.getComponent()
        for ava in rdn
    ]
    return crypto_x509.DirectoryName(crypto_x509.Name(attributes))
def _pyasn1_to_cryptography_registeredid(oid):
    # Wrap a pyasn1 OID as a cryptography RegisteredID general name.
    return crypto_x509.RegisteredID(_pyasn1_to_cryptography_oid(oid))
def _pyasn1_to_cryptography_ipaddress(octet_string):
    # The octet string holds the raw 4/16-byte address; ip_address()
    # picks IPv4 vs IPv6 from the length.
    return crypto_x509.IPAddress(
        ipaddress.ip_address(bytes(octet_string)))
def _pyasn1_to_cryptography_othername(on):
    # Convert a pyasn1 otherName (type-id + value) to the cryptography type.
    return crypto_x509.OtherName(
        _pyasn1_to_cryptography_oid(on['type-id']),
        bytes(on['value'])
    )
def _pyasn1_to_cryptography_oid(oid):
    # pyasn1 OIDs stringify to dotted-decimal form, which is exactly
    # what ObjectIdentifier expects.
    return crypto_x509.ObjectIdentifier(str(oid))
def chunk(size, s):
    """Yield chunks of the specified size from the given string.

    The input must be a multiple of the chunk size (otherwise
    trailing characters are dropped).

    Works on character strings only.
    """
    # zip(*[iter(s)] * size) hands the SAME iterator to zip `size`
    # times, so each tuple produced is the next run of `size`
    # characters.  The builtin zip replaces the needless
    # six.moves.zip indirection (this module requires Python 3).
    return (u''.join(span) for span in zip(*[iter(s)] * size))
def add_colons(s):
    """Add colons between each nibble pair in a hex string."""
    nibble_pairs = chunk(2, s)
    return u':'.join(nibble_pairs)
def to_hex_with_colons(bs):
    """Convert bytes to a hex string with colons."""
    hex_text = binascii.hexlify(bs).decode('utf-8')
    return add_colons(hex_text)
class UTC(datetime.tzinfo):
    """Minimal tzinfo fixed at UTC: zero offset, no DST."""

    ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        return self.ZERO

    def dst(self, dt):
        return self.ZERO

    def tzname(self, dt):
        return "UTC"
def format_datetime(t):
    """Format *t* like ``Thu Jan 02 03:04:05 2020 UTC``.

    A naive datetime is assumed to be UTC and tagged accordingly
    before formatting.
    """
    aware = t if t.tzinfo is not None else t.replace(tzinfo=UTC())
    return str(aware.strftime("%a %b %d %H:%M:%S %Y %Z"))
class ExternalCAType(enum.Enum):
    """Kinds of external CA an IPA CA CSR can be targeted at."""
    GENERIC = 'generic'
    MS_CS = 'ms-cs'
class ExternalCAProfile:
    """
    An external CA profile configuration.  Currently the only
    subclasses are for Microsoft CAs, for providing data in the
    "Certificate Template" extension.

    Constructing this class will actually return an instance of a
    subclass.

    Subclasses MUST set ``valid_for``.
    """
    def __init__(self, s=None):
        # Keep the raw specifier string so the object can round-trip
        # through pickling (see __getstate__/__setstate__).
        self.unparsed_input = s

    # Which external CA types is the data valid for?
    # A set of VALUES of the ExternalCAType enum.
    valid_for = set()

    def __new__(cls, s=None):
        """Construct the ExternalCAProfile value.

        Return an instance of a subclass determined by
        the format of the argument.
        """
        # we are directly constructing a subclass; instantiate
        # it and be done
        if cls is not ExternalCAProfile:
            return super(ExternalCAProfile, cls).__new__(cls)

        # construction via the base class; therefore the string
        # argument is required, and is used to determine which
        # subclass to construct
        if s is None:
            raise ValueError('string argument is required')

        parts = s.split(':')

        try:
            # Is the first part on OID?
            _oid = univ.ObjectIdentifier(parts[0])

            # It is; construct a V2 template
            return MSCSTemplateV2.__new__(MSCSTemplateV2, s)

        except pyasn1.error.PyAsn1Error:
            # It is not an OID; treat as a template name
            return MSCSTemplateV1.__new__(MSCSTemplateV1, s)

    def __getstate__(self):
        # Pickle only the raw string; parsing is redone on restore.
        return self.unparsed_input

    def __setstate__(self, state):
        # explicitly call __init__ method to initialise object
        self.__init__(state)
class MSCSTemplate(ExternalCAProfile):
    """
    An Microsoft AD-CS Template specifier.

    Subclasses MUST set ext_oid.

    Subclass constructors MUST set asn1obj.
    """
    valid_for = set([ExternalCAType.MS_CS.value])

    ext_oid = None  # extension OID, as a Python str
    asn1obj = None  # unencoded extension data

    def get_ext_data(self):
        """Return DER-encoded extension data."""
        return encoder.encode(self.asn1obj)
class MSCSTemplateV1(MSCSTemplate):
    """
    A v1 template specifier, per
    https://msdn.microsoft.com/en-us/library/cc250011.aspx.

    ::

        CertificateTemplateName ::= SEQUENCE {
           Name            UTF8String
        }

    But note that a bare BMPString is used in practice.
    """
    ext_oid = "1.3.6.1.4.1.311.20.2"

    def __init__(self, s):
        """Accept a bare template name; a ':' in *s* is rejected
        because v1 templates carry no version number.
        """
        super(MSCSTemplateV1, self).__init__(s)
        parts = s.split(':')
        if len(parts) > 1:
            raise ValueError(
                "Cannot specify certificate template version when using name.")
        self.asn1obj = char.BMPString(str(parts[0]))
class MSCSTemplateV2(MSCSTemplate):
    """
    A v2 template specifier, per
    https://msdn.microsoft.com/en-us/library/windows/desktop/aa378274(v=vs.85).aspx

    ::

        CertificateTemplate ::= SEQUENCE {
            templateID              EncodedObjectID,
            templateMajorVersion    TemplateVersion,
            templateMinorVersion    TemplateVersion OPTIONAL
        }

        TemplateVersion ::= INTEGER (0..4294967295)

    """
    ext_oid = "1.3.6.1.4.1.311.21.7"

    @staticmethod
    def check_version_in_range(desc, n):
        """Raise ValueError unless ``0 <= n < 2**32`` (TemplateVersion)."""
        if n < 0 or n >= 2**32:
            raise ValueError(
                "Template {} version must be in range 0..4294967295"
                .format(desc))

    def __init__(self, s):
        """Parse ``<oid>:<majorVersion>[:<minorVersion>]`` into ASN.1.

        :param s: the template specifier string
        :raises ValueError: on malformed specifier or out-of-range version
        """
        super(MSCSTemplateV2, self).__init__(s)
        parts = s.split(':')

        obj = CertificateTemplateV2()
        if len(parts) < 2 or len(parts) > 3:
            raise ValueError(
                "Incorrect template specification; required format is: "
                "<oid>:<majorVersion>[:<minorVersion>]")
        try:
            obj['templateID'] = univ.ObjectIdentifier(parts[0])

            major = int(parts[1])
            self.check_version_in_range("major", major)
            obj['templateMajorVersion'] = major

            if len(parts) > 2:
                minor = int(parts[2])
                self.check_version_in_range("minor", minor)
                # use the already-validated value instead of
                # re-parsing parts[2]
                obj['templateMinorVersion'] = minor
        except pyasn1.error.PyAsn1Error as e:
            # chain the cause for easier debugging of bad OIDs
            raise ValueError(
                "Could not parse certificate template specifier.") from e
        self.asn1obj = obj
class CertificateTemplateV2(univ.Sequence):
    # ASN.1 CertificateTemplate structure encoded into the
    # MSCSTemplateV2 extension: OID plus major and optional minor version.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('templateID', univ.ObjectIdentifier()),
        namedtype.NamedType('templateMajorVersion', univ.Integer()),
        namedtype.OptionalNamedType('templateMinorVersion', univ.Integer())
    )
| 30,484
|
Python
|
.py
| 761
| 31.997372
| 89
| 0.640054
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,672
|
sysrestore.py
|
freeipa_freeipa/ipalib/sysrestore.py
|
# Authors: Mark McLoughlin <markmc@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# This module provides a very simple API which allows
# ipa-xxx-install --uninstall to restore certain
# parts of the system configuration to the way it was
# before ipa-server-install was first run
from __future__ import absolute_import
import logging
import os
import os.path
import shutil
import random
from hashlib import sha256
import six
# pylint: disable=import-error
if six.PY3:
# The SafeConfigParser class has been renamed to ConfigParser in Py3
from configparser import ConfigParser as SafeConfigParser
else:
from ConfigParser import SafeConfigParser
# pylint: enable=import-error
from ipaplatform.tasks import tasks
from ipaplatform.paths import paths
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
SYSRESTORE_PATH = paths.TMP
SYSRESTORE_INDEXFILE = "sysrestore.index"
SYSRESTORE_STATEFILE = "sysrestore.state"
class FileStore:
    """Class for handling backup and restore of files.

    Each index entry maps a backup file name (content hash + original
    base name) to a ``mode,uid,gid,original-path`` string.
    """

    def __init__(self, path=SYSRESTORE_PATH, index_file=SYSRESTORE_INDEXFILE):
        """Create a _StoreFiles object, that uses @path as the
        base directory.

        The file @path/sysrestore.index is used to store information
        about the original location of the saved files.
        """
        self._path = path
        self._index = os.path.join(self._path, index_file)
        self.random = random.Random()

        self.files = {}
        self._load()

    def _load(self):
        """Load the file list from the index file. @files will
        be an empty dictionary if the file doesn't exist.
        """
        logger.debug("Loading Index file from '%s'", self._index)

        self.files = {}

        p = SafeConfigParser(interpolation=None)
        # keep option names case-sensitive (hashes embed mixed case)
        p.optionxform = str
        p.read(self._index)

        for section in p.sections():
            if section == "files":
                for (key, value) in p.items(section):
                    self.files[key] = value

    def save(self):
        """Save the file list to @_index. If @files is an empty
        dict, then @_index should be removed.
        """
        logger.debug("Saving Index File to '%s'", self._index)

        if len(self.files) == 0:
            logger.debug(" -> no files, removing file")
            if os.path.exists(self._index):
                os.remove(self._index)
            return

        p = SafeConfigParser(interpolation=None)
        p.optionxform = str

        p.add_section('files')
        for (key, value) in self.files.items():
            p.set('files', key, str(value))

        with open(self._index, "w") as f:
            p.write(f)

    def backup_file(self, path):
        """Create a copy of the file at @path - as long as an exact copy
        does not already exist - which will be restored to its
        original location by restore_files().
        """
        logger.debug("Backing up system configuration file '%s'", path)

        if not os.path.isabs(path):
            raise ValueError("Absolute path required")

        if not os.path.isfile(path):
            logger.debug(" -> Not backing up - '%s' doesn't exist", path)
            return

        _reldir, backupfile = os.path.split(path)

        # Name the backup after the content hash so identical content
        # is only stored once.
        with open(path, 'rb') as f:
            cont_hash = sha256(f.read()).hexdigest()

        filename = "{hexhash}-{bcppath}".format(hexhash=cont_hash,
                                                bcppath=backupfile)

        backup_path = os.path.join(self._path, filename)
        if os.path.exists(backup_path):
            logger.debug(" -> Not backing up - already have a copy of '%s'",
                         path)
            return

        shutil.copy2(path, backup_path)

        stat = os.stat(path)

        # record ownership and permissions alongside the original path
        template = '{stat.st_mode},{stat.st_uid},{stat.st_gid},{path}'
        self.files[filename] = template.format(stat=stat, path=path)
        self.save()

    def has_file(self, path):
        """Checks whether file at @path was added to the file store

        Returns #True if the file exists in the file store, #False otherwise
        """
        result = False
        for _key, value in self.files.items():
            _mode, _uid, _gid, filepath = value.split(',', 3)
            if (filepath == path):
                result = True
                break

        return result

    def restore_file(self, path, new_path=None):
        """Restore the copy of a file at @path to its original
        location and delete the copy.

        Takes optional parameter @new_path which specifies the
        location where the file is to be restored.

        Returns #True if the file was restored, #False if there
        was no backup file to restore
        """
        if new_path is None:
            logger.debug("Restoring system configuration file '%s'",
                         path)
        else:
            logger.debug("Restoring system configuration file '%s' to '%s'",
                         path, new_path)

        if not os.path.isabs(path):
            raise ValueError("Absolute path required")
        if new_path is not None and not os.path.isabs(new_path):
            raise ValueError("Absolute new path required")

        mode = None
        uid = None
        gid = None
        filename = None

        # locate the index entry whose recorded original path matches
        for (key, value) in self.files.items():
            (mode, uid, gid, filepath) = value.split(',', 3)
            if (filepath == path):
                filename = key
                break

        if not filename:
            raise ValueError("No such file name in the index")

        backup_path = os.path.join(self._path, filename)
        if not os.path.exists(backup_path):
            logger.debug(" -> Not restoring - '%s' doesn't exist",
                         backup_path)
            return False

        if new_path is not None:
            path = new_path

        shutil.copy(backup_path, path)  # SELinux needs copy
        os.remove(backup_path)

        # restore the recorded ownership, permissions and SELinux context
        os.chown(path, int(uid), int(gid))
        os.chmod(path, int(mode))

        tasks.restore_context(path)

        del self.files[filename]
        self.save()

        return True

    def restore_all_files(self):
        """Restore the files in the index to their original
        location and delete the copy.

        Returns #True if the file was restored, #False if there
        was no backup file to restore
        """
        if len(self.files) == 0:
            return False

        for (filename, value) in self.files.items():
            (mode, uid, gid, path) = value.split(',', 3)

            backup_path = os.path.join(self._path, filename)
            if not os.path.exists(backup_path):
                logger.debug(" -> Not restoring - '%s' doesn't exist",
                             backup_path)
                continue

            shutil.copy(backup_path, path)  # SELinux needs copy
            os.remove(backup_path)

            os.chown(path, int(uid), int(gid))
            os.chmod(path, int(mode))

            tasks.restore_context(path)

        # force file to be deleted
        self.files = {}
        self.save()

        return True

    def has_files(self):
        """Return True or False if there are any files in the index

        Can be used to determine if a program is configured.
        """
        return len(self.files) > 0

    def untrack_file(self, path):
        """Remove file at path @path from list of backed up files.

        Does not remove any files from the filesystem.

        Returns #True if the file was untracked, #False if there
        was no backup file to restore
        """
        logger.debug("Untracking system configuration file '%s'", path)

        if not os.path.isabs(path):
            raise ValueError("Absolute path required")

        filename = None

        for (key, value) in self.files.items():
            _mode, _uid, _gid, filepath = value.split(',', 3)
            if (filepath == path):
                filename = key
                break

        if not filename:
            raise ValueError("No such file name in the index")

        backup_path = os.path.join(self._path, filename)
        if not os.path.exists(backup_path):
            logger.debug(" -> Not restoring - '%s' doesn't exist",
                         backup_path)
            return False

        try:
            os.unlink(backup_path)
        except Exception as e:
            # best-effort removal: keep going so the index entry is
            # dropped even if the backup copy could not be deleted
            logger.error('Error removing %s: %s', backup_path, str(e))

        del self.files[filename]
        self.save()

        return True
class StateFile:
    """A metadata file for recording system state which can
    be backed up and later restored.

    StateFile gets reloaded every time to prevent loss of information
    recorded by child processes. But we do not solve concurrency
    because there is no need for it right now.

    The format is something like:

    [httpd]
    running=True
    enabled=False
    """

    def __init__(self, path=SYSRESTORE_PATH, state_file=SYSRESTORE_STATEFILE):
        """Create a StateFile object, loading from @path.

        The dictionary @modules, a member of the returned object,
        is where the state can be modified. @modules is indexed
        using a module name to return another dictionary containing
        key/value pairs with the saved state of that module.

        The keys in these latter dictionaries are arbitrary strings
        and the values may either be strings or booleans.
        """
        self._path = os.path.join(path, state_file)

        self.modules = {}

        self._load()

    def _load(self):
        """Load the modules from the file @_path. @modules will
        be an empty dictionary if the file doesn't exist.
        """
        logger.debug("Loading StateFile from '%s'", self._path)

        self.modules = {}

        p = SafeConfigParser(interpolation=None)
        # keep option names case-sensitive
        p.optionxform = str
        p.read(self._path)

        for module in p.sections():
            self.modules[module] = {}
            for (key, value) in p.items(module):
                # booleans are round-tripped through their str() form
                if value == str(True):
                    value = True
                elif value == str(False):
                    value = False
                self.modules[module][key] = value

    def save(self):
        """Save the modules to @_path. If @modules is an empty
        dict, then @_path should be removed.
        """
        logger.debug("Saving StateFile to '%s'", self._path)

        # drop modules that have no state left
        for module in list(self.modules):
            if len(self.modules[module]) == 0:
                del self.modules[module]

        if len(self.modules) == 0:
            logger.debug(" -> no modules, removing file")
            if os.path.exists(self._path):
                os.remove(self._path)
            return

        p = SafeConfigParser(interpolation=None)
        p.optionxform = str

        for module, vals in self.modules.items():
            p.add_section(module)
            for (key, value) in vals.items():
                p.set(module, key, str(value))

        with open(self._path, "w") as f:
            p.write(f)

    def backup_state(self, module, key, value):
        """Backup an item of system state from @module, identified
        by the string @key and with the value @value. @value may be
        a string or boolean.

        Only the first value recorded for a given (module, key) is
        kept: re-running an installer must not overwrite the state
        captured before the first run.
        """
        if not isinstance(value, (str, bool, unicode)):
            raise ValueError("Only strings, booleans or unicode strings "
                             "are supported")

        self._load()

        if module not in self.modules:
            self.modules[module] = {}

        # BUG FIX: this previously tested ``key not in self.modules``
        # (the module-name dict), which is virtually always true, so a
        # second backup_state() call silently overwrote the original
        # pre-install state.  Test the module's own key dict instead.
        if key not in self.modules[module]:
            self.modules[module][key] = value

        self.save()

    def get_state(self, module, key):
        """Return the value of an item of system state from @module,
        identified by the string @key.

        If the item doesn't exist, #None will be returned, otherwise
        the original string or boolean value is returned.
        """
        self._load()

        if module not in self.modules:
            return None

        return self.modules[module].get(key, None)

    def delete_state(self, module, key):
        """Delete system state from @module, identified by the string
        @key.

        If the item doesn't exist, no change is done.
        """
        self._load()

        try:
            del self.modules[module][key]
        except KeyError:
            pass
        else:
            self.save()

    def restore_state(self, module, key):
        """Return the value of an item of system state from @module,
        identified by the string @key, and remove it from the backed
        up system state.

        If the item doesn't exist, #None will be returned, otherwise
        the original string or boolean value is returned.
        """
        value = self.get_state(module, key)

        if value is not None:
            self.delete_state(module, key)

        return value

    def has_state(self, module):
        """Return True or False if there is any state stored for @module.

        Can be used to determine if a service is configured.
        """
        return module in self.modules
| 13,917
|
Python
|
.py
| 341
| 31.228739
| 78
| 0.602452
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,673
|
aci.py
|
freeipa_freeipa/ipalib/aci.py
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import shlex
import re
import six
# The Python re module doesn't do nested parenthesis
# Break the ACI into 3 pieces: target, name, permissions/bind_rules
ACIPat = re.compile(r'\(version\s+3.0\s*;\s*ac[li]\s+\"([^\"]*)\"\s*;'
r'\s*(.*);\s*\)', re.UNICODE)
# Break the permissions/bind_rules out
PermPat = re.compile(r'(\w+)\s*\(([^()]*)\)\s*(.*)', re.UNICODE)
# Break the bind rule out
BindPat = re.compile(r'\(?([a-zA-Z0-9;\.]+)\s*(\!?=)\s*\"(.*)\"\)?',
re.UNICODE)
ACTIONS = ["allow", "deny"]
PERMISSIONS = ["read", "write", "add", "delete", "search", "compare",
"selfwrite", "proxy", "all"]
class ACI:
    """
    Holds the basic data for an ACI entry, as stored in the cn=accounts
    entry in LDAP. Has methods to parse an ACI string and export to an
    ACI String.
    """
    # parsed ACIs are compared with isequal()/__eq__, never hashed
    __hash__ = None

    def __init__(self, acistr=None):
        """Initialise an empty ACI, optionally parsing @acistr."""
        self.name = None
        self.source_group = None
        self.dest_group = None
        self.orig_acistr = acistr
        self.target = {}
        self.action = "allow"
        self.permissions = ["write"]
        self.bindrule = {}
        if acistr is not None:
            self._parse_acistr(acistr)

    def __getitem__(self, key):
        """Fake getting attributes by key for sorting"""
        if key == 0:
            return self.name
        if key == 1:
            return self.source_group
        if key == 2:
            return self.dest_group
        raise TypeError("Unknown key value %s" % key)

    def __repr__(self):
        """An alias for export_to_string()"""
        return self.export_to_string()

    def export_to_string(self):
        """Output a Directory Server-compatible ACI string"""
        self.validate()
        aci = ""
        for t, v in sorted(self.target.items()):
            op = v['operator']
            if type(v['expression']) in (tuple, list):
                # multi-valued targets are joined with " || "
                target = ""
                for l in self._unique_list(v['expression']):
                    target = target + l + " || "
                target = target[:-4]
                aci = aci + "(%s %s \"%s\")" % (t, op, target)
            else:
                aci = aci + "(%s %s \"%s\")" % (t, op, v['expression'])
        aci = aci + "(version 3.0;acl \"%s\";%s (%s) %s %s \"%s\"" % (self.name, self.action, ",".join(self.permissions), self.bindrule['keyword'], self.bindrule['operator'], self.bindrule['expression']) + ";)"
        return aci

    def _unique_list(self, l):
        """
        A set() doesn't maintain order so make a list unique ourselves.

        The number of entries in our lists are always going to be
        relatively low and this code will be called infrequently
        anyway so the overhead will be small.
        """
        unique = []
        for item in l:
            if item not in unique:
                unique.append(item)
        return unique

    def _remove_quotes(self, s):
        # Remove leading and trailing quotes
        if s.startswith('"'):
            s = s[1:]
        if s.endswith('"'):
            s = s[:-1]
        return s

    def _parse_target(self, aci):
        """Parse the '(a = b)(a = b)...' target clauses preceding the
        version marker, populating self.target.
        """
        if six.PY2:
            aci = aci.encode('utf-8')
        lexer = shlex.shlex(aci)
        # allow dots inside tokens (e.g. OIDs, keywords like targetattr)
        lexer.wordchars = lexer.wordchars + "."
        var = False
        op = "="
        for token in lexer:
            # We should have the form (a = b)(a = b)...
            if token == "(":
                var = next(lexer).strip()
                operator = next(lexer)
                if operator not in ("=", "!="):
                    # Peek at the next char before giving up
                    operator = operator + next(lexer)
                    if operator not in ("=", "!="):
                        raise SyntaxError("No operator in target, got '%s'" % operator)
                op = operator
                val = next(lexer).strip()
                val = self._remove_quotes(val)
                end = next(lexer)
                if end != ")":
                    raise SyntaxError('No end parenthesis in target, got %s' % end)

            if var == 'targetattr':
                # Make a string of the form attr || attr || ... into a list
                t = re.split(r'[^a-zA-Z0-9;\*]+', val)
                self.target[var] = {}
                self.target[var]['operator'] = op
                self.target[var]['expression'] = t
            else:
                self.target[var] = {}
                self.target[var]['operator'] = op
                self.target[var]['expression'] = val

    def _parse_acistr(self, acistr):
        """Split @acistr at the 'version 3.0' marker: everything before
        is the target, the rest carries name, permissions and bind rule.
        """
        vstart = acistr.find('version 3.0')
        if vstart < 0:
            raise SyntaxError("malformed ACI, unable to find version %s" % acistr)
        acimatch = ACIPat.match(acistr[vstart-1:])
        if not acimatch or len(acimatch.groups()) < 2:
            raise SyntaxError("malformed ACI, match for version and bind rule failed %s" % acistr)
        self._parse_target(acistr[:vstart-1])
        self.name = acimatch.group(1)
        bindperms = PermPat.match(acimatch.group(2))
        if not bindperms or len(bindperms.groups()) < 3:
            raise SyntaxError("malformed ACI, permissions match failed %s" % acistr)
        self.action = bindperms.group(1)
        self.permissions = self._unique_list(
            bindperms.group(2).replace(' ', '').split(',')
        )
        self.set_bindrule(bindperms.group(3))

    def validate(self):
        """Do some basic verification that this will produce a
        valid LDAP ACI.

        returns True if valid
        """
        if type(self.permissions) not in (tuple, list):
            raise SyntaxError("permissions must be a list")
        for p in self.permissions:
            if p.lower() not in PERMISSIONS:
                raise SyntaxError("invalid permission: '%s'" % p)
        if not self.name:
            raise SyntaxError("name must be set")
        if not isinstance(self.name, str):
            raise SyntaxError("name must be a string")
        if not isinstance(self.target, dict) or len(self.target) == 0:
            raise SyntaxError("target must be a non-empty dictionary")
        if not isinstance(self.bindrule, dict):
            raise SyntaxError("bindrule must be a dictionary")
        if not self.bindrule.get('operator') or not self.bindrule.get('keyword') or not self.bindrule.get('expression'):
            raise SyntaxError("bindrule is missing a component")
        return True

    def set_permissions(self, permissions):
        """Set the permission list, accepting a single string too."""
        if type(permissions) not in (tuple, list):
            permissions = [permissions]
        self.permissions = self._unique_list(permissions)

    def set_target_filter(self, filter, operator="="):
        """Set the targetfilter clause, wrapping in parens if needed."""
        self.target['targetfilter'] = {}
        if not filter.startswith("("):
            filter = "(" + filter + ")"
        self.target['targetfilter']['expression'] = filter
        self.target['targetfilter']['operator'] = operator

    def set_target_attr(self, attr, operator="="):
        """Set (or, with an empty @attr, clear) the targetattr clause."""
        if not attr:
            if 'targetattr' in self.target:
                del self.target['targetattr']
            return
        if type(attr) not in (tuple, list):
            attr = [attr]
        self.target['targetattr'] = {}
        self.target['targetattr']['expression'] = self._unique_list(attr)
        self.target['targetattr']['operator'] = operator

    def set_target(self, target, operator="="):
        """Set the target clause; @target must be an LDAP URL."""
        assert target.startswith("ldap:///")
        self.target['target'] = {}
        self.target['target']['expression'] = target
        self.target['target']['operator'] = operator

    def set_bindrule(self, bindrule):
        """Parse a 'keyword op "expression"' bind rule string."""
        if bindrule.startswith('(') != bindrule.endswith(')'):
            raise SyntaxError("non-matching parentheses in bindrule")

        match = BindPat.match(bindrule)
        if not match or len(match.groups()) < 3:
            raise SyntaxError("malformed bind rule")
        self.set_bindrule_keyword(match.group(1))
        self.set_bindrule_operator(match.group(2))
        self.set_bindrule_expression(match.group(3).replace('"', ''))

    def set_bindrule_keyword(self, keyword):
        self.bindrule['keyword'] = keyword

    def set_bindrule_operator(self, operator):
        self.bindrule['operator'] = operator

    def set_bindrule_expression(self, expression):
        self.bindrule['expression'] = expression

    def isequal(self, b):
        """
        Compare the current ACI to another one to see if they are
        the same.

        returns True if equal, False if not.
        """
        assert isinstance(b, ACI)
        try:
            if self.name.lower() != b.name.lower():
                return False

            if set(self.permissions) != set(b.permissions):
                return False

            if self.bindrule.get('keyword') != b.bindrule.get('keyword'):
                return False
            if self.bindrule.get('operator') != b.bindrule.get('operator'):
                return False
            if self.bindrule.get('expression') != b.bindrule.get('expression'):
                return False

            if self.target.get('targetfilter', {}).get('expression') != b.target.get('targetfilter', {}).get('expression'):
                return False
            if self.target.get('targetfilter', {}).get('operator') != b.target.get('targetfilter', {}).get('operator'):
                return False

            # targetattr order is irrelevant, compare as sets
            if set(self.target.get('targetattr', {}).get('expression', ())) != set(b.target.get('targetattr', {}).get('expression', ())):
                return False
            if self.target.get('targetattr', {}).get('operator') != b.target.get('targetattr', {}).get('operator'):
                return False

            if self.target.get('target', {}).get('expression') != b.target.get('target', {}).get('expression'):
                return False
            if self.target.get('target', {}).get('operator') != b.target.get('target', {}).get('operator'):
                return False

        except Exception:
            # If anything throws up then they are not equal
            return False

        # We got this far so lets declare them the same
        return True

    __eq__ = isequal

    def __ne__(self, b):
        return not self == b
| 11,029
|
Python
|
.py
| 247
| 34.562753
| 210
| 0.571282
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,674
|
misc.py
|
freeipa_freeipa/ipalib/misc.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import re
from ipalib import LocalOrRemote, _, ngettext
from ipalib.output import Output, summary
from ipalib import Flag
from ipalib.plugable import Registry
register = Registry()
# FIXME: We should not let env return anything in_server
# when mode == 'production'. This would allow an attacker to see the
# configuration of the server, potentially revealing compromising
# information. However, it's damn handy for testing/debugging.
class env(LocalOrRemote):
    __doc__ = _('Show environment variables.')

    msg_summary = _('%(count)d variables')

    # optional list of variable names, '*' acts as a glob wildcard
    takes_args = (
        'variables*',
    )

    takes_options = LocalOrRemote.takes_options + (
        Flag(
            'all',
            cli_name='all',
            doc=_('retrieve and print all attributes from the server. '
                  'Affects command output.'),
            exclude='webui',
            flags=['no_option', 'no_output'],
            default=True,
        ),
    )

    has_output = (
        Output(
            'result',
            type=dict,
            doc=_('Dictionary mapping variable name to value'),
        ),
        Output(
            'total',
            type=int,
            doc=_('Total number of variables env (>= count)'),
            flags=['no_display'],
        ),
        Output(
            'count',
            type=int,
            doc=_('Number of variables returned (<= total)'),
            flags=['no_display'],
        ),
        summary,
    )

    def __find_keys(self, variables):
        # Expand the requested variable names into the set of matching
        # env keys; '*' in a query behaves as a glob-style wildcard.
        keys = set()
        for query in variables:
            if '*' in query:
                pat = re.compile(query.replace('*', '.*') + '$')
                for key in self.env:
                    if pat.match(key):
                        keys.add(key)
            elif query in self.env:
                keys.add(query)
        return keys

    def execute(self, variables=None, **options):
        # No arguments -> return every variable; otherwise only the
        # (possibly wildcard-expanded) requested ones.
        if variables is None:
            keys = self.env
        else:
            keys = self.__find_keys(variables)
        ret = dict(
            result=dict(
                (key, self.env[key]) for key in keys
            ),
            count=len(keys),
            total=len(self.env),
        )
        if len(keys) > 1:
            ret['summary'] = self.msg_summary % ret
        else:
            # zero or one variable: no summary line is displayed
            ret['summary'] = None
        return ret
class plugins(LocalOrRemote):
    __doc__ = _('Show all loaded plugins.')

    msg_summary = ngettext(
        '%(count)d plugin loaded', '%(count)d plugins loaded', 0
    )

    takes_options = LocalOrRemote.takes_options + (
        Flag(
            'all',
            cli_name='all',
            doc=_('retrieve and print all attributes from the server. '
                  'Affects command output.'),
            exclude='webui',
            flags=['no_option', 'no_output'],
            default=True,
        ),
    )

    has_output = (
        Output('result', dict, 'Dictionary mapping plugin names to bases'),
        Output(
            'count',
            type=int,
            doc=_('Number of plugins loaded'),
        ),
        summary,
    )

    def execute(self, **options):
        result = {}
        # Walk every API namespace and group namespace names by the
        # fully qualified class name of each plugin instance.
        for namespace in self.api:
            for plugin in self.api[namespace]():
                cls = type(plugin)
                key = '{}.{}'.format(cls.__module__, cls.__name__)
                result.setdefault(key, []).append(namespace)
        return dict(
            result=result,
            count=len(result),
        )
| 3,583
|
Python
|
.py
| 114
| 21.675439
| 75
| 0.522457
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,675
|
dns.py
|
freeipa_freeipa/ipalib/dns.py
|
# Authors:
# Martin Kosek <mkosek@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import re
from ipalib import errors
# dnsrecord param name formats
record_name_format = '%srecord'
part_name_format = "%s_part_%s"
extra_name_format = "%s_extra_%s"
def get_record_rrtype(name):
    """Return the RR type encoded in a ``<type>record`` parameter name.

    E.g. ``arecord`` -> ``A``, ``mxrecord`` -> ``MX``; None when *name*
    does not follow the pattern.
    """
    m = re.match(r'([^_]+)record$', name)
    return m.group(1).upper() if m is not None else None
def get_part_rrtype(name):
    """Return the RR type from a ``<type>_part_<part>`` parameter name.

    Returns None when *name* is not a record-part parameter name.
    """
    m = re.match(r'([^_]+)_part_.*$', name)
    return None if m is None else m.group(1).upper()
def get_extra_rrtype(name):
    """Return the RR type from a ``<type>_extra_<opt>`` parameter name.

    Returns None when *name* is not a record-extra parameter name.
    """
    m = re.match(r'([^_]+)_extra_.*$', name)
    return None if m is None else m.group(1).upper()
def has_cli_options(cmd, options, no_option_msg, allow_empty_attrs=False):
    """Verify that at least one relevant option was supplied.

    :param cmd: command whose ``params`` are inspected for record/part names
    :param options: dict of options passed on the command line
    :param no_option_msg: message used for the raised OptionError
    :param allow_empty_attrs: treat an empty attribute value as present
    :raises errors.OptionError: when no relevant option was given
    """
    # Any of these generic options alone is sufficient.
    sufficient = ('setattr', 'addattr', 'delattr', 'rename', 'dnsttl')
    if any(k in options for k in sufficient):
        return

    # Hoisted out of the loop: the set of record/part parameter names is
    # loop-invariant (previously it was recomputed for every option).
    obj_params = {n for n in cmd.params
                  if get_record_rrtype(n) or get_part_rrtype(n)}
    for attr, value in options.items():
        # A DNS record attribute counts when it has a value (or empty
        # values are explicitly allowed).
        if attr in obj_params and (value or allow_empty_attrs):
            return
    raise errors.OptionError(no_option_msg)
def get_rrparam_from_part(cmd, part_name):
    """Return the DNSRecord parameter that owns the given part parameter.

    ``part_name`` names a ``*_part_*`` or ``*_extra_*`` option; the
    matching ``<type>record`` parameter of *cmd* is returned, or None
    when it cannot be resolved.

    :param part_name: part parameter name
    """
    try:
        part = cmd.params[part_name]
        # Part/extra parameter names encode the parent RR type.
        rrtype = get_part_rrtype(part.name) or get_extra_rrtype(part.name)
        if not rrtype:
            return None
        return cmd.params[record_name_format % rrtype.lower()]
    except (KeyError, AttributeError):
        # unknown parameter or malformed params object
        return None
def iterate_rrparams_by_parts(cmd, kw, skip_extra=False):
    """Yield each DNSRecord parameter referenced by part/extra options.

    A record parameter is yielded exactly once, on the first option in
    *kw* that maps to it.

    :param kw: dictionary with DNS record parts or extra options
    :param skip_extra: ignore extra options; consider real parts only
    """
    seen = set()
    for opt in kw:
        if skip_extra and get_extra_rrtype(opt):
            continue
        rrparam = get_rrparam_from_part(cmd, opt)
        if rrparam is not None and rrparam.name not in seen:
            seen.add(rrparam.name)
            yield rrparam
| 3,551
|
Python
|
.py
| 92
| 32.576087
| 75
| 0.672399
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,676
|
certmonger.py
|
freeipa_freeipa/ipalib/install/certmonger.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
# David Kupka <dkupka@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Some certmonger functions, mostly around updating the request file.
# This is used so we can add tracking to the Apache and 389-ds
# server certificates created during the IPA server installation.
from __future__ import print_function, absolute_import
import logging
import os
import time
import dbus
import shlex
import subprocess
import tempfile
from ipalib import api
from ipalib.constants import CA_DBUS_TIMEOUT
from ipapython.dn import DN
from ipapython.ipautil import Sleeper
from ipaplatform.paths import paths
from ipaplatform import services
logger = logging.getLogger(__name__)
DBUS_CM_PATH = '/org/fedorahosted/certmonger'
DBUS_CM_IF = 'org.fedorahosted.certmonger'
DBUS_CM_NAME = 'org.fedorahosted.certmonger'
DBUS_CM_REQUEST_IF = 'org.fedorahosted.certmonger.request'
DBUS_CM_CA_IF = 'org.fedorahosted.certmonger.ca'
DBUS_PROPERTY_IF = 'org.freedesktop.DBus.Properties'
# These properties, if encountered in search criteria, result in a
# subset test instead of equality test.
ARRAY_PROPERTIES = ['template-hostname']
"""
Certmonger helper routines.
Search criteria
---------------
Functions that look up requests take a ``dict`` of search criteria.
In general, the key is a name of a property in the request property
interface. But there are some special cases with different
behaviour:
``nickname``
a.k.a. "request ID". If given, only the specified request is
retrieved (if it exists), and it is still tested against other
criteria.
``ca-name``
Test equality against the nickname of the CA (a.k.a. request
helper) object for the request.
``template-hostname``
Must be an iterable of DNS names. Tests that the given values
are a subset of the values defined on the Certmonger request.
"""
class _cm_dbus_object:
    """
    Auxiliary class for convenient DBus object handling.

    Bundles a DBus proxy with its bus, path and interfaces so callers
    can use ``obj_if`` (the object interface) and, optionally,
    ``prop_if`` (org.freedesktop.DBus.Properties).
    """
    def __init__(self, bus, parent, object_path, object_dbus_interface,
                 parent_dbus_interface=None, property_interface=False):
        """
        bus - DBus bus object, result of dbus.SystemBus() or dbus.SessionBus()
              The object is accessible over this DBus bus instance.
        object_path - path to requested object on DBus bus
        object_dbus_interface
        parent_dbus_interface
        property_interface - create DBus property interface? True or False
        """
        if bus is None or object_path is None or object_dbus_interface is None:
            raise RuntimeError(
                "bus, object_path and dbus_interface must not be None.")
        self.bus = bus
        self.parent = parent
        self.path = object_path
        self.obj_dbus_if = object_dbus_interface
        # the parent interface defaults to the object interface
        self.parent_dbus_if = (object_dbus_interface
                               if parent_dbus_interface is None
                               else parent_dbus_interface)
        self.obj = bus.get_object(self.parent_dbus_if, object_path)
        self.obj_if = dbus.Interface(self.obj, object_dbus_interface)
        if property_interface:
            self.prop_if = dbus.Interface(self.obj, DBUS_PROPERTY_IF)
class _certmonger(_cm_dbus_object):
    """
    Create a connection to certmonger.

    By default use SystemBus. When not available use private connection
    over Unix socket.

    This solution is really ugly and should be removed as soon as DBus
    SystemBus is available at system install time.
    """
    timeout = 300

    def _start_private_conn(self):
        # Spawn a private certmonger on a Unix socket and wait (up to
        # self.timeout seconds, polling every 5s) for it to appear.
        sock_filename = os.path.join(tempfile.mkdtemp(), 'certmonger')
        self._proc = subprocess.Popen([paths.CERTMONGER, '-n', '-L', '-P',
                                       sock_filename])
        for _t in range(0, self.timeout, 5):
            if os.path.exists(sock_filename):
                return "unix:path=%s" % sock_filename
            time.sleep(5)
        self._stop_private_conn()
        raise RuntimeError("Failed to start certmonger: Timed out")

    def _stop_private_conn(self):
        # Terminate the private certmonger instance if it is running.
        if self._proc:
            retcode = self._proc.poll()
            if retcode is not None:
                return
            self._proc.terminate()
            for _t in range(0, self.timeout, 5):
                retcode = self._proc.poll()
                if retcode is not None:
                    return
                time.sleep(5)
            logger.error("Failed to stop certmonger.")

    def __del__(self):
        self._stop_private_conn()

    def __init__(self):
        self._proc = None
        self._bus = None
        try:
            self._bus = dbus.SystemBus()
        except dbus.DBusException as e:
            err_name = e.get_dbus_name()
            if err_name not in ['org.freedesktop.DBus.Error.NoServer',
                                'org.freedesktop.DBus.Error.FileNotFound']:
                logger.error("Failed to connect to certmonger over "
                             "SystemBus: %s", e)
                raise
            # No system bus (e.g. during install): fall back to a
            # private certmonger instance over a Unix socket.
            try:
                self._private_sock = self._start_private_conn()
                self._bus = dbus.connection.Connection(self._private_sock)
            except dbus.DBusException as e:
                logger.error("Failed to connect to certmonger over "
                             "private socket: %s", e)
                raise
        else:
            try:
                self._bus.get_name_owner(DBUS_CM_NAME)
            except dbus.DBusException:
                try:
                    services.knownservices.certmonger.start()
                except Exception as e:
                    logger.error("Failed to start certmonger: %s", e)
                    raise
                # BUGFIX: the timeout error must only fire when the wait
                # loop exhausts without certmonger claiming its bus name;
                # previously the raise ran unconditionally after the loop,
                # even after a successful break.
                for _t in range(0, self.timeout, 5):
                    try:
                        self._bus.get_name_owner(DBUS_CM_NAME)
                        break
                    except dbus.DBusException:
                        pass
                    time.sleep(5)
                else:
                    raise RuntimeError('Failed to start certmonger')
        super(_certmonger, self).__init__(self._bus, None, DBUS_CM_PATH,
                                          DBUS_CM_IF)
def _get_requests(criteria):
    """
    Get all requests that matches the provided criteria.

    :param criteria: dict of criteria; see module doc for details
    :return: list of matching request objects (_cm_dbus_object)
    """
    if not isinstance(criteria, dict):
        raise TypeError('"criteria" must be dict.')

    cm = _certmonger()
    requests = []
    requests_paths = []
    if 'nickname' in criteria:
        # A nickname (request ID) pins the search to a single request,
        # which is still tested against the remaining criteria below.
        request_path = cm.obj_if.find_request_by_nickname(criteria['nickname'])
        if request_path:
            requests_paths = [request_path]
    else:
        requests_paths = cm.obj_if.get_requests()

    for request_path in requests_paths:
        request = _cm_dbus_object(cm.bus, cm, request_path, DBUS_CM_REQUEST_IF,
                                  DBUS_CM_IF, True)
        for criterion in criteria:
            if criterion == 'ca-name':
                # compare against the nickname of the request's CA object
                ca_path = request.obj_if.get_ca()
                if ca_path is None:
                    raise RuntimeError("certmonger CA '%s' is not defined" %
                                       criteria.get('ca-name'))
                ca = _cm_dbus_object(cm.bus, cm, ca_path, DBUS_CM_CA_IF,
                                     DBUS_CM_IF)
                if criteria[criterion] != ca.obj_if.get_nickname():
                    break
            elif criterion in ARRAY_PROPERTIES:
                # perform subset test
                expect = set(criteria[criterion])
                got = request.prop_if.Get(DBUS_CM_REQUEST_IF, criterion)
                if not expect.issubset(got):
                    break
            else:
                value = request.prop_if.Get(DBUS_CM_REQUEST_IF, criterion)
                if criteria[criterion] != value:
                    break
        else:
            # no criterion failed -> the request matches
            requests.append(request)

    return requests
def _get_request(criteria):
    """
    Find request that matches criteria. Return ``None`` if no match.

    Raise ``RuntimeError`` if there is more than one matching request.

    :param criteria: dict of criteria; see module doc for details
    """
    matches = _get_requests(criteria)
    if not matches:
        return None
    if len(matches) > 1:
        raise RuntimeError("Criteria expected to be met by 1 request, got %s."
                           % len(matches))
    return matches[0]
def get_request_value(request_id, directive):
    """
    Get property of request.

    Returns None when the request does not exist.
    """
    try:
        request = _get_request(dict(nickname=request_id))
    except RuntimeError as e:
        logger.error('Failed to get request: %s', e)
        raise
    if request is None:
        return None
    if directive != 'ca-name':
        return request.prop_if.Get(DBUS_CM_REQUEST_IF, directive)
    # 'ca-name' is resolved through the CA object, not a plain property.
    ca_path = request.obj_if.get_ca()
    ca = _cm_dbus_object(request.bus, request, ca_path, DBUS_CM_CA_IF,
                         DBUS_CM_IF)
    return ca.obj_if.get_nickname()
def get_request_id(criteria):
    """
    If you don't know the certmonger request_id then try to find it by looking
    through all the requests.

    Return ``None`` if no match. Raise ``RuntimeError`` if there is
    more than one matching request.

    :param criteria: dict of criteria; see module doc for details
    """
    try:
        request = _get_request(criteria)
    except RuntimeError as e:
        logger.error('Failed to get request: %s', e)
        raise
    if request is None:
        return None
    return request.prop_if.Get(DBUS_CM_REQUEST_IF, 'nickname')
def get_requests_for_dir(dir):
    """
    Return a list containing the request ids for a given NSS database
    directory.
    """
    criteria = {
        'cert-storage': 'NSSDB',
        'key-storage': 'NSSDB',
        'cert-database': dir,
        'key-database': dir,
    }
    return [request.prop_if.Get(DBUS_CM_REQUEST_IF, 'nickname')
            for request in _get_requests(criteria)]
def add_request_value(request_id, directive, value):
    """
    Add a new directive to a certmonger request file.

    Silently does nothing when the request does not exist.
    """
    try:
        request = _get_request({'nickname': request_id})
    except RuntimeError as e:
        logger.error('Failed to get request: %s', e)
        raise
    if request is not None:
        request.obj_if.modify({directive: value})
def add_principal(request_id, principal):
    """
    Attach a principal to a tracking request.

    A certmonger request needs a principal to be renewable; requests
    created via start-tracking for an existing certificate lack one.
    """
    add_request_value(request_id, 'template-principal', [principal])
def add_subject(request_id, subject):
    """
    Set the subject template on a tracking request.

    A renewable certmonger request needs the subject set in the request
    file; requests created via start-tracking for an existing
    certificate do not have subject_template set.
    """
    add_request_value(request_id, 'template-subject', subject)
def request_and_wait_for_cert(
        certpath, subject, principal, nickname=None, passwd_fname=None,
        dns=None, ca='IPA', profile=None,
        pre_command=None, post_command=None, storage='NSSDB', perms=None,
        resubmit_timeout=0, stop_tracking_on_error=False,
        nss_user=None):
    """Request certificate, wait and possibly resubmit failing requests

    Submit a cert request to certmonger and wait until the request has
    finished.

    With timeout, a failed request is resubmitted. During parallel replica
    installation, a request sometimes fails with CA_REJECTED or
    CA_UNREACHABLE. The error occurs when the master is either busy or some
    information haven't been replicated yet. Even a stuck request can be
    recovered, e.g. when permission and group information have been
    replicated.

    :return: the certmonger request id on success
    :raises RuntimeError: when the request ends in a failed state or the
        resubmit deadline is exceeded
    """
    req_id = request_cert(
        certpath, subject, principal, nickname, passwd_fname, dns, ca,
        profile, pre_command, post_command, storage, perms, nss_user
    )

    # Don't wait longer than resubmit timeout if it is configured
    certmonger_timeout = api.env.certmonger_wait_timeout
    if resubmit_timeout and resubmit_timeout < certmonger_timeout:
        certmonger_timeout = resubmit_timeout

    deadline = time.time() + resubmit_timeout

    while True:  # until success, timeout, or error
        try:
            state = wait_for_request(req_id, certmonger_timeout)
        except RuntimeError as e:
            # wait_for_request timed out; treat as a soft TIMEOUT state
            logger.debug("wait_for_request raised %s", e)
            state = 'TIMEOUT'
        ca_error = get_request_value(req_id, 'ca-error')
        if state == 'MONITORING' and ca_error is None:
            # we got a winner, exiting
            logger.debug("Cert request %s was successful", req_id)
            return req_id

        logger.debug(
            "Cert request %s failed: %s (%s)", req_id, state, ca_error
        )
        if state in {'CA_REJECTED', 'CA_UNREACHABLE'}:
            # probably unrecoverable error
            logger.debug("Giving up on cert request %s", req_id)
            break
        if not resubmit_timeout:
            # no resubmit
            break
        if time.time() > deadline:
            logger.debug("Request %s reached resubmit deadline", req_id)
            break
        if state == 'TIMEOUT':
            logger.debug("%s not in final state, continue waiting", req_id)
            time.sleep(10)
        else:
            # sleep and resubmit
            logger.debug("Sleep and resubmit cert request %s", req_id)
            time.sleep(10)
            resubmit_request(req_id)

    if stop_tracking_on_error:
        stop_tracking(request_id=req_id)

    raise RuntimeError(
        "Certificate issuance failed ({}: {})".format(state, ca_error)
    )
def request_cert(
        certpath, subject, principal, nickname=None, passwd_fname=None,
        dns=None, ca='IPA', profile=None,
        pre_command=None, post_command=None, storage='NSSDB', perms=None,
        nss_user=None):
    """
    Execute certmonger to request a server certificate.

    ``dns``
        A sequence of DNS names to appear in SAN request extension.
    ``perms``
        A tuple of (cert, key) permissions in e.g., (0644,0660)

    :return: the certmonger request nickname (request id)
    :raises RuntimeError: when the CA is unknown or the request could
        not be created
    """
    if storage == 'FILE':
        certfile, keyfile = certpath
        # This is a workaround for certmonger having different Subject
        # representation with NSS and OpenSSL
        # https://pagure.io/certmonger/issue/62
        subject = str(DN(*reversed(DN(subject))))
    else:
        certfile = certpath
        keyfile = certpath

    cm = _certmonger()
    ca_path = cm.obj_if.find_ca_by_nickname(ca)
    if not ca_path:
        raise RuntimeError('{} CA not found'.format(ca))
    request_parameters = dict(KEY_STORAGE=storage, CERT_STORAGE=storage,
                              CERT_LOCATION=certfile, KEY_LOCATION=keyfile,
                              SUBJECT=subject, CA=ca_path)
    if nickname:
        request_parameters["CERT_NICKNAME"] = nickname
        request_parameters["KEY_NICKNAME"] = nickname
    if principal:
        request_parameters['PRINCIPAL'] = [principal]
    if dns:
        if not isinstance(dns, (list, tuple)):
            raise TypeError(dns)
        request_parameters['DNS'] = dns
    if passwd_fname:
        request_parameters['KEY_PIN_FILE'] = passwd_fname
    if profile:
        request_parameters['ca-profile'] = profile
    if nss_user:
        request_parameters['nss-user'] = nss_user

    certmonger_cmd_template = paths.CERTMONGER_COMMAND_TEMPLATE

    if pre_command:
        if not os.path.isabs(pre_command):
            # relative helper names resolve inside the certmonger helper dir
            pre_command = certmonger_cmd_template % (pre_command)
        request_parameters['cert-presave-command'] = pre_command
    if post_command:
        if not os.path.isabs(post_command):
            post_command = certmonger_cmd_template % (post_command)
        request_parameters['cert-postsave-command'] = post_command

    if perms:
        request_parameters['cert-perms'] = perms[0]
        request_parameters['key-perms'] = perms[1]

    # add_request returns (success_flag, request_path)
    result = cm.obj_if.add_request(request_parameters)
    try:
        if result[0]:
            request = _cm_dbus_object(cm.bus, cm, result[1], DBUS_CM_REQUEST_IF,
                                      DBUS_CM_IF, True)
        else:
            raise RuntimeError('add_request() returned False')
    except Exception as e:
        logger.error('Failed to create a new request: %s', e)
        raise
    return request.obj_if.get_nickname()
def start_tracking(
        certpath, ca='IPA', nickname=None, pin=None, pinfile=None,
        pre_command=None, post_command=None, profile=None, storage="NSSDB",
        token_name=None, dns=None, nss_user=None):
    """
    Tell certmonger to track the given certificate in either a file or an NSS
    database. The certificate access can be protected by a password_file.

    This uses the generic certmonger command getcert so we can specify
    a different helper.

    :param certpath:
        The path to an NSS database or a tuple (PEM certificate, private key).
    :param ca:
        Nickname of the CA for which the given certificate should be tracked.
    :param nickname:
        Nickname of the NSS certificate in ``certpath`` to be tracked.
    :param pin:
        The passphrase for either NSS database containing ``nickname`` or
        for the encrypted key in the ``certpath`` tuple.
    :param pinfile:
        Similar to ``pin`` parameter except this is a path to a file containing
        the required passphrase.
    :param pre_command:
        Specifies a command for certmonger to run before it renews a
        certificate. This command must reside in /usr/lib/ipa/certmonger
        to work with SELinux.
    :param post_command:
        Specifies a command for certmonger to run after it has renewed a
        certificate. This command must reside in /usr/lib/ipa/certmonger
        to work with SELinux.
    :param storage:
        One of "NSSDB" or "FILE", describes whether certmonger should use
        NSS or OpenSSL backend to track the certificate in ``certpath``
    :param profile:
        Which certificate profile should be used.
    :param token_name:
        Hardware token name for HSM support
    :param dns:
        List of DNS names
    :param nss_user:
        login of the private key owner
    :returns: certificate tracking nickname.
    """
    if storage == 'FILE':
        certfile, keyfile = certpath
    else:
        certfile = certpath
        keyfile = certpath

    cm = _certmonger()
    certmonger_cmd_template = paths.CERTMONGER_COMMAND_TEMPLATE

    ca_path = cm.obj_if.find_ca_by_nickname(ca)
    if not ca_path:
        raise RuntimeError('{} CA not found'.format(ca))

    params = {
        'TRACK': True,
        'CERT_STORAGE': storage,
        'KEY_STORAGE': storage,
        'CERT_LOCATION': certfile,
        'KEY_LOCATION': keyfile,
        'CA': ca_path
    }
    if nickname:
        params['CERT_NICKNAME'] = nickname
        params['KEY_NICKNAME'] = nickname
    if pin:
        params['KEY_PIN'] = pin
    if pinfile:
        params['KEY_PIN_FILE'] = os.path.abspath(pinfile)
    if pre_command:
        if not os.path.isabs(pre_command):
            # relative helper names resolve inside the certmonger helper dir
            pre_command = certmonger_cmd_template % (pre_command)
        params['cert-presave-command'] = pre_command
    if post_command:
        if not os.path.isabs(post_command):
            post_command = certmonger_cmd_template % (post_command)
        params['cert-postsave-command'] = post_command
    if profile:
        params['ca-profile'] = profile
    if token_name not in {None, "internal"}:
        # only pass token names for external tokens (e.g. HSM)
        params['key-token'] = token_name
        params['cert-token'] = token_name
    if dns is not None and len(dns) > 0:
        params['DNS'] = dns
    if nss_user:
        params['nss-user'] = nss_user

    # add_request returns (success_flag, request_path)
    result = cm.obj_if.add_request(params)
    try:
        if result[0]:
            request = _cm_dbus_object(cm.bus, cm, result[1], DBUS_CM_REQUEST_IF,
                                      DBUS_CM_IF, True)
        else:
            raise RuntimeError('add_request() returned False')
    except Exception as e:
        logger.error('Failed to add new request: %s', e)
        raise
    return request.prop_if.Get(DBUS_CM_REQUEST_IF, 'nickname')
def stop_tracking(secdir=None, request_id=None, nickname=None, certfile=None):
    """
    Stop tracking the current request using either the request_id or nickname.

    Returns True or False
    """
    if request_id is None and nickname is None and certfile is None:
        raise RuntimeError(
            'One of request_id, nickname and certfile is required.')
    if secdir is not None and certfile is not None:
        raise RuntimeError("Can't specify both secdir and certfile.")

    # Build the search criteria from whichever identifiers were given.
    criteria = {}
    for key, value in (('cert-database', secdir),
                       ('nickname', request_id),
                       ('cert-nickname', nickname),
                       ('cert-file', certfile)):
        if value:
            criteria[key] = value

    try:
        request = _get_request(criteria)
    except RuntimeError as e:
        logger.error('Failed to get request: %s', e)
        raise
    if request:
        request.parent.obj_if.remove_request(request.path)
def modify(request_id, ca=None, profile=None, template_v2=None):
    """Apply CA/profile/template changes to an existing tracking request."""
    update = {}
    if ca is not None:
        update['CA'] = _certmonger().obj_if.find_ca_by_nickname(ca)
    if profile is not None:
        update['template-profile'] = profile
    if template_v2 is not None:
        update['template-ms-certificate-template'] = template_v2
    if update:
        request = _get_request({'nickname': request_id})
        request.obj_if.modify(update)
def resubmit_request(
        request_id,
        ca=None,
        profile=None,
        template_v2=None,
        is_ca=False):
    """
    Apply the given overrides to a tracking request and resubmit it.

    :param request_id: the certmonger numeric request ID
    :param ca: the nickname for the certmonger CA, e.g. IPA or SelfSign
    :param profile: the profile to use, e.g. SubCA. For requests using the
                    Dogtag CA, this is the profile to use. This also causes
                    the Microsoft certificate template name extension to the
                    CSR (for telling AD CS what template to use).
    :param template_v2: Microsoft V2 template specifier extension value.
                        Format: <oid>:<major-version>[:<minor-version>]
    :param is_ca: boolean that if True adds the CA basic constraint
    """
    request = _get_request({'nickname': request_id})
    if request:
        update = {}
        if ca is not None:
            cm = _certmonger()
            update['CA'] = cm.obj_if.find_ca_by_nickname(ca)
        if profile is not None:
            update['template-profile'] = profile
        if template_v2 is not None:
            update['template-ms-certificate-template'] = template_v2
        if is_ca:
            update['template-is-ca'] = True
            update['template-ca-path-length'] = -1  # no path length

        # TODO: certmonger assumes some hard-coded defaults like RSA 2048.
        # Try to fetch values from current cert rather.
        for key, convert in [('key-size', int), ('key-type', str)]:
            try:
                value = request.prop_if.Get(DBUS_CM_REQUEST_IF, key)
            except dbus.DBusException:
                # property not available on this request; keep defaults
                continue
            else:
                if value:
                    # convert dbus.Int64() to int, dbus.String() to str
                    update[key] = convert(value)

        if len(update) > 0:
            request.obj_if.modify(update)
        request.obj_if.resubmit()
def _find_IPA_ca():
    """
    Return the certmonger CA object whose nickname is 'IPA'.

    We can use find_request_value because the ca files have the
    same file format.
    """
    cm = _certmonger()
    path = cm.obj_if.find_ca_by_nickname('IPA')
    return _cm_dbus_object(cm.bus, cm, path, DBUS_CM_CA_IF, DBUS_CM_IF, True)
def add_principal_to_cas(principal):
    """
    Append ``-k <principal>`` to the IPA CA external helper when missing.

    If the hostname passed to ipa-client-install doesn't match
    gethostname(), the ca helper defined for
    /usr/libexec/certmonger/ipa-submit must authenticate as
    host/HOSTNAME@REALM. This is restored on uninstall.
    """
    ca = _find_IPA_ca()
    if not ca:
        return
    helper = ca.prop_if.Get(DBUS_CM_CA_IF, 'external-helper')
    if helper and '-k' not in shlex.split(helper):
        new_helper = '%s -k %s' % (helper.strip(), principal)
        ca.prop_if.Set(DBUS_CM_CA_IF, 'external-helper', new_helper)
def remove_principal_from_cas():
    """
    Remove any -k principal options from the ipa_submit helper.
    """
    ca = _find_IPA_ca()
    if not ca:
        return
    helper = ca.prop_if.Get(DBUS_CM_CA_IF, 'external-helper')
    if helper:
        parts = shlex.split(helper)
        if '-k' in parts:
            # keep only the helper executable itself
            ca.prop_if.Set(DBUS_CM_CA_IF, 'external-helper', parts[0])
def modify_ca_helper(ca_name, helper):
    """
    Modify certmonger CA helper.

    Applies the new helper and returns the previous configuration.

    :raises RuntimeError: when *ca_name* is not a configured CA
    """
    bus = dbus.SystemBus()
    cm_obj = bus.get_object('org.fedorahosted.certmonger',
                            '/org/fedorahosted/certmonger')
    cm_if = dbus.Interface(cm_obj, 'org.fedorahosted.certmonger')
    ca_path = cm_if.find_ca_by_nickname(ca_name)
    if not ca_path:
        raise RuntimeError("{} is not configured".format(ca_name))
    ca_obj = bus.get_object('org.fedorahosted.certmonger', ca_path)
    props = dbus.Interface(ca_obj, 'org.freedesktop.DBus.Properties')
    previous = props.Get('org.fedorahosted.certmonger.ca',
                         'external-helper')
    props.Set('org.fedorahosted.certmonger.ca',
              'external-helper', helper,
              # Give dogtag extra time to generate cert
              timeout=CA_DBUS_TIMEOUT)
    return previous
def get_pin(token="internal"):
    """
    Return the NSS PIN for *token* from Dogtag's password file.

    The file contains one ``token=PIN`` entry per line; hardware tokens
    are looked up under a ``hardware-<token>`` key. Returns None when
    the token is not present.

    The caller is expected to handle any exceptions raised.
    """
    if token and token != 'internal':
        token = 'hardware-' + token
    with open(paths.PKI_TOMCAT_PASSWORD_CONF, 'r') as f:
        for line in f:
            tok, pin = line.split('=', 1)
            if token == tok:
                return pin.strip()
    return None
def check_state(dirs):
    """
    Given a set of directories and nicknames verify that we are no longer
    tracking certificates.

    dirs is a list of directories to test for. We will return a tuple
    of nicknames for any tracked certificates found.

    This can only check for NSS-based certificates.
    """
    tracked = []
    for directory in dirs:
        tracked += get_requests_for_dir(directory)
    return tracked
def wait_for_request(request_id, timeout=120):
    """Poll certmonger until the request reaches a final state.

    :return: the final state as a string
    :raises RuntimeError: when *timeout* expires first
    """
    sleep = Sleeper(
        sleep=0.5,  # getcert.c:waitfor() uses 125ms
        timeout=timeout,
        raises=RuntimeError("request timed out")
    )
    final_states = {'CA_REJECTED', 'CA_UNREACHABLE', 'CA_UNCONFIGURED',
                    'NEED_GUIDANCE', 'NEED_CA', 'MONITORING'}
    previous = None
    while True:
        state = str(get_request_value(request_id, 'status'))
        if state != previous:
            # log only on transitions to keep the output readable
            logger.debug("certmonger request is in state %r", state)
        if state in final_states:
            return state
        previous = state
        sleep()
| 28,499
|
Python
|
.py
| 699
| 32.13877
| 80
| 0.626413
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,677
|
hostname.py
|
freeipa_freeipa/ipalib/install/hostname.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Host name installer module
"""
from ipapython.install import typing
from ipapython.install.core import knob
from ipapython.ipautil import CheckedIPAddress
from . import service
from .service import prepare_only
class HostNameInstallInterface(service.ServiceInstallInterface):
    """
    Interface common to all service installers which create DNS address
    records for `host_name`
    """

    # IP addresses to publish in DNS; knob may be given multiple times
    ip_addresses = knob(
        typing.List[CheckedIPAddress], None,
        description="Specify IP address that should be added to DNS. This "
                    "option can be used multiple times",
        cli_names='--ip-address',
        cli_metavar='IP_ADDRESS',
    )
    ip_addresses = prepare_only(ip_addresses)

    @ip_addresses.validator
    def ip_addresses(self, values):
        # Reject any value that CheckedIPAddress cannot parse.
        for value in values:
            try:
                CheckedIPAddress(value)
            except Exception as e:
                raise ValueError("invalid IP address {0}: {1}".format(
                    value, e))

    all_ip_addresses = knob(
        None,
        description="All routable IP addresses configured on any interface "
                    "will be added to DNS",
    )
    all_ip_addresses = prepare_only(all_ip_addresses)

    no_host_dns = knob(
        None,
        description="Do not use DNS for hostname lookup during installation",
    )
    no_host_dns = prepare_only(no_host_dns)

    no_wait_for_dns = knob(
        None,
        description="do not wait until the host is resolvable in DNS",
    )
    no_wait_for_dns = prepare_only(no_wait_for_dns)
| 1,648
|
Python
|
.py
| 48
| 27.375
| 77
| 0.658491
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,678
|
kinit.py
|
freeipa_freeipa/ipalib/install/kinit.py
|
#
# Copyright (C) 2024 FreeIPA Contributors see COPYING for license
#
# code was moved to ipalib.kinit. This module is now an alias
__all__ = (
"validate_principal",
"kinit_keytab",
"kinit_password",
"kinit_armor",
"kinit_pkinit",
)
from ..kinit import (
validate_principal,
kinit_keytab,
kinit_password,
kinit_armor,
kinit_pkinit,
)
| 377
|
Python
|
.py
| 18
| 17.611111
| 66
| 0.677871
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,679
|
__init__.py
|
freeipa_freeipa/ipalib/install/__init__.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
| 71
|
Python
|
.py
| 3
| 22.666667
| 66
| 0.779412
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,680
|
dnsforwarders.py
|
freeipa_freeipa/ipalib/install/dnsforwarders.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""DNS forwarder and systemd-resolve1 helpers
"""
import ipaddress
import logging
import os
import socket
import dbus
from ipaplatform.paths import paths
from ipapython.dnsutil import get_ipa_resolver
logger = logging.getLogger(__name__)
_SYSTEMD_RESOLV_CONF = {
"/run/systemd/resolve/stub-resolv.conf",
"/run/systemd/resolve/resolv.conf",
"/lib/systemd/resolv.conf",
"/usr/lib/systemd/resolv.conf",
}
_DBUS_RESOLVE1_NAME = "org.freedesktop.resolve1"
_DBUS_RESOLVE1_PATH = "/org/freedesktop/resolve1"
_DBUS_RESOLVE1_MANAGER_IF = "org.freedesktop.resolve1.Manager"
_DBUS_PROPERTY_IF = "org.freedesktop.DBus.Properties"
# netlink interface index for resolve1 global settings and loopback
IFINDEX_GLOBAL = 0
IFINDEX_LOOPBACK = 1
def detect_resolve1_resolv_conf():
    """Return True when /etc/resolv.conf is managed by systemd-resolved.

    See man(5) NetworkManager.conf
    """
    resolv_conf = paths.RESOLV_CONF
    try:
        target = os.readlink(resolv_conf)
    except OSError:
        # a regular file (or a missing one) is not managed by resolved
        return False
    # os.readlink() may return a relative target; resolve it against the
    # directory holding /etc/resolv.conf before comparing.
    absolute = os.path.join(os.path.dirname(resolv_conf), target)
    return os.path.normpath(absolute) in _SYSTEMD_RESOLV_CONF
def get_resolve1_nameservers(*, with_ifindex=False):
    """Get list of DNS nameservers from systemd-resolved

    :return: list of tuples (ifindex, ipaddress_obj)
    """
    bus = dbus.SystemBus()
    try:
        proxy = bus.get_object(_DBUS_RESOLVE1_NAME, _DBUS_RESOLVE1_PATH)
        props = dbus.Interface(proxy, _DBUS_PROPERTY_IF)
        entries = props.Get(_DBUS_RESOLVE1_MANAGER_IF, "DNSEx")
    finally:
        bus.close()

    nameservers = []
    for ifindex, family, packed, port, sni in entries:
        # skip DNS-over-TLS / non-standard port configurations,
        # e.g. 1.2.3.4:9953#example.com
        if sni or port not in {0, 53}:
            continue
        # convert the packed address bytes to an ipaddress object
        if family == socket.AF_INET:
            addr = ipaddress.IPv4Address(bytes(packed))
        elif family == socket.AF_INET6:
            addr = ipaddress.IPv6Address(bytes(packed))
        else:
            # neither IPv4 nor IPv6
            continue
        if with_ifindex:
            # netlink interface index, see socket.if_nameindex()
            nameservers.append((int(ifindex), addr))
        else:
            nameservers.append(addr)
    return nameservers
def get_dnspython_nameservers(*, with_ifindex=False):
    """Get list of DNS nameservers from dnspython

    On Linux dnspython parses /etc/resolv.conf for us

    :return: list of tuples (ifindex, ipaddress_obj)
    """
    addresses = [
        ipaddress.ip_address(ns) for ns in get_ipa_resolver().nameservers
    ]
    if with_ifindex:
        # resolv.conf entries are global, not bound to an interface
        return [(IFINDEX_GLOBAL, addr) for addr in addresses]
    return addresses
def get_nameservers():
    """Get list of unique, non-loopback DNS nameservers

    :return: list of strings
    """
    if detect_resolve1_resolv_conf():
        logger.debug(
            "systemd-resolved detected, fetching nameservers from D-Bus"
        )
        detected = get_resolve1_nameservers(with_ifindex=True)
    else:
        logger.debug(
            "systemd-resolved not detected, parsing %s", paths.RESOLV_CONF
        )
        detected = get_dnspython_nameservers(with_ifindex=True)
    logger.debug("Detected nameservers: %r", detected)

    seen = set()
    result = []
    for ifindex, ip in detected:
        if ip in seen:
            # drop duplicates, keeping the first occurrence
            continue
        seen.add(ip)
        if ifindex == IFINDEX_LOOPBACK or ip.is_loopback:
            # local stub resolvers are not usable as forwarders
            continue
        result.append(str(ip))
    logger.debug("Use nameservers %r", result)
    return result
if __name__ == "__main__":
    # Ad-hoc diagnostic entry point: print what each detection path reports.
    from pprint import pprint

    print("systemd-resolved detected:", detect_resolve1_resolv_conf())
    print("Interfaces:", socket.if_nameindex())
    print("dnspython nameservers:")
    pprint(get_dnspython_nameservers(with_ifindex=True))
    print("resolve1 nameservers:")
    try:
        pprint(get_resolve1_nameservers(with_ifindex=True))
    except Exception as e:
        # D-Bus / systemd-resolved may be unavailable; show the reason
        print(e)
    print("nameservers:", get_nameservers())
| 4,470
|
Python
|
.py
| 125
| 29.272
| 76
| 0.667207
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,681
|
service.py
|
freeipa_freeipa/ipalib/install/service.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Base service installer module
"""
from ipalib.util import validate_domain_name
from ipapython.install import common, core, typing
from ipapython.install.core import group, knob
def prepare_only(obj):
    """
    Decorator which makes an installer attribute appear only in the prepare
    phase of the install
    """
    excluded = getattr(obj, '__exclude__', set())
    obj.__exclude__ = excluded | {'enroll'}
    return obj
def enroll_only(obj):
    """
    Decorator which makes an installer attribute appear only in the enroll
    phase of the install
    """
    excluded = getattr(obj, '__exclude__', set())
    obj.__exclude__ = excluded | {'prepare'}
    return obj
def master_install_only(obj):
    """
    Decorator which makes an installer attribute appear only in master install
    """
    excluded = getattr(obj, '__exclude__', set())
    obj.__exclude__ = excluded | {'replica_install'}
    return obj
def replica_install_only(obj):
    """
    Decorator which makes an installer attribute appear only in replica install
    """
    excluded = getattr(obj, '__exclude__', set())
    obj.__exclude__ = excluded | {'master_install'}
    return obj
def _does(cls, arg):
    """
    Return a subclass of *cls* in which every attribute whose
    ``__exclude__`` set contains *arg* is overridden by a property
    that raises AttributeError, effectively hiding it.
    """
    def remove(name):
        # property that pretends the attribute does not exist
        def removed(self):
            raise AttributeError(name)
        return property(removed)

    return type(
        cls.__name__,
        (cls,),
        {
            n: remove(n) for n in dir(cls)
            if arg in getattr(getattr(cls, n), '__exclude__', set())
        }
    )
def prepares(cls):
    """
    Return the installer class with every attribute excluded from the
    prepare phase stripped out.
    """
    stripped = _does(cls, 'prepare')
    return stripped
def enrolls(cls):
    """
    Return the installer class with every attribute excluded from the
    enroll phase stripped out.
    """
    stripped = _does(cls, 'enroll')
    return stripped
def installs_master(cls):
    """
    Return the installer class with every attribute not related to master
    install stripped out.
    """
    stripped = _does(cls, 'master_install')
    return stripped
def installs_replica(cls):
    """
    Return the installer class with every attribute not related to replica
    install stripped out.
    """
    stripped = _does(cls, 'replica_install')
    return stripped
@group
class ServiceInstallInterface(common.Installable,
                              common.Interactive,
                              core.Composite):
    """
    Interface common to all service installers

    Each ``knob()`` class attribute becomes an installer option; a
    ``@<knob>.validator`` method checks the user-supplied value and
    raises on error.
    """
    description = "Basic"

    domain_name = knob(
        str, None,
        description="primary DNS domain of the IPA deployment "
                    "(not necessarily related to the current hostname)",
        cli_names='--domain',
    )

    @domain_name.validator
    def domain_name(self, value):
        # reject syntactically invalid DNS domain names early
        validate_domain_name(value)

    servers = knob(
        typing.List[str], None,
        description="FQDN of IPA server",
        cli_names='--server',
        cli_metavar='SERVER',
    )

    realm_name = knob(
        str, None,
        description="Kerberos realm name of the IPA deployment (typically "
                    "an upper-cased name of the primary DNS domain)",
        cli_names='--realm',
    )

    @realm_name.validator
    def realm_name(self, value):
        # realms share DNS name syntax; validate with entity="realm"
        validate_domain_name(value, entity="realm")

    host_name = knob(
        str, None,
        description="The hostname of this machine (FQDN). If specified, the "
                    "hostname will be set and the system configuration will "
                    "be updated to persist over reboot. By default the result "
                    "of getfqdn() call from Python's socket module is used.",
        cli_names='--hostname',
    )

    ca_cert_files = knob(
        typing.List[str], None,
        description="load the CA certificate from this file",
        cli_names='--ca-cert-file',
        cli_metavar='FILE',
    )

    # sensitive=True keeps the value out of logs / interactive echo
    dm_password = knob(
        str, None,
        sensitive=True,
        description="Directory Manager password (for the existing master)",
    )
class ServiceAdminInstallInterface(ServiceInstallInterface):
    """
    Interface common to all service installers which require admin user
    authentication
    """
    # only meaningful during enroll, and only for replica installs
    principal = knob(
        str, None,
    )
    principal = enroll_only(principal)
    principal = replica_install_only(principal)

    # admin password; sensitive so it is never echoed or logged
    admin_password = knob(
        str, None,
        sensitive=True,
    )
    admin_password = enroll_only(admin_password)
| 4,396
|
Python
|
.py
| 138
| 25.173913
| 79
| 0.632875
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,682
|
sysrestore.py
|
freeipa_freeipa/ipalib/install/sysrestore.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""
Facade for ipalib.sysrestore for backwards compatibility
"""
from ipalib import sysrestore as real_sysrestore
class FileStore(real_sysrestore.FileStore):
    """Backwards-compatible facade over ipalib.sysrestore.FileStore.

    Only pins the historical default path/index-file arguments.
    """
    def __init__(self, path=real_sysrestore.SYSRESTORE_PATH,
                 index_file=real_sysrestore.SYSRESTORE_INDEXFILE):
        super(FileStore, self).__init__(path, index_file)
class StateFile(real_sysrestore.StateFile):
    """Backwards-compatible facade over ipalib.sysrestore.StateFile.

    Only pins the historical default path/state-file arguments.
    """
    def __init__(self, path=real_sysrestore.SYSRESTORE_PATH,
                 state_file=real_sysrestore.SYSRESTORE_STATEFILE):
        super(StateFile, self).__init__(path, state_file)
| 651
|
Python
|
.py
| 15
| 38.133333
| 66
| 0.728571
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,683
|
certstore.py
|
freeipa_freeipa/ipalib/install/certstore.py
|
# Authors:
# Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
LDAP shared certificate store.
"""
from pyasn1.error import PyAsn1Error
from ipapython.dn import DN
from ipapython.certdb import get_ca_nickname, TrustFlags
from ipalib import errors, x509
from ipalib.constants import IPA_CA_CN
def _parse_cert(cert):
    """Extract LDAP-safe identification data from a certificate.

    :param cert: IPACertificate-like object
    :return: tuple (subject, "issuer;serial", public_key_info)
    :raises ValueError: if the certificate cannot be decoded
    """
    try:
        subject_dn = DN(cert.subject)
        issuer_dn = DN(cert.issuer)
        serial = cert.serial_number
        public_key_info = cert.public_key_info_bytes
    except (ValueError, PyAsn1Error) as e:
        raise ValueError("failed to decode certificate: %s" % e)

    # escape semicolons so the values are safe in LDAP attribute values
    subject = str(subject_dn).replace('\\;', '\\3b')
    issuer = str(issuer_dn).replace('\\;', '\\3b')
    issuer_serial = '%s;%s' % (issuer, serial)

    return subject, issuer_serial, public_key_info
def init_ca_entry(entry, cert, nickname, trusted, ext_key_usage):
    """
    Initialize certificate store entry for a CA certificate.

    :param entry: LDAP entry to populate in place
    :param cert: IPACertificate
    :param nickname: value for the entry's cn
    :param trusted: True/False/None -> trusted/distrusted/unspecified
    :param ext_key_usage: set of EKU OIDs, or None to omit key usage
    """
    subject, issuer_serial, public_key = _parse_cert(cert)

    if ext_key_usage is not None:
        try:
            cert_eku = cert.extended_key_usage
        except ValueError as e:
            raise ValueError("failed to decode certificate: %s" % e)
        if cert_eku is not None:
            # merge the certificate's own EKUs, minus the well-known ones
            # that are handled via the explicit key policy
            cert_eku -= {x509.EKU_SERVER_AUTH, x509.EKU_CLIENT_AUTH,
                         x509.EKU_EMAIL_PROTECTION, x509.EKU_CODE_SIGNING,
                         x509.EKU_ANY, x509.EKU_PLACEHOLDER}
            ext_key_usage = ext_key_usage | cert_eku

    entry['objectClass'] = ['ipaCertificate', 'pkiCA', 'ipaKeyPolicy']
    entry['cn'] = [nickname]
    entry['ipaCertSubject'] = [subject]
    entry['ipaCertIssuerSerial'] = [issuer_serial]
    entry['ipaPublicKey'] = [public_key]
    entry['cACertificate;binary'] = [cert]
    if trusted is not None:
        entry['ipaKeyTrust'] = ['trusted' if trusted else 'distrusted']
    if ext_key_usage is not None:
        ext_key_usage = list(ext_key_usage)
        if not ext_key_usage:
            # an empty EKU set means "no usage allowed"; LDAP cannot store
            # an empty attribute, so a placeholder OID is used instead
            ext_key_usage.append(x509.EKU_PLACEHOLDER)
        entry['ipaKeyExtUsage'] = ext_key_usage
def update_compat_ca(ldap, base_dn, cert):
    """
    Update the CA certificate in cn=CAcert,cn=ipa,cn=etc,SUFFIX.

    Creates the legacy (pre-certstore) entry if it does not exist yet;
    a no-op modification is silently ignored.
    """
    dn = DN(('cn', 'CAcert'), ('cn', 'ipa'), ('cn', 'etc'), base_dn)
    try:
        entry = ldap.get_entry(dn, attrs_list=['cACertificate;binary'])
        entry.single_value['cACertificate;binary'] = cert
        ldap.update_entry(entry)
    except errors.NotFound:
        # legacy entry missing -> create it from scratch
        entry = ldap.make_entry(dn)
        entry['objectClass'] = ['nsContainer', 'pkiCA']
        entry.single_value['cn'] = 'CAcert'
        entry.single_value['cACertificate;binary'] = cert
        ldap.add_entry(entry)
    except errors.EmptyModlist:
        # stored certificate is already up to date
        pass
def clean_old_config(ldap, base_dn, dn, config_ipa, config_compat):
    """
    Remove ipaCA and compatCA flags from their previous carriers.

    :param dn: entry that now carries the flags; it is left untouched
    :param config_ipa: if True, strip 'ipaCA' from other entries
    :param config_compat: if True, strip 'compatCA' from other entries
    """
    if not config_ipa and not config_compat:
        # nothing was (re)assigned, nothing to clean up
        return

    try:
        result, _truncated = ldap.find_entries(
            base_dn=DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
                       base_dn),
            filter='(|(ipaConfigString=ipaCA)(ipaConfigString=compatCA))',
            attrs_list=['ipaConfigString'])
    except errors.NotFound:
        return

    for entry in result:
        if entry.dn == dn:
            # skip the entry that legitimately carries the flags now
            continue
        # iterate over a copy since we mutate the list while looping
        for config in list(entry['ipaConfigString']):
            if config.lower() == 'ipaca' and config_ipa:
                entry['ipaConfigString'].remove(config)
            elif config.lower() == 'compatca' and config_compat:
                entry['ipaConfigString'].remove(config)
        try:
            ldap.update_entry(entry)
        except errors.EmptyModlist:
            pass
def add_ca_cert(ldap, base_dn, cert, nickname, trusted=None,
                ext_key_usage=None, config_ipa=False, config_compat=False):
    """
    Add new entry for a CA certificate to the certificate store.

    :param config_ipa: mark this entry as the IPA CA
    :param config_compat: also mirror the cert to the legacy compat entry
    """
    container_dn = DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
                      base_dn)
    dn = DN(('cn', nickname), container_dn)
    entry = ldap.make_entry(dn)
    init_ca_entry(entry, cert, nickname, trusted, ext_key_usage)
    if config_ipa:
        entry.setdefault('ipaConfigString', []).append('ipaCA')
    if config_compat:
        entry.setdefault('ipaConfigString', []).append('compatCA')

    if config_compat:
        # keep the pre-certstore cn=CAcert entry in sync
        update_compat_ca(ldap, base_dn, cert)

    ldap.add_entry(entry)
    # flags may only live on one entry; strip them everywhere else
    clean_old_config(ldap, base_dn, dn, config_ipa, config_compat)
def update_ca_cert(ldap, base_dn, cert, trusted=None, ext_key_usage=None,
                   config_ipa=False, config_compat=False):
    """
    Update existing entry for a CA certificate in the certificate store.

    The entry is located by certificate subject.  A new certificate with
    the same subject/public key is appended; key trust and EKU are merged.

    :raises errors.NotFound: if no entry with a matching subject exists
    :raises ValueError: on subject/key mismatch or inconsistent trust
    """
    subject, issuer_serial, public_key = _parse_cert(cert)

    filter = ldap.make_filter({'ipaCertSubject': subject})
    result, _truncated = ldap.find_entries(
        base_dn=DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
                   base_dn),
        filter=filter,
        attrs_list=['cn', 'ipaCertSubject', 'ipaCertIssuerSerial',
                    'ipaPublicKey', 'ipaKeyTrust', 'ipaKeyExtUsage',
                    'ipaConfigString', 'cACertificate;binary'])
    entry = result[0]
    dn = entry.dn

    for old_cert in entry['cACertificate;binary']:
        # Check if we are adding a new cert
        if old_cert == cert:
            break
    else:
        # We are adding a new cert, validate it
        if entry.single_value['ipaCertSubject'].lower() != subject.lower():
            raise ValueError("subject name mismatch")
        if entry.single_value['ipaPublicKey'] != public_key:
            raise ValueError("subject public key info mismatch")
        entry['ipaCertIssuerSerial'].append(issuer_serial)
        entry['cACertificate;binary'].append(cert)

    # Update key trust
    if trusted is not None:
        old_trust = entry.single_value.get('ipaKeyTrust')
        new_trust = 'trusted' if trusted else 'distrusted'
        if old_trust is not None and old_trust.lower() != new_trust:
            # never silently flip trusted <-> distrusted
            raise ValueError("inconsistent trust")
        entry.single_value['ipaKeyTrust'] = new_trust

    # Update extended key usage (skipped for explicitly distrusted certs)
    if trusted is not False:
        if ext_key_usage is not None:
            old_eku = set(entry.get('ipaKeyExtUsage', []))
            old_eku.discard(x509.EKU_PLACEHOLDER)
            new_eku = old_eku | ext_key_usage
            if not new_eku:
                # empty EKU cannot be stored; use the placeholder OID
                new_eku.add(x509.EKU_PLACEHOLDER)
            entry['ipaKeyExtUsage'] = list(new_eku)
    else:
        entry.pop('ipaKeyExtUsage', None)

    # Update configuration flags
    is_ipa = False
    is_compat = False
    for config in entry.get('ipaConfigString', []):
        if config.lower() == 'ipaca':
            is_ipa = True
        elif config.lower() == 'compatca':
            is_compat = True
    if config_ipa and not is_ipa:
        entry.setdefault('ipaConfigString', []).append('ipaCA')
    if config_compat and not is_compat:
        entry.setdefault('ipaConfigString', []).append('compatCA')

    if is_compat or config_compat:
        # keep the legacy cn=CAcert entry in sync
        update_compat_ca(ldap, base_dn, cert)

    ldap.update_entry(entry)
    # flags may only live on one entry; strip them everywhere else
    clean_old_config(ldap, base_dn, dn, config_ipa, config_compat)
def put_ca_cert(ldap, base_dn, cert, nickname, trusted=None,
                ext_key_usage=None, config_ipa=False, config_compat=False):
    """
    Add or update entry for a CA certificate in the certificate store.

    :param cert: IPACertificate
    """
    try:
        update_ca_cert(ldap, base_dn, cert, trusted, ext_key_usage,
                       config_ipa=config_ipa, config_compat=config_compat)
    except errors.NotFound:
        # no entry with a matching subject yet -> create one
        add_ca_cert(ldap, base_dn, cert, nickname, trusted, ext_key_usage,
                    config_ipa=config_ipa, config_compat=config_compat)
    except errors.EmptyModlist:
        # entry already up to date
        pass
def make_compat_ca_certs(certs, realm, ipa_ca_subject):
    """
    Make CA certificates and associated key policy from DER certificates.
    """
    result = []
    for cert in certs:
        subject, _issuer_serial, _public_key_info = _parse_cert(cert)
        subject = DN(subject)

        if ipa_ca_subject is not None and subject == DN(ipa_ca_subject):
            # the IPA CA itself: realm nickname plus the full EKU set
            nickname = get_ca_nickname(realm)
            ext_key_usage = {
                x509.EKU_SERVER_AUTH,
                x509.EKU_CLIENT_AUTH,
                x509.EKU_EMAIL_PROTECTION,
                x509.EKU_CODE_SIGNING,
            }
        else:
            nickname = str(subject)
            ext_key_usage = {x509.EKU_SERVER_AUTH}

        # compat certificates are always marked trusted
        result.append((cert, nickname, True, ext_key_usage))

    return result
def get_ca_certs(ldap, base_dn, compat_realm, compat_ipa_ca,
                 filter_subject=None):
    """
    Get CA certificates and associated key policy from the certificate store.

    Falls back to the legacy cn=CAcert entry when the certificate store
    container does not exist (pre-certstore deployments).

    :param filter_subject: subject DN (or list of them) to restrict results
    :return: list of (cert, nickname, trusted, ext_key_usage) tuples
    :raises errors.NotFound: when no matching certificate is found
    """
    if filter_subject is not None:
        if not isinstance(filter_subject, list):
            filter_subject = [filter_subject]
        # escape semicolons the same way _parse_cert() stores subjects
        filter_subject = [str(subj).replace('\\;', '\\3b')
                          for subj in filter_subject]

    certs = []
    config_dn = DN(('cn', 'ipa'), ('cn', 'etc'), base_dn)
    container_dn = DN(('cn', 'certificates'), config_dn)
    try:
        # Search the certificate store for CA certificate entries
        filters = ['(objectClass=ipaCertificate)', '(objectClass=pkiCA)']
        if filter_subject:
            filter = ldap.make_filter({'ipaCertSubject': filter_subject})
            filters.append(filter)
        result, _truncated = ldap.find_entries(
            base_dn=container_dn,
            filter=ldap.combine_filters(filters, ldap.MATCH_ALL),
            attrs_list=['cn', 'ipaCertSubject', 'ipaCertIssuerSerial',
                        'ipaPublicKey', 'ipaKeyTrust', 'ipaKeyExtUsage',
                        'cACertificate;binary'])
        for entry in result:
            nickname = entry.single_value['cn']
            # map the stored trust string to True/False/None
            trusted = entry.single_value.get('ipaKeyTrust', 'unknown').lower()
            if trusted == 'trusted':
                trusted = True
            elif trusted == 'distrusted':
                trusted = False
            else:
                trusted = None
            ext_key_usage = entry.get('ipaKeyExtUsage')
            if ext_key_usage is not None:
                ext_key_usage = set(str(p) for p in ext_key_usage)
                # the placeholder OID stands for "empty EKU"; drop it
                ext_key_usage.discard(x509.EKU_PLACEHOLDER)

            for cert in entry.get('cACertificate;binary', []):
                try:
                    _parse_cert(cert)
                except ValueError:
                    # undecodable cert invalidates the whole result so far
                    certs = []
                    break
                certs.append((cert, nickname, trusted, ext_key_usage))
    except errors.NotFound:
        try:
            ldap.get_entry(container_dn, [''])
        except errors.NotFound:
            # Fallback to cn=CAcert,cn=ipa,cn=etc,SUFFIX
            dn = DN(('cn', 'CAcert'), config_dn)
            entry = ldap.get_entry(dn, ['cACertificate;binary'])
            cert = entry.single_value['cACertificate;binary']
            try:
                subject, _issuer_serial, _public_key_info = _parse_cert(cert)
            except ValueError:
                pass
            else:
                if filter_subject is not None and subject not in filter_subject:
                    raise errors.NotFound(reason="no matching entry found")

                if compat_ipa_ca:
                    ca_subject = subject
                else:
                    ca_subject = None
                certs = make_compat_ca_certs([cert], compat_realm, ca_subject)

    if certs:
        return certs
    else:
        raise errors.NotFound(reason="no such entry")
def trust_flags_to_key_policy(trust_flags):
    """
    Convert certutil trust flags to certificate store key policy.

    Drops the leading has_key flag; the remainder is the
    (trusted, ca, ext_key_usage) key policy triple.
    """
    return trust_flags[1:]
def key_policy_to_trust_flags(trusted, ca, ext_key_usage):
    """
    Convert certificate store key policy to certutil trust flags.
    """
    # has_key is always False for certificates from the LDAP store
    return TrustFlags(False, trusted, ca, ext_key_usage)
def put_ca_cert_nss(ldap, base_dn, cert, nickname, trust_flags,
                    config_ipa=False, config_compat=False):
    """
    Add or update entry for a CA certificate in the certificate store.

    :param cert: IPACertificate
    :param trust_flags: certutil TrustFlags; must describe a CA cert
    """
    trusted, ca, ext_key_usage = trust_flags_to_key_policy(trust_flags)
    # explicitly non-CA trust flags are rejected (ca is False; None passes)
    if ca is False:
        raise ValueError("must be CA certificate")

    put_ca_cert(ldap, base_dn, cert, nickname,
                trusted, ext_key_usage, config_ipa, config_compat)
def get_ca_certs_nss(ldap, base_dn, compat_realm, compat_ipa_ca,
                     filter_subject=None):
    """
    Get CA certificates and associated trust flags from the certificate store.

    :return: list of (cert, nickname, trust_flags) tuples
    """
    certs = get_ca_certs(ldap, base_dn, compat_realm, compat_ipa_ca,
                         filter_subject=filter_subject)
    # every store entry is a CA, hence ca=True in the trust flags
    return [
        (cert, nickname, key_policy_to_trust_flags(trusted, True, eku))
        for cert, nickname, trusted, eku in certs
    ]
def get_ca_subject(ldap, container_ca, base_dn):
    """
    Look for the IPA CA certificate subject.

    :return: subject DN of the IPA CA
    """
    dn = DN(('cn', IPA_CA_CN), container_ca, base_dn)
    try:
        cacert_subject = ldap.get_entry(dn)['ipacasubjectdn'][0]
    except errors.NotFound:
        # if the entry doesn't exist, we are dealing with a pre-v4.4
        # installation, where the default CA subject was always based
        # on the subject_base.
        attrs = ldap.get_ipa_config()
        subject_base = attrs.get('ipacertificatesubjectbase')[0]
        cacert_subject = DN(('CN', 'Certificate Authority'), subject_base)

    return cacert_subject
| 14,564
|
Python
|
.py
| 340
| 33.888235
| 80
| 0.617389
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,684
|
rpcserver.py
|
freeipa_freeipa/ipaserver/rpcserver.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008-2016 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
RPC server.
Also see the `ipalib.rpc` module.
"""
from __future__ import absolute_import
import logging
from xml.sax.saxutils import escape
import os
import time
import traceback
from io import BytesIO
from sys import version_info
from urllib.parse import parse_qs
from xmlrpc.client import Fault
import gssapi
import requests
import ldap.controls
from pyasn1.type import univ, namedtype
from pyasn1.codec.ber import encoder
import six
from ipalib import plugable, errors
from ipalib.capabilities import VERSION_WITHOUT_CAPABILITIES
from ipalib.frontend import Local
from ipalib.install.kinit import kinit_armor, kinit_password
from ipalib.backend import Executioner
from ipalib.errors import (
PublicError, InternalError, JSONError,
CCacheError, RefererError, InvalidSessionPassword, NotFound, ACIError,
ExecutionError, PasswordExpired, KrbPrincipalExpired, KrbPrincipalWrongFAST,
UserLocked)
from ipalib.request import context, destroy_context
from ipalib.rpc import xml_dumps, xml_loads
from ipalib.ipajson import json_encode_binary, json_decode_binary
from ipapython.dn import DN
from ipaserver.plugins.ldap2 import ldap2
from ipalib.backend import Backend
from ipalib.krb_utils import (
get_credentials_if_valid)
from ipapython import kerberos
from ipapython import ipautil
from ipaplatform.paths import paths
from ipapython.version import VERSION
from ipalib.text import _
from base64 import b64decode, b64encode
from requests.auth import AuthBase
if six.PY3:
    # keep the Py2 name available; this module still uses unicode(...)
    unicode = str

# time.perf_counter_ns appeared in Python 3.7.
if version_info < (3, 7):
    time.perf_counter_ns = lambda: int(time.perf_counter() * 10**9)

logger = logging.getLogger(__name__)

# canonical WSGI status lines used throughout this module
HTTP_STATUS_SUCCESS = '200 Success'
HTTP_STATUS_SERVER_ERROR = '500 Internal Server Error'
HTTP_STATUS_SERVICE_UNAVAILABLE = "503 Service Unavailable"
# Static HTML bodies for non-JSON status responses; the %(...)s
# placeholders are filled in (HTML-escaped) by the HTTP_Status helpers.
_not_found_template = """<html>
<head>
<title>404 Not Found</title>
</head>
<body>
<h1>Not Found</h1>
<p>
The requested URL <strong>%(url)s</strong> was not found on this server.
</p>
</body>
</html>"""

_bad_request_template = """<html>
<head>
<title>400 Bad Request</title>
</head>
<body>
<h1>Bad Request</h1>
<p>
<strong>%(message)s</strong>
</p>
</body>
</html>"""

_internal_error_template = """<html>
<head>
<title>500 Internal Server Error</title>
</head>
<body>
<h1>Internal Server Error</h1>
<p>
<strong>%(message)s</strong>
</p>
</body>
</html>"""

_unauthorized_template = """<html>
<head>
<title>401 Unauthorized</title>
</head>
<body>
<h1>Invalid Authentication</h1>
<p>
<strong>%(message)s</strong>
</p>
</body>
</html>"""

_service_unavailable_template = """<html>
<head>
<title>503 Service Unavailable</title>
</head>
<body>
<h1>Service Unavailable</h1>
<p>
<strong>%(message)s</strong>
</p>
</body>
</html>"""

_success_template = """<html>
<head>
<title>200 Success</title>
</head>
<body>
<h1>%(title)s</h1>
<p>
<strong>%(message)s</strong>
</p>
</body>
</html>"""
class HTTP_Status(plugable.Plugin):
    """Mixin providing Referer checking and plain-HTML status responses."""

    def check_referer(self, environ):
        # Reject requests whose Referer header is missing or does not point
        # at this server's /ipa tree; in-tree (developer) mode skips the
        # host comparison.
        if "HTTP_REFERER" not in environ:
            logger.error("Rejecting request with missing Referer")
            return False
        if (not environ["HTTP_REFERER"].startswith(
                "https://%s/ipa" % self.api.env.host)
                and not self.env.in_tree):
            logger.error("Rejecting request with bad Referer %s",
                         environ["HTTP_REFERER"])
            return False
        logger.debug("Valid Referer %s", environ["HTTP_REFERER"])
        return True

    def not_found(self, environ, start_response, url, message):
        """
        Return a 404 Not Found error.
        """
        status = '404 Not Found'
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]

        logger.info('%s: URL="%s", %s', status, url, message)
        start_response(status, response_headers)
        # escape() guards against HTML injection via the requested URL
        output = _not_found_template % dict(url=escape(url))
        return [output.encode('utf-8')]

    def bad_request(self, environ, start_response, message):
        """
        Return a 400 Bad Request error.
        """
        status = '400 Bad Request'
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]

        logger.info('%s: %s', status, message)
        start_response(status, response_headers)
        output = _bad_request_template % dict(message=escape(message))
        return [output.encode('utf-8')]

    def internal_error(self, environ, start_response, message):
        """
        Return a 500 Internal Server Error.
        """
        status = HTTP_STATUS_SERVER_ERROR
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]

        logger.error('%s: %s', status, message)
        start_response(status, response_headers)
        output = _internal_error_template % dict(message=escape(message))
        return [output.encode('utf-8')]

    def unauthorized(self, environ, start_response, message, reason):
        """
        Return a 401 Unauthorized error.
        """
        status = '401 Unauthorized'
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]
        if reason:
            # machine-readable rejection reason for clients
            response_headers.append(('X-IPA-Rejection-Reason', reason))

        logger.info('%s: %s', status, message)
        start_response(status, response_headers)
        output = _unauthorized_template % dict(message=escape(message))
        return [output.encode('utf-8')]

    def service_unavailable(self, environ, start_response, message):
        """
        Return a 503 Service Unavailable
        """
        status = HTTP_STATUS_SERVICE_UNAVAILABLE
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]

        logger.error('%s: %s', status, message)
        start_response(status, response_headers)
        output = _service_unavailable_template % dict(message=escape(message))
        return [output.encode('utf-8')]
def read_input(environ):
    """
    Read the request body from environ['wsgi.input'].

    Returns None when Content-Length is missing or not an integer.
    """
    raw_length = environ.get('CONTENT_LENGTH')
    try:
        length = int(raw_length)
    except (TypeError, ValueError):
        # header missing (TypeError) or malformed (ValueError)
        return None
    body = environ['wsgi.input'].read(length)
    return body.decode('utf-8')
def params_2_args_options(params):
    """Split JSON-RPC ``params`` into an ``(args, options)`` pair.

    Missing positions default to an empty tuple / empty dict.
    """
    args = params[0] if len(params) >= 1 else tuple()
    options = params[1] if len(params) >= 2 else dict()
    return (args, options)
def nicify_query(query, encoding='utf-8'):
    """Yield (key, value) pairs with parse_qs-style list values collapsed.

    Empty list -> None, single item -> decoded str, several -> tuple of str.
    """
    if not query:
        return
    for key, values in query.items():
        if not values:
            yield (key, None)
        elif len(values) == 1:
            yield (key, values[0].decode(encoding))
        else:
            yield (key, tuple(v.decode(encoding) for v in values))
def extract_query(environ):
    """
    Return the query as a ``dict`` (empty when no query is present).

    The parsed query is also cached in ``environ['wsgi.query']``.
    """
    method = environ['REQUEST_METHOD']
    qstr = None
    if method == 'POST':
        if environ['CONTENT_TYPE'] == 'application/x-www-form-urlencoded':
            qstr = read_input(environ)
    elif method == 'GET':
        qstr = environ['QUERY_STRING']

    query = dict(nicify_query(parse_qs(qstr))) if qstr else {}
    environ['wsgi.query'] = query
    return query
class wsgi_dispatch(Executioner, HTTP_Status):
    """
    WSGI routing middleware and entry point into IPA server.

    The `wsgi_dispatch` plugin is the entry point into the IPA server.
    It dispatches the request to the appropriate wsgi application
    handler which is specific to the authentication and RPC mechanism.
    """

    def __init__(self, api):
        super(wsgi_dispatch, self).__init__(api)
        # PATH_INFO -> mounted WSGI application
        self.__apps = {}

    def __iter__(self):
        for key in sorted(self.__apps):
            yield key

    def __getitem__(self, key):
        return self.__apps[key]

    def __contains__(self, key):
        return key in self.__apps

    def __call__(self, environ, start_response):
        logger.debug('WSGI wsgi_dispatch.__call__:')
        try:
            return self.route(environ, start_response)
        finally:
            # request-local state must never leak between requests
            destroy_context()

    def _on_finalize(self):
        self.url = self.env['mount_ipa']
        super(wsgi_dispatch, self)._on_finalize()

    def route(self, environ, start_response):
        # dispatch on the exact PATH_INFO -- no prefix matching
        key = environ.get('PATH_INFO')
        if key in self.__apps:
            app = self.__apps[key]
            return app(environ, start_response)
        url = environ['SCRIPT_NAME'] + environ['PATH_INFO']
        return self.not_found(environ, start_response, url,
                              'URL fragment "%s" does not have a handler' % (key))

    def mount(self, app, key):
        """
        Mount the WSGI application *app* at *key*.
        """
        # if self.__islocked__():
        #     raise Exception('%s.mount(): locked, cannot mount %r at %r' % (
        #         self.name, app, key)
        #     )
        if key in self.__apps:
            # mounting twice at the same key is a programming error
            raise Exception('%s.mount(): cannot replace %r with %r at %r' % (
                self.name, self.__apps[key], app, key)
            )
        logger.debug('Mounting %r at %r', app, key)
        self.__apps[key] = app
class WSGIExecutioner(Executioner):
"""
Base class for execution backends with a WSGI application interface.
"""
headers = None
content_type = None
key = ''
_system_commands = {}
def _on_finalize(self):
self.url = self.env.mount_ipa + self.key
super(WSGIExecutioner, self)._on_finalize()
if 'wsgi_dispatch' in self.api.Backend:
self.api.Backend.wsgi_dispatch.mount(self, self.key)
def _get_command(self, name):
try:
# assume version 1 for unversioned command calls
command = self.api.Command[name, '1']
except KeyError:
try:
command = self.api.Command[name]
except KeyError:
command = None
if command is None or isinstance(command, Local):
raise errors.CommandError(name=name)
return command
def wsgi_execute(self, environ):
result = None
error = None
_id = None
name = None
args = ()
options = {}
command = None
e = None
if 'HTTP_REFERER' not in environ:
return self.marshal(result, RefererError(referer='missing'), _id)
if not environ['HTTP_REFERER'].startswith('https://%s/ipa' % self.api.env.host) and not self.env.in_tree:
return self.marshal(result, RefererError(referer=environ['HTTP_REFERER']), _id)
if self.api.env.debug:
time_start = time.perf_counter_ns()
try:
if 'KRB5CCNAME' in environ:
setattr(context, "ccache_name", environ['KRB5CCNAME'])
if ('HTTP_ACCEPT_LANGUAGE' in environ):
lang_reg_w_q = environ['HTTP_ACCEPT_LANGUAGE'].split(',')[0]
lang_reg = lang_reg_w_q.split(';')[0]
lang = lang_reg.split('-')[0]
setattr(context, "languages", [lang])
if (
environ.get('CONTENT_TYPE', '').startswith(self.content_type)
and environ['REQUEST_METHOD'] == 'POST'
):
data = read_input(environ)
(name, args, options, _id) = self.unmarshal(data)
else:
(name, args, options, _id) = self.simple_unmarshal(environ)
if name in self._system_commands:
result = self._system_commands[name](self, *args, **options)
else:
command = self._get_command(name)
result = command(*args, **options)
except PublicError as e:
if self.api.env.debug:
logger.debug('WSGI wsgi_execute PublicError: %s',
traceback.format_exc())
error = e
except Exception as e:
logger.exception(
'non-public: %s: %s', e.__class__.__name__, str(e)
)
error = InternalError()
finally:
if hasattr(context, "languages"):
delattr(context, "languages")
principal = getattr(context, 'principal', 'UNKNOWN')
if command is not None:
try:
params = command.args_options_2_params(*args, **options)
except Exception as e:
if self.api.env.debug:
time_end = time.perf_counter_ns()
logger.info(
'exception %s caught when converting options: %s',
e.__class__.__name__, str(e)
)
# get at least some context of what is going on
params = options
error = e
else:
if self.api.env.debug:
time_end = time.perf_counter_ns()
if error:
result_string = type(error).__name__
else:
result_string = 'SUCCESS'
logger.info('[%s] %s: %s(%s): %s',
type(self).__name__,
principal,
name,
', '.join(command._repr_iter(**params)),
result_string)
if self.api.env.debug:
logger.debug('[%s] %s: %s(%s): %s %s',
type(self).__name__,
principal,
name,
', '.join(command._repr_iter(**params)),
result_string,
'etime=' + str(time_end - time_start))
else:
logger.info('[%s] %s: %s: %s',
type(self).__name__,
principal,
name,
type(error).__name__)
version = options.get('version', VERSION_WITHOUT_CAPABILITIES)
return self.marshal(result, error, _id, version)
def simple_unmarshal(self, environ):
name = environ['PATH_INFO'].strip('/')
options = extract_query(environ)
return (name, tuple(), options, None)
    def __call__(self, environ, start_response):
        """
        WSGI application for execution.

        Runs the RPC request via ``wsgi_execute()`` and turns any
        unexpected failure into a plain-text 500 response, so a broken
        handler can never leave the WSGI server without a reply.
        """
        logger.debug('WSGI WSGIExecutioner.__call__:')
        try:
            status = HTTP_STATUS_SUCCESS
            response = self.wsgi_execute(environ)
            if self.headers:
                headers = self.headers
            else:
                headers = [('Content-Type',
                            self.content_type + '; charset=utf-8')]
        except Exception:
            # wsgi_execute() already maps command errors to marshalled
            # responses; reaching here means the executioner itself broke.
            logger.exception('WSGI %s.__call__():', self.name)
            status = HTTP_STATUS_SERVER_ERROR
            response = status.encode('utf-8')
            headers = [('Content-Type', 'text/plain; charset=utf-8')]

        # Propagate a session-logout marker set by the command, if any.
        logout_cookie = getattr(context, 'logout_cookie', None)
        if logout_cookie is not None:
            headers.append(('IPASESSION', logout_cookie))
        start_response(status, headers)
        return [response]
def unmarshal(self, data):
raise NotImplementedError('%s.unmarshal()' % type(self).__name__)
    def marshal(self, result, error, _id=None,
                version=VERSION_WITHOUT_CAPABILITIES):
        # Abstract hook: serialize *result* or *error* into the wire
        # format of the concrete server (JSON or XML-RPC). Subclasses
        # must override; the base class only reports which subclass
        # forgot to.
        raise NotImplementedError('%s.marshal()' % type(self).__name__)
class jsonserver(WSGIExecutioner, HTTP_Status):
    """
    JSON RPC server.

    For information on the JSON-RPC spec, see:
    http://json-rpc.org/wiki/specification
    """

    content_type = 'application/json'

    def __call__(self, environ, start_response):
        '''
        Handle one JSON-RPC request; delegates to WSGIExecutioner.
        '''
        logger.debug('WSGI jsonserver.__call__:')
        response = super(jsonserver, self).__call__(environ, start_response)
        return response

    def marshal(self, result, error, _id=None,
                version=VERSION_WITHOUT_CAPABILITIES):
        """Serialize a JSON-RPC response body as UTF-8 bytes.

        Exactly one of *result*/*error* is meaningful; a PublicError is
        flattened into the JSON-RPC error object shape.
        """
        if error:
            assert isinstance(error, PublicError)
            error = dict(
                code=error.errno,
                message=error.strerror,
                data=error.kw,
                name=unicode(error.__class__.__name__),
            )
        principal = getattr(context, 'principal', 'UNKNOWN')
        response = dict(
            result=result,
            error=error,
            id=_id,
            principal=unicode(principal),
            version=unicode(VERSION),
        )
        # Pretty-print only in debug mode to keep production payloads small.
        dump = json_encode_binary(
            response, version, pretty_print=self.api.env.debug
        )
        return dump.encode('utf-8')

    def unmarshal(self, data):
        """Parse and validate a JSON-RPC request.

        Returns (method, args, options, _id); raises JSONError on any
        structural problem so the client gets a well-formed RPC error.
        """
        try:
            d = json_decode_binary(data)
        except ValueError as e:
            raise JSONError(error=e)
        if not isinstance(d, dict):
            raise JSONError(error=_('Request must be a dict'))
        if 'method' not in d:
            raise JSONError(error=_('Request is missing "method"'))
        if 'params' not in d:
            raise JSONError(error=_('Request is missing "params"'))
        method = d['method']
        params = d['params']
        _id = d.get('id')
        if not isinstance(params, (list, tuple)):
            raise JSONError(error=_('params must be a list'))
        if len(params) != 2:
            raise JSONError(error=_('params must contain [args, options]'))
        args = params[0]
        if not isinstance(args, (list, tuple)):
            raise JSONError(error=_('params[0] (aka args) must be a list'))
        options = params[1]
        if not isinstance(options, dict):
            raise JSONError(error=_('params[1] (aka options) must be a dict'))
        # Option keys arrive as unicode; commands expect str keyword names.
        options = dict((str(k), v) for (k, v) in options.items())
        return (method, args, options, _id)
class NegotiateAuth(AuthBase):
    """Negotiate Auth using python GSSAPI.

    A ``requests`` auth plugin that performs the SPNEGO/Negotiate
    handshake against ``target_host`` using credentials from
    ``ccache_name`` (or the default ccache when None).
    """
    def __init__(self, target_host, ccache_name=None):
        self.context = None  # gssapi.SecurityContext, created lazily
        self.target_host = target_host
        self.ccache_name = ccache_name

    def __call__(self, request):
        # Attach the first Negotiate token and hook follow-up rounds.
        self.initial_step(request)
        request.register_hook('response', self.handle_response)
        return request

    def deregister(self, response):
        response.request.deregister_hook('response', self.handle_response)

    def _get_negotiate_token(self, response):
        # Extract and base64-decode the server's Negotiate token from the
        # WWW-Authenticate header, or return None when absent.
        token = None
        if response is not None:
            h = response.headers.get('www-authenticate', '')
            if h.startswith('Negotiate'):
                val = h[h.find('Negotiate') + len('Negotiate'):].strip()
                if len(val) > 0:
                    token = b64decode(val)
        return token

    def _set_authz_header(self, request, token):
        request.headers['Authorization'] = (
            'Negotiate {}'.format(b64encode(token).decode('utf-8')))

    def initial_step(self, request, response=None):
        # First (or continued) GSSAPI step; builds the security context
        # on first use.
        if self.context is None:
            store = {'ccache': self.ccache_name}
            creds = gssapi.Credentials(usage='initiate', store=store)
            name = gssapi.Name('HTTP@{0}'.format(self.target_host),
                               name_type=gssapi.NameType.hostbased_service)
            self.context = gssapi.SecurityContext(creds=creds, name=name,
                                                  usage='initiate')

        in_token = self._get_negotiate_token(response)
        out_token = self.context.step(in_token)
        self._set_authz_header(request, out_token)

    def handle_response(self, response, **kwargs):
        # Continue the multi-round handshake on 401 responses; recurses
        # until the context completes or no further token is produced.
        status = response.status_code
        if status >= 400 and status != 401:
            return response

        in_token = self._get_negotiate_token(response)
        if in_token is not None:
            out_token = self.context.step(in_token)
            if self.context.complete:
                return response
            elif not out_token:
                return response

            self._set_authz_header(response.request, out_token)

            # use response so we can make another request
            _ = response.content  # pylint: disable=unused-variable
            response.raw.release_conn()
            newresp = response.connection.send(response.request, **kwargs)
            newresp.history.append(response)

            return self.handle_response(newresp, **kwargs)

        return response
class KerberosSession(HTTP_Status):
    '''
    Functionally shared by all RPC handlers using both sessions and
    Kerberos. This class must be implemented as a mixin class rather
    than the more obvious technique of subclassing because the classes
    needing this do not share a common base class.
    '''

    def need_login(self, start_response):
        # Emit a bare 401 so the client falls back to a login endpoint.
        status = '401 Unauthorized'
        headers = []
        response = b''

        # Forward a pending logout marker so the browser drops its cookie.
        logout_cookie = getattr(context, 'logout_cookie', None)
        if logout_cookie is not None:
            headers.append(('IPASESSION', logout_cookie))

        logger.debug('%s need login', status)

        start_response(status, headers)
        return [response]

    def get_environ_creds(self, environ):
        """Return a validated ccache name from the WSGI environ, or None.

        None means the caller should redirect the client to login.
        """
        # If we have a ccache ...
        ccache_name = environ.get('KRB5CCNAME')
        if ccache_name is None:
            logger.debug('no ccache, need login')
            return None

        # ... make sure we have a name ...
        principal = environ.get('GSS_NAME')
        if principal is None:
            logger.debug('no Principal Name, need login')
            return None

        # ... and use it to resolve the ccache name (Issue: 6972 )
        gss_name = gssapi.Name(principal, gssapi.NameType.kerberos_principal)

        # Fail if Kerberos credentials are expired or missing
        creds = get_credentials_if_valid(name=gss_name,
                                         ccache_name=ccache_name)
        if not creds:
            setattr(context, 'logout_cookie', 'MagBearerToken=')
            logger.debug(
                'ccache expired or invalid, deleting session, need login')
            return None

        return ccache_name

    def finalize_kerberos_acquisition(self, who, ccache_name, environ, start_response, headers=None):
        """Finish login by fetching a session cookie for *ccache_name*.

        Responds 200 with an IPASESSION header on success, 401 otherwise.
        *who* names the calling login handler (used for diagnostics).
        """
        if headers is None:
            headers = []

        # Connect back to ourselves to get mod_auth_gssapi to
        # generate a cookie for us.
        # NOTE(review): plain http URL with TLS verify configured —
        # presumably the request is redirected/handled locally; confirm.
        try:
            target = self.api.env.host
            # pylint: disable-next=missing-timeout
            r = requests.get('http://{0}/ipa/session/cookie'.format(target),
                             auth=NegotiateAuth(target, ccache_name),
                             verify=paths.IPA_CA_CRT)
            session_cookie = r.cookies.get("ipa_session")
            if not session_cookie:
                raise ValueError('No session cookie found')
        except Exception as e:
            return self.unauthorized(environ, start_response,
                                     str(e),
                                     'Authentication failed')

        headers.append(('IPASESSION', session_cookie))

        start_response(HTTP_STATUS_SUCCESS, headers)
        return [b'']
class KerberosWSGIExecutioner(WSGIExecutioner, KerberosSession):
    """Base class for xmlserver and jsonserver_kerb.

    Requires mod_auth_gssapi to have placed a KRB5CCNAME into the WSGI
    environment; creates the per-request context from that ccache.
    """

    def _on_finalize(self):
        super(KerberosWSGIExecutioner, self)._on_finalize()

    def __call__(self, environ, start_response):
        logger.debug('KerberosWSGIExecutioner.__call__:')
        user_ccache=environ.get('KRB5CCNAME')

        # Headers are set via object.__setattr__ because plugin objects
        # are locked against normal attribute assignment.
        object.__setattr__(
            self, 'headers',
            [('Content-Type', '%s; charset=utf-8' % self.content_type)]
        )

        if user_ccache is None:
            # Misconfigured front-end: authentication never happened.
            status = HTTP_STATUS_SERVER_ERROR

            logger.error(
                '%s: %s', status,
                'KerberosWSGIExecutioner.__call__: '
                'KRB5CCNAME not defined in HTTP request environment')

            return self.marshal(None, CCacheError())
        try:
            self.create_context(ccache=user_ccache)
            response = super(KerberosWSGIExecutioner, self).__call__(
                environ, start_response)
        except PublicError as e:
            # RPC-level errors still produce an HTTP 200 with the error
            # marshalled in the body, per the RPC protocol.
            status = HTTP_STATUS_SUCCESS
            response = status.encode('utf-8')
            start_response(status, self.headers)
            return [self.marshal(None, e)]
        finally:
            destroy_context()
        return response
class xmlserver(KerberosWSGIExecutioner):
    """
    Execution backend plugin for XML-RPC server.

    Also see the `ipalib.rpc.xmlclient` plugin.
    """

    content_type = 'text/xml'
    key = '/xml'

    def listMethods(self, *params):
        """list methods for XML-RPC introspection"""
        if params:
            raise errors.ZeroArgumentError(name='system.listMethods')
        # Advertise both IPA commands and the system.* introspection calls.
        return (tuple(unicode(cmd.name) for cmd in self.api.Command) +
                tuple(unicode(name) for name in self._system_commands))

    def _get_method_name(self, name, *params):
        """Get a method name for XML-RPC introspection commands"""
        if not params:
            raise errors.RequirementError(name='method name')
        elif len(params) > 1:
            raise errors.MaxArgumentError(name=name, count=1)
        [method_name] = params
        return method_name

    def methodSignature(self, *params):
        """get method signature for XML-RPC introspection"""
        method_name = self._get_method_name('system.methodSignature', *params)
        if method_name in self._system_commands:
            # TODO
            # for now let's not go out of our way to document standard XML-RPC
            return u'undef'
        else:
            # Raises if the command does not exist; return value unused.
            self._get_command(method_name)

        # All IPA commands return a dict (struct),
        # and take a params, options - list and dict (array, struct)
        return [[u'struct', u'array', u'struct']]

    def methodHelp(self, *params):
        """get method docstring for XML-RPC introspection"""
        method_name = self._get_method_name('system.methodHelp', *params)
        if method_name in self._system_commands:
            return u''
        else:
            command = self._get_command(method_name)
            return unicode(command.doc or '')

    # Dispatch table for system.* introspection; values are the unbound
    # methods defined above.
    _system_commands = {
        'system.listMethods': listMethods,
        'system.methodSignature': methodSignature,
        'system.methodHelp': methodHelp,
    }

    def unmarshal(self, data):
        """Parse an XML-RPC request body into (name, args, options, _id)."""
        (params, name) = xml_loads(data)
        if name in self._system_commands:
            # For XML-RPC introspection, return params directly
            return (name, params, {}, None)
        (args, options) = params_2_args_options(params)
        if 'version' not in options:
            # Keep backwards compatibility with client containing
            # bug https://fedorahosted.org/freeipa/ticket/3294:
            # If `version` is not given in XML-RPC, assume an old version
            options['version'] = VERSION_WITHOUT_CAPABILITIES
        return (name, args, options, None)

    def marshal(self, result, error, _id=None,
                version=VERSION_WITHOUT_CAPABILITIES):
        """Serialize an XML-RPC methodResponse (Fault on error) as bytes."""
        if error:
            logger.debug('response: %s: %s',
                         error.__class__.__name__, str(error))
            response = Fault(error.errno, error.strerror)
        else:
            if isinstance(result, dict):
                logger.debug('response: entries returned %d',
                             result.get('count', 1))
            response = (result,)
        dump = xml_dumps(response, version, methodresponse=True)
        return dump.encode('utf-8')
class jsonserver_i18n_messages(jsonserver):
    """
    JSON RPC server for i18n messages only.

    Unauthenticated endpoint; only the 'i18n_messages' command is
    allowed through, everything else is rejected.
    """

    key = '/i18n_messages'

    def not_allowed(self, start_response):
        # 405 for anything but POST.
        status = '405 Method Not Allowed'
        headers = [('Allow', 'POST')]
        response = b''

        logger.debug('jsonserver_i18n_messages: %s', status)
        start_response(status, headers)
        return [response]

    def forbidden(self, start_response):
        # 403 for any RPC command other than i18n_messages.
        status = '403 Forbidden'
        headers = []
        response = b'Invalid RPC command'

        logger.debug('jsonserver_i18n_messages: %s', status)
        start_response(status, headers)
        return [response]

    def __call__(self, environ, start_response):
        logger.debug('WSGI jsonserver_i18n_messages.__call__:')

        if environ['REQUEST_METHOD'] != 'POST':
            return self.not_allowed(start_response)

        data = read_input(environ)
        unmarshal_data = super(jsonserver_i18n_messages, self
                               ).unmarshal(data)
        name = unmarshal_data[0] if unmarshal_data else ''
        if name != 'i18n_messages':
            return self.forbidden(start_response)

        # The body was consumed by read_input(); rewind it so the parent
        # handler can read the same request again.
        environ['wsgi.input'] = BytesIO(data.encode('utf-8'))
        response = super(jsonserver_i18n_messages, self
                         ).__call__(environ, start_response)
        return response
class jsonserver_session(jsonserver, KerberosSession):
    """
    JSON RPC server protected with session auth.
    """

    key = '/session/json'

    def __init__(self, api):
        super(jsonserver_session, self).__init__(api)

    def _on_finalize(self):
        super(jsonserver_session, self)._on_finalize()

    def __call__(self, environ, start_response):
        '''
        Validate session credentials, then dispatch the JSON-RPC call.
        '''
        logger.debug('WSGI jsonserver_session.__call__:')

        # CSRF protection: refuse requests from foreign referers.
        if not self.check_referer(environ):
            return self.bad_request(environ, start_response, 'denied')

        # Redirect to login if no Kerberos credentials
        ccache_name = self.get_environ_creds(environ)
        if ccache_name is None:
            return self.need_login(start_response)

        # Store the ccache name in the per-thread context
        setattr(context, 'ccache_name', ccache_name)

        # This may fail if a ticket from wrong realm was handled via browser
        try:
            self.create_context(ccache=ccache_name)
        except ACIError as e:
            return self.unauthorized(environ, start_response, str(e), 'denied')
        except errors.DatabaseError as e:
            # account is disabled but user has a valid ticket
            msg = str(e)
            if "account inactivated" in msg.lower():
                return self.unauthorized(
                    environ, start_response, str(e), "account disabled"
                )
            else:
                return self.service_unavailable(environ, start_response, msg)
        except CCacheError:
            return self.need_login(start_response)

        try:
            response = super(jsonserver_session, self).__call__(environ, start_response)
        finally:
            # Always tear down the per-request LDAP/krb context.
            destroy_context()

        return response
class jsonserver_kerb(jsonserver, KerberosWSGIExecutioner):
    """
    JSON RPC server protected with kerberos auth.

    Combines jsonserver marshalling with the Kerberos context handling
    from KerberosWSGIExecutioner; no extra behavior of its own.
    """

    key = '/json'
class KerberosLogin(Backend, KerberosSession):
    """Shared base for login endpoints that turn existing Kerberos
    credentials into a session cookie. Subclasses set ``key``."""

    key = None

    def _on_finalize(self):
        super(KerberosLogin, self)._on_finalize()
        self.api.Backend.wsgi_dispatch.mount(self, self.key)

    def __call__(self, environ, start_response):
        logger.debug('WSGI KerberosLogin.__call__:')

        # CSRF protection: refuse requests from foreign referers.
        if not self.check_referer(environ):
            return self.bad_request(environ, start_response, 'denied')

        # Redirect to login if no Kerberos credentials
        user_ccache_name = self.get_environ_creds(environ)
        if user_ccache_name is None:
            return self.need_login(start_response)

        return self.finalize_kerberos_acquisition('login_kerberos', user_ccache_name, environ, start_response)
class login_kerberos(KerberosLogin):
    # Login endpoint backed by plain Kerberos (GSSAPI) authentication.
    key = '/session/login_kerberos'
class login_x509(KerberosLogin):
    # Login endpoint for certificate (PKINIT-style) authentication; the
    # front-end must have already produced a ccache for the client cert.
    key = '/session/login_x509'

    def __call__(self, environ, start_response):
        logger.debug('WSGI login_x509.__call__:')

        if not self.check_referer(environ):
            return self.bad_request(environ, start_response, 'denied')

        # Without a ccache there is nothing to turn into a session.
        if 'KRB5CCNAME' not in environ:
            return self.unauthorized(
                environ, start_response, 'KRB5CCNAME not set',
                'Authentication failed')

        return super(login_x509, self).__call__(environ, start_response)
class login_password(Backend, KerberosSession):
    """Form-based login endpoint: kinit with user/password (optionally
    armored for OTP), then exchange the resulting ccache for a session
    cookie."""

    content_type = 'text/plain'
    key = '/session/login_password'

    def _on_finalize(self):
        super(login_password, self)._on_finalize()
        self.api.Backend.wsgi_dispatch.mount(self, self.key)

    def __call__(self, environ, start_response):
        def attempt_kinit(user_principal, password,
                          ipa_ccache_name, use_armor=True):
            # Returns None on success or a finished WSGI error response.
            try:
                # try to remove in case an old file was there
                os.unlink(ipa_ccache_name)
            except OSError:
                pass
            try:
                self.kinit(user_principal, password,
                           ipa_ccache_name, use_armor=use_armor)
            except PasswordExpired as e:
                return self.unauthorized(environ, start_response,
                                         str(e), 'password-expired')
            except InvalidSessionPassword as e:
                return self.unauthorized(environ, start_response,
                                         str(e), 'invalid-password')
            except KrbPrincipalExpired as e:
                return self.unauthorized(environ,
                                         start_response,
                                         str(e),
                                         'krbprincipal-expired')
            except UserLocked as e:
                return self.unauthorized(environ,
                                         start_response,
                                         str(e),
                                         'user-locked')
            return None

        logger.debug('WSGI login_password.__call__:')

        # CSRF protection: refuse requests from foreign referers.
        if not self.check_referer(environ):
            return self.bad_request(environ, start_response, 'denied')

        # Get the user and password parameters from the request
        content_type = environ.get('CONTENT_TYPE', '').lower()
        if not content_type.startswith('application/x-www-form-urlencoded'):
            return self.bad_request(environ, start_response, "Content-Type must be application/x-www-form-urlencoded")

        method = environ.get('REQUEST_METHOD', '').upper()
        if method == 'POST':
            query_string = read_input(environ)
        else:
            return self.bad_request(environ, start_response, "HTTP request method must be POST")

        try:
            query_dict = parse_qs(query_string)
        except Exception:
            return self.bad_request(environ, start_response, "cannot parse query data")

        # Exactly one 'user' parameter is required.
        user = query_dict.get('user', None)
        if user is not None:
            if len(user) == 1:
                user = user[0]
            else:
                return self.bad_request(environ, start_response, "more than one user parameter")
        else:
            return self.bad_request(environ, start_response, "no user specified")

        # allows login in the form user@SERVER_REALM or user@server_realm
        # we kinit as enterprise principal so we can assume that unknown realms
        # are UPN
        try:
            user_principal = kerberos.Principal(user)
        except Exception:
            # the principal is malformed in some way (e.g. user@REALM1@REALM2)
            # netbios names (NetBIOS1\user) are also not accepted (yet)
            return self.unauthorized(environ, start_response, '', 'denied')

        # Exactly one 'password' parameter is required.
        password = query_dict.get('password', None)
        if password is not None:
            if len(password) == 1:
                password = password[0]
            else:
                return self.bad_request(environ, start_response, "more than one password parameter")
        else:
            return self.bad_request(environ, start_response, "no password specified")

        # Get the ccache we'll use and attempt to get credentials in it with user,password
        ipa_ccache_name = os.path.join(paths.IPA_CCACHES,
                                       'kinit_{}'.format(os.getpid()))
        try:
            result = attempt_kinit(user_principal, password,
                                   ipa_ccache_name, use_armor=True)
        except KrbPrincipalWrongFAST:
            # Armor ccache did not match; retry without FAST armor.
            result = attempt_kinit(user_principal, password,
                                   ipa_ccache_name, use_armor=False)

        if result is not None:
            return result

        result = self.finalize_kerberos_acquisition('login_password',
                                                    ipa_ccache_name, environ,
                                                    start_response)
        try:
            # Try not to litter the filesystem with unused TGTs
            os.unlink(ipa_ccache_name)
        except OSError:
            pass
        return result

    def kinit(self, principal, password, ccache_name, use_armor=True):
        """Obtain a TGT for *principal* into *ccache_name*.

        Maps the various kinit failure messages onto the specific public
        exceptions callers distinguish (expired password, locked user,
        expired principal, wrong FAST armor, generic invalid password).
        """
        if use_armor:
            # get anonymous ccache as an armor for FAST to enable OTP auth
            armor_path = os.path.join(paths.IPA_CCACHES,
                                      "armor_{}".format(os.getpid()))

            logger.debug('Obtaining armor in ccache %s', armor_path)

            try:
                kinit_armor(
                    armor_path,
                    pkinit_anchors=[paths.KDC_CERT, paths.KDC_CA_BUNDLE_PEM],
                )
            except RuntimeError:
                logger.error("Failed to obtain armor cache")
                # We try to continue w/o armor, 2FA will be impacted
                armor_path = None
        else:
            armor_path = None

        try:
            kinit_password(
                unicode(principal),
                password,
                ccache_name,
                armor_ccache_name=armor_path,
                enterprise=True,
                canonicalize=True,
                lifetime=self.api.env.kinit_lifetime)
        except RuntimeError as e:
            # kinit reports failures only through its message text, so
            # classify by substring matching.
            if ('kinit: Cannot read password while '
                    'getting initial credentials') in str(e):
                raise PasswordExpired(principal=principal, message=unicode(e))
            elif ('kinit: Client\'s entry in database'
                    ' has expired while getting initial credentials') in str(e):
                raise KrbPrincipalExpired(principal=principal,
                                          message=unicode(e))
            elif ('kinit: Clients credentials have been revoked '
                    'while getting initial credentials') in str(e):
                raise UserLocked(principal=principal,
                                 message=unicode(e))
            elif ('kinit: Error constructing AP-REQ armor: '
                    'Matching credential not found') in str(e):
                raise KrbPrincipalWrongFAST(principal=principal)
            raise InvalidSessionPassword(principal=principal,
                                         message=unicode(e))
        finally:
            if armor_path:
                logger.debug('Cleanup the armor ccache')
                ipautil.run([paths.KDESTROY, '-A', '-c', armor_path],
                            env={'KRB5CCNAME': armor_path}, raiseonerr=False)
class change_password(Backend, HTTP_Status):
    """Form-based password change endpoint.

    Binds to LDAP as the user with the old password (plus optional OTP)
    and performs the password modify; always responds 200 with the
    outcome encoded in X-IPA-Pwchange-Result and an HTML body.
    """

    content_type = 'text/plain'
    key = '/session/change_password'

    def _on_finalize(self):
        super(change_password, self)._on_finalize()
        self.api.Backend.wsgi_dispatch.mount(self, self.key)

    def __call__(self, environ, start_response):
        logger.info('WSGI change_password.__call__:')

        # CSRF protection: refuse requests from foreign referers.
        if not self.check_referer(environ):
            return self.bad_request(environ, start_response, 'denied')

        # Get the user and password parameters from the request
        content_type = environ.get('CONTENT_TYPE', '').lower()
        if not content_type.startswith('application/x-www-form-urlencoded'):
            return self.bad_request(environ, start_response, "Content-Type must be application/x-www-form-urlencoded")

        method = environ.get('REQUEST_METHOD', '').upper()
        if method == 'POST':
            query_string = read_input(environ)
        else:
            return self.bad_request(environ, start_response, "HTTP request method must be POST")

        try:
            query_dict = parse_qs(query_string)
        except Exception:
            return self.bad_request(
                environ, start_response, "cannot parse query data"
            )

        # Collect required form fields; each must appear exactly once.
        data = {}
        for field in ('user', 'old_password', 'new_password', 'otp'):
            value = query_dict.get(field, None)
            if value is not None:
                if len(value) == 1:
                    data[field] = value[0]
                else:
                    return self.bad_request(environ, start_response, "more than one %s parameter"
                                            % field)
            elif field != 'otp':  # otp is optional
                return self.bad_request(environ, start_response, "no %s specified" % field)

        # start building the response
        logger.info("WSGI change_password: start password change of user '%s'",
                    data['user'])
        status = HTTP_STATUS_SUCCESS
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]
        title = 'Password change rejected'
        result = 'error'
        policy_error = None

        bind_dn = DN((self.api.Object.user.primary_key.name, data['user']),
                     self.api.env.container_user, self.api.env.basedn)

        try:
            # OTP (if given) is appended to the old password for the bind.
            pw = data['old_password']
            if data.get('otp'):
                pw = data['old_password'] + data['otp']
            conn = ldap2(self.api)
            conn.connect(bind_dn=bind_dn, bind_pw=pw)
        except (NotFound, ACIError):
            result = 'invalid-password'
            message = 'The old password or username is not correct.'
        except Exception as e:
            message = "Could not connect to LDAP server."
            logger.error("change_password: cannot authenticate '%s' to LDAP "
                         "server: %s",
                         data['user'], str(e))
        else:
            try:
                conn.modify_password(bind_dn, data['new_password'], data['old_password'], skip_bind=True)
            except ExecutionError as e:
                # Password policy rejected the new password.
                result = 'policy-error'
                policy_error = escape(str(e))
                message = "Password change was rejected: %s" % escape(str(e))
            except Exception as e:
                message = "Could not change the password"
                logger.error("change_password: cannot change password of "
                             "'%s': %s",
                             data['user'], str(e))
            else:
                result = 'ok'
                title = "Password change successful"
                message = "Password was changed."
            finally:
                if conn.isconnected():
                    conn.disconnect()

        logger.info('%s: %s', status, message)

        # Machine-readable outcome travels in headers; body is for humans.
        response_headers.append(('X-IPA-Pwchange-Result', result))
        if policy_error:
            response_headers.append(('X-IPA-Pwchange-Policy-Error', policy_error))

        start_response(status, response_headers)
        output = _success_template % dict(title=str(title),
                                          message=str(message))
        return [output.encode('utf-8')]
class sync_token(Backend, HTTP_Status):
    """OTP token synchronization endpoint.

    Binds to LDAP as the user with an OTP sync request control carrying
    two consecutive token codes (and optionally the token DN); always
    responds 200 with the outcome in X-IPA-TokenSync-Result.
    """

    content_type = 'text/plain'
    key = '/session/sync_token'

    class OTPSyncRequest(univ.Sequence):
        # ASN.1 payload of the LDAP OTP sync request control.
        OID = "2.16.840.1.113730.3.8.10.6"

        componentType = namedtype.NamedTypes(
            namedtype.NamedType('firstCode', univ.OctetString()),
            namedtype.NamedType('secondCode', univ.OctetString()),
            namedtype.OptionalNamedType('tokenDN', univ.OctetString())
        )

    def _on_finalize(self):
        super(sync_token, self)._on_finalize()
        self.api.Backend.wsgi_dispatch.mount(self, self.key)

    def __call__(self, environ, start_response):
        # Make sure this is a form request.
        content_type = environ.get('CONTENT_TYPE', '').lower()
        if not content_type.startswith('application/x-www-form-urlencoded'):
            return self.bad_request(environ, start_response, "Content-Type must be application/x-www-form-urlencoded")

        # Make sure this is a POST request.
        method = environ.get('REQUEST_METHOD', '').upper()
        if method == 'POST':
            query_string = read_input(environ)
        else:
            return self.bad_request(environ, start_response, "HTTP request method must be POST")

        # Parse the query string to a dictionary.
        try:
            query_dict = parse_qs(query_string)
        except Exception:
            return self.bad_request(
                environ, start_response, "cannot parse query data"
            )

        # Collect form fields; each must appear exactly once, and only
        # 'token' is optional.
        data = {}
        for field in ('user', 'password', 'first_code', 'second_code', 'token'):
            value = query_dict.get(field, None)
            if value is not None:
                if len(value) == 1:
                    data[field] = value[0]
                else:
                    return self.bad_request(environ, start_response, "more than one %s parameter"
                                            % field)
            elif field != 'token':
                return self.bad_request(environ, start_response, "no %s specified" % field)

        # Create the request control.
        sr = self.OTPSyncRequest()
        sr.setComponentByName('firstCode', data['first_code'])
        sr.setComponentByName('secondCode', data['second_code'])
        if 'token' in data:
            # Accept either a full DN or a bare token name.
            try:
                token_dn = DN(data['token'])
            except ValueError:
                token_dn = DN((self.api.Object.otptoken.primary_key.name, data['token']),
                              self.api.env.container_otp, self.api.env.basedn)
            sr.setComponentByName('tokenDN', str(token_dn))
        rc = ldap.controls.RequestControl(sr.OID, True, encoder.encode(sr))

        # Resolve the user DN
        bind_dn = DN((self.api.Object.user.primary_key.name, data['user']),
                     self.api.env.container_user, self.api.env.basedn)

        # Start building the response.
        status = HTTP_STATUS_SUCCESS
        response_headers = [('Content-Type', 'text/html; charset=utf-8')]
        title = 'Token sync rejected'

        # Perform the synchronization: the server-side control does the
        # actual sync during the bind.
        conn = ldap2(self.api)
        try:
            conn.connect(bind_dn=bind_dn,
                         bind_pw=data['password'],
                         serverctrls=[rc,])
            result = 'ok'
            title = "Token sync successful"
            message = "Token was synchronized."
        except (NotFound, ACIError):
            result = 'invalid-credentials'
            message = 'The username, password or token codes are not correct.'
        except Exception as e:
            result = 'error'
            message = "Could not connect to LDAP server."
            logger.error("token_sync: cannot authenticate '%s' to LDAP "
                         "server: %s",
                         data['user'], str(e))
        finally:
            if conn.isconnected():
                conn.disconnect()

        # Report status and return.
        response_headers.append(('X-IPA-TokenSync-Result', result))
        start_response(status, response_headers)
        output = _success_template % dict(title=str(title),
                                          message=str(message))
        return [output.encode('utf-8')]
class xmlserver_session(xmlserver, KerberosSession):
    """
    XML RPC server protected with session auth.
    """

    key = '/session/xml'

    def __init__(self, api):
        super(xmlserver_session, self).__init__(api)

    def _on_finalize(self):
        super(xmlserver_session, self)._on_finalize()

    def need_login(self, start_response):
        # Bare 401 (no IPASESSION header, unlike KerberosSession's).
        status = '401 Unauthorized'
        headers = []
        response = b''

        logger.debug('xmlserver_session: %s need login', status)

        start_response(status, headers)
        return [response]

    def __call__(self, environ, start_response):
        '''
        Validate the session ccache, then dispatch the XML-RPC call.
        '''
        logger.debug('WSGI xmlserver_session.__call__:')
        if not self.check_referer(environ):
            return self.bad_request(environ, start_response, 'denied')
        ccache_name = environ.get('KRB5CCNAME')

        # Redirect to /ipa/xml if no Kerberos credentials
        if ccache_name is None:
            logger.debug('xmlserver_session.__call_: no ccache, need TGT')
            return self.need_login(start_response)

        # Redirect to /ipa/xml if Kerberos credentials are expired
        creds = get_credentials_if_valid(ccache_name=ccache_name)
        if not creds:
            logger.debug('xmlserver_session.__call_: ccache expired, deleting '
                         'session, need login')
            # The request is finished with the ccache, destroy it.
            return self.need_login(start_response)

        # Store the session data in the per-thread context
        setattr(context, 'ccache_name', ccache_name)

        try:
            response = super(xmlserver_session, self).__call__(environ, start_response)
        finally:
            destroy_context()

        return response
| 50,449
|
Python
|
.py
| 1,184
| 31.842061
| 118
| 0.586305
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,685
|
setup.py
|
freeipa_freeipa/ipaserver/setup.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Python-level packaging using setuptools
"""
from os.path import abspath, dirname
import sys
if __name__ == '__main__':
    # include ../ for ipasetup.py
    sys.path.append(dirname(dirname(abspath(__file__))))
    from ipasetup import ipasetup  # noqa: E402

    ipasetup(
        name='ipaserver',
        doc=__doc__,
        package_dir={'ipaserver': ''},
        packages=[
            'ipaserver',
            'ipaserver.advise',
            'ipaserver.advise.plugins',
            'ipaserver.custodia',
            'ipaserver.custodia.httpd',
            'ipaserver.custodia.message',
            'ipaserver.custodia.server',
            'ipaserver.dnssec',
            'ipaserver.plugins',
            'ipaserver.secrets',
            'ipaserver.secrets.handlers',
            'ipaserver.install',
            'ipaserver.install.plugins',
            'ipaserver.install.server',
        ],
        install_requires=[
            "cryptography",
            "dbus-python",
            "dnspython",
            # dogtag-pki is just the client package on PyPI. ipaserver
            # requires the full pki package.
            # "dogtag-pki",
            "ipaclient",
            "ipalib",
            "ipaplatform",
            "ipapython",
            "jwcrypto",
            "lxml",
            "netaddr",
            "psutil",
            "pyasn1",
            "requests",
            "six",
            "python-augeas",
            "python-ldap",
        ],
        entry_points={
            'ipaserver.custodia.authenticators': [
                ('SimpleCredsAuth = '
                 'ipaserver.custodia.httpd.authenticators:SimpleCredsAuth'),
                # BUGFIX: the object reference must live in the vendored
                # ipaserver.custodia package (as SimpleCredsAuth above does);
                # a bare 'custodia.httpd...' path points at a package that
                # is not shipped by this distribution.
                ('SimpleHeaderAuth = '
                 'ipaserver.custodia.httpd.authenticators:SimpleHeaderAuth'),
            ],
            'ipaserver.custodia.authorizers': [
                'IPAKEMKeys = ipaserver.secrets.kem:IPAKEMKeys',
            ],
            'ipaserver.custodia.consumers': [
                'Secrets = ipaserver.custodia.secrets:Secrets',
                'Root = ipaserver.custodia.root:Root',
            ],
            'ipaserver.custodia.stores': [
                'IPASecStore = ipaserver.secrets.store:IPASecStore',
            ],
        },
        extras_require={
            # These packages are currently not available on PyPI.
            "dcerpc": ["samba", "pysss", "pysss_nss_idmap"],
            "hbactest": ["pyhbac"],
            "install": ["SSSDConfig"],
            "trust": ["pysss_murmur", "pysss_nss_idmap"],
        }
    )
| 3,311
|
Python
|
.py
| 94
| 25.776596
| 76
| 0.57423
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,686
|
servroles.py
|
freeipa_freeipa/ipaserver/servroles.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
This module contains the set of classes which abstract various bits and pieces
of information present in the LDAP tree about functionalities such as DNS
server, Active Directory trust controller etc. These properties come in two
distinct groups:
server roles
this group represents a general functionality provided by one or more
IPA servers, such as DNS server, certificate authority and such. In
this case there is a many-to-many mapping between the roles and the
masters which provide them.
server attributes
these represent a functionality associated with the whole topology,
such as CA renewal master or DNSSec key master.
See the corresponding design page (http://www.freeipa.org/page/V4/Server_Roles)
for more info.
Both of these groups use `LDAPBasedProperty` class as a base.
Server Roles
============
Server role objects are usually consuming information from the master's service
container (cn=FQDN,cn=masters,cn=ipa,cn=etc,$SUFFIX) are represented by
`ServiceBasedRole`class. To create an instance of such role, you only need to
specify role name and individual services comprising the role (more systemd
services may be enabled to provide some function):
>>> example_role = ServiceBasedRole(
... "Example Role",
... component_services = ['SERVICE1', 'SERVICE2'])
>>> example_role.name
'Example Role'
The role object can then be queried for the status of the role in the whole
topology or on a single master by using its `status` method. This method
returns a list of dictionaries akin to LDAP entries comprised from server name,
role name and role status (enabled if role is enabled, configured if the
service entries are present but not marked as enabled by 'enabledService'
config string, absent if the service entries are not present).
Note that 'AD trust agent' role is based on membership of the master in the
'adtrust agents' sysaccount group and is thus an instance of different class
(`ADTrustBasedRole`). This role also does not have 'configured' status, since
the master is either member of the group ('enabled') or not ('absent')
Server Attributes
=================
Server attributes are implemented as instances of `ServerAttribute` class. The
attribute is defined by some flag set on 'ipaConfigString' attribute of some
service entry. To create your own server attribute, see the following example:
>>> example_attribute = ServerAttribute("Example Attribute", example_role,
... "SERVICE1", "roleMaster")
>>> example_attribute.name
'Example Attribute'
The FQDN of master with the attribute set can be requested using `get()`
method. The attribute master can be changed by the `set()` method
which accepts FQDN of a new master hosting the attribute.
The available role/attribute instances are stored in
`role_instances`/`attribute_instances` tuples.
"""
import abc
from collections import namedtuple, defaultdict
from ldap import SCOPE_ONELEVEL
import six
from ipalib import _, errors
from ipapython.dn import DN
from ipaserver.masters import ENABLED_SERVICE, HIDDEN_SERVICE
if six.PY3:
unicode = str
ENABLED = u'enabled'
CONFIGURED = u'configured'
HIDDEN = u'hidden'
ABSENT = u'absent'
@six.add_metaclass(abc.ABCMeta)
class LDAPBasedProperty:
    """
    Base class for all master properties defined by LDAP content.

    :param attr_name: attribute name of the property
        (e.g. ``ca_server_server``)
    :param name: user-friendly name of the property
    """
    def __init__(self, attr_name, name):
        self.attr_name = attr_name
        self.name = name
        # for hidden services, insert hidden before '_server' suffix
        if attr_name.endswith(u'_server'):
            parts = attr_name.rsplit(u'_', 1)
            self.attr_name_hidden = u'{}_hidden_server'.format(parts[0])
        else:
            # property has no hidden counterpart
            self.attr_name_hidden = None
@six.add_metaclass(abc.ABCMeta)
class BaseServerRole(LDAPBasedProperty):
    """
    Server role hierarchy apex. All other server role definitions should
    either inherit from it or at least provide the `status` method for
    querying role status.
    """

    def create_role_status_dict(self, server, status):
        """
        Build one element of the list returned by `status()`.

        The output of the `status()` method is a list of dictionaries
        having the following keys:
          * role_servrole: name of role
          * server_server: server FQDN
          * status: role status on server

        :param server: master FQDN
        :param status: one of ENABLED/CONFIGURED/HIDDEN/ABSENT
        :returns: dict with the three keys above
        """
        return {
            u'role_servrole': self.name,
            u'server_server': server,
            u'status': status}

    @abc.abstractmethod
    def create_search_params(self, ldap, api_instance, server=None):
        """
        create search base and filter

        :param ldap: ldap connection
        :param api_instance: API instance
        :param server: server FQDN. if given, the method should generate
            filter and search base matching only the status on this server
        :returns: tuple of search base (a DN) and search filter
        """

    @abc.abstractmethod
    def get_result_from_entries(self, entries):
        """
        Get role status from returned LDAP entries

        :param entries: LDAPEntry objects returned by `search()`
        :returns: list of dicts generated by `create_role_status_dict()`
            method
        """

    def _fill_in_absent_masters(self, ldap2, api_instance, result):
        """
        Compute status records for masters on which the role is absent.

        :param ldap2: LDAP connection
        :param api_instance: API instance
        :param result: output of `get_result_from_entries` method
        :returns: list of ABSENT status dicts, one per master missing
            from *result*
        """
        search_base = DN(api_instance.env.container_masters,
                         api_instance.env.basedn)
        search_filter = '(objectclass=ipaConfigObject)'
        attrs_list = ['cn']
        # one-level search lists every master entry in the topology
        all_masters = ldap2.get_entries(
            search_base,
            filter=search_filter,
            scope=SCOPE_ONELEVEL,
            attrs_list=attrs_list)
        all_master_cns = set(m['cn'][0] for m in all_masters)
        enabled_configured_masters = set(r[u'server_server'] for r in result)
        # masters not present in the partial result have the role absent
        absent_masters = all_master_cns.difference(enabled_configured_masters)
        return [self.create_role_status_dict(m, ABSENT) for m in
                absent_masters]

    def status(self, api_instance, server=None, attrs_list=("*",)):
        """
        probe and return status of the role either on single server or on the
        whole topology

        :param api_instance: API instance
        :param server: server FQDN. If given, only the status of the role on
            this master will be returned
        :param attrs_list: attributes fetched from matching entries
        :returns: * 'enabled' if the role is enabled on the master
                  * 'configured' if it is not enabled but has
                    been configured by installer
                  * 'hidden' if the role is not advertised
                  * 'absent' otherwise
        """
        ldap2 = api_instance.Backend.ldap2
        search_base, search_filter = self.create_search_params(
            ldap2, api_instance, server=server)
        try:
            entries = ldap2.get_entries(
                search_base,
                filter=search_filter,
                attrs_list=attrs_list)
        except errors.EmptyResult:
            entries = []
        # single-server query with no entries: the role is simply absent
        if not entries and server is not None:
            return [self.create_role_status_dict(server, ABSENT)]
        result = self.get_result_from_entries(entries)
        # topology-wide query: add ABSENT records for unmentioned masters
        if server is None:
            result.extend(
                self._fill_in_absent_masters(ldap2, api_instance, result))
        return sorted(result, key=lambda x: x[u'server_server'])
class ServerAttribute(LDAPBasedProperty):
    """
    Class from which server attributes should be instantiated

    A server attribute is a flag realized as a fixed `ipaConfigString`
    value on the entry of the associated service beneath one or more
    masters.

    :param attr_name: attribute name of the property
    :param name: user-friendly name of the property
    :param associated_role_name: name of a role which must be enabled
        on the provider
    :param associated_service_name: name of LDAP service on which the
        attribute is set. Does not need to belong to the service entries
        of associated role
    :param ipa_config_string_value: value of `ipaConfigString` attribute
        associated with the presence of server attribute
    """

    def __init__(self, attr_name, name, associated_role_name,
                 associated_service_name,
                 ipa_config_string_value):
        super(ServerAttribute, self).__init__(attr_name, name)
        self.associated_role_name = associated_role_name
        self.associated_service_name = associated_service_name
        self.ipa_config_string_value = ipa_config_string_value

    @property
    def associated_role(self):
        """Role instance matching `associated_role_name`.

        :raises NotImplementedError: if no registered role instance matches
        """
        for inst in role_instances:
            if self.associated_role_name == inst.attr_name:
                return inst
        raise NotImplementedError(
            "{}: no valid associated role found".format(self.attr_name))

    def create_search_filter(self, ldap):
        """
        Create search filter which matches LDAP data corresponding to the
        attribute
        """
        svc_filter = ldap.make_filter_from_attr(
            'cn', self.associated_service_name)
        configstring_filter = ldap.make_filter_from_attr(
            'ipaConfigString', self.ipa_config_string_value)
        return ldap.combine_filters(
            [svc_filter, configstring_filter], rules=ldap.MATCH_ALL)

    def get(self, api_instance):
        """
        get the masters which have the attribute set

        :param api_instance: API instance
        :returns: sorted list of master FQDNs; empty list when the
            attribute is not set anywhere
        :raises errors.ValidationError: if a master holding the attribute
            does not have the associated role enabled
        """
        ldap2 = api_instance.Backend.ldap2
        search_base = DN(api_instance.env.container_masters,
                         api_instance.env.basedn)
        search_filter = self.create_search_filter(ldap2)
        try:
            entries = ldap2.get_entries(search_base, filter=search_filter)
        except errors.EmptyResult:
            return []
        # service entries live at cn=<service>,cn=<master>,...: the second
        # RDN of the entry DN therefore names the master
        master_cns = {e.dn[1]['cn'] for e in entries}
        associated_role_providers = set(
            self._get_assoc_role_providers(api_instance))
        if not master_cns.issubset(associated_role_providers):
            # i18n fix: translate the unformatted template first, then
            # interpolate, so the msgid can be found in the catalog
            raise errors.ValidationError(
                name=self.name,
                error=_("all masters must have %(role)s role enabled") %
                {'role': self.associated_role.name}
            )
        return sorted(master_cns)

    def _get_master_dns(self, api_instance, servers):
        # DNs of the master entries for the given FQDNs
        return [
            DN(('cn', server), api_instance.env.container_masters,
               api_instance.env.basedn) for server in servers]

    def _get_masters_service_entries(self, ldap, master_dns):
        # fetch the associated service entry beneath each master
        service_dns = [
            DN(('cn', self.associated_service_name), master_dn) for master_dn
            in master_dns]
        return [ldap.get_entry(service_dn) for service_dn in service_dns]

    def _add_attribute_to_svc_entry(self, ldap, service_entry):
        """
        add the server attribute to the entry of associated service

        :param ldap: LDAP connection object
        :param service_entry: associated service entry
        """
        ipa_config_string = service_entry.get('ipaConfigString', [])
        ipa_config_string.append(self.ipa_config_string_value)
        service_entry['ipaConfigString'] = ipa_config_string
        ldap.update_entry(service_entry)

    def _remove_attribute_from_svc_entry(self, ldap, service_entry):
        """
        remove the server attribute from the entry of associated service

        single ipaConfigString attribute is case-insensitive, we must handle
        arbitrary case of target value

        :param ldap: LDAP connection object
        :param service_entry: associated service entry
        """
        ipa_config_string = service_entry.get('ipaConfigString', [])
        # bug fix: iterate over a copy; removing from the list while
        # iterating over it would skip the element after each removal
        for value in list(ipa_config_string):
            if value.lower() == self.ipa_config_string_value.lower():
                service_entry['ipaConfigString'].remove(value)
        ldap.update_entry(service_entry)

    def _get_assoc_role_providers(self, api_instance):
        """get list of all servers on which the associated role is enabled

        Consider a hidden and configured server as a valid provider for a
        role, as all services are started.
        """
        return [
            r[u'server_server']
            for r in self.associated_role.status(api_instance)
            if r[u'status'] in {ENABLED, HIDDEN, CONFIGURED}
        ]

    def _remove(self, api_instance, masters):
        """
        remove attribute from one or more masters

        :param api_instance: API instance
        :param masters: list or iterable containing master FQDNs
        """
        ldap = api_instance.Backend.ldap2
        master_dns = self._get_master_dns(api_instance, masters)
        service_entries = self._get_masters_service_entries(ldap, master_dns)
        for service_entry in service_entries:
            self._remove_attribute_from_svc_entry(ldap, service_entry)

    def _add(self, api_instance, masters):
        """
        add attribute to the masters

        :param api_instance: API instance
        :param masters: iterable containing master FQDNs
        :raises: * errors.ValidationError if the associated role is not
                   enabled on the master
        """
        ldap = api_instance.Backend.ldap2
        master_dns = self._get_master_dns(api_instance, masters)
        service_entries = self._get_masters_service_entries(ldap, master_dns)
        for service_entry in service_entries:
            self._add_attribute_to_svc_entry(ldap, service_entry)

    def _check_receiving_masters_having_associated_role(self, api_instance,
                                                        masters):
        # all receiving masters must already provide the associated role
        assoc_role_providers = set(
            self._get_assoc_role_providers(api_instance))
        masters_set = set(masters)
        masters_without_role = masters_set - assoc_role_providers
        if masters_without_role:
            # i18n fix: translate first, interpolate afterwards (see get())
            raise errors.ValidationError(
                name=', '.join(sorted(masters_without_role)),
                error=_("must have %(role)s role enabled") %
                {'role': self.associated_role.name}
            )

    def set(self, api_instance, masters):
        """
        set the attribute on masters

        :param api_instance: API instance
        :param masters: an iterable with FQDNs of the new masters

        the attribute is automatically unset from previous masters if present

        :raises: errors.EmptyModlist if the new masters are the same as
            the original ones
        """
        old_masters = self.get(api_instance)
        if sorted(old_masters) == sorted(masters):
            raise errors.EmptyModlist
        self._check_receiving_masters_having_associated_role(
            api_instance, masters)
        if old_masters:
            self._remove(api_instance, old_masters)
        self._add(api_instance, masters)
class SingleValuedServerAttribute(ServerAttribute):
    """
    A server attribute constrained to at most a single master.

    `set` refuses an input containing more than one FQDN, and `get`
    raises if more than one master unexpectedly holds the attribute,
    so callers can rely on a zero- or one-element list.
    """

    def set(self, api_instance, masters):
        """Assign the attribute to at most one master."""
        if len(masters) > 1:
            raise errors.ValidationError(
                name=self.attr_name,
                error=_("must be enabled only on a single master"))
        super(SingleValuedServerAttribute, self).set(api_instance, masters)

    def get(self, api_instance):
        """Return a zero- or one-element list of holders of the attribute."""
        found = super(SingleValuedServerAttribute, self).get(api_instance)
        if len(found) > 1:
            # more than one holder violates the single-value contract
            raise errors.SingleMatchExpected(found=len(found))
        return found
# Lightweight record describing one component service parsed from LDAP
_Service = namedtuple('Service', ['name', 'enabled', 'hidden'])


class ServiceBasedRole(BaseServerRole):
    """
    class for all role instances whose status is defined by presence of one or
    more entries in LDAP and/or their attributes

    :param component_services: names of the service entries (cn values)
        that together constitute the role
    """
    def __init__(self, attr_name, name, component_services):
        super(ServiceBasedRole, self).__init__(attr_name, name)
        self.component_services = component_services

    def _validate_component_services(self, services):
        # the set of found services must exactly match the configured set
        svc_set = {s.name for s in services}
        if svc_set != set(self.component_services):
            raise ValueError(
                "{}: Mismatch between component services and search result "
                "(expected: {}, got: {})".format(
                    self.__class__.__name__,
                    ', '.join(sorted(self.component_services)),
                    ', '.join(sorted(s.name for s in services))))

    def _get_service(self, entry):
        """Convert one LDAP service entry into a `_Service` record."""
        entry_cn = entry['cn'][0]
        enabled = self._is_service_enabled(entry)
        hidden = self._is_service_hidden(entry)
        return _Service(name=entry_cn, enabled=enabled, hidden=hidden)

    def _is_service_enabled(self, entry):
        """
        Determine whether the service is enabled based on the presence of
        the ENABLED_SERVICE marker among the ipaConfigString values.

        NOTE(review): the membership test below is an exact, case-sensitive
        match even though LDAP treats ipaConfigString case-insensitively --
        confirm values are stored with canonical casing.

        :param entry: LDAPEntry of the service
        :returns: True if the service entry is enabled, False otherwise
        """
        ipaconfigstring_values = set(entry.get('ipaConfigString', []))
        return ENABLED_SERVICE in ipaconfigstring_values

    def _is_service_hidden(self, entry):
        """Determine if service is hidden

        :param entry: LDAPEntry of the service
        :returns: True if the service entry is hidden, False otherwise
        """
        ipaconfigstring_values = set(entry.get('ipaConfigString', []))
        return HIDDEN_SERVICE in ipaconfigstring_values

    def _get_services_by_masters(self, entries):
        """
        given list of entries, return a dictionary keyed by master FQDNs which
        contains list of service entries belonging to the master
        """
        services_by_master = defaultdict(list)
        for e in entries:
            service = self._get_service(e)
            # second RDN of the service entry DN names the master
            master_cn = e.dn[1]['cn']
            services_by_master[master_cn].append(service)
        return services_by_master

    def get_result_from_entries(self, entries):
        """Aggregate per-service flags into one status per master."""
        result = []
        services_by_master = self._get_services_by_masters(entries)
        for master, services in services_by_master.items():
            try:
                self._validate_component_services(services)
            except ValueError:
                # incomplete service set on this master: skip it entirely
                continue
            # all enabled -> ENABLED; all hidden -> HIDDEN; any other
            # mixture means the role is merely CONFIGURED
            if all(s.enabled for s in services):
                status = ENABLED
            elif all(s.hidden for s in services):
                status = HIDDEN
            else:
                status = CONFIGURED
            result.append(self.create_role_status_dict(master, status))
        return result

    def create_search_params(self, ldap, api_instance, server=None):
        """Match the component service entries, optionally on one master."""
        search_base = DN(api_instance.env.container_masters,
                         api_instance.env.basedn)
        search_filter = ldap.make_filter_from_attr(
            'cn',
            self.component_services,
            rules=ldap.MATCH_ANY,
            exact=True
        )
        if server is not None:
            # narrow the base to the single master's subtree
            search_base = DN(('cn', server), search_base)
        return search_base, search_filter

    def status(self, api_instance, server=None):
        # only ipaConfigString and cn are needed to evaluate the status
        return super(ServiceBasedRole, self).status(
            api_instance, server=server, attrs_list=('ipaConfigString', 'cn'))
class ADtrustBasedRole(BaseServerRole):
    """
    Class which should instantiate roles based on membership in the
    'adtrust agents' sysaccount group.

    This role has no 'configured' status: a master is either a member of
    the group ('enabled') or not ('absent').
    """

    def get_result_from_entries(self, entries):
        """Map each member host entry to an ENABLED role status dict."""
        result = []
        for e in entries:
            result.append(
                self.create_role_status_dict(e['fqdn'][0], ENABLED)
            )
        return result

    def create_search_params(self, ldap, api_instance, server=None):
        """
        Build base/filter matching hosts that are members of the
        'adtrust agents' sysaccount group, optionally limited to *server*.
        """
        search_base = DN(
            api_instance.env.container_host, api_instance.env.basedn)
        search_filter = ldap.make_filter_from_attr(
            "memberof",
            DN(('cn', 'adtrust agents'),
               api_instance.env.container_sysaccounts,
               api_instance.env.basedn)
        )
        if server is not None:
            # narrow the result down to a single master
            server_filter = ldap.make_filter_from_attr(
                'fqdn',
                server,
                exact=True
            )
            search_filter = ldap.combine_filters(
                [search_filter, server_filter],
                rules=ldap.MATCH_ALL
            )
        return search_base, search_filter
# Canonical instances of all server roles known to IPA.
role_instances = (
    ADtrustBasedRole(u"ad_trust_agent_server", u"AD trust agent"),
    ServiceBasedRole(
        u"ad_trust_controller_server",
        u"AD trust controller",
        component_services=['ADTRUST']
    ),
    ServiceBasedRole(
        u"ca_server_server",
        u"CA server",
        component_services=['CA']
    ),
    ServiceBasedRole(
        u"dns_server_server",
        u"DNS server",
        component_services=['DNS', 'DNSKeySync']
    ),
    ServiceBasedRole(
        u"ipa_master_server",
        u"IPA master",
        component_services=['HTTP', 'KDC', 'KPASSWD']
    ),
    ServiceBasedRole(
        u"kra_server_server",
        u"KRA server",
        component_services=['KRA']
    )
)

# Canonical instances of all server attributes known to IPA; the third
# argument names the role (by attr_name) that providers must have enabled.
attribute_instances = (
    SingleValuedServerAttribute(
        u"ca_renewal_master_server",
        u"CA renewal master",
        u"ca_server_server",
        u"CA",
        u"caRenewalMaster",
    ),
    SingleValuedServerAttribute(
        u"dnssec_key_master_server",
        u"DNSSec key master",
        u"dns_server_server",
        u"DNSSEC",
        u"dnssecKeyMaster",
    ),
    ServerAttribute(
        u"pkinit_server_server",
        u"PKINIT enabled server",
        u"ipa_master_server",
        u"KDC",
        u"pkinitEnabled"
    )
)
| 22,358
|
Python
|
.py
| 517
| 34.26499
| 79
| 0.644503
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,687
|
dcerpc.py
|
freeipa_freeipa/ipaserver/dcerpc.py
|
# Authors:
# Alexander Bokovoy <abokovoy@redhat.com>
#
# Copyright (C) 2011-2016 Red Hat
# see file 'COPYING' for use and warranty information
#
# Portions (C) Andrew Tridgell, Andrew Bartlett
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure we only run this module at the server where samba4-python
# package is installed to avoid issues with unavailable modules
from __future__ import absolute_import
from contextlib import contextmanager
import logging
import re
import time
from ipalib import api, _
from ipalib import errors
from ipalib.constants import FQDN
from ipapython import ipautil
from ipapython.dn import DN
from ipapython.dnsutil import query_srv
from ipapython.ipaldap import ldap_initialize
from ipaserver.dcerpc_common import (TRUST_BIDIRECTIONAL,
TRUST_JOIN_EXTERNAL,
trust_type_string)
from ipalib.util import normalize_name
import os
import struct
import random
from samba import param
from samba import credentials
from samba.dcerpc import security, lsa, drsblobs, nbt, netlogon
from samba.ndr import ndr_pack, ndr_print
from samba import net
from samba import ntstatus
import samba
try:
from samba.trust_utils import CreateTrustedDomainRelax
except ImportError:
CreateTrustedDomainRelax = None
try:
from samba import arcfour_encrypt
except ImportError:
if CreateTrustedDomainRelax is None:
raise ImportError("No supported Samba Python bindings")
import ldap as _ldap
from ipapython import ipaldap
from ipapython.dnsutil import DNSName
from dns.exception import DNSException
import pysss_nss_idmap
import pysss
import six
from ipaplatform.paths import paths
from time import sleep
try:
from ldap.controls import RequestControl as LDAPControl
except ImportError:
from ldap.controls import LDAPControl
if six.PY3:
unicode = str
long = int
__doc__ = _("""
Classes to manage trust joins using DCE-RPC calls
The code in this module relies heavily on samba4-python package
and Samba4 python bindings.
""")
logger = logging.getLogger(__name__)
def is_sid_valid(sid):
    """Return True if *sid* parses as a valid security identifier."""
    try:
        # dom_sid raises TypeError/ValueError on malformed input
        security.dom_sid(sid)
        return True
    except (TypeError, ValueError):
        return False
# Shared error instance for every NTSTATUS value that means "bad credentials"
access_denied_error = errors.ACIError(
    info=_('CIFS server denied your credentials'))

# Map NTSTATUS codes (as negative signed 32-bit ints, the form the Samba
# bindings raise) to IPA errors. Hex values are the unsigned equivalents.
dcerpc_error_codes = {
    -1073741823:  # 0xC0000001
        errors.RemoteRetrieveError(
            reason=_('communication with CIFS server was unsuccessful')),
    -1073741790: access_denied_error,  # 0xC0000022
    -1073741715: access_denied_error,  # 0xC000006D
    -1073741614: access_denied_error,  # 0xC00000D2
    -1073741603:  # 0xC00000DD
        errors.ValidationError(
            name=_('AD domain controller'),
            error=_('unsupported functional level')),
    -1073741811:  # NT_STATUS_INVALID_PARAMETER
        errors.RemoteRetrieveError(
            reason=_('AD domain controller complains about communication '
                     'sequence. It may mean unsynchronized time on both '
                     'sides, for example')),
    -1073741776:  # NT_STATUS_INVALID_PARAMETER_MIX,
                  # we simply will skip the binding
        access_denied_error,
    -1073741772:  # NT_STATUS_OBJECT_NAME_NOT_FOUND
        errors.NotFound(
            reason=_('Cannot find specified domain or server name')),
}

# Map error message strings (some raised as symbolic names, some as the
# rendered Windows text) to IPA errors; looked up when no code matched.
dcerpc_error_messages = {
    "NT_STATUS_OBJECT_NAME_NOT_FOUND":
        errors.NotFound(
            reason=_('Cannot find specified domain or server name')),
    "The object name is not found.":
        errors.NotFound(
            reason=_('Cannot find specified domain or server name')),
    "WERR_NO_LOGON_SERVERS":
        errors.RemoteRetrieveError(
            reason=_('AD DC was unable to reach any IPA domain controller. '
                     'Most likely it is a DNS or firewall issue')),
    # This is a very long key, don't change it
    "There are currently no logon servers available to "
    "service the logon request.":
        errors.RemoteRetrieveError(
            reason=_('AD DC was unable to reach any IPA domain controller. '
                     'Most likely it is a DNS or firewall issue')),
    "NT_STATUS_INVALID_PARAMETER_MIX":
        errors.RequirementError(
            name=_('At least the domain or IP address should be specified')),
}

# Translate pysss_nss_idmap object-type constants to the strings used by
# higher-level IPA code.
pysss_type_key_translation_dict = {
    pysss_nss_idmap.ID_USER: 'user',
    pysss_nss_idmap.ID_GROUP: 'group',
    # Used for users with magic private groups
    pysss_nss_idmap.ID_BOTH: 'both',
}
class TrustTopologyConflictSolved(Exception):
    """
    Internal trust error: raised when a previously detected
    trust topology conflict is automatically solved.

    No separate errno is assigned as this error should
    not be visible outside the dcerpc.py code.
    """
def assess_dcerpc_error(error):
    """
    Convert an error raised by Samba bindings into an IPA error instance.

    :param error: a ``RuntimeError`` carrying ``(code, message)`` in its
        args, or a bare ``(code, message)`` tuple
    :returns: matching IPA error from the code/message maps, or a generic
        ``RemoteRetrieveError`` when neither part is recognized
    :raises RuntimeError: when *error* does not carry a two-element pair
    """
    error_tuple = error.args if isinstance(error, RuntimeError) else error
    if len(error_tuple) != 2:
        raise RuntimeError("Unable to parse error: {err!r}".format(err=error))
    num, message = error_tuple
    # numeric NTSTATUS lookup first, then message-string lookup
    if num:
        mapped = dcerpc_error_codes.get(num)
        if mapped is not None:
            return mapped
    if message:
        mapped = dcerpc_error_messages.get(message)
        if mapped is not None:
            return mapped
    # fall back to a generic error preserving both pieces of information
    reason = _('CIFS server communication error: code "%(num)s", '
               'message "%(message)s" (both may be "None")') % \
        dict(num=num, message=message)
    return errors.RemoteRetrieveError(reason=reason)
class ExtendedDNControl(LDAPControl):
    """
    LDAP_SERVER_EXTENDED_DN_OID request control (1.2.840.113556.1.4.529).

    Asks an Active Directory DC to return extended DNs that embed the
    object's GUID and SID alongside the plain DN.
    """
    def __init__(self):
        LDAPControl.__init__(
            self,
            controlType="1.2.840.113556.1.4.529",
            # non-critical: servers lacking support may ignore the control
            criticality=False,
            # BER-encoded SEQUENCE { INTEGER 1 }; per MS-ADTS flag 1
            # selects the string (not hex) rendering -- TODO confirm
            encodedControlValue=b'0\x03\x02\x01\x01'
        )
class DomainValidator:
    """
    Validates trusted-domain data (SIDs, domain names, objects) against
    the trust configuration stored in the local IPA LDAP tree and, when
    necessary, against the trusted domain's own DC.
    """
    # LDAP attribute names used by the local trust-related entries
    ATTR_FLATNAME = 'ipantflatname'
    ATTR_SID = 'ipantsecurityidentifier'
    ATTR_TRUSTED_SID = 'ipanttrusteddomainsid'
    ATTR_TRUST_PARTNER = 'ipanttrustpartner'
    ATTR_TRUST_AUTHOUT = 'ipanttrustauthoutgoing'
def __init__(self, api):
    """
    :param api: initialized IPA API instance; its ldap2 backend is used
        for all local LDAP searches
    """
    self.api = api
    # connection to the local IPA directory server
    self.ldap = self.api.Backend.ldap2
    # local domain data; populated by is_configured()
    self.domain = None
    self.flatname = None
    self.dn = None
    self.sid = None
    # cache of trusted domains (see get_trusted_domains); lazily filled
    self._domains = None
    self._info = dict()
    # Samba credential/parameter objects, created on demand elsewhere
    self._creds = None
    self._admin_creds = None
    self._parm = None
def is_configured(self):
    """
    Check whether the local domain has its trust record in LDAP.

    On success the flatname, SID, DN and domain name are cached on the
    instance for later validation calls.

    :returns: True when the local cifs domain entry exists, else False
    """
    cn_trust_local = DN(('cn', self.api.env.domain),
                        self.api.env.container_cifsdomains,
                        self.api.env.basedn)
    try:
        entry_attrs = self.ldap.get_entry(cn_trust_local,
                                          [self.ATTR_FLATNAME,
                                           self.ATTR_SID])
        self.flatname = entry_attrs[self.ATTR_FLATNAME][0]
        self.sid = entry_attrs[self.ATTR_SID][0]
        self.dn = entry_attrs.dn
        self.domain = self.api.env.domain
    except errors.NotFound:
        # no trust record -> local domain not configured for trusts
        return False
    return True
def get_trusted_domains(self):
    """
    Return the trusted domains known to the local IPA server.

    Keys of the returned mapping are trusted domain names; values are
    ``(flatname_lowercase, dom_sid)`` two-tuples. (The previous
    docstring incorrectly advertised three-tuples including
    trust_auth_outgoing; only two elements are stored.)

    :returns: case-insensitive dict (`ipautil.CIDict`) of trusted
        domains, or an empty list when no trust entries exist (kept for
        backward compatibility -- callers only test emptiness/membership)
    """
    cn_trust = DN(('cn', 'ad'), self.api.env.container_trusts,
                  self.api.env.basedn)
    try:
        search_kw = {'objectClass': 'ipaNTTrustedDomain'}
        # renamed from `filter` to avoid shadowing the builtin
        search_filter = self.ldap.make_filter(search_kw,
                                              rules=self.ldap.MATCH_ALL)
        entries, _truncated = self.ldap.find_entries(
            filter=search_filter,
            base_dn=cn_trust,
            attrs_list=[self.ATTR_TRUSTED_SID,
                        self.ATTR_FLATNAME,
                        self.ATTR_TRUST_PARTNER]
        )
        # We need to use case-insensitive dictionary since we use
        # domain names as keys and those are generally case-insensitive
        result = ipautil.CIDict()
        for e in entries:
            try:
                t_partner = e.single_value.get(self.ATTR_TRUST_PARTNER)
                fname_norm = e.single_value.get(self.ATTR_FLATNAME).lower()
                trusted_sid = e.single_value.get(self.ATTR_TRUSTED_SID)
            except KeyError as exc:
                # Some piece of trusted domain info in LDAP is missing
                # Skip the domain, but leave log entry for investigation
                logger.warning("Trusted domain '%s' entry misses an "
                               "attribute: %s", e.dn, exc)
                continue
            result[t_partner] = (fname_norm,
                                 security.dom_sid(trusted_sid))
        return result
    except errors.NotFound:
        # NOTE(review): an empty list, not an empty CIDict, is returned
        # here; callers only check emptiness so this is preserved as-is
        return []
def set_trusted_domains(self):
    """
    Populate the trusted-domain cache, failing when no trust exists.

    :raises errors.ValidationError: if the local domain is configured
        but no trusted domain has been established
    """
    # lazily fetch the trusted-domain list on first use
    if not self._domains:
        self._domains = self.get_trusted_domains()
    # configured local domain but no trusts: SID prefix checks
    # against trusted domains are impossible
    if not self._domains:
        raise errors.ValidationError(
            name='sid',
            error=_('no trusted domain is configured'))
def get_domain_by_sid(self, sid, exact_match=False):
    """
    Return the name of the trusted domain that *sid* belongs to.

    :param sid: SID in string form
    :param exact_match: when True the SID must equal a trusted domain
        SID exactly; when False a sub-authority prefix match suffices
    :raises errors.ValidationError: local domain not configured, or
        *sid* is not a valid SID string
    :raises errors.NotFound: no trusted domain matches
    """
    if not self.domain:
        # our domain is not configured or self.is_configured() never run
        # reject SIDs as we can't check correctness of them
        raise errors.ValidationError(name='sid',
                                     error=_('domain is not configured'))
    # Parse sid string to see if it is really in a SID format
    try:
        test_sid = security.dom_sid(sid)
    except (TypeError, ValueError):
        raise errors.ValidationError(name='sid',
                                     error=_('SID is not valid'))
    # Populate the trusted-domain cache (raises if no trusts configured)
    self.set_trusted_domains()
    # We have non-zero list of trusted domains and have to go through
    # them one by one and check their sids as prefixes / exact match
    # depending on the value of exact_match flag
    if exact_match:
        # check exact match of sids
        for domain in self._domains:
            if sid == str(self._domains[domain][1]):
                return domain
        # bug fix: the original implicit string concatenation was
        # missing a space ("...exactlywith any...")
        raise errors.NotFound(reason=_("SID does not match exactly "
                                       "with any trusted domain's SID"))
    else:
        # check as prefixes
        test_sid_subauths = test_sid.sub_auths
        for domain in self._domains:
            domsid = self._domains[domain][1]
            sub_auths = domsid.sub_auths
            num_auths = min(test_sid.num_auths, domsid.num_auths)
            if test_sid_subauths[:num_auths] == sub_auths[:num_auths]:
                return domain
        raise errors.NotFound(reason=_('SID does not match any '
                                       'trusted domain'))
def is_trusted_sid_valid(self, sid):
    """Return True iff *sid* falls within one of the trusted domains."""
    try:
        # prefix match against trusted domain SIDs
        self.get_domain_by_sid(sid)
        return True
    except (errors.ValidationError, errors.NotFound):
        return False
def is_trusted_domain_sid_valid(self, sid):
    """Return True iff *sid* exactly equals a trusted domain's SID."""
    try:
        # exact (not prefix) comparison against trusted domain SIDs
        self.get_domain_by_sid(sid, exact_match=True)
        return True
    except (errors.ValidationError, errors.NotFound):
        return False
def get_sid_from_domain_name(self, name):
    """Return the SID object for trusted domain *name*, or None if the
    name is not in the list of trusted domains."""
    trusted = self.get_trusted_domains()
    if name not in trusted:
        return None
    # second element of the cached tuple is the dom_sid object
    return trusted[name][1]
def get_trusted_domain_objects(self, domain=None, flatname=None, filter="",
                               attrs=None, scope=_ldap.SCOPE_SUBTREE,
                               basedn=None):
    """
    Search for LDAP objects (users or groups) in a trusted domain
    specified either by `domain' or `flatname'. The actual LDAP search
    is specified by `filter', `attrs', `scope' and `basedn'. When
    `basedn' is empty, database root DN is used.

    (A stray, misplaced one-line docstring statement that used to
    follow the assert has been removed -- it was a no-op.)

    :returns: non-empty list of matching LDAP entries
    :raises errors.ValidationError: local domain not configured, no
        trust configured, domain not trusted, or unknown flatname
    :raises errors.NotFound: no entry matched the search
    """
    assert domain is not None or flatname is not None
    if not self.domain:
        # our domain is not configured or self.is_configured() never run
        raise errors.ValidationError(name=_('Trust setup'),
                                     error=_('Our domain is '
                                             'not configured'))
    if not self._domains:
        self._domains = self.get_trusted_domains()
    if len(self._domains) == 0:
        # Our domain is configured but no trusted domains are configured
        raise errors.ValidationError(name=_('Trust setup'),
                                     error=_('No trusted domain is '
                                             'configured'))
    entries = None
    if domain is not None:
        if domain not in self._domains:
            raise errors.ValidationError(name=_('trusted domain object'),
                                         error=_('domain is not trusted'))
        # Now we have a name to check against our list of trusted domains
        entries = self.search_in_dc(domain, filter, attrs, scope, basedn)
    elif flatname is not None:
        # Flatname was specified, traverse through the list of trusted
        # domains first to find the proper one
        found_flatname = False
        for domain in self._domains:
            if self._domains[domain][0] == flatname:
                found_flatname = True
                entries = self.search_in_dc(domain, filter,
                                            attrs, scope, basedn)
                if entries:
                    break
        if not found_flatname:
            raise errors.ValidationError(name=_('trusted domain object'),
                                         error=_('no trusted domain '
                                                 'matched the specified '
                                                 'flat name'))
    if not entries:
        raise errors.NotFound(reason=_('trusted domain object not found'))
    return entries
def get_trusted_domain_object_sid(self, object_name,
                                  fallback_to_ldap=True):
    """
    Resolve *object_name* (user or group in a trusted domain) to a SID,
    first via SSSD, then -- unless disabled -- via the AD DC's LDAP.

    :param object_name: name or SID string of the object
    :param fallback_to_ldap: whether AD DC LDAP may be contacted when
        SSSD cannot resolve the name
    :returns: SID as a unicode string
    :raises errors.ValidationError: resolution failed or was ambiguous
    """
    # Fast path: ask SSSD's idmap cache first
    result = pysss_nss_idmap.getsidbyname(object_name)
    if object_name in result and \
       (pysss_nss_idmap.SID_KEY in result[object_name]):
        object_sid = result[object_name][pysss_nss_idmap.SID_KEY]
        if self.is_trusted_sid_valid(object_sid):
            return object_sid
        else:
            raise errors.ValidationError(name=_('trusted domain object'),
                                         error=_('Object does not belong '
                                                 'to a trusted domain'))

    # If fallback to AD DC LDAP is not allowed, bail out
    if not fallback_to_ldap:
        raise errors.ValidationError(name=_('trusted domain object'),
                                     error=_('SSSD was unable to resolve '
                                             'the object to a valid SID'))

    # Else, we are going to contact AD DC LDAP
    components = normalize_name(object_name)
    if not ('domain' in components or 'flatname' in components):
        # No domain or realm specified, ambiguous search
        raise errors.ValidationError(name=_('trusted domain object'),
                                     error=_('Ambiguous search, user '
                                             'domain was not specified'))

    attrs = ['objectSid']
    filter = '(&(sAMAccountName=%(name)s)' \
             '(|(objectClass=user)(objectClass=group)))' \
             % dict(name=components['name'])
    scope = _ldap.SCOPE_SUBTREE
    entries = self.get_trusted_domain_objects(components.get('domain'),
                                              components.get('flatname'),
                                              filter, attrs, scope)

    if len(entries) > 1:
        # Treat non-unique entries as invalid
        raise errors.ValidationError(name=_('trusted domain object'),
                                     error=_('Trusted domain did not '
                                             'return a unique object'))
    # __sid_to_str is defined elsewhere in this class (outside this view)
    sid = self.__sid_to_str(entries[0]['objectSid'][0])
    try:
        # round-trip through dom_sid to validate and normalize the SID
        test_sid = security.dom_sid(sid)
        return unicode(test_sid)
    except (TypeError, ValueError):
        raise errors.ValidationError(name=_('trusted domain object'),
                                     error=_('Trusted domain did not '
                                             'return a valid SID for '
                                             'the object'))
def get_trusted_domain_object_type(self, name_or_sid):
    """
    Return the type of the object corresponding to the given name in
    the trusted domain, which is either 'user', 'group' or 'both'.

    The 'both' types is used for users with magic private groups.
    """
    # pick the resolver matching the input form (SID vs. name)
    if is_sid_valid(name_or_sid):
        lookup = pysss_nss_idmap.getnamebysid
    else:
        lookup = pysss_nss_idmap.getsidbyname
    result = lookup(name_or_sid)

    object_type = None
    if name_or_sid in result:
        object_type = result[name_or_sid].get(pysss_nss_idmap.TYPE_KEY)

    # Do the translation to hide pysss_nss_idmap constants
    # from higher-level code
    return pysss_type_key_translation_dict.get(object_type)
def get_trusted_domain_object_from_sid(self, sid):
    """
    Resolve *sid* to a ``name@domain`` string, first via SSSD, then via
    the trusted domain's DC LDAP.

    :param sid: SID string belonging to a trusted domain
    :returns: unicode ``samaccountname@domain`` (both lowercased) when
        the LDAP fallback is used, or SSSD's name as-is
    :raises errors.ValidationError: invalid SID or non-unique LDAP match
    """
    logger.debug("Converting SID to object name: %s", sid)

    # Check if the given SID is valid
    if not self.is_trusted_sid_valid(sid):
        raise errors.ValidationError(name='sid', error='SID is not valid')

    # Use pysss_nss_idmap to obtain the name
    result = pysss_nss_idmap.getnamebysid(sid).get(sid)

    valid_types = (pysss_nss_idmap.ID_USER,
                   pysss_nss_idmap.ID_GROUP,
                   pysss_nss_idmap.ID_BOTH)

    if result:
        if result.get(pysss_nss_idmap.TYPE_KEY) in valid_types:
            return result.get(pysss_nss_idmap.NAME_KEY)

    # If unsuccessful, search AD DC LDAP
    logger.debug("Searching AD DC LDAP")

    # escape_filter_chars(sid_bytes, 2) but for bytes
    # every byte of the packed SID is rendered as \xx for the LDAP filter
    escaped_sid = "".join(
        "\\%02x" % b for b in ndr_pack(security.dom_sid(sid))
    )

    attrs = ['sAMAccountName']
    filter = (r'(&(objectSid=%(sid)s)'
              '(|(objectClass=user)(objectClass=group)))'
              % dict(sid=escaped_sid))  # sid in binary
    domain = self.get_domain_by_sid(sid)
    entries = self.get_trusted_domain_objects(domain=domain,
                                              filter=filter,
                                              attrs=attrs)

    if len(entries) > 1:
        # Treat non-unique entries as invalid
        raise errors.ValidationError(name=_('trusted domain object'),
                                     error=_('Trusted domain did not '
                                             'return a unique object'))

    object_name = (
        "%s@%s" % (entries[0].single_value['sAMAccountName'].lower(),
                   domain.lower())
    )

    return unicode(object_name)
    def __get_trusted_domain_user_and_groups(self, object_name):
        """
        Returns a tuple with user SID and a list of SIDs of all groups he is
        a member of, resolved by querying the trusted domain's LDAP directly.

        :param object_name: user SID or a name in 'DOMAIN\\name',
            'name@domain' or flat-name form (parsed by normalize_name)
        :raises errors.NotFound: when the user cannot be found in the
            trusted domain
        :raises errors.ValidationError: when no domain/realm can be derived
            from object_name

        LIMITATIONS:
            - only Trusted Admins group members can use this function as it
              uses secret for IPA-Trusted domain link
            - List of group SIDs does not contain group memberships outside
              of the trusted domain
        """
        components = normalize_name(object_name)
        domain = components.get('domain')
        flatname = components.get('flatname')
        name = components.get('name')

        is_valid_sid = is_sid_valid(object_name)
        if is_valid_sid:
            # Find a trusted domain for the SID
            domain = self.get_domain_by_sid(object_name)
            # Now search a trusted domain for a user with this SID
            attrs = ['cn']
            filter = '(&(objectClass=user)(objectSid=%(sid)s))' \
                     % dict(sid=object_name)
            try:
                entries = self.get_trusted_domain_objects(domain=domain,
                                                          filter=filter,
                                                          attrs=attrs,
                                                          scope=_ldap.SCOPE_SUBTREE)
            except errors.NotFound:
                raise errors.NotFound(reason=_('trusted domain user not found'))
            user_dn = entries[0].dn
        elif domain or flatname:
            # Name-based lookup via sAMAccountName
            attrs = ['cn']
            filter = '(&(sAMAccountName=%(name)s)(objectClass=user))' \
                     % dict(name=name)
            try:
                entries = self.get_trusted_domain_objects(domain,
                                                          flatname, filter, attrs,
                                                          _ldap.SCOPE_SUBTREE)
            except errors.NotFound:
                raise errors.NotFound(reason=_('trusted domain user not found'))
            user_dn = entries[0].dn
        else:
            # No domain or realm specified, ambiguous search
            raise errors.ValidationError(name=_('trusted domain object'),
                                         error=_('Ambiguous search, '
                                                 'user domain was not specified'))

        # Get SIDs of user object and it's groups
        # tokenGroups attribute must be read with a scope BASE for a known user
        # distinguished name to avoid search error
        attrs = ['objectSID', 'tokenGroups']
        filter = "(objectClass=user)"
        entries = self.get_trusted_domain_objects(domain,
                                                  flatname, filter, attrs,
                                                  _ldap.SCOPE_BASE, user_dn)
        # Binary SID values are converted to their textual S-... form
        object_sid = self.__sid_to_str(entries[0]['objectSid'][0])
        group_sids = [self.__sid_to_str(sid)
                      for sid in entries[0]['tokenGroups']]
        return (object_sid, group_sids)
    def get_trusted_domain_user_and_groups(self, object_name):
        """
        Returns a tuple with user SID and a list of SIDs of all groups he is
        a member of.

        First attempts to perform SID lookup via SSSD and in case of failure
        resorts back to checking trusted domain's AD DC LDAP directly.

        :param object_name: user SID or user name in the trusted domain

        LIMITATIONS:
            - only Trusted Admins group members can use this function as it
              uses secret for IPA-Trusted domain link if SSSD lookup failed
            - List of group SIDs does not contain group memberships outside
              of the trusted domain
        """
        group_sids = None
        group_list = None
        object_sid = None
        is_valid_sid = is_sid_valid(object_name)
        if is_valid_sid:
            # SID given: resolve it to a name via SSSD, then fetch the
            # group membership for that name
            object_sid = object_name
            result = pysss_nss_idmap.getnamebysid(object_name)
            if object_name in result and \
               (pysss_nss_idmap.NAME_KEY in result[object_name]):
                group_list = pysss.getgrouplist(
                    result[object_name][pysss_nss_idmap.NAME_KEY])
        else:
            # Name given: resolve it to a SID via SSSD, then fetch the
            # group membership for the name directly
            result = pysss_nss_idmap.getsidbyname(object_name)
            if object_name in result and \
               (pysss_nss_idmap.SID_KEY in result[object_name]):
                object_sid = result[object_name][pysss_nss_idmap.SID_KEY]
                group_list = pysss.getgrouplist(object_name)

        if not group_list:
            # SSSD could not resolve the object; fall back to querying
            # the trusted domain's AD DC LDAP directly
            return self.__get_trusted_domain_user_and_groups(object_name)

        group_sids = pysss_nss_idmap.getsidbyname(group_list)
        return (
            object_sid,
            [el[1][pysss_nss_idmap.SID_KEY] for el in group_sids.items()]
        )
def __sid_to_str(self, sid):
"""
Converts binary SID to string representation
Returns unicode string
"""
sid_rev_num = ord(sid[0])
number_sub_id = ord(sid[1])
ia = struct.unpack('!Q', '\x00\x00'+sid[2:8])[0]
subs = [
struct.unpack('<I', sid[8+4*i:12+4*i])[0]
for i in range(number_sub_id)
]
return 'S-%d-%d-%s' % (sid_rev_num, ia,
'-'.join([str(s) for s in subs]),)
    def kinit_as_administrator(self, domain):
        """
        Initializes ccache with http service credentials.

        Applies session code defaults for ccache directory and naming prefix.
        Session code uses kinit_+<pid>, we use
        kinit_+<TD>+<domain netbios name> so there is no clash.

        :param domain: DNS name of the trusted domain, used only to derive
            a unique ccache file name
        :returns: tuple (ccache path, principal) where (None, None) signifies
            an error on ccache initialization
        """
        if self._admin_creds is None:
            return (None, None)

        domain_suffix = domain.replace('.', '-')
        ccache_name = "kinit_TDA%s" % (domain_suffix)
        ccache_path = os.path.join(paths.IPA_CCACHES, ccache_name)

        # Credentials are stored as 'principal%password'
        (principal, password) = self._admin_creds.split('%', 1)

        # Destroy the contents of the ccache
        logger.debug('Destroying the contents of the separate ccache')

        ipautil.run(
            [paths.KDESTROY, '-A', '-c', ccache_path],
            env={'KRB5CCNAME': ccache_path},
            raiseonerr=False)

        # Obtain a fresh TGT using the AD administrator credentials
        logger.debug('Running kinit with credentials of AD administrator')

        result = ipautil.run(
            [paths.KINIT, principal],
            env={'KRB5CCNAME': ccache_path},
            stdin=password,
            raiseonerr=False)

        if result.returncode == 0:
            return (ccache_path, principal)
        else:
            return (None, None)
def search_in_dc(self, domain, filter, attrs, scope, basedn=None,
quiet=False):
"""
Perform LDAP search in a trusted domain `domain' Domain Controller.
Returns resulting entries or None.
"""
entries = None
info = self.__retrieve_trusted_domain_gc_list(domain)
if not info:
raise errors.ValidationError(
name=_('Trust setup'),
error=_('Cannot retrieve trusted domain GC list'))
for (host, port) in info['gc']:
entries = self.__search_in_dc(info, host, port, filter, attrs,
scope, basedn=basedn,
quiet=quiet)
if entries:
break
return entries
    def __search_in_dc(self, info, host, port, filter, attrs, scope,
                       basedn=None, quiet=False):
        """
        Actual search in AD LDAP server, using SASL GSSAPI authentication.

        :param info: dict with at least 'dns_domain' key (as produced by
            __retrieve_trusted_domain_gc_list)
        :param host: AD DC host name to contact
        :param port: used only for the error message; the connection is
            made via from_hostname_plain
        :param quiet: log search failures at debug instead of warning level
        :returns: LDAP result or None (also None when no admin ccache
            could be obtained)
        """
        ccache_name = None
        if self._admin_creds:
            (ccache_name,
             _principal) = self.kinit_as_administrator(info['dns_domain'])

        if ccache_name:
            # Run the whole search with the separate admin ccache active
            with ipautil.private_ccache(path=ccache_name):
                entries = None
                try:
                    # AD does not support SASL + TLS at the same time
                    # https://msdn.microsoft.com/en-us/library/cc223500.aspx
                    conn = ipaldap.LDAPClient.from_hostname_plain(
                        host,
                        no_schema=True,
                        decode_attrs=False
                    )
                    conn.gssapi_bind()

                    if basedn is None:
                        # Use domain root base DN
                        basedn = ipautil.realm_to_suffix(info['dns_domain'])

                    entries = conn.get_entries(basedn, scope, filter, attrs)
                except Exception as e:
                    msg = "Search on AD DC {host}:{port} failed with: {err}"\
                          .format(host=host, port=str(port), err=str(e))
                    if quiet:
                        logger.debug('%s', msg)
                    else:
                        logger.warning('%s', msg)

                return entries
        return None
    def __retrieve_trusted_domain_gc_list(self, domain):
        """
        Retrieves domain information and preferred GC list.

        Results are cached per-domain in self._info.

        Returns dictionary with following keys
             name       -- NetBIOS name of the trusted domain
             dns_domain -- DNS name of the trusted domain
             gc         -- array of tuples (server, port) for Global Catalog
        """
        if domain in self._info:
            return self._info[domain]

        if not self._creds:
            # Lazily build Samba credentials from the empty smb.conf template
            self._parm = param.LoadParm()
            self._parm.load(
                os.path.join(paths.USR_SHARE_IPA_DIR, "smb.conf.empty"))
            self._parm.set('netbios name', self.flatname)
            self._creds = credentials.Credentials()
            self._creds.set_kerberos_state(credentials.MUST_USE_KERBEROS)
            self._creds.guess(self._parm)
            self._creds.set_workstation(self.flatname)

        netrc = net.Net(creds=self._creds, lp=self._parm)
        finddc_error = None
        result = None
        flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_GC | nbt.NBT_SERVER_CLOSEST
        try:
            result = netrc.finddc(domain=domain, flags=flags)
        except RuntimeError as e:
            try:
                # If search of closest GC failed, attempt to find any one
                flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_GC
                result = netrc.finddc(domain=domain, flags=flags)
            except RuntimeError as e:
                finddc_error = e

        if not self._domains:
            self._domains = self.get_trusted_domains()

        info = dict()
        servers = []

        if result:
            # CLDAP discovery succeeded; use the PDC with the GC port
            info['name'] = unicode(result.domain_name)
            info['dns_domain'] = unicode(result.dns_domain)
            servers = [(unicode(result.pdc_dns_name), 3268)]
        else:
            # Fall back to cached trusted-domain data and DNS SRV records
            info['name'] = self._domains[domain]
            info['dns_domain'] = domain
            # Retrieve GC servers list
            gc_name = '_gc._tcp.%s.' % info['dns_domain']

            try:
                answers = query_srv(gc_name)
            except DNSException:
                answers = []

            for answer in answers:
                server = str(answer.target).rstrip(".")
                servers.append((server, answer.port))

        info['gc'] = servers

        # Both methods should not fail at the same time
        if finddc_error and len(info['gc']) == 0:
            raise assess_dcerpc_error(finddc_error)

        self._info[domain] = info
        return info
def string_to_array(what):
    """
    Convert a bytes or text value into a list of integer byte/code values.

    Samba bindings expect password/DATA_BUF blobs as arrays of integers.
    Iterating bytes under Python 3 already yields ints; text values need
    ord() per character.  The previous six.PY3 check was a Python 2
    leftover and is unnecessary on Python 3.
    """
    if isinstance(what, bytes):
        return list(what)
    return [ord(v) for v in what]
class TrustDomainInstance:
    """
    Represents a remote trusted domain and the LSA RPC connection used
    to query it and to establish/verify a trust against it.
    """

    def __init__(self, hostname, creds=None):
        """
        :param hostname: NetBIOS name to use for the local side; may be
            empty, in which case the smb.conf default is kept
        :param creds: Samba credentials object used for RPC binds
        """
        self.parm = param.LoadParm()
        self.parm.load(os.path.join(paths.USR_SHARE_IPA_DIR, "smb.conf.empty"))
        if len(hostname) > 0:
            self.parm.set('netbios name', hostname)
        self.creds = creds
        self.hostname = hostname
        # Domain information filled in by retrieve()/retrieve_anonymously()
        self.info = {}
        # LSA RPC pipe, set up by init_lsa_pipe()
        self._pipe = None
        # LSA policy handle, opened by retrieve()
        self._policy_handle = None
        self.read_only = False
        # Forest trust records (list of dicts) supplied by callers
        self.ftinfo_records = None
        # Cached lsa.ForestTrustInformation (see generate_ftinfo())
        self.ftinfo_data = None
        # Retry counter used by verify_trust()
        self.validation_attempts = 0
    def __gen_lsa_connection(self, binding):
        """
        Open an LSA RPC connection for the given DCE RPC binding string.

        :param binding: binding string as produced by __gen_lsa_bindings()
        :raises errors.RequirementError: if no credentials were supplied
        :raises: an IPA public error mapped from the underlying DCE RPC
            failure (see assess_dcerpc_error)
        """
        if self.creds is None:
            raise errors.RequirementError(name=_('CIFS credentials object'))
        try:
            result = lsa.lsarpc(binding, self.parm, self.creds)
            return result
        except RuntimeError as e:
            raise assess_dcerpc_error(e)
    def init_lsa_pipe(self, remote_host):
        """
        Try to initialize connection to the LSA pipe at remote host.
        This method tries consequently all possible transport options
        and selects one that works. See __gen_lsa_bindings() for details.

        The actual result may depend on details of existing credentials.
        For example, using signing causes NO_SESSION_KEY with Win2K8 and
        using kerberos against Samba with signing does not work.

        On success sets self._pipe, self.binding and self.session_key.

        :raises errors.ACIError: if every binding was rejected
        :raises errors.RemoteRetrieveError: if no pipe could be set up
            for any other reason
        """
        # short-cut: if LSA pipe is initialized, skip completely
        if self._pipe:
            return

        attempts = 0
        session_attempts = 0
        bindings = self.__gen_lsa_bindings(remote_host)
        for binding in bindings:
            try:
                self._pipe = self.__gen_lsa_connection(binding)
                # A usable connection must also expose a session key
                if self._pipe and self._pipe.session_key:
                    break
            except errors.ACIError:
                # Credentials rejected for this binding
                attempts = attempts + 1
            except RuntimeError:
                # When session key is not available, we just skip this binding
                session_attempts = session_attempts + 1

        if self._pipe is None and \
           (attempts + session_attempts) == len(bindings):
            raise errors.ACIError(
                info=_('CIFS server %(host)s denied your credentials')
                % dict(host=remote_host))

        if self._pipe is None:
            raise errors.RemoteRetrieveError(
                reason=_('Cannot establish LSA connection to %(host)s. '
                         'Is CIFS server running?') % dict(host=remote_host))
        self.binding = binding
        self.session_key = self._pipe.session_key
def __gen_lsa_bindings(self, remote_host):
"""
There are multiple transports to issue LSA calls. However, depending on
a system in use they may be blocked by local operating system policies.
Generate all we can use. init_lsa_pipe() will try them one by one until
there is one working.
We try NCACN_NP before NCACN_IP_TCP and use SMB2 before SMB1.
"""
transports = ('ncacn_np', 'ncacn_ip_tcp')
options = ('smb2,print', 'print')
return ['%s:%s[%s]' % (t, remote_host, o)
for t in transports for o in options]
    def retrieve_anonymously(self, remote_host,
                             discover_srv=False, search_pdc=False):
        """
        Retrieve basic domain information via anonymous CLDAP discovery.

        When retrieving DC information anonymously, we can't get SID of the
        domain from netlogon, so a rootDSE search with the
        LDAP_SERVER_EXTENDED_DN_OID control is used to recover it.

        :param remote_host: domain name (discover_srv=True) or DC address
        :param search_pdc: additionally require a PDC in the discovery
        :returns: True on success (self.info populated), False otherwise
        """
        netrc = net.Net(creds=self.creds, lp=self.parm)
        flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE
        if search_pdc:
            flags = flags | nbt.NBT_SERVER_PDC
        try:
            if discover_srv:
                result = netrc.finddc(domain=remote_host, flags=flags)
            else:
                result = netrc.finddc(address=remote_host, flags=flags)
        except RuntimeError as e:
            dcerpc_error = assess_dcerpc_error(e)
            logger.error(
                getattr(dcerpc_error, "info", None)
                or getattr(dcerpc_error, "reason", str(dcerpc_error))
            )
            return False

        if not result:
            return False
        self.info['name'] = unicode(result.domain_name)
        self.info['dns_domain'] = unicode(result.dns_domain)
        self.info['dns_forest'] = unicode(result.forest)
        self.info['guid'] = unicode(result.domain_uuid)
        self.info['dc'] = unicode(result.pdc_dns_name)
        self.info['is_pdc'] = (result.server_type & nbt.NBT_SERVER_PDC) != 0

        # Netlogon response doesn't contain SID of the domain.
        # We need to do rootDSE search with LDAP_SERVER_EXTENDED_DN_OID
        # control to reveal the SID
        ldap_uri = 'ldap://%s' % (result.pdc_dns_name)
        conn = ldap_initialize(ldap_uri)
        conn.set_option(_ldap.OPT_SERVER_CONTROLS, [ExtendedDNControl()])
        search_result = None
        try:
            _objtype, res = conn.search_s('', _ldap.SCOPE_BASE)[0]
            # Attribute values come back as bytes; decode them in place
            # so the naming context can be parsed as text
            for o in res.keys():
                if isinstance(res[o], list):
                    t = res[o]
                    for z, v in enumerate(t):
                        if isinstance(v, bytes):
                            t[z] = v.decode('utf-8')
                elif isinstance(res[o], bytes):
                    res[o] = res[o].decode('utf-8')
            search_result = res['defaultNamingContext'][0]
            self.info['dns_hostname'] = res['dnsHostName'][0]
        except _ldap.LDAPError as e:
            logger.error(
                "LDAP error when connecting to %s: %s",
                unicode(result.pdc_name), str(e))
        except KeyError as e:
            logger.error("KeyError: %s, LDAP entry from %s "
                         "returned malformed. Your DNS might be "
                         "misconfigured.",
                         unicode(e),
                         unicode(result.pdc_name))

        if search_result:
            # Extended DN carries the domain SID as '<SID=...>'
            self.info['sid'] = self.parse_naming_context(search_result)
        return True
def parse_naming_context(self, context):
naming_ref = re.compile('.*<SID=(S-.*)>.*')
return unicode(naming_ref.match(context).group(1))
    def retrieve(self, remote_host):
        """
        Retrieve trusted domain information over an authenticated LSA
        connection and store it in self.info (name, dns_domain,
        dns_forest, guid, sid, dc, is_pdc).

        For the PDC of a forest root domain (dns_domain == dns_forest)
        also fetches forest trust information into self.ftinfo_data.

        :raises: an IPA public error mapped from a DCE RPC failure
        """
        self.init_lsa_pipe(remote_host)

        objectAttribute = lsa.ObjectAttribute()
        objectAttribute.sec_qos = lsa.QosInfo()
        try:
            self._policy_handle = \
                self._pipe.OpenPolicy2("", objectAttribute,
                                       security.SEC_FLAG_MAXIMUM_ALLOWED)
            result = self._pipe.QueryInfoPolicy2(self._policy_handle,
                                                 lsa.LSA_POLICY_INFO_DNS)
        except RuntimeError as e:
            raise assess_dcerpc_error(e)

        self.info['name'] = unicode(result.name.string)
        self.info['dns_domain'] = unicode(result.dns_domain.string)
        self.info['dns_forest'] = unicode(result.dns_forest.string)
        self.info['guid'] = unicode(result.domain_guid)
        self.info['sid'] = unicode(result.sid)
        self.info['dc'] = remote_host

        try:
            result = self._pipe.QueryInfoPolicy2(self._policy_handle,
                                                 lsa.LSA_POLICY_INFO_ROLE)
        except RuntimeError as e:
            raise assess_dcerpc_error(e)

        self.info['is_pdc'] = (result.role == lsa.LSA_ROLE_PRIMARY)

        # On the forest root PDC, fetch forest trust information as well
        if all([self.info['is_pdc'],
                self.info['dns_domain'] == self.info['dns_forest']]):
            try:
                netr_pipe = netlogon.netlogon(self.binding,
                                              self.parm, self.creds)
                self.ftinfo_data = netr_pipe.netr_DsRGetForestTrustInformation(
                    self.info['dc'], None, 0)
            except RuntimeError as e:
                raise assess_dcerpc_error(e)
def generate_auth(self, trustdom_secret):
password_blob = string_to_array(trustdom_secret.encode('utf-16-le'))
clear_value = drsblobs.AuthInfoClear()
clear_value.size = len(password_blob)
clear_value.password = password_blob
clear_authinfo = drsblobs.AuthenticationInformation()
clear_authinfo.LastUpdateTime = samba.unix2nttime(int(time.time()))
clear_authinfo.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR
clear_authinfo.AuthInfo = clear_value
authinfo_array = drsblobs.AuthenticationInformationArray()
authinfo_array.count = 1
authinfo_array.array = [clear_authinfo]
outgoing = drsblobs.trustAuthInOutBlob()
outgoing.count = 1
outgoing.current = authinfo_array
self.auth_inoutblob = outgoing
if CreateTrustedDomainRelax is None:
# Samba Python bindings with no support for FIPS wrapper
# We have to generate AuthInfo ourselves which means
# we have to use RC4 encryption directly
confounder = [3] * 512
for i in range(512):
confounder[i] = random.randint(0, 255)
trustpass = drsblobs.trustDomainPasswords()
trustpass.confounder = confounder
trustpass.outgoing = outgoing
trustpass.incoming = outgoing
trustpass_blob = ndr_pack(trustpass)
encrypted_trustpass = arcfour_encrypt(self._pipe.session_key,
trustpass_blob)
auth_blob = lsa.DATA_BUF2()
auth_blob.size = len(encrypted_trustpass)
auth_blob.data = string_to_array(encrypted_trustpass)
auth_info = lsa.TrustDomainInfoAuthInfoInternal()
auth_info.auth_blob = auth_blob
self.auth_info = auth_info
    def generate_ftinfo(self, another_domain):
        """
        Generates TrustDomainInfoFullInfo2Internal structure.
        This structure allows to pass information about all domains associated
        with the another domain's realm.

        Only top level name and top level name exclusions are handled here.

        :param another_domain: TrustDomainInstance whose ftinfo_data /
            ftinfo_records are used
        :returns: lsa.ForestTrustInformation, or None when no records exist
        """
        # Prefer data fetched directly from the remote DC, if present
        if another_domain.ftinfo_data is not None:
            return another_domain.ftinfo_data

        if not another_domain.ftinfo_records:
            return None

        ftinfo_records = []
        info = lsa.ForestTrustInformation()

        for rec in another_domain.ftinfo_records:
            record = lsa.ForestTrustRecord()
            record.flags = 0
            record.time = rec['rec_time']
            record.type = rec['rec_type']
            record.forest_trust_data.string = rec['rec_name']

            ftinfo_records.append(record)

        info.count = len(ftinfo_records)
        info.entries = ftinfo_records
        # Cache the generated structure for subsequent calls
        another_domain.ftinfo_data = info
        return info
    def clear_ftinfo_conflict(self, another_domain, cinfo):
        """
        Attempt to clean up the forest trust collisions

        :param self: the forest we establish trust to
        :param another_domain: a forest that establishes trust to 'self'
        :param cinfo: lsa_ForestTrustCollisionInfo structure that contain
                      set of lsa_ForestTrustCollisionRecord structures
        :raises: TrustTopologyConflictSolved, TrustTopologyConflictError

        This code tries to perform intelligent job of going
        over individual collisions and making exclusion entries
        for affected IPA namespaces.

        There are three possible conflict configurations:
          - conflict of DNS namespace (TLN conflict, LSA_TLN_DISABLED_CONFLICT)
          - conflict of SID namespace (LSA_SID_DISABLED_CONFLICT)
          - conflict of NetBIOS namespace (LSA_NB_DISABLED_CONFLICT)

        we only can handle TLN conflicts because (a) excluding SID namespace
        is not possible and (b) excluding NetBIOS namespace not possible.
        These two types of conflicts should result in trust-add CLI error

        These conflicts can come from external source (another forest) or
        from internal source (another domain in the same forest). We only
        can fix the problems with another forest.

        To resolve TLN conflict we need to do following:
          1. Retrieve forest trust information for the forest we conflict on
          2. Add an exclusion entry for IPA DNS namespace to it
          3. Set forest trust information for the forest we conflict on
          4. Re-try establishing trust to the original forest

        This all can only be done under privileges of Active Directory admin
        that can change forest trusts. If we cannot have those privileges,
        the work has to be done manually in the Windows UI for
        'Active Directory Domains and Trusts' by the administrator of the
        original forest.
        """
        def domain_name_from_ftinfo(ftinfo):
            """
            Returns a domain name string from a ForestTrustRecord,
            or None for record types that carry no usable DNS name.

            :param ftinfo: LSA ForestTrustRecord to parse
            """
            if ftinfo.type == lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                return ftinfo.forest_trust_data.dns_domain_name.string
            elif ftinfo.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
                return ftinfo.forest_trust_data.string
            elif ftinfo.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
                # We should ignore TLN exclusion record because it
                # is already an exclusion so we aren't going to
                # change anything here
                return None
            else:
                # Ignore binary blobs we don't know about
                return None

        # List of entries for unsolved conflicts
        result = []

        # Current time as an NT timestamp (100ns intervals since 1601)
        trust_timestamp = long(time.time()*1e7+116444736000000000)

        # Collision information contains entries for specific trusted domains
        # we collide with. Look into TLN collisions and add a TLN exclusion
        # entry to the specific domain trust.
        logger.error("Attempt to solve forest trust topology conflicts")
        for rec in cinfo.entries:
            if rec.type == lsa.LSA_FOREST_TRUST_COLLISION_TDO:
                dominfo = self._pipe.lsaRQueryForestTrustInformation(
                    self._policy_handle,
                    rec.name,
                    lsa.LSA_FOREST_TRUST_DOMAIN_INFO)

                # Oops, we were unable to retrieve trust topology for this
                # trusted domain (forest).
                if not dominfo:
                    result.append(rec)
                    logger.error("Unable to resolve conflict for "
                                 "DNS domain %s in the forest %s "
                                 "for domain trust %s. Trust cannot "
                                 "be established unless this conflict "
                                 "is fixed manually.",
                                 another_domain.info['dns_domain'],
                                 self.info['dns_domain'],
                                 rec.name.string)
                    continue

                # Copy over the entries, extend with TLN exclusion
                entries = []
                is_our_record = False
                for e in dominfo.entries:
                    e1 = lsa.ForestTrustRecord()
                    e1.type = e.type
                    e1.flags = e.flags
                    e1.time = e.time
                    e1.forest_trust_data = e.forest_trust_data

                    # We either have a domain struct, a TLN name,
                    # or a TLN exclusion name in the list.
                    # The rest we should skip, those are binary blobs
                    dns_domain_name = domain_name_from_ftinfo(e)

                    # Search for a match in the topology of another domain
                    # if there is a match, we have to convert a record
                    # into a TLN exclusion to allow its routing to the
                    # another domain
                    for r in another_domain.ftinfo_records:
                        # r['rec_name'] cannot be None, thus we can ignore
                        # the case when dns_domain_name is None
                        if r['rec_name'] == dns_domain_name:
                            is_our_record = True

                            # Convert e1 into an exclusion record
                            e1.type = lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX
                            e1.flags = 0
                            e1.time = trust_timestamp
                            e1.forest_trust_data.string = dns_domain_name
                            break
                    entries.append(e1)

                # If no candidate for the exclusion entry was found
                # make sure it is the other domain itself, this covers
                # a most common case
                if not is_our_record:
                    # Create TLN exclusion record for the top level domain
                    record = lsa.ForestTrustRecord()
                    record.type = lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX
                    record.flags = 0
                    record.time = trust_timestamp
                    record.forest_trust_data.string = \
                        another_domain.info['dns_domain']
                    entries.append(record)

                fti = lsa.ForestTrustInformation()
                fti.count = len(entries)
                fti.entries = entries

                # Update the forest trust information now
                ldname = lsa.StringLarge()
                ldname.string = rec.name.string
                cninfo = None
                try:
                    cninfo = self._pipe.lsaRSetForestTrustInformation(
                        self._policy_handle,
                        ldname,
                        lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
                        fti, 0)
                except samba.NTSTATUSError as error:
                    # Handle NT_STATUS_INVALID_PARAMETER separately
                    if ntstatus.NT_STATUS_INVALID_PARAMETER == error.args[0]:
                        result.append(rec)
                        logger.error("Unable to resolve conflict for "
                                     "DNS domain %s in the forest %s "
                                     "for in-forest domain %s. Trust cannot "
                                     "be established unless this conflict "
                                     "is fixed manually.",
                                     another_domain.info['dns_domain'],
                                     self.info['dns_domain'],
                                     rec.name.string)
                    else:
                        raise assess_dcerpc_error(error)

                # A non-empty response means the server still reports
                # collisions after our update -- treat as unsolved
                if cninfo:
                    result.append(rec)
                    logger.error("When defining exception for DNS "
                                 "domain %s in forest %s for "
                                 "trusted forest %s, "
                                 "got collision info back:\n%s",
                                 another_domain.info['dns_domain'],
                                 self.info['dns_domain'],
                                 rec.name.string,
                                 ndr_print(cninfo))
            else:
                # Non-TLN collisions (SID/NetBIOS) cannot be fixed here
                result.append(rec)
                logger.error("Unable to resolve conflict for "
                             "DNS domain %s in the forest %s "
                             "for in-forest domain %s. Trust cannot "
                             "be established unless this conflict "
                             "is fixed manually.",
                             another_domain.info['dns_domain'],
                             self.info['dns_domain'],
                             rec.name.string)

        if len(result) == 0:
            logger.error("Successfully solved all conflicts")
            raise TrustTopologyConflictSolved()

        # Otherwise, raise TrustTopologyConflictError() exception
        domains = [x.name.string for x in result]
        raise errors.TrustTopologyConflictError(
            forest=self.info['dns_domain'],
            conflict=another_domain.info['dns_domain'],
            domains=domains)
    def update_ftinfo(self, another_domain):
        """
        Updates forest trust information in this forest corresponding
        to the another domain's information.

        :raises: TrustTopologyConflictSolved or TrustTopologyConflictError
            (from clear_ftinfo_conflict) when the server reports topology
            collisions
        """
        if another_domain.ftinfo_records:
            ftinfo = self.generate_ftinfo(another_domain)
            # Set forest trust information -- we do it only against AD DC as
            # smbd already has the information about itself
            ldname = lsa.StringLarge()
            ldname.string = another_domain.info['dns_domain']
            ftlevel = lsa.LSA_FOREST_TRUST_DOMAIN_INFO
            # RSetForestTrustInformation returns collision information
            # for trust topology
            cinfo = self._pipe.lsaRSetForestTrustInformation(
                self._policy_handle,
                ldname,
                ftlevel,
                ftinfo, 0)
            if cinfo:
                logger.error("When setting forest trust information, "
                             "got collision info back:\n%s",
                             ndr_print(cinfo))
                self.clear_ftinfo_conflict(another_domain, cinfo)
    def establish_trust(self, another_domain, trustdom_secret,
                        trust_type='bidirectional', trust_external=False):
        """
        Establishes trust between our and another domain.

        Input: another_domain -- instance of TrustDomainInstance,
                                 initialized with #retrieve call
               trustdom_secret -- shared secret used for the trust
               trust_type      -- 'bidirectional' adds the outbound
                                  direction on top of inbound
               trust_external  -- mark the trust non-transitive and skip
                                  forest trust information updates

        Any pre-existing uplevel trust to the same domain is deleted
        first; a pre-existing trust of another type is a hard error.

        :raises errors.ValidationError: NetBIOS name clash or an existing
            non-uplevel trust
        :raises: IPA public errors mapped from DCE RPC failures
        """
        if self.info['name'] == another_domain.info['name']:
            # Check that NetBIOS names do not clash
            raise errors.ValidationError(name='AD Trust Setup',
                                         error=_('the IPA server and the '
                                                 'remote domain cannot share '
                                                 'the same NetBIOS name: %s')
                                         % self.info['name'])

        info = lsa.TrustDomainInfoInfoEx()
        info.domain_name.string = another_domain.info['dns_domain']
        info.netbios_name.string = another_domain.info['name']
        info.sid = security.dom_sid(another_domain.info['sid'])
        info.trust_direction = lsa.LSA_TRUST_DIRECTION_INBOUND
        if trust_type == TRUST_BIDIRECTIONAL:
            info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
        info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
        info.trust_attributes = 0
        if trust_external:
            info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE

        # Remove an existing trust to the same domain, if any
        try:
            dname = lsa.String()
            dname.string = another_domain.info['dns_domain']
            res = self._pipe.QueryTrustedDomainInfoByName(
                self._policy_handle,
                dname,
                lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO
            )
            if res.info_ex.trust_type != lsa.LSA_TRUST_TYPE_UPLEVEL:
                msg = _('There is already a trust to {ipa_domain} with '
                        'unsupported type {trust_type}. Please remove '
                        'it manually on AD DC side.')
                ttype = trust_type_string(
                    res.info_ex.trust_type, res.info_ex.trust_attributes
                )
                err = msg.format(
                    ipa_domain=another_domain.info['dns_domain'],
                    trust_type=ttype)

                raise errors.ValidationError(
                    name=_('AD domain controller'),
                    error=err
                )

            self._pipe.DeleteTrustedDomain(self._policy_handle,
                                           res.info_ex.sid)
        except RuntimeError as e:
            # pylint: disable=unbalanced-tuple-unpacking
            num, _message = e.args
            # pylint: enable=unbalanced-tuple-unpacking
            # Ignore anything but access denied (NT_STATUS_ACCESS_DENIED)
            if num == -1073741790:
                raise access_denied_error

        # Create the trusted domain object with the shared secret
        try:
            self.generate_auth(trustdom_secret)
            if CreateTrustedDomainRelax is not None:
                trustdom_handle = CreateTrustedDomainRelax(
                    self._pipe, self._policy_handle, info,
                    security.SEC_STD_DELETE,
                    self.auth_inoutblob, self.auth_inoutblob)
            else:
                # Samba Python bindings with no support for FIPS wrapper
                # We keep using older code
                trustdom_handle = self._pipe.CreateTrustedDomainEx2(
                    self._policy_handle,
                    info, self.auth_info,
                    security.SEC_STD_DELETE)
        except RuntimeError as e:
            raise assess_dcerpc_error(e)

        # We should use proper trustdom handle in order to modify the
        # trust settings. Samba insists this has to be done with LSA
        # OpenTrustedDomain* calls, it is not enough to have a handle
        # returned by the CreateTrustedDomainEx2 call.
        trustdom_handle = self._pipe.OpenTrustedDomainByName(
            self._policy_handle,
            dname,
            security.SEC_FLAG_MAXIMUM_ALLOWED)
        try:
            infocls = lsa.TrustDomainInfoSupportedEncTypes()
            infocls.enc_types = security.KERB_ENCTYPE_RC4_HMAC_MD5
            infocls.enc_types |= security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
            infocls.enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
            self._pipe.SetInformationTrustedDomain(
                trustdom_handle,
                lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
                infocls)
        except RuntimeError as e:
            # We can ignore the error here -- changing enctypes is for
            # improved security but the trust will work with default values as
            # well. In particular, the call may fail against Windows 2003
            # server as that one doesn't support AES encryption types
            pass

        if not trust_external:
            # Non-external trusts are marked forest-transitive;
            # failure here is logged but not fatal
            try:
                info = self._pipe.QueryTrustedDomainInfo(
                    trustdom_handle,
                    lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
                info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
                self._pipe.SetInformationTrustedDomain(
                    trustdom_handle,
                    lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX, info)
            except RuntimeError as e:
                logger.error(
                    'unable to set trust transitivity status: %s', str(e))

        # Updating forest trust info may fail
        # If it failed due to topology conflict, it may be fixed automatically
        # update_ftinfo() will throw exceptions in that case
        # Note that MS-LSAD 3.1.4.7.16 says:
        # -------------------------
        # The server MUST also make sure that the trust attributes associated
        # with the trusted domain object referenced by the TrustedDomainName
        # parameter has the TRUST_ATTRIBUTE_FOREST_TRANSITIVE set.
        # If the attribute is not present, the server MUST return
        # STATUS_INVALID_PARAMETER.
        # -------------------------
        # Thus, we must not update forest trust info for the external trust
        if self.info['is_pdc'] and not trust_external:
            self.update_ftinfo(another_domain)
def verify_trust(self, another_domain):
def retrieve_netlogon_info_2(logon_server, domain, function_code, data):
try:
netr_pipe = netlogon.netlogon(domain.binding,
domain.parm, domain.creds)
result = netr_pipe.netr_LogonControl2Ex(
logon_server=logon_server,
function_code=function_code,
level=2,
data=data)
return result
except RuntimeError as e:
raise assess_dcerpc_error(e)
result = retrieve_netlogon_info_2(None, self,
netlogon.NETLOGON_CONTROL_TC_VERIFY,
another_domain.info['dns_domain'])
if result and result.flags and netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
if result.pdc_connection_status[0] != 0 and \
result.tc_connection_status[0] != 0:
if result.pdc_connection_status[1] == "WERR_ACCESS_DENIED":
# Most likely AD DC hit another IPA replica which
# yet has no trust secret replicated
# Sleep and repeat again
self.validation_attempts += 1
if self.validation_attempts < 10:
sleep(5)
return self.verify_trust(another_domain)
# If we get here, we already failed 10 times
srv_record_templates = (
'_ldap._tcp.%s',
'_ldap._tcp.Default-First-Site-Name._sites.dc._msdcs.%s'
)
srv_records = ', '.join(
[srv_record % api.env.domain
for srv_record in srv_record_templates]
)
error_message = _(
'IPA master denied trust validation requests from AD '
'DC %(count)d times. Most likely AD DC contacted a '
'replica that has no trust information replicated '
'yet. Additionally, please check that AD DNS is able '
'to resolve %(records)s SRV records to the correct '
'IPA server.') % dict(count=self.validation_attempts,
records=srv_records)
raise errors.ACIError(info=error_message)
raise assess_dcerpc_error(result.pdc_connection_status)
return True
return False
@contextmanager
def discover_trust_instance(api, mydomain, trustdomain,
                            creds=None, server=None):
    """
    Context manager yielding a TrustDomainInstance for `trustdomain'.

    Discovers a DC for the trusted domain via anonymous CLDAP, then sets
    up Kerberos credentials for it: either the credentials already in the
    environment (creds is a bool) or a separate ccache initialized from
    the passed-in admin credentials string.

    Yields None when the local host is not configured for trusts.

    :param mydomain: our own domain name (used as Samba workgroup)
    :param trustdomain: DNS name of the trusted domain
    :param creds: True to reuse environment credentials, or a
        'principal%password' string for a one-way trust
    :param server: optional DC address to contact instead of discovery
    :raises errors.ValidationError: when creds is falsy and not a bool
    :raises: an IPA public error mapped from a DCE RPC failure
    """
    domain_validator = DomainValidator(api)
    configured = domain_validator.is_configured()
    if not configured:
        yield None
        return

    td = TrustDomainInstance('')
    td.parm.set('workgroup', mydomain)
    cr = credentials.Credentials()
    cr.set_kerberos_state(credentials.DONT_USE_KERBEROS)
    cr.guess(td.parm)
    cr.set_anonymous()
    cr.set_workstation(domain_validator.flatname)
    netrc = net.Net(creds=cr, lp=td.parm)
    try:
        if server:
            result = netrc.finddc(address=server,
                                  flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS)
        else:
            result = netrc.finddc(domain=trustdomain,
                                  flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS)
    except RuntimeError as e:
        raise assess_dcerpc_error(e)

    td.info['dc'] = unicode(result.pdc_dns_name)
    td.info['name'] = unicode(result.dns_domain)
    if type(creds) is bool:
        # Rely on existing Kerberos credentials in the environment
        td.creds = credentials.Credentials()
        td.creds.set_kerberos_state(credentials.MUST_USE_KERBEROS)
        enforce_smb_encryption(td.creds)
        td.creds.guess(td.parm)
        td.creds.set_workstation(domain_validator.flatname)
        # Debug aid only: the environment may contain sensitive values
        # (e.g. KRB5CCNAME paths), so never log it above debug level
        logger.debug('environment: %s', str(os.environ))
        yield td
    else:
        # Attempt to authenticate as HTTP/ipa.master and use cross-forest trust
        # or as passed-in user in case of a one-way trust
        domval = DomainValidator(api)
        ccache_name = None
        if creds:
            domval._admin_creds = creds
            ccache_name, _principal = domval.kinit_as_administrator(
                trustdomain)
        else:
            raise errors.ValidationError(name=_('Credentials'),
                                         error=_('Missing credentials for '
                                                 'cross-forest communication'))
        td.creds = credentials.Credentials()
        td.creds.set_kerberos_state(credentials.MUST_USE_KERBEROS)
        enforce_smb_encryption(td.creds)
        if ccache_name:
            with ipautil.private_ccache(path=ccache_name):
                td.creds.guess(td.parm)
                td.creds.set_workstation(domain_validator.flatname)
                yield td
def fetch_domains(api, mydomain, trustdomain, creds=None, server=None):
    """Fetch forest trust topology for *trustdomain* from its DC.

    Retrieves forest trust information via netlogon (or reuses data already
    present on the discovered TrustDomainInstance) and converts it into a
    dict with three keys:

    - ``domains``: child domains of the forest (cn, flat name, SID)
    - ``suffixes``: top-level name suffixes associated with the forest
    - ``ftinfo_data``: the same records re-packed as an NDR blob

    Returns None when trusts are not configured or nothing was retrieved.
    """
    def communicate(td):
        td.init_lsa_pipe(td.info['dc'])
        netr_pipe = netlogon.netlogon(td.binding, td.parm, td.creds)
        # Older FreeIPA versions used netr_DsrEnumerateDomainTrusts call
        # but it doesn't provide information about non-domain UPNs associated
        # with the forest, thus we have to use netr_DsRGetForestTrustInformation
        domains = netr_pipe.netr_DsRGetForestTrustInformation(td.info['dc'],
                                                              None, 0)
        return domains
    domains = None
    with discover_trust_instance(api, mydomain, trustdomain,
                                 creds=creds, server=server) as td:
        if td is None:
            # Trust support not configured on this server
            return None
        if td.ftinfo_data is not None:
            # Reuse data already attached to the instance instead of
            # making another netlogon round-trip.
            domains = td.ftinfo_data
        else:
            domains = communicate(td)
    if domains is None:
        return None
    result = {'domains': {}, 'suffixes': {}}
    # netr_DsRGetForestTrustInformation returns two types of entries:
    # domain information -- name, NetBIOS name, SID of the domain
    # top level name info -- a name suffix associated with the forest
    # We should ignore forest root name/name suffix as it is already part
    # of trust information for IPA purposes and only add what's inside the forest
    ftinfo_records = []
    ftinfo = drsblobs.ForestTrustInfo()
    for t in domains.entries:
        record = drsblobs.ForestTrustInfoRecord()
        record.flags = t.flags
        record.timestamp = t.time
        # NOTE: record.type (the union discriminant) must be set before
        # record.data is populated below.
        record.type = t.type
        if t.type == lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
            record.data.sid = t.forest_trust_data.domain_sid
            record.data.dns_name.string = \
                t.forest_trust_data.dns_domain_name.string
            record.data.netbios_name.string = \
                t.forest_trust_data.netbios_domain_name.string
            tname = unicode(t.forest_trust_data.dns_domain_name.string)
            if tname != trustdomain:
                # Only child domains are reported; the forest root itself
                # is already covered by the trust object.
                result['domains'][tname] = {
                    'cn': tname,
                    'ipantflatname': unicode(
                        t.forest_trust_data.netbios_domain_name.string),
                    'ipanttrusteddomainsid': unicode(
                        t.forest_trust_data.domain_sid)
                }
        elif t.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
            record.data.string = t.forest_trust_data.string
            tname = unicode(t.forest_trust_data.string)
            if tname == trustdomain:
                # Skip the forest root TLN entirely: it is neither reported
                # nor armored into ftinfo_data.
                continue
            result['suffixes'][tname] = {'cn': tname}
        elif t.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
            record.data.string = t.forest_trust_data.string
        rc = drsblobs.ForestTrustInfoRecordArmor()
        rc.record = record
        ftinfo_records.append(rc)
    ftinfo.count = len(ftinfo_records)
    ftinfo.records = ftinfo_records
    # NDR-pack the collected records for storage/forwarding
    result['ftinfo_data'] = ndr_pack(ftinfo)
    return result
def enforce_smb_encryption(creds):
    """Require SMB transport encryption when the Samba bindings allow it.

    Samba builds that predate SMB encryption support expose neither the
    constant nor the setter; in that case the AttributeError is swallowed
    and the credentials are left unchanged.
    """
    try:
        required_level = credentials.SMB_ENCRYPTION_REQUIRED
        creds.set_smb_encryption(required_level)
    except AttributeError:
        # Samba too old — nothing to enforce.
        pass
def retrieve_remote_domain(hostname, local_flatname,
                           realm, realm_server=None,
                           realm_admin=None, realm_passwd=None):
    """Build a TrustDomainInstance describing the remote (AD) domain.

    First retrieves domain information anonymously; when administrator
    credentials are supplied, retries with authenticated access inside a
    private krb5 config and credentials cache.

    :param hostname: local host name used as the SMB workstation name
    :param local_flatname: flat (NetBIOS) name of the local domain
    :param realm: remote realm name
    :param realm_server: optional DC to contact instead of SRV discovery
    :param realm_admin: optional administrator name (DOMAIN\\user, user or
        user@REALM forms are accepted)
    :param realm_passwd: password for *realm_admin*
    :returns: TrustDomainInstance; ``read_only`` is True when only
        anonymous data could be obtained
    :raises: errors.ValidationError for a non-Kerberos admin name that
        does not match the discovered domain
    """
    def get_instance(local_flatname):
        # Fetch data from foreign domain using password only
        rd = TrustDomainInstance('')
        rd.parm.set('workgroup', local_flatname)
        rd.creds = credentials.Credentials()
        rd.creds.set_kerberos_state(credentials.DONT_USE_KERBEROS)
        rd.creds.guess(rd.parm)
        return rd
    rd = get_instance(local_flatname)
    rd.creds.set_anonymous()
    rd.creds.set_workstation(hostname)
    if realm_server is None:
        rd.retrieve_anonymously(realm, discover_srv=True, search_pdc=True)
    else:
        rd.retrieve_anonymously(realm_server,
                                discover_srv=False, search_pdc=True)
    rd.read_only = True
    if realm_admin and realm_passwd:
        if 'name' in rd.info:
            realm_netbios = ""
            names = realm_admin.split('\\')
            if len(names) > 1:
                # realm admin is in DOMAIN\user format
                # strip DOMAIN part as we'll enforce the one discovered
                realm_admin = names[-1]
                realm_netbios = names[0]
            names = realm_admin.split('@')
            if len(names) == 1:
                # No realm given: reject a NetBIOS prefix that does not
                # match the discovered domain, otherwise qualify the name
                # with the discovered DNS domain.
                if all([len(realm_netbios) != 0,
                        realm_netbios.lower() != rd.info['name'].lower()]):
                    raise errors.ValidationError(
                        name=_('Credentials'),
                        error=_('Non-Kerberos user name was specified, '
                                'please provide user@REALM variant instead'))
                realm_admin = r"%s@%s" % (
                    realm_admin, rd.info['dns_domain'].upper())
                realm = rd.info['dns_domain'].upper()
        # Samba credentials string: "principal%password"
        auth_string = r"%s%%%s" \
            % (realm_admin, realm_passwd)
        with ipautil.private_krb5_config(realm, realm_server, dir='/tmp'):
            with ipautil.private_ccache():
                td = get_instance(local_flatname)
                td.creds.set_kerberos_state(credentials.MUST_USE_KERBEROS)
                enforce_smb_encryption(td.creds)
                td.creds.parse_string(auth_string)
                td.creds.set_workstation(hostname)
                if realm_server is None:
                    # we must have rd.info['dns_hostname'] then
                    # as it is part of the anonymous discovery
                    td.retrieve(rd.info['dns_hostname'])
                else:
                    td.retrieve(realm_server)
        td.read_only = False
        return td
    # Otherwise, use anonymously obtained data
    return rd
class TrustDomainJoins:
    """Establish and manage a cross-forest trust between IPA and AD.

    Holds a TrustDomainInstance for the local IPA domain and one for the
    remote domain, and drives trust establishment either with full
    administrator credentials or as the IPA-only half of a shared-secret
    trust.
    """
    def __init__(self, api):
        self.api = api
        self.local_domain = None
        self.remote_domain = None
        # Bitmask of TRUST_JOIN_* behaviors enabled via allow_behavior()
        self.__allow_behavior = 0
        domain_validator = DomainValidator(api)
        self.configured = domain_validator.is_configured()
        if self.configured:
            self.local_flatname = domain_validator.flatname
            self.local_dn = domain_validator.dn
            self.__populate_local_domain()
    def allow_behavior(self, *flags):
        """OR the given behavior flags into the allowed-behavior mask."""
        for f in flags:
            self.__allow_behavior |= int(f)
    def __populate_local_domain(self):
        # Initialize local domain info using kerberos only
        ld = TrustDomainInstance(self.local_flatname)
        ld.creds = credentials.Credentials()
        ld.creds.set_kerberos_state(credentials.MUST_USE_KERBEROS)
        enforce_smb_encryption(ld.creds)
        ld.creds.guess(ld.parm)
        ld.creds.set_workstation(ld.hostname)
        ld.retrieve(FQDN)
        self.local_domain = ld
    def populate_remote_domain(self, realm, realm_server=None,
                               realm_admin=None, realm_passwd=None):
        """Discover the remote domain and store its TrustDomainInstance."""
        self.remote_domain = retrieve_remote_domain(
            self.local_domain.hostname,
            self.local_domain.info['name'],
            realm,
            realm_server=realm_server,
            realm_admin=realm_admin,
            realm_passwd=realm_passwd)
    def get_realmdomains(self):
        """
        Generate list of records for forest trust information about
        our realm domains. Note that the list generated currently
        includes only top level domains, no exclusion domains, and
        no TDO objects as we handle the latter in a separate way
        """
        if self.local_domain.read_only:
            return
        self.local_domain.ftinfo_records = []
        self.local_domain.ftinfo_data = None
        realm_domains = self.api.Command.realmdomains_show()['result']
        # Use realmdomains' modification timestamp
        # to judge records' last update time
        entry = self.api.Backend.ldap2.get_entry(
            realm_domains['dn'], ['modifyTimestamp'])
        # Convert the timestamp to Windows 64-bit timestamp format
        # (100 ns intervals since 1601-01-01).
        # NOTE: ``long`` is presumably the py2/py3 compat alias defined at
        # module top (long = int on py3) — confirm against the file header.
        trust_timestamp = long(
            time.mktime(
                entry.single_value.get('modifytimestamp').timetuple()
            )*1e7+116444736000000000)
        forest = DNSName(self.local_domain.info['dns_forest'])
        # tforest is IPA forest. keep the line below for future checks
        # tforest = DNSName(self.remote_domain.info['dns_forest'])
        for dom in realm_domains['associateddomain']:
            d = DNSName(dom)
            # We should skip all DNS subdomains of our forest
            # because we are going to add *.<forest> TLN anyway
            if forest.is_superdomain(d) and forest != d:
                continue
            # We also should skip single label TLDs as they
            # cannot be added as TLNs
            if len(d.labels) == 1:
                continue
            ftinfo = dict()
            ftinfo['rec_name'] = dom
            ftinfo['rec_time'] = trust_timestamp
            ftinfo['rec_type'] = lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME
            self.local_domain.ftinfo_records.append(ftinfo)
    def join_ad_full_credentials(self, realm, realm_server, realm_admin,
                                 realm_passwd, trust_type):
        """Establish a trust using AD administrator credentials.

        Creates both sides of the trust with a random shared password and,
        for bidirectional trusts, verifies the trust from the AD side.
        Returns a dict with ``local``, ``remote`` and ``verified`` keys, or
        None when not configured / nothing was done.
        """
        if not self.configured:
            return None
        if not(isinstance(self.remote_domain, TrustDomainInstance)):
            self.populate_remote_domain(
                realm,
                realm_server,
                realm_admin,
                realm_passwd
            )
        trust_external = bool(self.__allow_behavior & TRUST_JOIN_EXTERNAL)
        if self.remote_domain.info['dns_domain'] != \
                self.remote_domain.info['dns_forest']:
            # Remote side is not a forest root; only allowed when an
            # external trust was explicitly permitted.
            if not trust_external:
                raise errors.NotAForestRootError(
                    forest=self.remote_domain.info['dns_forest'],
                    domain=self.remote_domain.info['dns_domain'])
        if not self.remote_domain.read_only:
            trustdom_pass = samba.generate_random_password(128, 128)
            self.get_realmdomains()
            # Establishing trust may throw an exception for topology
            # conflict. If it was solved, re-establish the trust again
            # Otherwise let the CLI to display a message about the conflict
            with ipautil.private_krb5_config(realm, realm_server, dir='/tmp'):
                try:
                    self.remote_domain.establish_trust(self.local_domain,
                                                       trustdom_pass,
                                                       trust_type,
                                                       trust_external)
                except TrustTopologyConflictSolved:
                    # we solved topology conflict, retry again
                    self.remote_domain.establish_trust(self.local_domain,
                                                       trustdom_pass,
                                                       trust_type,
                                                       trust_external)
            try:
                self.local_domain.establish_trust(self.remote_domain,
                                                  trustdom_pass,
                                                  trust_type, trust_external)
            except TrustTopologyConflictSolved:
                self.local_domain.establish_trust(self.remote_domain,
                                                  trustdom_pass,
                                                  trust_type, trust_external)
            # if trust is inbound, we don't need to verify it because
            # AD DC will respond with WERR_NO_SUCH_DOMAIN --
            # it only does verification for outbound trusts.
            result = True
            if trust_type == TRUST_BIDIRECTIONAL:
                with ipautil.private_krb5_config(realm,
                                                 realm_server, dir='/tmp'):
                    result = self.remote_domain.verify_trust(self.local_domain)
            return dict(
                local=self.local_domain,
                remote=self.remote_domain,
                verified=result
            )
        return None
    def join_ad_ipa_half(self, realm, realm_server, trustdom_passwd, trust_type):
        """Establish only the IPA side of a trust with a given shared secret.

        No verification is attempted (the AD side is not touched), hence
        ``verified`` is always False in the returned dict.
        """
        if not self.configured:
            return None
        if not(isinstance(self.remote_domain, TrustDomainInstance)):
            self.populate_remote_domain(realm, realm_server, realm_passwd=None)
        trust_external = bool(self.__allow_behavior & TRUST_JOIN_EXTERNAL)
        if self.remote_domain.info['dns_domain'] != \
                self.remote_domain.info['dns_forest']:
            if not trust_external:
                raise errors.NotAForestRootError(
                    forest=self.remote_domain.info['dns_forest'],
                    domain=self.remote_domain.info['dns_domain'])
        self.local_domain.establish_trust(self.remote_domain,
                                          trustdom_passwd,
                                          trust_type, trust_external)
        return {
            'local': self.local_domain,
            'remote': self.remote_domain,
            'verified': False,
        }
| 79,465
|
Python
|
.py
| 1,647
| 33.620522
| 84
| 0.559585
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,688
|
p11helper.py
|
freeipa_freeipa/ipaserver/p11helper.py
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import random
import ctypes.util
import binascii
import struct
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cffi import FFI
# Python 3 has no separate ``unicode`` type; alias it for py2/py3 compat.
if six.PY3:
    unicode = str
_ffi = FFI()
_ffi.cdef('''
/* p11-kit/pkcs11.h */
typedef unsigned long CK_FLAGS;
struct _CK_VERSION
{
unsigned char major;
unsigned char minor;
};
typedef unsigned long CK_SLOT_ID;
typedef CK_SLOT_ID *CK_SLOT_ID_PTR;
typedef unsigned long CK_SESSION_HANDLE;
typedef unsigned long CK_USER_TYPE;
typedef unsigned long CK_OBJECT_HANDLE;
typedef unsigned long CK_OBJECT_CLASS;
typedef unsigned long CK_KEY_TYPE;
typedef unsigned long CK_ATTRIBUTE_TYPE;
typedef unsigned long ck_flags_t;
typedef unsigned char CK_BBOOL;
typedef unsigned long int CK_ULONG;
typedef CK_ULONG *CK_ULONG_PTR;
struct _CK_ATTRIBUTE
{
CK_ATTRIBUTE_TYPE type;
void *pValue;
unsigned long ulValueLen;
};
typedef unsigned long CK_MECHANISM_TYPE;
struct _CK_MECHANISM
{
CK_MECHANISM_TYPE mechanism;
void *pParameter;
unsigned long ulParameterLen;
};
struct _CK_TOKEN_INFO
{
unsigned char label[32];
unsigned char manufacturer_id[32];
unsigned char model[16];
unsigned char serial_number[16];
ck_flags_t flags;
unsigned long max_session_count;
unsigned long session_count;
unsigned long max_rw_session_count;
unsigned long rw_session_count;
unsigned long max_pin_len;
unsigned long min_pin_len;
unsigned long total_public_memory;
unsigned long free_public_memory;
unsigned long total_private_memory;
unsigned long free_private_memory;
struct _CK_VERSION hardware_version;
struct _CK_VERSION firmware_version;
unsigned char utc_time[16];
};
typedef struct _CK_TOKEN_INFO CK_TOKEN_INFO;
typedef CK_TOKEN_INFO *CK_TOKEN_INFO_PTR;
typedef unsigned long CK_RV;
typedef ... *CK_NOTIFY;
struct _CK_FUNCTION_LIST;
typedef CK_RV (*CK_C_Initialize) (void *init_args);
typedef CK_RV (*CK_C_Finalize) (void *pReserved);
typedef ... *CK_C_GetInfo;
typedef ... *CK_C_GetFunctionList;
CK_RV C_GetFunctionList (struct _CK_FUNCTION_LIST **function_list);
typedef CK_RV (*CK_C_GetSlotList) (CK_BBOOL tokenPresent,
CK_SLOT_ID_PTR pSlotList,
CK_ULONG_PTR pulCount);
typedef ... *CK_C_GetSlotInfo;
typedef CK_RV (*CK_C_GetTokenInfo) (CK_SLOT_ID slotID,
CK_TOKEN_INFO_PTR pInfo);
typedef ... *CK_C_WaitForSlotEvent;
typedef ... *CK_C_GetMechanismList;
typedef ... *CK_C_GetMechanismInfo;
typedef ... *CK_C_InitToken;
typedef ... *CK_C_InitPIN;
typedef ... *CK_C_SetPIN;
typedef CK_RV (*CK_C_OpenSession) (CK_SLOT_ID slotID, CK_FLAGS flags,
void *application, CK_NOTIFY notify,
CK_SESSION_HANDLE *session);
typedef CK_RV (*CK_C_CloseSession) (CK_SESSION_HANDLE session);
typedef ... *CK_C_CloseAllSessions;
typedef ... *CK_C_GetSessionInfo;
typedef ... *CK_C_GetOperationState;
typedef ... *CK_C_SetOperationState;
typedef CK_RV (*CK_C_Login) (CK_SESSION_HANDLE session, CK_USER_TYPE user_type,
unsigned char *pin, unsigned long pin_len);
typedef CK_RV (*CK_C_Logout) (CK_SESSION_HANDLE session);
typedef CK_RV (*CK_C_CreateObject) (CK_SESSION_HANDLE session,
struct _CK_ATTRIBUTE *templ,
unsigned long count,
CK_OBJECT_HANDLE *object);
typedef ... *CK_C_CopyObject;
typedef CK_RV (*CK_C_DestroyObject) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE object);
typedef ... *CK_C_GetObjectSize;
typedef CK_RV (*CK_C_GetAttributeValue) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE object,
struct _CK_ATTRIBUTE *templ,
unsigned long count);
typedef CK_RV (*CK_C_SetAttributeValue) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE object,
struct _CK_ATTRIBUTE *templ,
unsigned long count);
typedef CK_RV (*CK_C_FindObjectsInit) (CK_SESSION_HANDLE session,
struct _CK_ATTRIBUTE *templ,
unsigned long count);
typedef CK_RV (*CK_C_FindObjects) (CK_SESSION_HANDLE session,
CK_OBJECT_HANDLE *object,
unsigned long max_object_count,
unsigned long *object_count);
typedef CK_RV (*CK_C_FindObjectsFinal) (CK_SESSION_HANDLE session);
typedef ... *CK_C_EncryptInit;
typedef ... *CK_C_Encrypt;
typedef ... *CK_C_EncryptUpdate;
typedef ... *CK_C_EncryptFinal;
typedef ... *CK_C_DecryptInit;
typedef ... *CK_C_Decrypt;
typedef ... *CK_C_DecryptUpdate;
typedef ... *CK_C_DecryptFinal;
typedef ... *CK_C_DigestInit;
typedef ... *CK_C_Digest;
typedef ... *CK_C_DigestUpdate;
typedef ... *CK_C_DigestKey;
typedef ... *CK_C_DigestFinal;
typedef ... *CK_C_SignInit;
typedef ... *CK_C_Sign;
typedef ... *CK_C_SignUpdate;
typedef ... *CK_C_SignFinal;
typedef ... *CK_C_SignRecoverInit;
typedef ... *CK_C_SignRecover;
typedef ... *CK_C_VerifyInit;
typedef ... *CK_C_Verify;
typedef ... *CK_C_VerifyUpdate;
typedef ... *CK_C_VerifyFinal;
typedef ... *CK_C_VerifyRecoverInit;
typedef ... *CK_C_VerifyRecover;
typedef ... *CK_C_DigestEncryptUpdate;
typedef ... *CK_C_DecryptDigestUpdate;
typedef ... *CK_C_SignEncryptUpdate;
typedef ... *CK_C_DecryptVerifyUpdate;
typedef CK_RV (*CK_C_GenerateKey) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
struct _CK_ATTRIBUTE *templ,
unsigned long count,
CK_OBJECT_HANDLE *key);
typedef CK_RV (*CK_C_GenerateKeyPair) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
struct _CK_ATTRIBUTE *
public_key_template,
unsigned long
public_key_attribute_count,
struct _CK_ATTRIBUTE *
private_key_template,
unsigned long
private_key_attribute_count,
CK_OBJECT_HANDLE *public_key,
CK_OBJECT_HANDLE *private_key);
typedef CK_RV (*CK_C_WrapKey) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
CK_OBJECT_HANDLE wrapping_key,
CK_OBJECT_HANDLE key,
unsigned char *wrapped_key,
unsigned long *wrapped_key_len);
typedef CK_RV (*CK_C_UnwrapKey) (CK_SESSION_HANDLE session,
struct _CK_MECHANISM *mechanism,
CK_OBJECT_HANDLE unwrapping_key,
unsigned char *wrapped_key,
unsigned long wrapped_key_len,
struct _CK_ATTRIBUTE *templ,
unsigned long attribute_count,
CK_OBJECT_HANDLE *key);
typedef ... *CK_C_DeriveKey;
typedef ... *CK_C_SeedRandom;
typedef ... *CK_C_GenerateRandom;
typedef ... *CK_C_GetFunctionStatus;
typedef ... *CK_C_CancelFunction;
struct _CK_FUNCTION_LIST
{
struct _CK_VERSION version;
CK_C_Initialize C_Initialize;
CK_C_Finalize C_Finalize;
CK_C_GetInfo C_GetInfo;
CK_C_GetFunctionList C_GetFunctionList;
CK_C_GetSlotList C_GetSlotList;
CK_C_GetSlotInfo C_GetSlotInfo;
CK_C_GetTokenInfo C_GetTokenInfo;
CK_C_GetMechanismList C_GetMechanismList;
CK_C_GetMechanismInfo C_GetMechanismInfo;
CK_C_InitToken C_InitToken;
CK_C_InitPIN C_InitPIN;
CK_C_SetPIN C_SetPIN;
CK_C_OpenSession C_OpenSession;
CK_C_CloseSession C_CloseSession;
CK_C_CloseAllSessions C_CloseAllSessions;
CK_C_GetSessionInfo C_GetSessionInfo;
CK_C_GetOperationState C_GetOperationState;
CK_C_SetOperationState C_SetOperationState;
CK_C_Login C_Login;
CK_C_Logout C_Logout;
CK_C_CreateObject C_CreateObject;
CK_C_CopyObject C_CopyObject;
CK_C_DestroyObject C_DestroyObject;
CK_C_GetObjectSize C_GetObjectSize;
CK_C_GetAttributeValue C_GetAttributeValue;
CK_C_SetAttributeValue C_SetAttributeValue;
CK_C_FindObjectsInit C_FindObjectsInit;
CK_C_FindObjects C_FindObjects;
CK_C_FindObjectsFinal C_FindObjectsFinal;
CK_C_EncryptInit C_EncryptInit;
CK_C_Encrypt C_Encrypt;
CK_C_EncryptUpdate C_EncryptUpdate;
CK_C_EncryptFinal C_EncryptFinal;
CK_C_DecryptInit C_DecryptInit;
CK_C_Decrypt C_Decrypt;
CK_C_DecryptUpdate C_DecryptUpdate;
CK_C_DecryptFinal C_DecryptFinal;
CK_C_DigestInit C_DigestInit;
CK_C_Digest C_Digest;
CK_C_DigestUpdate C_DigestUpdate;
CK_C_DigestKey C_DigestKey;
CK_C_DigestFinal C_DigestFinal;
CK_C_SignInit C_SignInit;
CK_C_Sign C_Sign;
CK_C_SignUpdate C_SignUpdate;
CK_C_SignFinal C_SignFinal;
CK_C_SignRecoverInit C_SignRecoverInit;
CK_C_SignRecover C_SignRecover;
CK_C_VerifyInit C_VerifyInit;
CK_C_Verify C_Verify;
CK_C_VerifyUpdate C_VerifyUpdate;
CK_C_VerifyFinal C_VerifyFinal;
CK_C_VerifyRecoverInit C_VerifyRecoverInit;
CK_C_VerifyRecover C_VerifyRecover;
CK_C_DigestEncryptUpdate C_DigestEncryptUpdate;
CK_C_DecryptDigestUpdate C_DecryptDigestUpdate;
CK_C_SignEncryptUpdate C_SignEncryptUpdate;
CK_C_DecryptVerifyUpdate C_DecryptVerifyUpdate;
CK_C_GenerateKey C_GenerateKey;
CK_C_GenerateKeyPair C_GenerateKeyPair;
CK_C_WrapKey C_WrapKey;
CK_C_UnwrapKey C_UnwrapKey;
CK_C_DeriveKey C_DeriveKey;
CK_C_SeedRandom C_SeedRandom;
CK_C_GenerateRandom C_GenerateRandom;
CK_C_GetFunctionStatus C_GetFunctionStatus;
CK_C_CancelFunction C_CancelFunction;
CK_C_WaitForSlotEvent C_WaitForSlotEvent;
};
typedef unsigned char CK_BYTE;
typedef unsigned char CK_UTF8CHAR;
typedef CK_BYTE *CK_BYTE_PTR;
typedef CK_OBJECT_HANDLE *CK_OBJECT_HANDLE_PTR;
typedef struct _CK_ATTRIBUTE CK_ATTRIBUTE;
typedef struct _CK_ATTRIBUTE *CK_ATTRIBUTE_PTR;
typedef struct _CK_MECHANISM CK_MECHANISM;
typedef struct _CK_FUNCTION_LIST *CK_FUNCTION_LIST_PTR;
/* p11-kit/uri.h */
typedef enum {
DUMMY /* ..., */
} P11KitUriType;
typedef ... P11KitUri;
CK_ATTRIBUTE_PTR p11_kit_uri_get_attributes (P11KitUri *uri,
CK_ULONG *n_attrs);
int p11_kit_uri_any_unrecognized (P11KitUri *uri);
P11KitUri* p11_kit_uri_new (void);
int p11_kit_uri_parse (const char *string,
P11KitUriType uri_type,
P11KitUri *uri);
void p11_kit_uri_free (P11KitUri *uri);
/* p11helper.c */
struct ck_rsa_pkcs_oaep_params {
CK_MECHANISM_TYPE hash_alg;
unsigned long mgf;
unsigned long source;
void *source_data;
unsigned long source_data_len;
};
typedef struct ck_rsa_pkcs_oaep_params CK_RSA_PKCS_OAEP_PARAMS;
''')
# Open the installed p11-kit shared library; its URI helpers are bound below.
_libp11_kit = _ffi.dlopen(ctypes.util.find_library('p11-kit'))
# utility
# Short aliases for frequently used cffi objects.
NULL = _ffi.NULL
unsigned_char = _ffi.typeof('unsigned char')
unsigned_long = _ffi.typeof('unsigned long')
sizeof = _ffi.sizeof
def new_ptr(ctype, *args):
    """Allocate a new cffi object of pointer type ``ctype *``."""
    pointer_type = _ffi.getctype(ctype, '*')
    return _ffi.new(pointer_type, *args)
def new_array(ctype, *args):
    """Allocate a new cffi object of array type ``ctype []``."""
    array_type = _ffi.getctype(ctype, '[]')
    return _ffi.new(array_type, *args)
# p11-kit/pkcs11.h
# PKCS#11 constants and cffi type handles, mirroring the C header values.
CK_SESSION_HANDLE = _ffi.typeof('CK_SESSION_HANDLE')
CK_OBJECT_HANDLE = _ffi.typeof('CK_OBJECT_HANDLE')
CKU_USER = 1
# Session flags
CKF_RW_SESSION = 0x2
CKF_SERIAL_SESSION = 0x4
# Object classes (CKO_*)
CK_OBJECT_CLASS = _ffi.typeof('CK_OBJECT_CLASS')
CKO_PUBLIC_KEY = 2
CKO_PRIVATE_KEY = 3
CKO_SECRET_KEY = 4
CKO_VENDOR_DEFINED = 0x80000000
# Key types (CKK_*)
CK_KEY_TYPE = _ffi.typeof('CK_KEY_TYPE')
CKK_RSA = 0
CKK_AES = 0x1f
# Attribute types (CKA_*)
CKA_CLASS = 0
CKA_TOKEN = 1
CKA_PRIVATE = 2
CKA_LABEL = 3
CKA_TRUSTED = 0x86
CKA_KEY_TYPE = 0x100
CKA_ID = 0x102
CKA_SENSITIVE = 0x103
CKA_ENCRYPT = 0x104
CKA_DECRYPT = 0x105
CKA_WRAP = 0x106
CKA_UNWRAP = 0x107
CKA_SIGN = 0x108
CKA_SIGN_RECOVER = 0x109
CKA_VERIFY = 0x10a
CKA_VERIFY_RECOVER = 0x10b
CKA_DERIVE = 0x10c
CKA_MODULUS = 0x120
CKA_MODULUS_BITS = 0x121
CKA_PUBLIC_EXPONENT = 0x122
CKA_VALUE_LEN = 0x161
CKA_EXTRACTABLE = 0x162
CKA_LOCAL = 0x163
CKA_NEVER_EXTRACTABLE = 0x164
CKA_ALWAYS_SENSITIVE = 0x165
CKA_MODIFIABLE = 0x170
CKA_ALWAYS_AUTHENTICATE = 0x202
CKA_WRAP_WITH_TRUSTED = 0x210
# Mechanisms (CKM_*)
CKM_RSA_PKCS_KEY_PAIR_GEN = 0
CKM_RSA_PKCS = 1
CKM_RSA_PKCS_OAEP = 9
CKM_SHA_1 = 0x220
CKM_AES_KEY_GEN = 0x1080
# Return values (CKR_*)
CKR_OK = 0
CKR_ATTRIBUTE_TYPE_INVALID = 0x12
CKR_USER_NOT_LOGGED_IN = 0x101
CKR_BUFFER_TOO_SMALL = 0x150
CK_BYTE = _ffi.typeof('CK_BYTE')
CK_BBOOL = _ffi.typeof('CK_BBOOL')
CK_ULONG = _ffi.typeof('CK_ULONG')
CK_BYTE_PTR = _ffi.typeof('CK_BYTE_PTR')
CK_FALSE = 0
CK_TRUE = 1
CK_OBJECT_HANDLE_PTR = _ffi.typeof('CK_OBJECT_HANDLE_PTR')
CK_ATTRIBUTE = _ffi.typeof('CK_ATTRIBUTE')
CK_MECHANISM = _ffi.typeof('CK_MECHANISM')
CK_FUNCTION_LIST_PTR = _ffi.typeof('CK_FUNCTION_LIST_PTR')
CK_SLOT_ID = _ffi.typeof('CK_SLOT_ID')
CK_TOKEN_INFO = _ffi.typeof('CK_TOKEN_INFO')
NULL_PTR = NULL
# p11-kit/uri.h
# URI parsing helpers exported by libp11-kit.
P11_KIT_URI_OK = 0
P11_KIT_URI_FOR_OBJECT = 2
p11_kit_uri_get_attributes = _libp11_kit.p11_kit_uri_get_attributes
p11_kit_uri_any_unrecognized = _libp11_kit.p11_kit_uri_any_unrecognized
p11_kit_uri_new = _libp11_kit.p11_kit_uri_new
p11_kit_uri_parse = _libp11_kit.p11_kit_uri_parse
p11_kit_uri_free = _libp11_kit.p11_kit_uri_free
# library.c
def loadLibrary(module):
    """Open a PKCS#11 shared object and resolve its C_GetFunctionList.

    :param module: path or soname of the PKCS#11 module; must be non-empty
    :returns: tuple (C_GetFunctionList entry point, dlopen handle); the
        handle is kept alive so the module can be dlclose()d later
    :raises Error: when the name is empty or the entry point is missing
    """
    if not module:
        raise Error("PKCS#11 module name is empty")
    # pylint: disable=no-member
    dyn_lib = _ffi.dlopen(module, _ffi.RTLD_NOW | _ffi.RTLD_LOCAL)
    get_function_list = dyn_lib.C_GetFunctionList
    if get_function_list == NULL:
        raise Error(
            f"Module '{module}' has no function 'C_GetFunctionList'."
        )
    return get_function_list, dyn_lib
# p11helper.c
# compat TODO
# AES key wrap mechanisms (PKCS#11 v2.40)
CKM_AES_KEY_WRAP = 0x2109
CKM_AES_KEY_WRAP_PAD = 0x210a
# TODO
CKA_COPYABLE = 0x0017
# RSA-OAEP parameter constants
CKG_MGF1_SHA1 = 0x00000001
CKZ_DATA_SPECIFIED = 0x00000001
CK_RSA_PKCS_OAEP_PARAMS = _ffi.typeof('CK_RSA_PKCS_OAEP_PARAMS')
# Shared CK_BBOOL pointers reused by pyobj_to_bool()
true_ptr = new_ptr(CK_BBOOL, CK_TRUE)
false_ptr = new_ptr(CK_BBOOL, CK_FALSE)
MAX_TEMPLATE_LEN = 32
#
# Constants
#
# Default OAEP parameters (SHA-1 / MGF1-SHA1), matching OpenSSL defaults;
# referenced by _set_wrapping_mech_parameters() and must stay allocated.
CONST_RSA_PKCS_OAEP_PARAMS_ptr = new_ptr(CK_RSA_PKCS_OAEP_PARAMS, dict(
    hash_alg=CKM_SHA_1,
    mgf=CKG_MGF1_SHA1,
    source=CKZ_DATA_SPECIFIED,
    source_data=NULL,
    source_data_len=0,
))
#
# ipap11helper Exceptions
#
class P11HelperException(Exception):
    """parent class for all exceptions"""
# NOTE(review): presumably kept so the class presents itself as
# 'Exception', matching the former C extension's naming — confirm before
# removing.
P11HelperException.__name__ = 'Exception'
class Error(P11HelperException):
    """general error"""
class NotFound(P11HelperException):
    """key not found"""
class DuplicationError(P11HelperException):
    """key already exists"""
########################################################################
# Support functions
#
def pyobj_to_bool(pyobj):
    """Map a Python truth value onto the shared CK_BBOOL pointer."""
    return true_ptr if pyobj else false_ptr
def convert_py2bool(mapping):
    """Convert an iterable of Python truth values to CK_BBOOL pointers."""
    return tuple(map(pyobj_to_bool, mapping))
def string_to_pybytes_or_none(str, len):
    """Copy *len* bytes from C buffer *str*, or return None for NULL.

    NOTE: parameter names shadow the ``str``/``len`` builtins; kept for
    backward compatibility with keyword callers.
    """
    if str == NULL:
        return None
    c_buffer = _ffi.buffer(str, len)
    return c_buffer[:]
def unicode_to_char_array(unicode):
    """Convert a unicode string to a UTF-8 encoded C char array.

    :param unicode: input python unicode object
    :returns: tuple (cffi unsigned char array, length in bytes)
    :raises Error: when encoding or the cffi allocation fails
    """
    try:
        encoded = unicode.encode('utf-8')
    except Exception:
        raise Error("Unable to encode UTF-8")
    try:
        char_array = new_array(unsigned_char, encoded)
    except Exception:
        raise Error("Unable to get bytes from string")
    return char_array, len(encoded)
def char_array_to_unicode(array, l):
    """Decode *l* bytes of a UTF-8 encoded C char array to a str."""
    raw_bytes = _ffi.buffer(array, l)[:]
    return raw_bytes.decode('utf-8')
def int_to_bytes(value):
    """Encode a non-negative integer as big-endian bytes.

    The hex representation is left-padded to an even number of digits so
    binascii.unhexlify() accepts it; 0 encodes as b'\\x00'.
    """
    hex_digits = '{0:x}'.format(value)
    if len(hex_digits) % 2:
        hex_digits = '0' + hex_digits
    return binascii.unhexlify(hex_digits)
def bytes_to_int(value):
    """Interpret *value* as a big-endian unsigned integer."""
    hex_digits = binascii.hexlify(value)
    return int(hex_digits, 16)
def check_return_value(rv, message):
    """Raise Error unless a PKCS#11 call returned CKR_OK.

    :param rv: CK_RV return value of the PKCS#11 operation
    :param message: short description of the operation, embedded in the
        error message together with the hex return code
    :raises Error: when ``rv`` differs from CKR_OK
    """
    if rv != CKR_OK:
        try:
            errmsg = "Error at %s: 0x%x\n" % (message, rv)
        except Exception:
            # Defensive: formatting can only fail for a non-integer rv.
            # Keep the callers' contract (they catch Error) even then.
            # Fix: "occured" -> "occurred" in the user-visible message.
            raise Error("An error occurred during error message generation. "
                        "Please report this problem. Developers will use "
                        "a crystal ball to find out the root cause.")
        else:
            raise Error(errmsg)
def _fill_template_from_parts(attr, template_len, id, id_len, label, label_len,
                              class_, cka_wrap, cka_unwrap):
    """
    Fill template structure with pointers to attributes passed as independent
    variables.
    Variables with NULL values will be omitted from template.

    :param attr: cffi CK_ATTRIBUTE array to fill (advanced in place)
    :param template_len: one-element CK_ULONG array; holds the array
        capacity on entry and is overwritten with the used count on exit
    @warning input variables should not be modified when template is in use
    """
    cnt = 0
    # ``attr += 1`` performs cffi pointer arithmetic: each branch writes
    # the next slot of the caller-allocated attribute array.
    # NOTE(review): capacity checks use ``assert`` and are stripped under
    # python -O; overflow would then corrupt memory — confirm callers
    # always pass MAX_TEMPLATE_LEN-sized arrays.
    if label != NULL:
        attr[0].type = CKA_LABEL
        attr[0].pValue = label
        attr[0].ulValueLen = label_len
        attr += 1
        cnt += 1
        assert cnt < template_len[0]
    if id != NULL:
        attr[0].type = CKA_ID
        attr[0].pValue = id
        attr[0].ulValueLen = id_len
        attr += 1
        cnt += 1
        assert cnt < template_len[0]
    if cka_wrap != NULL:
        attr[0].type = CKA_WRAP
        attr[0].pValue = cka_wrap
        attr[0].ulValueLen = sizeof(CK_BBOOL)
        attr += 1
        cnt += 1
        assert cnt < template_len[0]
    if cka_unwrap != NULL:
        attr[0].type = CKA_UNWRAP
        attr[0].pValue = cka_unwrap
        attr[0].ulValueLen = sizeof(CK_BBOOL)
        attr += 1
        cnt += 1
        assert cnt < template_len[0]
    if class_ != NULL:
        attr[0].type = CKA_CLASS
        attr[0].pValue = class_
        attr[0].ulValueLen = sizeof(CK_OBJECT_CLASS)
        attr += 1
        cnt += 1
        assert cnt < template_len[0]
    # Report the number of attributes actually filled in
    template_len[0] = cnt
def _parse_uri(uri_str):
    """
    Parse string to P11-kit representation of PKCS#11 URI.

    The returned P11KitUri is owned by the caller and must be released
    with p11_kit_uri_free(); on any parse failure it is freed here and
    an Error is raised instead.
    """
    uri = p11_kit_uri_new()
    if not uri:
        raise Error("Cannot initialize URI parser")
    try:
        parse_rc = p11_kit_uri_parse(uri_str, P11_KIT_URI_FOR_OBJECT, uri)
        if parse_rc != P11_KIT_URI_OK:
            raise Error("Cannot parse URI")
        if p11_kit_uri_any_unrecognized(uri):
            raise Error("PKCS#11 URI contains unsupported attributes")
    except Error:
        # Do not leak the C allocation on failure
        p11_kit_uri_free(uri)
        raise
    return uri
def _set_wrapping_mech_parameters(mech_type, mech):
    """
    Function set default param values for wrapping mechanism
    :param mech_type: mechanism type
    :param mech: filled structure with params based on mech type

    Warning: do not dealloc param values, it is static variables
    """
    if mech_type == CKM_RSA_PKCS_OAEP:
        # Use the same configuration as openSSL
        # https://www.openssl.org/docs/crypto/RSA_public_encrypt.html
        mech.pParameter = CONST_RSA_PKCS_OAEP_PARAMS_ptr
        mech.ulParameterLen = sizeof(CK_RSA_PKCS_OAEP_PARAMS)
    elif mech_type in (CKM_RSA_PKCS, CKM_AES_KEY_WRAP, CKM_AES_KEY_WRAP_PAD):
        # These mechanisms take no parameters
        mech.pParameter = NULL
        mech.ulParameterLen = 0
    else:
        raise Error("Unsupported wrapping mechanism")
    mech.mechanism = mech_type
########################################################################
# P11_Helper object
#
class P11_Helper:
    @property
    def p11(self):
        """Dereferenced CK_FUNCTION_LIST pointer of the loaded module."""
        return self.p11_ptr[0]
    @property
    def session(self):
        """Dereferenced CK_SESSION_HANDLE of the open PKCS#11 session."""
        return self.session_ptr[0]
    def _find_key(self, template, template_len):
        """
        Find keys matching specified template.

        :param template: PKCS#11 CK_ATTRIBUTE template for attribute matching
        :param template_len: number of attributes in the template
        :return: list of matching object handles (possibly empty)
        """
        result_objects = []
        result_object_ptr = new_ptr(CK_OBJECT_HANDLE)
        objectCount_ptr = new_ptr(CK_ULONG)
        # PKCS#11 search protocol: Init -> repeated FindObjects -> Final
        rv = self.p11.C_FindObjectsInit(self.session, template, template_len)
        check_return_value(rv, "Find key init")
        rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
                                    objectCount_ptr)
        check_return_value(rv, "Find key")
        # Fetch one handle at a time until the search is exhausted
        while objectCount_ptr[0] > 0:
            result_objects.append(result_object_ptr[0])
            rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
                                        objectCount_ptr)
            check_return_value(rv, "Check for duplicated key")
        rv = self.p11.C_FindObjectsFinal(self.session)
        check_return_value(rv, "Find objects final")
        return result_objects
    def _id_exists(self, id, id_len, class_):
        """
        Test if an object with the specified id and class exists.

        :param id: key ID, (if value is NULL, will not be used to find key)
        :param id_len: key ID length
        :param class_: key class (CKO_*)
        :return: True if a conflicting object was found, False if it
            doesn't exist
        """
        object_count_ptr = new_ptr(CK_ULONG)
        result_object_ptr = new_ptr(CK_OBJECT_HANDLE)
        class_ptr = new_ptr(CK_OBJECT_CLASS, class_)
        class_sec_ptr = new_ptr(CK_OBJECT_CLASS, CKO_SECRET_KEY)
        # id + requested class
        template_pub_priv = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id, id_len),
            (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
        ))
        # id + secret-key class
        template_sec = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id, id_len),
            (CKA_CLASS, class_sec_ptr, sizeof(CK_OBJECT_CLASS)),
        ))
        # id only (any class)
        template_id = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id, id_len),
        ))
        #
        # Only one secret key with same ID is allowed
        #
        if class_ == CKO_SECRET_KEY:
            # Any object of any class with this ID is a conflict
            rv = self.p11.C_FindObjectsInit(self.session, template_id, 1)
            check_return_value(rv, "id, label exists init")
            rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
                                        object_count_ptr)
            check_return_value(rv, "id, label exists")
            rv = self.p11.C_FindObjectsFinal(self.session)
            check_return_value(rv, "id, label exists final")
            if object_count_ptr[0] > 0:
                return True
            return False
        #
        # Public and private keys can share one ID, but they must not
        # collide with a secret key or an object of the same class.
        #
        # test if secret key with same ID exists
        rv = self.p11.C_FindObjectsInit(self.session, template_sec, 2)
        check_return_value(rv, "id, label exists init")
        rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
                                    object_count_ptr)
        check_return_value(rv, "id, label exists")
        rv = self.p11.C_FindObjectsFinal(self.session)
        check_return_value(rv, "id, label exists final")
        if object_count_ptr[0] > 0:
            # object found
            return True
        # test if pub/private key with same id exists
        object_count_ptr[0] = 0
        rv = self.p11.C_FindObjectsInit(self.session, template_pub_priv, 2)
        check_return_value(rv, "id, label exists init")
        rv = self.p11.C_FindObjects(self.session, result_object_ptr, 1,
                                    object_count_ptr)
        check_return_value(rv, "id, label exists")
        rv = self.p11.C_FindObjectsFinal(self.session)
        check_return_value(rv, "id, label exists final")
        if object_count_ptr[0] > 0:
            # Object found
            return True
        # Object not found
        return False
    def __init__(self, token_label, user_pin, library_path):
        """Load a PKCS#11 module, open a R/W session and log in.

        :param token_label: label of the token to use (matched against
            the token info label, rstrip()ped)
        :param user_pin: user PIN; str is encoded to bytes
        :param library_path: path/soname of the PKCS#11 module
        :raises Error: when the module cannot be loaded, the slot is not
            found, or any PKCS#11 call fails
        """
        self.p11_ptr = new_ptr(CK_FUNCTION_LIST_PTR)
        self.session_ptr = new_ptr(CK_SESSION_HANDLE)
        self.session_ptr[0] = 0
        self.p11_ptr[0] = NULL
        self.module_handle = None
        # Parse method args
        if isinstance(user_pin, unicode):
            user_pin = user_pin.encode()
        self.token_label = token_label
        try:
            pGetFunctionList, module_handle = loadLibrary(library_path)
        except Exception:
            raise Error(f"Could not load the library '{library_path}'.")
        # Keep the dlopen handle alive for the lifetime of this helper
        self.module_handle = module_handle
        #
        # Load the function list
        #
        pGetFunctionList(self.p11_ptr)
        #
        # Initialize
        #
        rv = self.p11.C_Initialize(NULL)
        check_return_value(rv, "initialize")
        #
        # Get Slot
        #
        slot = self.get_slot()
        if slot is None:
            raise Error("No slot for label {} found".format(self.token_label))
        #
        # Start session
        #
        rv = self.p11.C_OpenSession(slot,
                                    CKF_SERIAL_SESSION | CKF_RW_SESSION, NULL,
                                    NULL, self.session_ptr)
        check_return_value(rv, "open session")
        #
        # Login
        #
        rv = self.p11.C_Login(self.session, CKU_USER, user_pin, len(user_pin))
        check_return_value(rv, "log in")
    def get_slot(self):
        """Get the slot where the token is located.

        :return: slot number or None when slot not found
        """
        object_count_ptr = new_ptr(CK_ULONG)
        # get slots ID
        slots = None
        for _i in range(0, 10):
            # try max N times, then die to avoid infinite iteration
            # (the slot list can change between the size query and the
            # actual fetch, which yields CKR_BUFFER_TOO_SMALL)
            rv = self.p11.C_GetSlotList(CK_TRUE, NULL, object_count_ptr)
            check_return_value(rv, "get slots IDs - prepare")
            result_ids_ptr = new_array(CK_SLOT_ID, object_count_ptr[0])
            rv = self.p11.C_GetSlotList(
                CK_TRUE, result_ids_ptr, object_count_ptr)
            if rv == CKR_BUFFER_TOO_SMALL:
                continue
            check_return_value(rv, "get slots IDs")
            slots = result_ids_ptr
            break # we have slots !!!
        if slots is None:
            raise Error("Failed to get slots")
        for slot in slots:
            token_info_ptr = new_ptr(CK_TOKEN_INFO)
            rv = self.p11.C_GetTokenInfo(slot, token_info_ptr)
            check_return_value(rv, 'get token info')
            # softhsm always returns label 32 bytes long with padding made of
            # whitespaces (#32), so we have to rstrip() padding and compare
            # Label was created by softhsm-util so it is not our fault that
            # there are #32 as padding (cffi initializes structures with
            # zeroes)
            # In case that this is not valid anymore, keep in mind backward
            # compatibility
            if self.token_label == char_array_to_unicode(
                    token_info_ptr[0].label, 32).rstrip():
                return slot
        return None
    def finalize(self):
        """
        Finalize operations with pkcs11 library.

        Logs out, closes the session, calls C_Finalize and drops the
        module handle reference. Repeated calls are no-ops: self.p11_ptr
        is reset to NULL at the end and the guard below returns early
        (assuming self.p11 reflects p11_ptr[0] — see the class header).
        """
        if self.p11 == NULL:
            return

        #
        # Logout
        #
        rv = self.p11.C_Logout(self.session)
        check_return_value(rv, "log out")

        #
        # End session
        #
        rv = self.p11.C_CloseSession(self.session)
        check_return_value(rv, "close session")

        #
        # Finalize
        #
        self.p11.C_Finalize(NULL)

        self.p11_ptr[0] = NULL
        self.session_ptr[0] = 0
        self.module_handle = None
#################################################################
# Methods working with keys
#
    def generate_master_key(self, label, id, key_length=16, cka_copyable=True,
                            cka_decrypt=False, cka_derive=False,
                            cka_encrypt=False, cka_extractable=True,
                            cka_modifiable=True, cka_private=True,
                            cka_sensitive=True, cka_sign=False,
                            cka_unwrap=True, cka_verify=False, cka_wrap=True,
                            cka_wrap_with_trusted=False):
        """Generate an AES master key on the token via CKM_AES_KEY_GEN.

        :param label: CKA_LABEL of the new key (unicode)
        :param id: CKA_ID of the new key (bytes; unicode is UTF-8 encoded)
        :param key_length: AES key length in bytes, one of 16, 24, 32
        The remaining keyword arguments are booleans mapped 1:1 to the
        corresponding CKA_* attributes in the key template (cka_copyable
        is accepted but not used — SoftHSM does not support CKA_COPYABLE).
        :raises Error: on invalid key_length or PKCS#11 failure
        :raises DuplicationError: if a secret key with the same CKA_ID
            already exists on the token
        :return: master key handle
        """
        if isinstance(id, unicode):
            id = id.encode()

        attrs = (
            cka_copyable,
            cka_decrypt,
            cka_derive,
            cka_encrypt,
            cka_extractable,
            cka_modifiable,
            cka_private,
            cka_sensitive,
            cka_sign,
            cka_unwrap,
            cka_verify,
            cka_wrap,
            cka_wrap_with_trusted,
        )

        key_length_ptr = new_ptr(CK_ULONG, key_length)
        master_key_ptr = new_ptr(CK_OBJECT_HANDLE)
        label_unicode = label
        id_length = len(id)
        id_ = new_array(CK_BYTE, id)
        # TODO check long overflow
        label, label_length = unicode_to_char_array(label_unicode)

        # TODO param?
        mechanism_ptr = new_ptr(CK_MECHANISM, (
            CKM_AES_KEY_GEN, NULL_PTR, 0
        ))

        if key_length not in (16, 24, 32):
            raise Error("generate_master_key: key length allowed values are: "
                        "16, 24 and 32")

        if self._id_exists(id_, id_length, CKO_SECRET_KEY):
            raise DuplicationError("Master key with same ID already exists")

        # Process keyword boolean arguments
        (_cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_encrypt_ptr,
         cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr,
         cka_sensitive_ptr, cka_sign_ptr, cka_unwrap_ptr, cka_verify_ptr,
         cka_wrap_ptr, cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs)

        symKeyTemplate = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id_, id_length),
            (CKA_LABEL, label, label_length),
            (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
            (CKA_VALUE_LEN, key_length_ptr, sizeof(CK_ULONG)),
            # TODO Softhsm doesn't support it
            # (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)),
            (CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)),
            (CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)),
            (CKA_ENCRYPT, cka_encrypt_ptr, sizeof(CK_BBOOL)),
            (CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)),
            (CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)),
            (CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)),
            (CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)),
            (CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)),
            (CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)),
            (CKA_VERIFY, cka_verify_ptr, sizeof(CK_BBOOL)),
            (CKA_WRAP, cka_wrap_ptr, sizeof(CK_BBOOL)),
            (CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr,
             sizeof(CK_BBOOL)),
        ))

        rv = self.p11.C_GenerateKey(self.session, mechanism_ptr,
                                    symKeyTemplate,
                                    (sizeof(symKeyTemplate) //
                                     sizeof(CK_ATTRIBUTE)), master_key_ptr)
        check_return_value(rv, "generate master key")

        return master_key_ptr[0]
    def generate_replica_key_pair(self, label, id, modulus_bits=2048,
                                  pub_cka_copyable=True, pub_cka_derive=False,
                                  pub_cka_encrypt=False,
                                  pub_cka_modifiable=True,
                                  pub_cka_private=True, pub_cka_trusted=False,
                                  pub_cka_verify=False,
                                  pub_cka_verify_recover=False,
                                  pub_cka_wrap=True,
                                  priv_cka_always_authenticate=False,
                                  priv_cka_copyable=True,
                                  priv_cka_decrypt=False,
                                  priv_cka_derive=False,
                                  priv_cka_extractable=False,
                                  priv_cka_modifiable=True,
                                  priv_cka_private=True,
                                  priv_cka_sensitive=True,
                                  priv_cka_sign=False,
                                  priv_cka_sign_recover=False,
                                  priv_cka_unwrap=True,
                                  priv_cka_wrap_with_trusted=False):
        """Generate an RSA replica key pair via CKM_RSA_PKCS_KEY_PAIR_GEN.

        Both halves share the same CKA_ID and CKA_LABEL. The public
        exponent is fixed to 65537 (RFC 6376 section 3.3.1). The
        ``pub_cka_*`` / ``priv_cka_*`` booleans map 1:1 to CKA_*
        attributes in the respective templates; *_cka_copyable is
        accepted but unused (SoftHSM lacks CKA_COPYABLE support).

        :param label: CKA_LABEL for both keys (unicode)
        :param id: CKA_ID for both keys (bytes; unicode is UTF-8 encoded)
        :param modulus_bits: RSA modulus size in bits
        :raises DuplicationError: if a private or public key with the
            same CKA_ID already exists
        :returns: tuple (public_key_handle, private_key_handle)
        """
        if isinstance(id, unicode):
            id = id.encode()

        attrs_pub = (
            pub_cka_copyable,
            pub_cka_derive,
            pub_cka_encrypt,
            pub_cka_modifiable,
            pub_cka_private,
            pub_cka_trusted,
            pub_cka_verify,
            pub_cka_verify_recover,
            pub_cka_wrap,
        )

        attrs_priv = (
            priv_cka_always_authenticate,
            priv_cka_copyable,
            priv_cka_decrypt,
            priv_cka_derive,
            priv_cka_extractable,
            priv_cka_modifiable,
            priv_cka_private,
            priv_cka_sensitive,
            priv_cka_sign,
            priv_cka_sign_recover,
            priv_cka_unwrap,
            priv_cka_wrap_with_trusted,
        )

        label_unicode = label
        id_ = new_array(CK_BYTE, id)
        id_length = len(id)
        label, label_length = unicode_to_char_array(label_unicode)

        public_key_ptr = new_ptr(CK_OBJECT_HANDLE)
        private_key_ptr = new_ptr(CK_OBJECT_HANDLE)
        mechanism_ptr = new_ptr(CK_MECHANISM,
                                (CKM_RSA_PKCS_KEY_PAIR_GEN, NULL_PTR, 0))

        if self._id_exists(id_, id_length, CKO_PRIVATE_KEY):
            raise DuplicationError("Private key with same ID already exists")

        if self._id_exists(id_, id_length, CKO_PUBLIC_KEY):
            raise DuplicationError("Public key with same ID already exists")

        modulus_bits_ptr = new_ptr(CK_ULONG, modulus_bits)

        # Process keyword boolean arguments
        (_pub_cka_copyable_ptr, pub_cka_derive_ptr, pub_cka_encrypt_ptr,
         pub_cka_modifiable_ptr, pub_cka_private_ptr, pub_cka_trusted_ptr,
         pub_cka_verify_ptr, pub_cka_verify_recover_ptr, pub_cka_wrap_ptr,
         ) = convert_py2bool(attrs_pub)
        (priv_cka_always_authenticate_ptr, _priv_cka_copyable_ptr,
         priv_cka_decrypt_ptr, priv_cka_derive_ptr, priv_cka_extractable_ptr,
         priv_cka_modifiable_ptr, priv_cka_private_ptr, priv_cka_sensitive_ptr,
         priv_cka_sign_ptr, _priv_cka_sign_recover_ptr, priv_cka_unwrap_ptr,
         priv_cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs_priv)

        # 65537 (RFC 6376 section 3.3.1)
        public_exponent = new_array(CK_BYTE, (1, 0, 1))
        publicKeyTemplate = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id_, id_length),
            (CKA_LABEL, label, label_length),
            (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
            (CKA_MODULUS_BITS, modulus_bits_ptr, sizeof(CK_ULONG)),
            (CKA_PUBLIC_EXPONENT, public_exponent, 3),
            # TODO Softhsm doesn't support it
            # (CKA_COPYABLE, pub_cka_copyable_p, sizeof(CK_BBOOL)),
            (CKA_DERIVE, pub_cka_derive_ptr, sizeof(CK_BBOOL)),
            (CKA_ENCRYPT, pub_cka_encrypt_ptr, sizeof(CK_BBOOL)),
            (CKA_MODIFIABLE, pub_cka_modifiable_ptr, sizeof(CK_BBOOL)),
            (CKA_PRIVATE, pub_cka_private_ptr, sizeof(CK_BBOOL)),
            (CKA_TRUSTED, pub_cka_trusted_ptr, sizeof(CK_BBOOL)),
            (CKA_VERIFY, pub_cka_verify_ptr, sizeof(CK_BBOOL)),
            (CKA_VERIFY_RECOVER, pub_cka_verify_recover_ptr, sizeof(CK_BBOOL)),
            (CKA_WRAP, pub_cka_wrap_ptr, sizeof(CK_BBOOL)),
        ))

        privateKeyTemplate = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id_, id_length),
            (CKA_LABEL, label, label_length),
            (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
            (CKA_ALWAYS_AUTHENTICATE, priv_cka_always_authenticate_ptr,
             sizeof(CK_BBOOL)),
            # TODO Softhsm doesn't support it
            # (CKA_COPYABLE, priv_cka_copyable_ptr, sizeof(CK_BBOOL)),
            (CKA_DECRYPT, priv_cka_decrypt_ptr, sizeof(CK_BBOOL)),
            (CKA_DERIVE, priv_cka_derive_ptr, sizeof(CK_BBOOL)),
            (CKA_EXTRACTABLE, priv_cka_extractable_ptr, sizeof(CK_BBOOL)),
            (CKA_MODIFIABLE, priv_cka_modifiable_ptr, sizeof(CK_BBOOL)),
            (CKA_PRIVATE, priv_cka_private_ptr, sizeof(CK_BBOOL)),
            (CKA_SENSITIVE, priv_cka_sensitive_ptr, sizeof(CK_BBOOL)),
            (CKA_SIGN, priv_cka_sign_ptr, sizeof(CK_BBOOL)),
            # NOTE(review): CKA_SIGN_RECOVER deliberately(?) reuses
            # priv_cka_sign_ptr; _priv_cka_sign_recover_ptr above is never
            # used, so the priv_cka_sign_recover argument has no effect —
            # confirm whether this is intentional.
            (CKA_SIGN_RECOVER, priv_cka_sign_ptr, sizeof(CK_BBOOL)),
            (CKA_UNWRAP, priv_cka_unwrap_ptr, sizeof(CK_BBOOL)),
            (CKA_WRAP_WITH_TRUSTED, priv_cka_wrap_with_trusted_ptr,
             sizeof(CK_BBOOL)),
        ))

        rv = self.p11.C_GenerateKeyPair(self.session, mechanism_ptr,
                                        publicKeyTemplate,
                                        (sizeof(publicKeyTemplate) //
                                         sizeof(CK_ATTRIBUTE)),
                                        privateKeyTemplate,
                                        (sizeof(privateKeyTemplate) //
                                         sizeof(CK_ATTRIBUTE)),
                                        public_key_ptr,
                                        private_key_ptr)
        check_return_value(rv, "generate key pair")

        return public_key_ptr[0], private_key_ptr[0]
    def find_keys(self, objclass=CKO_VENDOR_DEFINED, label=None, id=None,
                  cka_wrap=None, cka_unwrap=None, uri=None):
        """Find keys matching the given criteria.

        Either individual criteria or a PKCS#11 URI can be supplied; when
        *uri* is given the search template is built from the parsed URI
        and the other criteria are ignored.

        :param objclass: CKO_* object class to match; the default
            CKO_VENDOR_DEFINED acts as "any class"
        :param label: CKA_LABEL to match (unicode) or None
        :param id: CKA_ID to match (bytes; unicode is UTF-8 encoded)
        :param cka_wrap: when not None, match this CKA_WRAP value
        :param cka_unwrap: when not None, match this CKA_UNWRAP value
        :param uri: PKCS#11 URI (string/bytes) or None
        :return: list of matching object handles
        """
        if isinstance(id, unicode):
            id = id.encode()
        if isinstance(uri, unicode):
            uri = uri.encode()
        class_ = objclass
        class_ptr = new_ptr(CK_OBJECT_CLASS, class_)
        ckawrap = NULL
        ckaunwrap = NULL
        if id is not None:
            id_ = new_array(CK_BYTE, id)
            id_length = len(id)
        else:
            id_ = NULL
            id_length = 0
        label_unicode, label = label, NULL
        cka_wrap_bool = cka_wrap
        cka_unwrap_bool = cka_unwrap
        label_length = 0
        uri_str = uri
        uri = NULL
        template = new_array(CK_ATTRIBUTE, MAX_TEMPLATE_LEN)
        template_len_ptr = new_ptr(CK_ULONG, MAX_TEMPLATE_LEN)

        # TODO check long overflow

        if label_unicode is not None:
            label, label_length = unicode_to_char_array(label_unicode)

        if cka_wrap_bool is not None:
            if cka_wrap_bool:
                ckawrap = true_ptr
            else:
                ckawrap = false_ptr

        if cka_unwrap_bool is not None:
            if cka_unwrap_bool:
                ckaunwrap = true_ptr
            else:
                ckaunwrap = false_ptr

        # CKO_VENDOR_DEFINED is the sentinel for "do not filter by class"
        if class_ == CKO_VENDOR_DEFINED:
            class_ptr = NULL

        try:
            if uri_str is None:
                _fill_template_from_parts(template, template_len_ptr, id_,
                                          id_length, label, label_length,
                                          class_ptr, ckawrap, ckaunwrap)
            else:
                uri = _parse_uri(uri_str)
                template = (p11_kit_uri_get_attributes(uri, template_len_ptr))
                # Do not deallocate URI while you are using the template.
                # Template contains pointers to values inside URI!

            result_list = self._find_key(template, template_len_ptr[0])

            return result_list
        finally:
            # safe even when the URI branch was not taken (uri stays NULL)
            if uri != NULL:
                p11_kit_uri_free(uri)
def delete_key(self, key_handle):
"""
delete key
"""
# TODO check long overflow
rv = self.p11.C_DestroyObject(self.session, key_handle)
check_return_value(rv, "object deletion")
    def _export_RSA_public_key(self, object):
        """Export an RSA public key as SubjectPublicKeyInfo DER bytes.

        Reads CKA_MODULUS and CKA_PUBLIC_EXPONENT in two passes (size
        query, then value fetch), rebuilds the key with ``cryptography``
        and serializes it; returns None if DER serialization fails.

        :param object: handle of an RSA public key object
        :raises Error: if the object is not an RSA public key or the
            modulus/exponent cannot be converted
        """
        class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY)
        key_type_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA)

        obj_template = new_array(CK_ATTRIBUTE, (
            (CKA_MODULUS, NULL_PTR, 0),
            (CKA_PUBLIC_EXPONENT, NULL_PTR, 0),
            (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
            (CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
        ))

        # first pass fills ulValueLen for modulus and exponent
        rv = self.p11.C_GetAttributeValue(self.session, object, obj_template,
                                          (sizeof(obj_template) //
                                           sizeof(CK_ATTRIBUTE)))
        check_return_value(rv, "get RSA public key values - prepare")

        # Set proper size for attributes
        modulus = new_array(CK_BYTE,
                            obj_template[0].ulValueLen * sizeof(CK_BYTE))
        obj_template[0].pValue = modulus
        exponent = new_array(CK_BYTE,
                             obj_template[1].ulValueLen * sizeof(CK_BYTE))
        obj_template[1].pValue = exponent

        rv = self.p11.C_GetAttributeValue(self.session, object, obj_template,
                                          (sizeof(obj_template) //
                                           sizeof(CK_ATTRIBUTE)))
        check_return_value(rv, "get RSA public key values")

        # Check if the key is RSA public key
        if class_ptr[0] != CKO_PUBLIC_KEY:
            raise Error("export_RSA_public_key: required public key class")

        if key_type_ptr[0] != CKK_RSA:
            raise Error("export_RSA_public_key: required RSA key type")

        try:
            n = bytes_to_int(string_to_pybytes_or_none(
                modulus, obj_template[0].ulValueLen))
        except Exception:
            raise Error("export_RSA_public_key: internal error: unable to "
                        "convert modulus")

        try:
            e = bytes_to_int(string_to_pybytes_or_none(
                exponent, obj_template[1].ulValueLen))
        except Exception:
            raise Error("export_RSA_public_key: internal error: unable to "
                        "convert exponent")

        # set modulus and exponent
        rsa_ = rsa.RSAPublicNumbers(e, n)

        try:
            pkey = rsa_.public_key(default_backend())
        except Exception:
            raise Error("export_RSA_public_key: internal error: "
                        "EVP_PKEY_set1_RSA failed")

        try:
            ret = pkey.public_bytes(
                format=serialization.PublicFormat.SubjectPublicKeyInfo,
                encoding=serialization.Encoding.DER,
            )
        except Exception:
            # serialization failure is reported as None, not an exception
            ret = None

        return ret
def export_public_key(self, key_handle):
"""
Export public key
Export public key in SubjectPublicKeyInfo (RFC5280) DER encoded format
"""
object = key_handle
class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY)
key_type_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA)
# TODO check long overflow
obj_template = new_array(CK_ATTRIBUTE, (
(CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
))
rv = self.p11.C_GetAttributeValue(self.session, object, obj_template,
(sizeof(obj_template) //
sizeof(CK_ATTRIBUTE)))
check_return_value(rv, "export_public_key: get RSA public key values")
if class_ptr[0] != CKO_PUBLIC_KEY:
raise Error("export_public_key: required public key class")
if key_type_ptr[0] == CKK_RSA:
return self._export_RSA_public_key(object)
else:
raise Error("export_public_key: unsupported key type")
    def _import_RSA_public_key(self, label, label_length, id, id_length, pkey,
                               cka_copyable, cka_derive, cka_encrypt,
                               cka_modifiable, cka_private, cka_trusted,
                               cka_verify, cka_verify_recover, cka_wrap):
        """Create a CKO_PUBLIC_KEY RSA object from a cryptography key.

        :param label: CKA_LABEL as a C char array (already converted)
        :param id: CKA_ID as a CK_BYTE array (already converted)
        :param pkey: ``rsa.RSAPublicKey`` instance to import
        The cka_* parameters are CK_BBOOL pointers produced by
        convert_py2bool(); cka_copyable is accepted but not used
        (SoftHSM lacks CKA_COPYABLE support).
        :raises Error: if pkey is not RSA or modulus/exponent conversion
            yields an empty value
        :return: handle of the created public key object
        """
        class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PUBLIC_KEY)
        keyType_ptr = new_ptr(CK_KEY_TYPE, CKK_RSA)
        cka_token = true_ptr

        if not isinstance(pkey, rsa.RSAPublicKey):
            raise Error("Required RSA public key")

        rsa_ = pkey.public_numbers()

        # convert BIGNUM to binary array
        # NOTE(review): the "- 1" presumably strips a trailing NUL added by
        # new_array; confirm against the new_array helper.
        modulus = new_array(CK_BYTE, int_to_bytes(rsa_.n))
        modulus_len = sizeof(modulus) - 1
        if modulus_len == 0:
            raise Error("import_RSA_public_key: BN_bn2bin modulus error")

        exponent = new_array(CK_BYTE, int_to_bytes(rsa_.e))
        exponent_len = sizeof(exponent) - 1
        if exponent_len == 0:
            raise Error("import_RSA_public_key: BN_bn2bin exponent error")

        template = new_array(CK_ATTRIBUTE, (
            (CKA_ID, id, id_length),
            (CKA_CLASS, class_ptr, sizeof(CK_OBJECT_CLASS)),
            (CKA_KEY_TYPE, keyType_ptr, sizeof(CK_KEY_TYPE)),
            (CKA_TOKEN, cka_token, sizeof(CK_BBOOL)),
            (CKA_LABEL, label, label_length),
            (CKA_MODULUS, modulus, modulus_len),
            (CKA_PUBLIC_EXPONENT, exponent, exponent_len),
            # TODO Softhsm doesn't support it
            # (CKA_COPYABLE, cka_copyable, sizeof(CK_BBOOL)),
            (CKA_DERIVE, cka_derive, sizeof(CK_BBOOL)),
            (CKA_ENCRYPT, cka_encrypt, sizeof(CK_BBOOL)),
            (CKA_MODIFIABLE, cka_modifiable, sizeof(CK_BBOOL)),
            (CKA_PRIVATE, cka_private, sizeof(CK_BBOOL)),
            (CKA_TRUSTED, cka_trusted, sizeof(CK_BBOOL)),
            (CKA_VERIFY, cka_verify, sizeof(CK_BBOOL)),
            (CKA_VERIFY_RECOVER, cka_verify_recover, sizeof(CK_BBOOL)),
            (CKA_WRAP, cka_wrap, sizeof(CK_BBOOL)),
        ))

        object_ptr = new_ptr(CK_OBJECT_HANDLE)

        rv = self.p11.C_CreateObject(self.session, template,
                                     (sizeof(template) //
                                      sizeof(CK_ATTRIBUTE)), object_ptr)
        check_return_value(rv, "create public key object")

        return object_ptr[0]
    def import_public_key(self, label, id, data, cka_copyable=True,
                          cka_derive=False, cka_encrypt=False,
                          cka_modifiable=True, cka_private=True,
                          cka_trusted=False, cka_verify=True,
                          cka_verify_recover=True, cka_wrap=False):
        """Import an RSA public key from ASN.1 DER encoded data.

        Only RSA is supported; DSA and EC inputs raise Error. The cka_*
        keyword arguments are booleans mapped 1:1 to CKA_* attributes of
        the created object.

        :param label: CKA_LABEL of the new key (unicode)
        :param id: CKA_ID of the new key (bytes; unicode is UTF-8 encoded)
        :param data: DER-encoded SubjectPublicKeyInfo (bytes; unicode is
            UTF-8 encoded)
        :raises DuplicationError: if a public key with the same CKA_ID
            already exists
        :raises Error: on undecodable data or unsupported key type
        :return: handle of the created public key object
        """
        if isinstance(id, unicode):
            id = id.encode()
        if isinstance(data, unicode):
            data = data.encode()

        label_unicode = label
        id_ = new_array(CK_BYTE, id)
        id_length = len(id)

        attrs_pub = (
            cka_copyable,
            cka_derive,
            cka_encrypt,
            cka_modifiable,
            cka_private,
            cka_trusted,
            cka_verify,
            cka_verify_recover,
            cka_wrap,
        )

        label, label_length = unicode_to_char_array(label_unicode)

        if self._id_exists(id_, id_length, CKO_PUBLIC_KEY):
            raise DuplicationError("Public key with same ID already exists")

        # Process keyword boolean arguments
        (cka_copyable_ptr, cka_derive_ptr, cka_encrypt_ptr, cka_modifiable_ptr,
         cka_private_ptr, cka_trusted_ptr, cka_verify_ptr,
         cka_verify_recover_ptr, cka_wrap_ptr,) = convert_py2bool(attrs_pub)

        # decode from ASN1 DER
        try:
            pkey = serialization.load_der_public_key(data, default_backend())
        except Exception:
            raise Error("import_public_key: d2i_PUBKEY error")
        if isinstance(pkey, rsa.RSAPublicKey):
            ret = self._import_RSA_public_key(label, label_length, id_,
                                              id_length, pkey,
                                              cka_copyable_ptr,
                                              cka_derive_ptr,
                                              cka_encrypt_ptr,
                                              cka_modifiable_ptr,
                                              cka_private_ptr,
                                              cka_trusted_ptr,
                                              cka_verify_ptr,
                                              cka_verify_recover_ptr,
                                              cka_wrap_ptr)
        elif isinstance(pkey, dsa.DSAPublicKey):
            raise Error("DSA is not supported")
        elif isinstance(pkey, ec.EllipticCurvePublicKey):
            raise Error("EC is not supported")
        else:
            raise Error("Unsupported key type")
        return ret
    def export_wrapped_key(self, key, wrapping_key, wrapping_mech):
        """Wrap *key* with *wrapping_key* and return the wrapped bytes.

        Uses the standard PKCS#11 two-call pattern: first C_WrapKey call
        with a NULL buffer obtains the required length, the second call
        produces the wrapped key material.

        :param key: handle of the key to wrap
        :param wrapping_key: handle of the wrapping key
        :param wrapping_mech: CKM_* mechanism type (see MECH_* aliases)
        :return: wrapped key as bytes (or None)
        """
        object_key = key
        object_wrapping_key = wrapping_key
        wrapped_key_len_ptr = new_ptr(CK_ULONG, 0)
        wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0))
        # currently we don't support parameter in mechanism

        # TODO check long overflow
        # TODO export method

        # fill mech parameters
        _set_wrapping_mech_parameters(wrapping_mech_ptr.mechanism,
                                      wrapping_mech_ptr)

        rv = self.p11.C_WrapKey(self.session, wrapping_mech_ptr,
                                object_wrapping_key, object_key, NULL,
                                wrapped_key_len_ptr)
        check_return_value(rv, "key wrapping: get buffer length")

        wrapped_key = new_array(CK_BYTE, wrapped_key_len_ptr[0])

        rv = self.p11.C_WrapKey(self.session, wrapping_mech_ptr,
                                object_wrapping_key, object_key, wrapped_key,
                                wrapped_key_len_ptr)
        check_return_value(rv, "key wrapping: wrapping")

        result = string_to_pybytes_or_none(wrapped_key, wrapped_key_len_ptr[0])

        return result
    def import_wrapped_secret_key(self, label, id, data, unwrapping_key,
                                  wrapping_mech, key_type, cka_copyable=True,
                                  cka_decrypt=False, cka_derive=False,
                                  cka_encrypt=False, cka_extractable=True,
                                  cka_modifiable=True, cka_private=True,
                                  cka_sensitive=True, cka_sign=False,
                                  cka_unwrap=True, cka_verify=False,
                                  cka_wrap=True, cka_wrap_with_trusted=False):
        """Unwrap *data* and store it as a CKO_SECRET_KEY object.

        :param label: CKA_LABEL of the new key (unicode)
        :param id: CKA_ID of the new key (bytes; unicode is UTF-8 encoded)
        :param data: wrapped key material (bytes; unicode is UTF-8 encoded)
        :param unwrapping_key: handle of the key used for C_UnwrapKey
        :param wrapping_mech: CKM_* mechanism type (see MECH_* aliases)
        :param key_type: CKK_* key type of the unwrapped key
        The cka_* keyword arguments are booleans mapped 1:1 to CKA_*
        attributes (cka_copyable is unused — SoftHSM limitation).
        :raises DuplicationError: if a secret key with the same CKA_ID
            already exists
        :return: handle of the unwrapped key object
        """
        if isinstance(id, unicode):
            id = id.encode()
        if isinstance(data, unicode):
            data = data.encode()

        wrapped_key = new_array(CK_BYTE, data)
        wrapped_key_len = len(data)
        unwrapping_key_object = unwrapping_key
        unwrapped_key_object_ptr = new_ptr(CK_OBJECT_HANDLE, 0)
        label_unicode = label
        id_ = new_array(CK_BYTE, id)
        id_length = len(id)
        wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0))
        key_class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_SECRET_KEY)
        key_type_ptr = new_ptr(CK_KEY_TYPE, key_type)

        attrs = (
            cka_copyable,
            cka_decrypt,
            cka_derive,
            cka_encrypt,
            cka_extractable,
            cka_modifiable,
            cka_private,
            cka_sensitive,
            cka_sign,
            cka_unwrap,
            cka_verify,
            cka_wrap,
            cka_wrap_with_trusted,
        )

        _set_wrapping_mech_parameters(wrapping_mech_ptr.mechanism,
                                      wrapping_mech_ptr)

        label, label_length = unicode_to_char_array(label_unicode)

        if self._id_exists(id_, id_length, key_class_ptr[0]):
            raise DuplicationError("Secret key with same ID already exists")

        # Process keyword boolean arguments
        (_cka_copyable_ptr, cka_decrypt_ptr, cka_derive_ptr, cka_encrypt_ptr,
         cka_extractable_ptr, cka_modifiable_ptr, cka_private_ptr,
         cka_sensitive_ptr, cka_sign_ptr, cka_unwrap_ptr, cka_verify_ptr,
         cka_wrap_ptr, cka_wrap_with_trusted_ptr,) = convert_py2bool(attrs)

        template = new_array(CK_ATTRIBUTE, (
            (CKA_CLASS, key_class_ptr, sizeof(CK_OBJECT_CLASS)),
            (CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
            (CKA_ID, id_, id_length),
            (CKA_LABEL, label, label_length),
            (CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
            # TODO Softhsm doesn't support it
            # (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)),
            (CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)),
            (CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)),
            (CKA_ENCRYPT, cka_encrypt_ptr, sizeof(CK_BBOOL)),
            (CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)),
            (CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)),
            (CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)),
            (CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)),
            (CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)),
            (CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)),
            (CKA_VERIFY, cka_verify_ptr, sizeof(CK_BBOOL)),
            (CKA_WRAP, cka_wrap_ptr, sizeof(CK_BBOOL)),
            (CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr,
             sizeof(CK_BBOOL)),
        ))

        rv = self.p11.C_UnwrapKey(self.session, wrapping_mech_ptr,
                                  unwrapping_key_object, wrapped_key,
                                  wrapped_key_len, template,
                                  sizeof(template) // sizeof(CK_ATTRIBUTE),
                                  unwrapped_key_object_ptr)
        check_return_value(rv, "import_wrapped_key: key unwrapping")

        return unwrapped_key_object_ptr[0]
def import_wrapped_private_key(self, label, id, data, unwrapping_key,
wrapping_mech, key_type,
cka_always_authenticate=False,
cka_copyable=True, cka_decrypt=False,
cka_derive=False, cka_extractable=True,
cka_modifiable=True, cka_private=True,
cka_sensitive=True, cka_sign=True,
cka_sign_recover=True, cka_unwrap=False,
cka_wrap_with_trusted=False):
"""
Import wrapped private key
"""
if isinstance(id, unicode):
id = id.encode()
if isinstance(data, unicode):
data = data.encode()
wrapped_key = new_array(CK_BYTE, data)
wrapped_key_len = len(data)
unwrapping_key_object = unwrapping_key
unwrapped_key_object_ptr = new_ptr(CK_OBJECT_HANDLE, 0)
label_unicode = label
id_ = new_array(CK_BYTE, id)
id_length = len(id)
wrapping_mech_ptr = new_ptr(CK_MECHANISM, (wrapping_mech, NULL, 0))
key_class_ptr = new_ptr(CK_OBJECT_CLASS, CKO_PRIVATE_KEY)
key_type_ptr = new_ptr(CK_KEY_TYPE, key_type)
attrs_priv = (
cka_always_authenticate,
cka_copyable,
cka_decrypt,
cka_derive,
cka_extractable,
cka_modifiable,
cka_private,
cka_sensitive,
cka_sign,
cka_sign_recover,
cka_unwrap,
cka_wrap_with_trusted,
)
label, label_length = unicode_to_char_array(label_unicode)
if self._id_exists(id_, id_length, CKO_SECRET_KEY):
raise DuplicationError("Secret key with same ID already exists")
# Process keyword boolean arguments
(cka_always_authenticate_ptr, _cka_copyable_ptr, cka_decrypt_ptr,
cka_derive_ptr, cka_extractable_ptr, cka_modifiable_ptr,
cka_private_ptr, cka_sensitive_ptr, cka_sign_ptr,
_cka_sign_recover_ptr, cka_unwrap_ptr, cka_wrap_with_trusted_ptr,
) = convert_py2bool(attrs_priv)
template = new_array(CK_ATTRIBUTE, (
(CKA_CLASS, key_class_ptr, sizeof(CK_OBJECT_CLASS)),
(CKA_KEY_TYPE, key_type_ptr, sizeof(CK_KEY_TYPE)),
(CKA_ID, id_, id_length),
(CKA_LABEL, label, label_length),
(CKA_TOKEN, true_ptr, sizeof(CK_BBOOL)),
(CKA_ALWAYS_AUTHENTICATE, cka_always_authenticate_ptr,
sizeof(CK_BBOOL)),
# TODO Softhsm doesn't support it
# (CKA_COPYABLE, cka_copyable_ptr, sizeof(CK_BBOOL)),
(CKA_DECRYPT, cka_decrypt_ptr, sizeof(CK_BBOOL)),
(CKA_DERIVE, cka_derive_ptr, sizeof(CK_BBOOL)),
(CKA_EXTRACTABLE, cka_extractable_ptr, sizeof(CK_BBOOL)),
(CKA_MODIFIABLE, cka_modifiable_ptr, sizeof(CK_BBOOL)),
(CKA_PRIVATE, cka_private_ptr, sizeof(CK_BBOOL)),
(CKA_SENSITIVE, cka_sensitive_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN, cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_SIGN_RECOVER, cka_sign_ptr, sizeof(CK_BBOOL)),
(CKA_UNWRAP, cka_unwrap_ptr, sizeof(CK_BBOOL)),
(CKA_WRAP_WITH_TRUSTED, cka_wrap_with_trusted_ptr,
sizeof(CK_BBOOL)),
))
rv = self.p11.C_UnwrapKey(self.session, wrapping_mech_ptr,
unwrapping_key_object, wrapped_key,
wrapped_key_len, template,
sizeof(template) // sizeof(CK_ATTRIBUTE),
unwrapped_key_object_ptr)
check_return_value(rv, "import_wrapped_key: key unwrapping")
return unwrapped_key_object_ptr[0]
    def set_attribute(self, key_object, attr, value):
        """Set a single attribute on a PKCS#11 object.

        :param key_object: object handle
        :param attr: CKA_* attribute type; supported are the boolean
            attributes listed below plus CKA_ID (bytes), CKA_LABEL
            (unicode) and CKA_KEY_TYPE (int)
        :param value: new value; its required Python type depends on attr
        :raises Error: on an unsupported attribute or wrong value type
        """
        object = key_object

        attribute_ptr = new_ptr(CK_ATTRIBUTE)
        attribute_ptr.type = attr
        if attr in (CKA_ALWAYS_AUTHENTICATE,
                    CKA_ALWAYS_SENSITIVE,
                    CKA_COPYABLE,
                    CKA_ENCRYPT,
                    CKA_EXTRACTABLE,
                    CKA_DECRYPT,
                    CKA_DERIVE,
                    CKA_LOCAL,
                    CKA_MODIFIABLE,
                    CKA_NEVER_EXTRACTABLE,
                    CKA_PRIVATE,
                    CKA_SENSITIVE,
                    CKA_SIGN,
                    CKA_SIGN_RECOVER,
                    CKA_TOKEN,
                    CKA_TRUSTED,
                    CKA_UNWRAP,
                    CKA_VERIFY,
                    CKA_VERIFY_RECOVER,
                    CKA_WRAP,
                    CKA_WRAP_WITH_TRUSTED):
            # boolean attributes: any truthy value maps to CK_TRUE
            attribute_ptr.pValue = true_ptr if value else false_ptr
            attribute_ptr.ulValueLen = sizeof(CK_BBOOL)
        elif attr == CKA_ID:
            if not isinstance(value, bytes):
                raise Error("Bytestring value expected")
            attribute_ptr.pValue = new_array(CK_BYTE, value)
            attribute_ptr.ulValueLen = len(value)
        elif attr == CKA_LABEL:
            if not isinstance(value, unicode):
                raise Error("Unicode value expected")
            label, label_length = unicode_to_char_array(value)
            attribute_ptr.pValue = label
            attribute_ptr.ulValueLen = label_length
        elif attr == CKA_KEY_TYPE:
            if not isinstance(value, int):
                raise Error("Integer value expected")
            attribute_ptr.pValue = new_ptr(unsigned_long, value)
            attribute_ptr.ulValueLen = sizeof(unsigned_long)
        else:
            raise Error("Unknown attribute")

        template = new_array(CK_ATTRIBUTE, (attribute_ptr[0],))

        rv = self.p11.C_SetAttributeValue(self.session, object, template,
                                          (sizeof(template) //
                                           sizeof(CK_ATTRIBUTE)))
        check_return_value(rv, "set_attribute")
    def get_attribute(self, key_object, attr):
        """Read a single attribute from a PKCS#11 object.

        Uses the two-call pattern (size query with a NULL buffer, then
        value fetch) and converts the raw value based on *attr*.

        :param key_object: object handle
        :param attr: CKA_* attribute type; booleans, CKA_LABEL,
            CKA_MODULUS / CKA_PUBLIC_EXPONENT / CKA_ID (bytes) and
            CKA_KEY_TYPE (int) are supported
        :raises NotFound: if the object has no such attribute
        :raises Error: on an unsupported attribute type
        :return: bool, unicode, bytes or int depending on *attr*
        """
        object = key_object

        attribute_ptr = new_ptr(CK_ATTRIBUTE)
        attribute_ptr.type = attr
        attribute_ptr.pValue = NULL_PTR
        attribute_ptr.ulValueLen = 0
        template = new_array(CK_ATTRIBUTE, (attribute_ptr[0],))

        rv = self.p11.C_GetAttributeValue(self.session, object, template,
                                          (sizeof(template) //
                                           sizeof(CK_ATTRIBUTE)))
        # NOTE(review): ulValueLen is an unsigned CK_ULONG, so with cffi
        # the "== -1" comparison presumably never matches
        # (CK_UNAVAILABLE_INFORMATION arrives as a large positive int);
        # the CKR_ATTRIBUTE_TYPE_INVALID check covers the common case —
        # confirm.
        if rv == CKR_ATTRIBUTE_TYPE_INVALID or template[0].ulValueLen == -1:
            raise NotFound("attribute does not exist")
        check_return_value(rv, "get_attribute init")
        value = new_array(unsigned_char, template[0].ulValueLen)
        template[0].pValue = value

        rv = self.p11.C_GetAttributeValue(self.session, object, template,
                                          (sizeof(template) //
                                           sizeof(CK_ATTRIBUTE)))
        check_return_value(rv, "get_attribute")

        if attr in (CKA_ALWAYS_AUTHENTICATE,
                    CKA_ALWAYS_SENSITIVE,
                    CKA_COPYABLE,
                    CKA_ENCRYPT,
                    CKA_EXTRACTABLE,
                    CKA_DECRYPT,
                    CKA_DERIVE,
                    CKA_LOCAL,
                    CKA_MODIFIABLE,
                    CKA_NEVER_EXTRACTABLE,
                    CKA_PRIVATE,
                    CKA_SENSITIVE,
                    CKA_SIGN,
                    CKA_SIGN_RECOVER,
                    CKA_TOKEN,
                    CKA_TRUSTED,
                    CKA_UNWRAP,
                    CKA_VERIFY,
                    CKA_VERIFY_RECOVER,
                    CKA_WRAP,
                    CKA_WRAP_WITH_TRUSTED):
            ret = bool(_ffi.cast(_ffi.getctype(CK_BBOOL, '*'), value)[0])
        elif attr == CKA_LABEL:
            ret = char_array_to_unicode(value, template[0].ulValueLen)
        elif attr in (CKA_MODULUS, CKA_PUBLIC_EXPONENT, CKA_ID):
            ret = string_to_pybytes_or_none(value, template[0].ulValueLen)
        elif attr == CKA_KEY_TYPE:
            ret = _ffi.cast(_ffi.getctype(unsigned_long, '*'), value)[0]
        else:
            raise Error("Unknown attribute")

        return ret
# Public module-level aliases so callers do not need the raw CKO_/CKK_/CKM_
# constants.

# Key Classes
KEY_CLASS_PUBLIC_KEY = CKO_PUBLIC_KEY
KEY_CLASS_PRIVATE_KEY = CKO_PRIVATE_KEY
KEY_CLASS_SECRET_KEY = CKO_SECRET_KEY

# Key types
KEY_TYPE_RSA = CKK_RSA
KEY_TYPE_AES = CKK_AES

# Wrapping mech type
MECH_RSA_PKCS = CKM_RSA_PKCS
MECH_RSA_PKCS_OAEP = CKM_RSA_PKCS_OAEP
MECH_AES_KEY_WRAP = CKM_AES_KEY_WRAP
MECH_AES_KEY_WRAP_PAD = CKM_AES_KEY_WRAP_PAD
def gen_key_id(key_id_len=16):
    """Return *key_id_len* random bytes usable as a SoftHSM KEY_ID.

    :param key_id_len: number of bytes, SoftHSM expects 16
    :return: random KEY_ID in bytes representation
    """
    # bytes() accepts an iterable of ints in range 0-255
    return bytes(random.randint(0, 255) for _ in range(key_id_len))
def generate_master_key(p11, keylabel=u"dnssec-master", key_length=16,
                        disable_old_keys=True):
    """Create a fresh DNSSEC master key and optionally retire old ones.

    Picks a random 16-byte CKA_ID that is not yet used by any secret key
    with *keylabel*, generates the key with CKA_WRAP/CKA_UNWRAP set, and
    (when *disable_old_keys* is true) clears CKA_WRAP on every other
    master key with the same label so new wraps use only the fresh key.
    """
    assert isinstance(p11, P11_Helper)

    # find a CKA_ID (16 bytes) that does not collide with an existing key
    while True:
        key_id = gen_key_id()
        existing = p11.find_keys(KEY_CLASS_SECRET_KEY,
                                 label=keylabel,
                                 id=key_id)
        if not existing:
            break

    p11.generate_master_key(keylabel,
                            key_id,
                            key_length=key_length,
                            cka_wrap=True,
                            cka_unwrap=True)

    if disable_old_keys:
        # set CKA_WRAP=False for old master keys; compare CKA_ID values,
        # not handles, so the freshly generated key is left untouched
        for handle in p11.find_keys(KEY_CLASS_SECRET_KEY,
                                    label=keylabel,
                                    cka_wrap=True):
            if p11.get_attribute(handle, CKA_ID) != key_id:
                p11.set_attribute(handle, CKA_WRAP, False)
| 65,253
|
Python
|
.py
| 1,546
| 30.726391
| 79
| 0.57231
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,689
|
masters.py
|
freeipa_freeipa/ipaserver/masters.py
|
#
# Copyright (C) 2018 FreeIPA Contributors see COPYING for license
#
"""Helpers services in for cn=masters,cn=ipa,cn=etc
"""
from __future__ import absolute_import
import collections
import logging
import random
from ipapython.dn import DN
from ipalib import api
from ipalib import errors
logger = logging.getLogger(__name__)
# constants for ipaConfigString values stored on service entries under
# cn=masters,cn=ipa,cn=etc
CONFIGURED_SERVICE = u'configuredService'
ENABLED_SERVICE = u'enabledService'
HIDDEN_SERVICE = u'hiddenService'
PAC_TKT_SIGN_SUPPORTED = u'pacTktSignSupported'
PKINIT_ENABLED = u'pkinitEnabled'

# The service name as stored in cn=masters,cn=ipa,cn=etc. The values are:
# 0: systemd service name
# 1: start order for system service
# 2: LDAP server entry CN, also used as SERVICE_LIST key
service_definition = collections.namedtuple(
    "service_definition",
    "systemd_name startorder service_entry"
)

# definitions ordered by startorder
SERVICES = [
    service_definition('krb5kdc', 10, 'KDC'),
    service_definition('kadmin', 20, 'KPASSWD'),
    service_definition('named', 30, 'DNS'),
    service_definition('httpd', 40, 'HTTP'),
    service_definition('ipa-custodia', 41, 'KEYS'),
    service_definition('pki-tomcatd', 50, 'CA'),
    service_definition('pki-tomcatd', 51, 'KRA'),
    service_definition('smb', 60, 'ADTRUST'),
    service_definition('winbind', 70, 'EXTID'),
    service_definition('ipa-otpd', 80, 'OTPD'),
    service_definition('ipa-ods-exporter', 90, 'DNSKeyExporter'),
    service_definition('ods-enforcerd', 100, 'DNSSEC'),
    service_definition('ipa-dnskeysyncd', 110, 'DNSKeySync'),
]

# lookup table keyed by the LDAP service entry CN
SERVICE_LIST = {s.service_entry: s for s in SERVICES}
def find_providing_servers(svcname, conn=None, preferred_hosts=(), api=api):
    """Find servers that provide the given service.

    :param svcname: The service to find
    :param preferred_hosts: preferred servers
    :param conn: a connection to the LDAP server
    :param api: ipalib.API instance
    :return: list of host names in randomized order (possibly empty)

    Preferred servers are moved to the front of the list if and only if they
    are found as providing servers.
    """
    assert isinstance(preferred_hosts, (tuple, list))
    if svcname not in SERVICE_LIST:
        raise ValueError("Unknown service '{}'.".format(svcname))
    if conn is None:
        conn = api.Backend.ldap2

    masters_dn = DN(api.env.container_masters, api.env.basedn)
    # (service entry for svcname) AND (enabled OR hidden)
    svc_filter = conn.make_filter(
        {
            'objectClass': 'ipaConfigObject',
            'cn': svcname
        },
        rules=conn.MATCH_ALL,
    )
    state_filter = conn.make_filter(
        {
            'ipaConfigString': [ENABLED_SERVICE, HIDDEN_SERVICE]
        },
        rules=conn.MATCH_ANY
    )
    query_filter = conn.combine_filters(
        [svc_filter, state_filter], rules=conn.MATCH_ALL
    )
    try:
        entries, _trunc = conn.find_entries(
            filter=query_filter,
            attrs_list=['ipaConfigString'],
            base_dn=masters_dn
        )
    except errors.NotFound:
        return []

    # DNS is case insensitive
    preferred = [name.lower() for name in preferred_hosts]

    # collect unique provider host names
    candidates = set()
    for entry in entries:
        hostname = entry.dn[1].value.lower()
        flags = entry.get('ipaConfigString', [])
        if ENABLED_SERVICE in flags:
            # always consider enabled services
            candidates.add(hostname)
        elif HIDDEN_SERVICE in flags and hostname in preferred:
            # use hidden services on preferred hosts
            candidates.add(hostname)

    servers = list(candidates)
    # shuffle the list like DNS SRV would randomize it
    random.shuffle(servers)

    # Move preferred hosts to front (reversed keeps their relative order)
    for hostname in reversed(preferred):
        if hostname in servers:
            servers.remove(hostname)
            servers.insert(0, hostname)
        else:
            # preferred server not found, log and ignore
            logger.warning(
                "Lookup failed: Preferred host %s does not provide %s.",
                hostname, svcname
            )

    logger.debug("Discovery: available servers for service '%s' are %s",
                 svcname, ', '.join(servers))
    return servers
def find_providing_server(svcname, conn=None, preferred_hosts=(), api=api):
    """Pick one server that provides the given service.

    :param svcname: The service to find
    :param conn: a connection to the LDAP server
    :param preferred_hosts: preferred servers
    :param api: ipalib.API instance
    :return: the selected host name or None
    """
    candidates = find_providing_servers(
        svcname, conn=conn, preferred_hosts=preferred_hosts, api=api
    )
    if not candidates:
        logger.debug("Discovery: no '%s' service found.", svcname)
        return None
    logger.debug("Discovery: using %s for '%s' service",
                 candidates[0], svcname)
    return candidates[0]
def get_masters(conn=None, api=api):
    """Get all master hostnames.

    :param conn: a connection to the LDAP server
    :param api: ipalib.API instance
    :return: list of hostnames
    """
    ldap = conn if conn is not None else api.Backend.ldap2
    masters_dn = DN(api.env.container_masters, api.env.basedn)
    entries = ldap.get_entries(masters_dn, ldap.SCOPE_ONELEVEL, None, ['cn'])
    return [entry['cn'][0] for entry in entries]
def is_service_enabled(svcname, conn=None, api=api):
    """Check whether a service entry exists on any master.

    Only the presence of service entries is checked; the enabled/hidden
    flags are ignored.

    :param svcname: The service to find
    :param conn: a connection to the LDAP server
    :param api: ipalib.API instance
    :return: True/False
    :raises ValueError: if svcname is not a known service
    """
    if svcname not in SERVICE_LIST:
        raise ValueError("Unknown service '{}'.".format(svcname))
    ldap_conn = api.Backend.ldap2 if conn is None else conn
    masters_dn = DN(api.env.container_masters, api.env.basedn)
    service_filter = ldap_conn.make_filter(
        {
            'objectClass': 'ipaConfigObject',
            'cn': svcname
        },
        rules='&'
    )
    try:
        ldap_conn.find_entries(
            filter=service_filter,
            attrs_list=[],
            base_dn=masters_dn
        )
    except errors.NotFound:
        # no such service entry on any master
        return False
    return True
| 6,398
|
Python
|
.py
| 176
| 29.340909
| 78
| 0.648531
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,690
|
__init__.py
|
freeipa_freeipa/ipaserver/__init__.py
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Package containing server backend.
"""
| 822
|
Python
|
.py
| 21
| 38.095238
| 71
| 0.7725
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,691
|
dns_data_management.py
|
freeipa_freeipa/ipaserver/dns_data_management.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
import six
from collections import defaultdict, OrderedDict
from dns import (
rdata,
rdataclass,
rdatatype,
zone,
)
from time import sleep, time
from ipalib import errors
from ipalib.constants import IPA_CA_RECORD
from ipalib.dns import record_name_format
from ipapython.dnsutil import DNSName
from ipaserver.install import installutils
if six.PY3:
    # Py2 compatibility alias; used below when converting rdata to text
    unicode=str
logger = logging.getLogger(__name__)
# Default SRV records published for every IPA master.
IPA_DEFAULT_MASTER_SRV_REC = (
    # srv record name, port
    (DNSName('_ldap._tcp'), 389),
    # Kerberos records are provided for MIT KRB5 < 1.15 and AD
    (DNSName('_kerberos._tcp'), 88),
    (DNSName('_kerberos._udp'), 88),
    (DNSName('_kerberos-master._tcp'), 88),
    (DNSName('_kerberos-master._udp'), 88),
    (DNSName('_kpasswd._tcp'), 464),
    (DNSName('_kpasswd._udp'), 464),
)
IPA_DEFAULT_MASTER_URI_REC = (
    # URI record name, URI template
    # MIT KRB5 1.15+ prefers URI records for service discovery
    # scheme: always krb5srv
    # flags: empty or 'm' for primary server
    # transport: 'tcp', 'udp', or 'kkdcp')
    # residual: 'hostname', 'hostname:port', or 'https://' URL
    (DNSName('_kerberos'), "krb5srv:m:tcp:{hostname}"),
    (DNSName('_kerberos'), "krb5srv:m:udp:{hostname}"),
    (DNSName('_kpasswd'), "krb5srv:m:tcp:{hostname}"),
    (DNSName('_kpasswd'), "krb5srv:m:udp:{hostname}"),
)
# SRV records for AD trust controllers (MS Active Directory discovery).
IPA_DEFAULT_ADTRUST_SRV_REC = (
    # srv record name, port
    (DNSName('_ldap._tcp.Default-First-Site-Name._sites.dc._msdcs'), 389),
    (DNSName('_ldap._tcp.dc._msdcs'), 389),
    (DNSName('_kerberos._tcp.Default-First-Site-Name._sites.dc._msdcs'), 88),
    (DNSName('_kerberos._udp.Default-First-Site-Name._sites.dc._msdcs'), 88),
    (DNSName('_kerberos._tcp.dc._msdcs'), 88),
    (DNSName('_kerberos._udp.dc._msdcs'), 88),
)
IPA_DEFAULT_NTP_SRV_REC = (
    # srv record name, port
    (DNSName("_ntp._udp"), 123),
)
# TXT record advertising the Kerberos realm; template filled with realm.
IPA_DEFAULT_KRB_TXT_REC = (
    (DNSName('_kerberos'), "\"{realm}\""),
)
CA_RECORDS_DNS_TIMEOUT = 15  # timeout in seconds
class IPADomainIsNotManagedByIPAError(Exception):
    """Raised when the IPA domain zone is not managed by IPA DNS."""
    pass
class IPASystemRecords:
    """Generate and maintain IPA system DNS records.

    Builds SRV, URI and TXT records (and ipa-ca address records) for IPA
    servers and IPA locations, and pushes them into the IPA-managed DNS
    zone via the dnsrecord-* API commands.

    Server data is cached at construction time; call reload_data() after
    any change to IPA servers, otherwise stale records may be written.
    """
    # fixme do it configurable
    PRIORITY_HIGH = 0
    PRIORITY_LOW = 50
    # FIXME: use TTL from config
    TTL = 3600
    def __init__(self, api_instance, all_servers=False):
        """
        :param api_instance: ipalib.API instance
        :param all_servers: if True, load all servers returned by
            server-find, not only fully installed IPA masters
        """
        self.api_instance = api_instance
        self.domain_abs = DNSName(self.api_instance.env.domain).make_absolute()
        self.servers_data = OrderedDict()
        self.__init_data(all_servers=all_servers)
    def reload_data(self):
        """
        After any change made to IPA servers, this method must be called to
        update data in the object, otherwise invalid records may be
        created/updated
        """
        self.__init_data()
    def __get_server_attrs(self, server_result):
        # extract (weight, location, roles) from a server-find entry
        weight = int(server_result.get('ipaserviceweight', ['100'])[0])
        location = server_result.get('ipalocation_location', [None])[0]
        roles = set(server_result.get('enabled_role_servrole', ()))
        return weight, location, roles
    def __get_location_suffix(self, location):
        # DNS suffix for location-specific records:
        # <location>._locations.<domain>.
        return location + DNSName('_locations') + self.domain_abs
    def __init_data(self, all_servers=False):
        # (re)load weight/location/roles per server into servers_data
        self.servers_data.clear()
        kwargs = dict(no_members=False)
        if not all_servers:
            # only active, fully installed masters
            kwargs["servrole"] = "IPA master"
        servers = self.api_instance.Command.server_find(**kwargs)
        for s in servers['result']:
            weight, location, roles = self.__get_server_attrs(s)
            self.servers_data[s['cn'][0]] = {
                'weight': weight,
                'location': location,
                'roles': roles,
            }
    def __add_srv_records(
        self, zone_obj, hostname, rname_port_map,
        weight=100, priority=0, location=None
    ):
        # Add one SRV record per (name, port) pair to zone_obj; records
        # go under the location suffix when a location is given.
        assert isinstance(hostname, DNSName)
        assert isinstance(priority, int)
        assert isinstance(weight, int)
        if location:
            suffix = self.__get_location_suffix(location)
        else:
            suffix = self.domain_abs
        for name, port in rname_port_map:
            rd = rdata.from_text(
                rdataclass.IN, rdatatype.SRV,
                '{0} {1} {2} {3}'.format(
                    priority, weight, port, hostname.make_absolute()
                )
            )
            r_name = name.derelativize(suffix)
            rdataset = zone_obj.get_rdataset(
                r_name, rdatatype.SRV, create=True)
            rdataset.add(rd, ttl=self.TTL)
    def __add_uri_records(
        self, zone_obj, hostname, rname_uri_map,
        weight=100, priority=0, location=None
    ):
        # Add one URI record per (name, template) pair, analogous to
        # __add_srv_records; the template is filled with the hostname.
        assert isinstance(hostname, DNSName)
        assert isinstance(priority, int)
        assert isinstance(weight, int)
        if location:
            suffix = self.__get_location_suffix(location)
        else:
            suffix = self.domain_abs
        for name, uri_template in rname_uri_map:
            uri = uri_template.format(hostname=hostname.make_absolute())
            rd = rdata.from_text(
                rdataclass.IN, rdatatype.URI,
                '{0} {1} {2}'.format(
                    priority, weight, uri
                )
            )
            r_name = name.derelativize(suffix)
            rdataset = zone_obj.get_rdataset(
                r_name, rdatatype.URI, create=True)
            rdataset.add(rd, ttl=self.TTL)
    def __add_ca_records_from_hostname(self, zone_obj, hostname):
        # Resolve the CA host's addresses and publish them under the
        # ipa-ca name; retries for up to CA_RECORDS_DNS_TIMEOUT seconds.
        assert isinstance(hostname, DNSName) and hostname.is_absolute()
        r_name = DNSName(IPA_CA_RECORD) + self.domain_abs
        rrsets = None
        end_time = time() + CA_RECORDS_DNS_TIMEOUT
        while True:
            try:
                # function logs errors
                rrsets = installutils.resolve_rrsets_nss(hostname)
            except OSError:
                # also retry on EAI_AGAIN, EAI_FAIL
                pass
            if rrsets:
                break
            if time() >= end_time:
                break
            sleep(3)
        if not rrsets:
            logger.error('unable to resolve host name %s to IP address, '
                         'ipa-ca DNS record will be incomplete', hostname)
            return
        for rrset in rrsets:
            for rd in rrset:
                logger.debug("Adding CA IP %s for %s", rd.to_text(), hostname)
                rdataset = zone_obj.get_rdataset(
                    r_name, rd.rdtype, create=True)
                rdataset.add(rd, ttl=self.TTL)
    def __add_kerberos_txt_rec(self, zone_obj, location=None):
        # Publish the realm name as a _kerberos TXT record, optionally
        # under a location suffix.
        # FIXME: with external DNS, this should generate records for all
        # realmdomains
        if location:
            suffix = self.__get_location_suffix(location)
        else:
            suffix = self.domain_abs
        r_name = DNSName('_kerberos') + suffix
        rd = rdata.from_text(rdataclass.IN, rdatatype.TXT,
                             self.api_instance.env.realm)
        rdataset = zone_obj.get_rdataset(
            r_name, rdatatype.TXT, create=True
        )
        rdataset.add(rd, ttl=self.TTL)
    def _add_base_dns_records_for_server(
        self, zone_obj, hostname, roles=None, include_master_role=True,
        include_kerberos_realm=True,
    ):
        # Add domain-wide (non-location) records for one server,
        # restricted to the intersection of its roles and `roles`.
        server = self.servers_data[hostname]
        if roles:
            eff_roles = server['roles'] & set(roles)
        else:
            eff_roles = server['roles']
        hostname_abs = DNSName(hostname).make_absolute()
        if include_kerberos_realm:
            self.__add_kerberos_txt_rec(zone_obj, location=None)
        # get master records
        if include_master_role:
            self.__add_srv_records(
                zone_obj,
                hostname_abs,
                IPA_DEFAULT_MASTER_SRV_REC,
                weight=server['weight']
            )
            self.__add_uri_records(
                zone_obj,
                hostname_abs,
                IPA_DEFAULT_MASTER_URI_REC,
                weight=server['weight']
            )
        if 'CA server' in eff_roles:
            self.__add_ca_records_from_hostname(zone_obj, hostname_abs)
        if 'AD trust controller' in eff_roles:
            self.__add_srv_records(
                zone_obj,
                hostname_abs,
                IPA_DEFAULT_ADTRUST_SRV_REC,
                weight=server['weight']
            )
        if 'NTP server' in eff_roles:
            self.__add_srv_records(
                zone_obj,
                hostname_abs,
                IPA_DEFAULT_NTP_SRV_REC,
                weight=server['weight']
            )
    def _get_location_dns_records_for_server(
        self, zone_obj, hostname, locations,
        roles=None, include_master_role=True,
        include_kerberos_realm=True):
        # Add per-location records for one server; the server's own
        # location gets PRIORITY_HIGH, all others PRIORITY_LOW.
        server = self.servers_data[hostname]
        if roles:
            eff_roles = server['roles'] & roles
        else:
            eff_roles = server['roles']
        hostname_abs = DNSName(hostname).make_absolute()
        # generate locations specific records
        for location in locations:
            if location == self.servers_data[hostname]['location']:
                priority = self.PRIORITY_HIGH
            else:
                priority = self.PRIORITY_LOW
            if include_kerberos_realm:
                self.__add_kerberos_txt_rec(zone_obj, location)
            if include_master_role:
                self.__add_srv_records(
                    zone_obj,
                    hostname_abs,
                    IPA_DEFAULT_MASTER_SRV_REC,
                    weight=server['weight'],
                    priority=priority,
                    location=location
                )
                self.__add_uri_records(
                    zone_obj,
                    hostname_abs,
                    IPA_DEFAULT_MASTER_URI_REC,
                    weight=server['weight'],
                    priority=priority,
                    location=location
                )
            if 'AD trust controller' in eff_roles:
                self.__add_srv_records(
                    zone_obj,
                    hostname_abs,
                    IPA_DEFAULT_ADTRUST_SRV_REC,
                    weight=server['weight'],
                    priority=priority,
                    location=location
                )
            if 'NTP server' in eff_roles:
                self.__add_srv_records(
                    zone_obj,
                    hostname_abs,
                    IPA_DEFAULT_NTP_SRV_REC,
                    weight=server['weight'],
                    priority=priority,
                    location=location
                )
        return zone_obj
    def __prepare_records_update_dict(self, node):
        # Convert a zone node into dnsrecord-* keyword arguments, e.g.
        # {'srvrecord': ['0 100 389 host.'], ...}.
        # NOTE: the loop variable shadows the module-level ``rdata``
        # import inside this method.
        update_dict = defaultdict(list)
        for rdataset in node:
            for rdata in rdataset:
                option_name = (record_name_format % rdatatype.to_text(
                    rdata.rdtype).lower())
                update_dict[option_name].append(unicode(rdata.to_text()))
        return update_dict
    def __update_dns_records(
        self, record_name, nodes, set_cname_template=True
    ):
        # Write one record name into LDAP DNS via dnsrecord-mod, falling
        # back to dnsrecord-add when the record does not exist yet.
        update_dict = self.__prepare_records_update_dict(nodes)
        cname_template = {
            'addattr': ['objectclass=idnsTemplateObject'],
            'setattr': [
                r'idnsTemplateAttribute;cnamerecord=%s'
                r'.\{substitutionvariable_ipalocation\}._locations' %
                record_name.relativize(self.domain_abs)
            ]
        }
        try:
            if set_cname_template:
                # only srv records should have configured cname templates
                update_dict.update(cname_template)
            self.api_instance.Command.dnsrecord_mod(
                self.domain_abs, record_name,
                **update_dict
            )
        except errors.NotFound:
            # because internal API magic, addattr and setattr doesn't work with
            # dnsrecord-add well, use dnsrecord-mod instead later
            update_dict.pop('addattr', None)
            update_dict.pop('setattr', None)
            self.api_instance.Command.dnsrecord_add(
                self.domain_abs, record_name, **update_dict)
            if set_cname_template:
                try:
                    self.api_instance.Command.dnsrecord_mod(
                        self.domain_abs,
                        record_name, **cname_template)
                except errors.EmptyModlist:
                    pass
        except errors.EmptyModlist:
            # nothing changed, records already up to date
            pass
    def get_base_records(
        self, servers=None, roles=None, include_master_role=True,
        include_kerberos_realm=True
    ):
        """
        Generate IPA service records for specific servers and roles
        :param servers: list of server which will be used in records,
            if None all IPA servers will be used
        :param roles: roles for which DNS records will be generated,
            if None all roles will be used
        :param include_master_role: generate records required by IPA master
            role
        :return: dns.zone.Zone object that contains base DNS records
        """
        zone_obj = zone.Zone(self.domain_abs, relativize=False)
        if servers is None:
            servers = list(self.servers_data)
        for server in servers:
            self._add_base_dns_records_for_server(zone_obj, server,
                roles=roles, include_master_role=include_master_role,
                include_kerberos_realm=include_kerberos_realm
            )
        return zone_obj
    def get_locations_records(
        self, servers=None, roles=None, include_master_role=True,
        include_kerberos_realm=True
    ):
        """
        Generate IPA location records for specific servers and roles.
        :param servers: list of server which will be used in records,
            if None all IPA servers will be used
        :param roles: roles for which DNS records will be generated,
            if None all roles will be used
        :param include_master_role: generate records required by IPA master
            role
        :return: dns.zone.Zone object that contains location DNS records
        """
        zone_obj = zone.Zone(self.domain_abs, relativize=False)
        if servers is None:
            servers = list(self.servers_data)
        locations_result = self.api_instance.Command.location_find()['result']
        locations = [l['idnsname'][0] for l in locations_result]
        for server in servers:
            self._get_location_dns_records_for_server(
                zone_obj, server,
                locations, roles=roles,
                include_master_role=include_master_role,
                include_kerberos_realm=include_kerberos_realm)
        return zone_obj
    def update_base_records(self):
        """
        Update base DNS records for IPA services
        :return: [(record_name, node), ...], [(record_name, node, error), ...]
        where the first list contains successfully updated records, and the
        second list contains failed updates with particular exceptions
        """
        fail = []
        success = []
        names_requiring_cname_templates = set(
            rec[0].derelativize(self.domain_abs) for rec in (
                IPA_DEFAULT_MASTER_SRV_REC
                + IPA_DEFAULT_MASTER_URI_REC
                + IPA_DEFAULT_KRB_TXT_REC
                + IPA_DEFAULT_ADTRUST_SRV_REC
                + IPA_DEFAULT_NTP_SRV_REC
            )
        )
        # Remove the ipa-ca record(s). They will be reconstructed in
        # get_base_records().
        r_name = DNSName(IPA_CA_RECORD) + self.domain_abs
        try:
            self.api_instance.Command.dnsrecord_del(
                self.domain_abs, r_name, del_all=True)
        except errors.NotFound:
            pass
        base_zone = self.get_base_records()
        for record_name, node in base_zone.items():
            set_cname_template = record_name in names_requiring_cname_templates
            try:
                self.__update_dns_records(
                    record_name, node, set_cname_template)
            except errors.PublicError as e:
                fail.append((record_name, node, e))
            else:
                success.append((record_name, node))
        return success, fail
    def update_locations_records(self):
        """
        Update locations DNS records for IPA services
        :return: [(record_name, node), ...], [(record_name, node, error), ...]
        where the first list contains successfully updated records, and the
        second list contains failed updates with particular exceptions
        """
        fail = []
        success = []
        location_zone = self.get_locations_records()
        for record_name, nodes in location_zone.items():
            try:
                self.__update_dns_records(
                    record_name, nodes,
                    set_cname_template=False)
            except errors.PublicError as e:
                fail.append((record_name, nodes, e))
            else:
                success.append((record_name, nodes))
        return success, fail
    def update_dns_records(self):
        """
        Update all IPA DNS records
        :return: (sucessfully_updated_base_records, failed_base_records,
        sucessfully_updated_locations_records, failed_locations_records)
        For format see update_base_records or update_locations_method
        :raise IPADomainIsNotManagedByIPAError: if IPA domain is not managed by
        IPA DNS
        """
        try:
            self.api_instance.Command.dnszone_show(self.domain_abs)
        except errors.NotFound:
            raise IPADomainIsNotManagedByIPAError()
        return (
            self.update_base_records(),
            self.update_locations_records()
        )
    def remove_location_records(self, location):
        """
        Remove all location records
        :param location: DNSName object
        :return: list of successfully removed record names, list of record
        names that cannot be removed and returned exception in tuples
        [rname1, ...], [(rname2, exc), ...]
        """
        success = []
        failed = []
        location = DNSName(location)
        loc_records = []
        for records in (
                IPA_DEFAULT_MASTER_SRV_REC,
                IPA_DEFAULT_ADTRUST_SRV_REC,
                IPA_DEFAULT_NTP_SRV_REC
        ):
            for name, _port in records:
                loc_records.append(
                    name + self.__get_location_suffix(location))
        for rname in loc_records:
            try:
                self.api_instance.Command.dnsrecord_del(
                    self.domain_abs, rname, del_all=True)
            except errors.NotFound:
                # record already absent, nothing to remove
                pass
            except errors.PublicError as e:
                failed.append((rname, e))
            else:
                success.append(rname)
        return success, failed
    @classmethod
    def records_list_from_node(cls, name, node):
        # Render one zone node as "<name> <ttl> <class> <type> <rdata>"
        # text lines.
        records = []
        for rdataset in node:
            for rd in rdataset:
                records.append(
                    '{name} {ttl} {rdclass} {rdtype} {rdata}'.format(
                        name=name.ToASCII(),
                        ttl=rdataset.ttl,
                        rdclass=rdataclass.to_text(rd.rdclass),
                        rdtype=rdatatype.to_text(rd.rdtype),
                        rdata=rd.to_text()
                    )
                )
        return records
    @classmethod
    def records_list_from_zone(cls, zone_obj, sort=True):
        # Render every node in zone_obj as text lines, sorted by default.
        records = []
        for name, node in zone_obj.items():
            records.extend(IPASystemRecords.records_list_from_node(name, node))
        if sort:
            records.sort()
        return records
| 20,106
|
Python
|
.py
| 506
| 28.235178
| 79
| 0.567696
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,692
|
wsgi.py
|
freeipa_freeipa/ipaserver/wsgi.py
|
#
# Copyright (C) 2021 FreeIPA Contributors see COPYING for license
#
"""WSGI server application
"""
import gc
import logging
import os
import sys
# Some dependencies like Dogtag's pki.client library and custodia use
# python-requsts to make HTTPS connection. python-requests prefers
# PyOpenSSL over Python's stdlib ssl module. PyOpenSSL is build on top
# of python-cryptography which trigger a execmem SELinux violation
# in the context of Apache HTTPD (httpd_execmem).
# When requests is imported, it always tries to import pyopenssl glue
# code from urllib3's contrib directory. The import of PyOpenSSL is
# enough to trigger the SELinux denial.
# Block any import of PyOpenSSL's SSL module by raising an ImportError
sys.modules["OpenSSL.SSL"] = None
from ipaplatform.paths import paths
from ipalib import api
from ipapython import ipaldap
logger = logging.getLogger(os.path.basename(__file__))
def populate_schema_cache(api=api):
    """Pre-populate the LDAP schema cache in the parent process.

    The LDAP server schema is available for anonymous binds, so no
    credentials are required here.
    """
    ldap_conn = ipaldap.ldap_initialize(api.env.ldap_uri)
    try:
        ipaldap.schema_cache.get_schema(api.env.ldap_uri, ldap_conn)
    except Exception as e:
        # best effort: a cold cache only slows down the first request
        logger.error("Failed to pre-populate LDAP schema cache: %s", e)
    finally:
        try:
            ldap_conn.unbind_s()
        except AttributeError:
            # SimpleLDAPObject has no attribute '_l'
            pass
def create_application():
    """Bootstrap and finalize the IPA API, then return the WSGI callable."""
    api.bootstrap(context="server", confdir=paths.ETC_IPA, log=None)
    try:
        api.finalize()
    except Exception as e:
        logger.error("Failed to start IPA: %s", e)
        raise
    # speed up first request to each worker by 200ms
    populate_schema_cache()
    # Collect garbage and freeze all objects that are currently tracked
    # by the cyclic garbage collector. We assume that the vast majority
    # of currently loaded objects won't be removed in requests; freezing
    # them speeds up GC collections and improves CoW memory handling.
    gc.collect()
    if hasattr(gc, "freeze"):
        # Python 3.7+
        gc.freeze()

    def application(environ, start_response):
        # This is the WSGI callable; IPA requires the pre-fork MPM.
        if environ["wsgi.multithread"]:
            logger.error(
                "IPA does not work with the threaded MPM, "
                "use the pre-fork MPM"
            )
            raise RuntimeError("threaded MPM detected")
        return api.Backend.wsgi_dispatch(environ, start_response)

    return application
if __name__ == "__main__":
    # create the WSGI application when this module is executed directly
    application = create_application()
| 2,592
|
Python
|
.py
| 68
| 32.602941
| 74
| 0.703468
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,693
|
topology.py
|
freeipa_freeipa/ipaserver/topology.py
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
set of functions and classes useful for management of domain level 1 topology
"""
from copy import deepcopy
from ipalib import _
from ipapython.graph import Graph
# Error message templates; filled with 'suffix', 'hostname' and 'errors'
# by TopologyConnectivity.check_* methods.
CURR_TOPOLOGY_DISCONNECTED = _("""
Replication topology in suffix '%(suffix)s' is disconnected:
%(errors)s""")
REMOVAL_DISCONNECTS_TOPOLOGY = _("""
Removal of '%(hostname)s' leads to disconnected topology in suffix '%(suffix)s':
%(errors)s""")
def create_topology_graph(masters, segments):
    """
    Build an oriented graph from the topology defined by masters and
    segments.

    :param masters: master entries carrying a 'cn' attribute
    :param segments: topology segment entries
    :returns: Graph
    """
    graph = Graph()
    for master in masters:
        graph.add_vertex(master['cn'][0])
    for segment in segments:
        direction = segment['iparepltoposegmentdirection'][0]
        left = segment['iparepltoposegmentleftnode'][0]
        right = segment['iparepltoposegmentrightnode'][0]
        # map the segment direction onto the oriented edges it implies
        if direction == u'both':
            edges = ((left, right), (right, left))
        elif direction == u'left-right':
            edges = ((left, right),)
        elif direction == u'right-left':
            edges = ((right, left),)
        else:
            edges = ()
        try:
            for tail, head in edges:
                graph.add_edge(tail, head)
        except ValueError:  # ignore segments with deleted master
            pass
    return graph
def get_topology_connection_errors(graph):
    """
    Traverse the graph from each master and report the masters that are
    not reachable from it.

    :param graph: topology graph where vertices are masters
    :returns: list of errors, error is: (master, visited, not_visited)
    """
    connect_errors = []
    for master in sorted(graph.vertices):
        reachable = graph.bfs(master)
        unreachable = graph.vertices - reachable
        if unreachable:
            connect_errors.append(
                (master, list(reachable), list(unreachable)))
    return connect_errors
def map_masters_to_suffixes(masters):
    """Group master entries by the topology suffixes they manage.

    Masters without managed-suffix information are skipped.

    :param masters: master entries
    :return: dict mapping suffix name to a list of master entries
    """
    managed_suffix_attr = 'iparepltopomanagedsuffix_topologysuffix'
    masters_to_suffix = {}
    for master in masters:
        if managed_suffix_attr not in master:
            continue
        managed_suffixes = master[managed_suffix_attr]
        if managed_suffixes is None:
            continue
        for suffix_name in managed_suffixes:
            masters_to_suffix.setdefault(suffix_name, []).append(master)
    return masters_to_suffix
def _create_topology_graphs(api_instance):
    """
    Construct a topology graph for each topology suffix.

    :param api_instance: instance of IPA API
    :return: dict mapping suffix name to its Graph
    """
    masters = api_instance.Command.server_find(
        u'', sizelimit=0, no_members=False)['result']
    suffix_to_masters = map_masters_to_suffixes(masters)
    return {
        suffix_name: create_topology_graph(
            suffix_masters,
            api_instance.Command.topologysegment_find(
                suffix_name, sizelimit=0).get('result')
        )
        for suffix_name, suffix_masters in suffix_to_masters.items()
    }
def _format_topology_errors(topo_errors):
    """Render topology connectivity errors as a human readable string."""
    msg_lines = []
    for server, _visited, not_visited in topo_errors:
        header = (
            _("Topology does not allow server %(server)s to replicate with "
              "servers:")
            % {'server': server}
        )
        msg_lines.append(header)
        msg_lines.extend(" %s" % srv for srv in not_visited)
    return "\n".join(msg_lines)
class TopologyConnectivity:
    """
    A simple class abstracting the replication connectivity in the
    managed topology.
    """
    def __init__(self, api_instance):
        # :param api_instance: instance of IPA API
        self.api = api_instance
        self.graphs = _create_topology_graphs(self.api)
    @property
    def errors(self):
        # connectivity errors per suffix; for the error format see
        # get_topology_connection_errors
        errors_by_suffix = {}
        for suffix in self.graphs:
            errors_by_suffix[suffix] = get_topology_connection_errors(
                self.graphs[suffix]
            )
        return errors_by_suffix
    def errors_after_master_removal(self, master_cn):
        # Simulate removal of master_cn from every suffix graph and
        # return the resulting errors; graphs are restored afterwards.
        graphs_before = deepcopy(self.graphs)
        for s in self.graphs:
            try:
                self.graphs[s].remove_vertex(master_cn)
            except ValueError:
                # master is not part of this suffix's graph
                pass
        errors_after_removal = self.errors
        self.graphs = graphs_before
        return errors_after_removal
    def check_current_state(self):
        # Raise ValueError when the current topology is disconnected
        # in any suffix.
        err_msg = ""
        for suffix, errors in self.errors.items():
            if errors:
                err_msg = "\n".join([
                    err_msg,
                    CURR_TOPOLOGY_DISCONNECTED % dict(
                        suffix=suffix,
                        errors=_format_topology_errors(errors)
                    )])
        if err_msg:
            raise ValueError(err_msg)
    def check_state_after_removal(self, master_cn):
        # Raise ValueError when removing master_cn would disconnect the
        # topology in any suffix.
        err_msg = ""
        errors_after_removal = self.errors_after_master_removal(master_cn)
        for suffix, errors in errors_after_removal.items():
            if errors:
                err_msg = "\n".join([
                    err_msg,
                    REMOVAL_DISCONNECTS_TOPOLOGY % dict(
                        hostname=master_cn,
                        suffix=suffix,
                        errors=_format_topology_errors(errors)
                    )
                ])
        if err_msg:
            raise ValueError(err_msg)
| 5,480
|
Python
|
.py
| 148
| 27.75
| 80
| 0.607947
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,694
|
dcerpc_common.py
|
freeipa_freeipa/ipaserver/dcerpc_common.py
|
import six
from ipalib import _
if six.PY3:
    # Py2 compatibility alias used by the trust_*_string helpers below
    unicode = str
# Both constants can be used as masks against trust direction
# because bi-directional has two lower bits set.
TRUST_ONEWAY = 1
TRUST_BIDIRECTIONAL = 3
# Trust join behavior
# External trust -- allow creating trust to a non-root domain in the forest
TRUST_JOIN_EXTERNAL = 1
# We don't want to import any of Samba Python code here just for constants
# Since these constants set in MS-ADTS, we can rely on their stability
LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE = 0x00000001
_trust_direction_dict = {
    1: _('Trusting forest'),
    2: _('Trusted forest'),
    3: _('Two-way trust')
}
_trust_status_dict = {
    True: _('Established and verified'),
    False: _('Waiting for confirmation by remote side')
}
_trust_type_dict_unknown = _('Unknown')
# Trust type is a combination of ipanttrusttype and ipanttrustattributes
# We shift trust attributes by 3 bits to left so bit 0 becomes bit 3 and
# 2+(1 << 3) becomes 10.
_trust_type_dict = {
    1: _('Non-Active Directory domain'),
    2: _('Active Directory domain'),
    3: _('RFC4120-compliant Kerberos realm'),
    10: _('Non-transitive external trust to a domain in '
          'another Active Directory forest'),
    11: _('Non-transitive external trust to an RFC4120-'
          'compliant Kerberos realm')
}
def trust_type_string(level, attrs):
    """
    Return a string representing the type of the trust.

    The original field is an enum:
        LSA_TRUST_TYPE_DOWNLEVEL = 0x00000001,
        LSA_TRUST_TYPE_UPLEVEL = 0x00000002,
        LSA_TRUST_TYPE_MIT = 0x00000003

    :param level: ipanttrusttype value
    :param attrs: ipanttrustattributes bitmask
    """
    # fold the non-transitive flag into bit 3 of the lookup key,
    # matching the key space of _trust_type_dict
    transitive = int(attrs) & LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE
    key = int(level) | (transitive << 3)
    return unicode(_trust_type_dict.get(key, _trust_type_dict_unknown))
def trust_direction_string(level):
    """
    Return a string representing the direction of the trust.

    The original field is a bitmask taking two bits in use
        LSA_TRUST_DIRECTION_INBOUND = 0x00000001,
        LSA_TRUST_DIRECTION_OUTBOUND = 0x00000002
    """
    label = _trust_direction_dict.get(int(level), _trust_type_dict_unknown)
    return unicode(label)
def trust_status_string(level):
    """Return a string representing the status of the trust."""
    label = _trust_status_dict.get(level, _trust_type_dict_unknown)
    return unicode(label)
| 2,378
|
Python
|
.py
| 60
| 34.483333
| 76
| 0.6859
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,695
|
kem.py
|
freeipa_freeipa/ipaserver/secrets/kem.py
|
# Copyright (C) 2015 IPA Project Contributors, see COPYING for license
from __future__ import print_function, absolute_import
import errno
import os
from configparser import ConfigParser
from ipaplatform.paths import paths
from ipapython.dn import DN
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, ec
# pylint: disable=relative-import
from ipaserver.custodia.message.kem import (
KEMKeysStore, KEY_USAGE_SIG, KEY_USAGE_ENC, KEY_USAGE_MAP
)
# pylint: enable=relative-import
from jwcrypto.common import json_decode, json_encode
from jwcrypto.common import base64url_encode
from jwcrypto.jwk import JWK
from ipaserver.secrets.common import iSecLdap
from binascii import unhexlify
import ldap
# LDAP location of the Custodia key container, relative to the base DN
IPA_REL_BASE_DN = 'cn=custodia,cn=ipa,cn=etc'
# filter templates used with self.build_filter in KEMLdap
IPA_KEYS_QUERY = '(&(ipaKeyUsage={usage:s})(memberPrincipal={princ:s}))'
IPA_CHECK_QUERY = '(cn=enc/{host:s})'
# map KEM key usages to the RFC 5280 keyUsage names stored in LDAP
RFC5280_USAGE_MAP = {KEY_USAGE_SIG: 'digitalSignature',
                     KEY_USAGE_ENC: 'dataEncipherment'}
class KEMLdap(iSecLdap):
    """LDAP backend for Custodia KEM public keys.

    Stores and retrieves per-host and per-service public keys beneath
    the 'cn=custodia' container.
    """
    @property
    def keysbase(self):
        # base DN of the Custodia key container
        return '%s,%s' % (IPA_REL_BASE_DN, self.basedn)
    def _encode_int(self, i):
        # base64url-encode the big-endian byte representation of i
        # NOTE(review): lstrip("0x") strips any leading '0'/'x'
        # characters, not just the "0x" prefix; hex() output only has an
        # extra leading '0' for i == 0 -- verify that case cannot occur
        I = hex(i).rstrip("L").lstrip("0x")
        return base64url_encode(unhexlify((len(I) % 2) * '0' + I))
    def _parse_public_key(self, ipa_public_key):
        # convert a DER-encoded public key from LDAP into JWK fields
        public_key = serialization.load_der_public_key(ipa_public_key,
                                                       default_backend())
        num = public_key.public_numbers()
        if isinstance(num, rsa.RSAPublicNumbers):
            return {'kty': 'RSA',
                    'e': self._encode_int(num.e),
                    'n': self._encode_int(num.n)}
        elif isinstance(num, ec.EllipticCurvePublicNumbers):
            if num.curve.name == 'secp256r1':
                curve = 'P-256'
            elif num.curve.name == 'secp384r1':
                curve = 'P-384'
            elif num.curve.name == 'secp521r1':
                curve = 'P-521'
            else:
                raise TypeError('Unsupported Elliptic Curve')
            return {'kty': 'EC',
                    'crv': curve,
                    'x': self._encode_int(num.x),
                    'y': self._encode_int(num.y)}
        else:
            raise TypeError('Unknown Public Key type')
    def get_key(self, usage, principal):
        """Fetch the public key of *principal* as a JSON-encoded JWK.

        :param usage: KEY_USAGE_SIG or KEY_USAGE_ENC
        :param principal: Kerberos principal owning the key
        :raises ValueError: if not exactly one key entry is found
        """
        conn = self.connect()
        scope = ldap.SCOPE_SUBTREE
        ldap_filter = self.build_filter(IPA_KEYS_QUERY,
                                        {'usage': RFC5280_USAGE_MAP[usage],
                                         'princ': principal})
        r = conn.search_s(self.keysbase, scope, ldap_filter)
        if len(r) != 1:
            raise ValueError("Incorrect number of results (%d) searching for "
                             "public key for %s" % (len(r), principal))
        ipa_public_key = r[0][1]['ipaPublicKey'][0]
        jwk = self._parse_public_key(ipa_public_key)
        jwk['use'] = KEY_USAGE_MAP[usage]
        return json_encode(jwk)
    def check_host_keys(self, host):
        """Verify that an encryption key entry exists for *host*.

        :raises ValueError: when no key entry is found
        """
        conn = self.connect()
        scope = ldap.SCOPE_SUBTREE
        ldap_filter = self.build_filter(IPA_CHECK_QUERY, {'host': host})
        r = conn.search_s(self.keysbase, scope, ldap_filter)
        if not r:
            raise ValueError("No public keys were found for %s" % host)
        return True
    def _format_public_key(self, key):
        # Accept a JWK JSON string or a cryptography public-key object
        # and return DER-encoded SubjectPublicKeyInfo bytes.
        if isinstance(key, str):
            jwkey = json_decode(key)
            if 'kty' not in jwkey:
                raise ValueError('Invalid key, missing "kty" attribute')
            if jwkey['kty'] == 'RSA':
                pubnum = rsa.RSAPublicNumbers(jwkey['e'], jwkey['n'])
                pubkey = pubnum.public_key(default_backend())
            elif jwkey['kty'] == 'EC':
                if jwkey['crv'] == 'P-256':
                    curve = ec.SECP256R1
                elif jwkey['crv'] == 'P-384':
                    curve = ec.SECP384R1
                elif jwkey['crv'] == 'P-521':
                    curve = ec.SECP521R1
                else:
                    raise TypeError('Unsupported Elliptic Curve')
                pubnum = ec.EllipticCurvePublicNumbers(
                    jwkey['x'], jwkey['y'], curve)
                pubkey = pubnum.public_key(default_backend())
            else:
                raise ValueError('Unknown key type: %s' % jwkey['kty'])
        elif isinstance(key, rsa.RSAPublicKey):
            pubkey = key
        elif isinstance(key, ec.EllipticCurvePublicKey):
            pubkey = key
        else:
            raise TypeError('Unknown key type: %s' % type(key))
        return pubkey.public_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PublicFormat.SubjectPublicKeyInfo)
    def _get_dn(self, usage, principal):
        # Host keys live directly below the container, service keys in a
        # per-service 'cn=<servicename>' sub-container.
        servicename, host = principal.split('@')[0].split('/')
        name = '%s/%s' % (KEY_USAGE_MAP[usage], host)
        service_rdn = ('cn', servicename) if servicename != 'host' else DN()
        return DN(('cn', name), service_rdn, self.keysbase)
    def set_key(self, usage, principal, key):
        """
        Write key for the host or service.
        Service keys are nested one level beneath the 'cn=custodia'
        container, in the 'cn=<servicename>' container; this allows
        fine-grained control over key management permissions for
        specific services.
        The container is assumed to exist.
        """
        public_key = self._format_public_key(key)
        dn = self._get_dn(usage, principal)
        conn = self.connect()
        try:
            mods = [('objectClass', [b'nsContainer',
                                     b'ipaKeyPolicy',
                                     b'ipaPublicKeyObject',
                                     b'groupOfPrincipals']),
                    ('cn', dn[0].value.encode('utf-8')),
                    ('ipaKeyUsage', RFC5280_USAGE_MAP[usage].encode('utf-8')),
                    ('memberPrincipal', principal.encode('utf-8')),
                    ('ipaPublicKey', public_key)]
            conn.add_s(str(dn), mods)
        except ldap.ALREADY_EXISTS:
            # entry exists -- just replace the stored public key
            mods = [(ldap.MOD_REPLACE, 'ipaPublicKey', public_key)]
            conn.modify_s(str(dn), mods)
    def del_key(self, usage, principal):
        """Delete key for host or service
        :returns: DN of removed key or None when key was not found
        """
        dn = self._get_dn(usage, principal)
        conn = self.connect()
        try:
            conn.delete_s(str(dn))
        except ldap.NO_SUCH_OBJECT:
            return None
        else:
            return dn
def newServerKeys(path, keyid):
    """Generate a fresh RSA signing/encryption key pair for the server.

    The exported key pair is written to *path* as a JSON array readable
    by root only; the corresponding public keys are returned.

    :param path: file to write the exported key pair to
    :param keyid: key identifier (kid) embedded in both JWKs
    :return: [signing public key, encryption public key]
    """
    sig_key = JWK(generate='RSA', use='sig', kid=keyid)
    enc_key = JWK(generate='RSA', use='enc', kid=keyid)
    with open(path, 'w') as keyfile:
        # restrict access to root before any key material hits the disk
        os.fchmod(keyfile.fileno(), 0o600)
        os.fchown(keyfile.fileno(), 0, 0)
        keyfile.write('[%s,%s]' % (sig_key.export(), enc_key.export()))
    return [sig_key.get_op_key('verify'), enc_key.get_op_key('encrypt')]
class IPAKEMKeys(KEMKeysStore):
    """A KEM Keys Store.

    This is a store that holds public keys of registered
    clients allowed to use KEM messages. It takes the form
    of an authorizer merely for the purpose of attaching
    itself to a 'request' so that later on the KEM Parser
    can fetch the appropriate key to verify/decrypt an
    incoming request and make the payload available.
    The KEM Parser will actually perform additional
    authorization checks in this case.

    SimplePathAuthz is extended here as we want to attach the
    store only to requests on paths we are configured to
    manage.
    """

    def __init__(self, config=None, ipaconf=paths.IPA_DEFAULT_CONF):
        super(IPAKEMKeys, self).__init__(config)
        conf = ConfigParser()
        self.host = None
        self.realm = None
        # BUG FIX: 'config' defaults to None, so an unconditional
        # config.get(...) raised AttributeError for IPAKEMKeys().
        self.ldap_uri = config.get('ldap_uri') if config else None
        if conf.read(ipaconf):
            self.host = conf.get('global', 'host')
            self.realm = conf.get('global', 'realm')
            if self.ldap_uri is None:
                self.ldap_uri = conf.get('global', 'ldap_uri', raw=True)
        self._server_keys = None

    def find_key(self, kid, usage):
        """Look up the public key for principal *kid* and *usage* in LDAP.

        :raises TypeError: if *kid* is None
        """
        if kid is None:
            raise TypeError('Key ID is None, should be a SPN')
        conn = KEMLdap(self.ldap_uri)
        return conn.get_key(usage, kid)

    def generate_server_keys(self):
        """Generate and publish keys for this host's 'host' service."""
        self.generate_keys('host')

    def generate_keys(self, servicename):
        """Generate new keys for *servicename* on this host.

        Private keys are written to the configured 'server_keys' file;
        the public halves are published in LDAP.
        """
        principal = '%s/%s@%s' % (servicename, self.host, self.realm)
        # Drop any cached keys so the next access re-reads from disk
        self._server_keys = None
        # Generate private key and store it
        pubkeys = newServerKeys(self.config['server_keys'], principal)
        # Store public key in LDAP
        ldapconn = KEMLdap(self.ldap_uri)
        ldapconn.set_key(KEY_USAGE_SIG, principal, pubkeys[0])
        ldapconn.set_key(KEY_USAGE_ENC, principal, pubkeys[1])

    def remove_server_keys_file(self):
        """Remove keys from disk.

        The method does not fail when the file is missing.

        :returns: True if a file was removed, False otherwise
        """
        try:
            os.unlink(self.config['server_keys'])
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            return False
        return True

    def remove_server_keys(self):
        """Remove this host's keys from LDAP and disk."""
        self.remove_keys('host')

    def remove_keys(self, servicename):
        """Remove keys for *servicename* from LDAP and disk."""
        self.remove_server_keys_file()
        principal = '%s/%s@%s' % (servicename, self.host, self.realm)
        if self.ldap_uri is not None:
            ldapconn = KEMLdap(self.ldap_uri)
            ldapconn.del_key(KEY_USAGE_SIG, principal)
            ldapconn.del_key(KEY_USAGE_ENC, principal)

    @property
    def server_keys(self):
        """(sig_key, enc_key) JWK pair, loaded lazily from disk."""
        if self._server_keys is None:
            with open(self.config['server_keys']) as f:
                dictkeys = json_decode(f.read())
            self._server_keys = (JWK(**dictkeys[KEY_USAGE_SIG]),
                                 JWK(**dictkeys[KEY_USAGE_ENC]))
        return self._server_keys
# Manual testing
if __name__ == '__main__':
    # Smoke test (requires a configured IPA server and root privileges):
    # generate fresh server keys on disk and in LDAP, print the public
    # halves, then fetch them back through find_key() to verify storage.
    IKK = IPAKEMKeys({'paths': '/',
                      'server_keys': '/etc/ipa/custodia/server.keys'})
    IKK.generate_server_keys()
    print(('SIG', IKK.server_keys[0].export_public()))
    print(('ENC', IKK.server_keys[1].export_public()))
    print(IKK.find_key('host/%s@%s' % (IKK.host, IKK.realm),
                       usage=KEY_USAGE_SIG))
    print(IKK.find_key('host/%s@%s' % (IKK.host, IKK.realm),
                       usage=KEY_USAGE_ENC))
| 10,836
|
Python
|
.py
| 247
| 33.230769
| 78
| 0.583065
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,696
|
client.py
|
freeipa_freeipa/ipaserver/secrets/client.py
|
# Copyright (C) 2015 IPA Project Contributors, see COPYING for license
from __future__ import print_function, absolute_import
import contextlib
import os
import secrets
from base64 import b64encode
# pylint: disable=relative-import
from ipaserver.custodia.message.kem import (
KEMClient, KEY_USAGE_SIG, KEY_USAGE_ENC
)
# pylint: enable=relative-import
from jwcrypto.common import json_decode
from jwcrypto.jwk import JWK
from ipalib.krb_utils import krb5_format_service_principal_name
from ipaserver.secrets.kem import IPAKEMKeys
from ipaserver.secrets.store import IPASecStore
from ipaplatform.paths import paths
import gssapi
import requests
@contextlib.contextmanager
def ccache_env(ccache):
    """Temporarily point the KRB5CCNAME environment variable at *ccache*.

    On exit the variable is removed again, or restored to its previous
    value if one was set before entering the context.
    """
    saved = os.environ.get('KRB5CCNAME')
    os.environ['KRB5CCNAME'] = ccache
    try:
        yield
    finally:
        os.environ.pop('KRB5CCNAME', None)
        if saved is not None:
            os.environ['KRB5CCNAME'] = saved
class CustodiaClient:
    """Client for fetching secrets from an IPA Custodia server.

    Requests are authenticated with GSSAPI (Negotiate) over HTTPS and
    the payload is protected end-to-end with KEM signed/encrypted
    messages.
    """

    def __init__(self, client_service, keyfile, keytab, server, realm,
                 ldap_uri=None, auth_type=None):
        # A GSS hostbased-service name, not a Kerberos principal.
        if client_service.endswith(realm) or "@" not in client_service:
            raise ValueError(
                "Client service name must be a GSS name (service@host), "
                "not '{}'.".format(client_service)
            )
        self.client_service = client_service
        self.keytab = keytab
        self.server = server
        self.realm = realm
        self.ldap_uri = ldap_uri
        self.auth_type = auth_type
        self.service_name = gssapi.Name(
            'HTTP@{}'.format(server), gssapi.NameType.hostbased_service
        )
        self.keystore = IPASecStore()

        # use in-process MEMORY ccache. Handler process don't need a TGT.
        self.ccache = 'MEMORY:Custodia_{}'.format(secrets.token_hex())
        with ccache_env(self.ccache):
            # Init creds immediately to make sure they are valid. Creds
            # can also be re-inited by _auth_header to avoid expiry.
            self.creds = self._init_creds()

        self.ikk = IPAKEMKeys(
            {'server_keys': keyfile, 'ldap_uri': ldap_uri}
        )
        self.kemcli = KEMClient(
            self._server_keys(), self._client_keys()
        )

    def _client_keys(self):
        # This client signs/decrypts with its own keys stored on disk.
        return self.ikk.server_keys

    def _server_keys(self):
        # Public (sig, enc) keys of the server's host principal from LDAP.
        principal = krb5_format_service_principal_name(
            'host', self.server, self.realm
        )
        sk = JWK(**json_decode(self.ikk.find_key(principal, KEY_USAGE_SIG)))
        ek = JWK(**json_decode(self.ikk.find_key(principal, KEY_USAGE_ENC)))
        return sk, ek

    def _init_creds(self):
        name = gssapi.Name(
            self.client_service, gssapi.NameType.hostbased_service
        )
        store = {
            'client_keytab': self.keytab,
            'ccache': self.ccache
        }
        return gssapi.Credentials(name=name, store=store, usage='initiate')

    def _auth_header(self):
        # Refresh credentials shortly before expiry (< 5 minutes left).
        if self.creds.lifetime < 300:
            self.creds = self._init_creds()
        ctx = gssapi.SecurityContext(
            name=self.service_name,
            creds=self.creds
        )
        authtok = ctx.step()
        return {'Authorization': 'Negotiate %s' % b64encode(
            authtok).decode('ascii')}

    def fetch_key(self, keyname, store=True):
        """Fetch the named key from the Custodia server.

        :param keyname: key path, e.g. 'ra/ipaCert'
        :param store: when True (default), store the value in the local
            key store and return None; otherwise return the raw value
        :raises RuntimeError: if the server reply is not a KEM message
        """
        # Prepare URL
        url = 'https://%s/ipa/keys/%s' % (self.server, keyname)

        # Prepare signed/encrypted request
        encalg = ('RSA-OAEP', 'A256CBC-HS512')
        request = self.kemcli.make_request(keyname, encalg=encalg)

        # Prepare Authentication header
        headers = self._auth_header()

        # Perform request
        # pylint: disable-next=missing-timeout
        r = requests.get(
            url, headers=headers,
            verify=paths.IPA_CA_CRT,
            params={'type': 'kem', 'value': request}
        )
        r.raise_for_status()
        reply = r.json()

        if 'type' not in reply or reply['type'] != 'kem':
            # fixed typo in the original message ("Invlid")
            raise RuntimeError('Invalid JSON response type')

        value = self.kemcli.parse_reply(keyname, reply['value'])
        if store:
            self.keystore.set('keys/%s' % keyname, value)
            return None
        return value
| 4,380
|
Python
|
.py
| 114
| 29.921053
| 76
| 0.623233
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,697
|
service.py
|
freeipa_freeipa/ipaserver/secrets/service.py
|
# Copyright (C) 2017 IPA Project Contributors, see COPYING for license
import argparse
import ipaserver.custodia.server # pylint: disable=relative-import
# Command-line interface for the ipa-custodia service wrapper.
argparser = argparse.ArgumentParser(
    prog='ipa-custodia',
    description='IPA Custodia service')
argparser.add_argument(
    '--debug',
    action='store_true',
    help='Debug mode')
# The config file is optional; the stock IPA location is used when the
# positional argument is omitted.
argparser.add_argument(
    'configfile',
    nargs='?',
    default='/etc/ipa/custodia/custodia.conf',
    type=argparse.FileType('r'),
    help="Path to IPA's custodia server config")
def main():
    """Run the Custodia server using the ipa-custodia argument parser."""
    return ipaserver.custodia.server.main(argparser)


if __name__ == '__main__':
    main()
| 644
|
Python
|
.py
| 23
| 24.608696
| 71
| 0.716612
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,698
|
store.py
|
freeipa_freeipa/ipaserver/secrets/store.py
|
# Copyright (C) 2015 IPA Project Contributors, see COPYING for license
from __future__ import print_function, absolute_import
import os
import sys
from ipaserver.custodia.plugin import CSStore
from ipaplatform.paths import paths
from ipaplatform.constants import constants
from ipapython import ipautil
class UnknownKeyName(Exception):
    """Raised when a key name does not map to any known store entry."""
class InvalidKeyArguments(Exception):
    """Raised when a key request carries unsupported extra arguments."""
class DBMAPHandler:
    """Base class for key-store backends.

    Subclasses declare their *dbtype* and implement export_key /
    import_key.  The constructor validates that the db map entry
    matches the handler's declared type.
    """

    dbtype = None
    supports_extra_args = False

    def __init__(self, config, dbmap, nickname):
        declared = dbmap.get('type')
        if declared is None or declared != self.dbtype:
            raise ValueError(
                "Invalid type '{}', expected '{}'".format(
                    declared, self.dbtype
                )
            )
        self.config = config
        self.dbmap = dbmap
        self.nickname = nickname

    def export_key(self):
        raise NotImplementedError

    def import_key(self, value):
        raise NotImplementedError
class DBMAPCommandHandler(DBMAPHandler):
    """Backend that delegates export/import to an external handler script."""

    def __init__(self, config, dbmap, nickname):
        super().__init__(config, dbmap, nickname)
        self.runas = dbmap.get('runas')
        # Handler executables live in the IPA Custodia handler directory.
        self.command = os.path.join(
            paths.IPA_CUSTODIA_HANDLER,
            dbmap['command']
        )

    def run_handler(self, extra_args=(), stdin=None):
        """Run handler script to export / import key material.

        With *stdin* the script runs in import mode and None is
        returned; otherwise it runs in export mode and the captured
        stdout is returned.
        """
        argv = [self.command, *extra_args]
        run_kwargs = {'runas': self.runas, 'encoding': 'utf-8'}
        if stdin:
            argv += ['--import', '-']
            run_kwargs['stdin'] = stdin
        else:
            argv += ['--export', '-']
            run_kwargs['capture_output'] = True
        result = ipautil.run(argv, **run_kwargs)
        # Import mode has no useful output to return.
        return None if stdin is not None else result.output
def log_error(error):
    """Report an error message on standard error."""
    print(error, file=sys.stderr)
class NSSWrappedCertDB(DBMAPCommandHandler):
    """
    Store that extracts private keys from an NSSDB, wrapped with the
    private key of the primary CA.
    """

    dbtype = 'NSSDB'
    supports_extra_args = True

    OID_DES_EDE3_CBC = '1.2.840.113549.3.7'

    def __init__(self, config, dbmap, nickname, *extra_args):
        super().__init__(config, dbmap, nickname)
        # The optional single extra argument is the OID of the wrap
        # algorithm requested by the client.  Old clients send nothing,
        # in which case DES-EDE3-CBC must be assumed.  The chosen OID
        # is handed to the handler via --algorithm, which in turn
        # passes it to 'pki ca-authority-key-export' (part of Dogtag).
        if len(extra_args) > 1:
            raise InvalidKeyArguments("Too many arguments")
        self.alg = extra_args[0] if extra_args else self.OID_DES_EDE3_CBC

    def export_key(self):
        return self.run_handler([
            '--nickname', self.nickname,
            '--algorithm', self.alg,
        ])
class NSSCertDB(DBMAPCommandHandler):
    """Export or import a key/cert from an NSS database by nickname."""

    dbtype = 'NSSDB'

    def export_key(self):
        return self.run_handler(['--nickname', self.nickname])

    def import_key(self, value):
        return self.run_handler(
            ['--nickname', self.nickname],
            stdin=value,
        )
class DMLDAP(DBMAPCommandHandler):
    """Exfiltrate the DM password hash so it can be set on replicas.

    This lets a replica be installed without knowing the DM password
    while still keeping the DM password synchronized across replicas.
    The only supported key name is 'DMHash'.
    """

    dbtype = 'DMLDAP'

    def __init__(self, config, dbmap, nickname):
        super().__init__(config, dbmap, nickname)
        if nickname != 'DMHash':
            raise UnknownKeyName("Unknown Key Named '%s'" % nickname)

    def export_key(self):
        return self.run_handler()

    def import_key(self, value):
        self.run_handler(stdin=value)
class PEMFileHandler(DBMAPCommandHandler):
    """Export or import PEM key material via the external handler."""

    dbtype = 'PEM'

    def export_key(self):
        return self.run_handler()

    def import_key(self, value):
        return self.run_handler(stdin=value)
# Registry mapping the database component of a key name
# ('keys/<db>/<nickname>') to the backend handler class and the
# external handler command it runs.
NAME_DB_MAP = {
    'ca': {
        'type': 'NSSDB',
        'handler': NSSCertDB,
        'command': 'ipa-custodia-pki-tomcat',
        'runas': constants.PKI_USER,
    },
    'ca_wrapped': {
        'type': 'NSSDB',
        'handler': NSSWrappedCertDB,
        'command': 'ipa-custodia-pki-tomcat-wrapped',
        'runas': constants.PKI_USER,
    },
    'ra': {
        'type': 'PEM',
        'handler': PEMFileHandler,
        'command': 'ipa-custodia-ra-agent',
        'runas': None,  # import needs root permission to write to directory
    },
    'dm': {
        'type': 'DMLDAP',
        'handler': DMLDAP,
        'command': 'ipa-custodia-dmldap',
        'runas': None,  # root
    }
}
class IPASecStore(CSStore):
    """Custodia CSStore backed by the NAME_DB_MAP handler registry."""

    def __init__(self, config=None):
        self.config = config

    def _get_handler(self, key):
        """Resolve 'keys/<db>/<nickname>[/<extra>]' to a handler instance."""
        parts = key.split('/', 3)
        if len(parts) < 3 or parts[0] != 'keys':
            raise ValueError('Invalid name')
        if parts[1] not in NAME_DB_MAP:
            raise UnknownKeyName("Unknown DB named '%s'" % parts[1])
        dbmap = NAME_DB_MAP[parts[1]]
        handler_cls = dbmap['handler']
        if len(parts) > 3 and not handler_cls.supports_extra_args:
            raise InvalidKeyArguments('Handler does not support extra args')
        return handler_cls(self.config, dbmap, parts[2], *parts[3:])

    def get(self, key):
        """Export the key material for *key*; None on any error."""
        try:
            return self._get_handler(key).export_key()
        except Exception as e:  # pylint: disable=broad-except
            # Best effort: failures are logged, never propagated.
            log_error('Error retrieving key "%s": %s' % (key, str(e)))
            return None

    def set(self, key, value, replace=False):
        """Import *value* as the key material for *key* (best effort)."""
        try:
            self._get_handler(key).import_key(value)
        except Exception as e:  # pylint: disable=broad-except
            log_error('Error storing key "%s": %s' % (key, str(e)))

    def list(self, keyfilter=None):
        raise NotImplementedError

    def cut(self, key):
        raise NotImplementedError

    def span(self, key):
        raise NotImplementedError
# Alias kept for backwards compatibility with FreeIPA 4.3 and 4.4.
iSecStore = IPASecStore
| 6,541
|
Python
|
.py
| 181
| 28.110497
| 76
| 0.610178
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,699
|
common.py
|
freeipa_freeipa/ipaserver/secrets/common.py
|
# Copyright (C) 2015 IPA Project Contributors, see COPYING for license
from __future__ import print_function
import ldap
import ldap.sasl
import ldap.filter
from ipapython.ipaldap import ldap_initialize
class iSecLdap:
    """Thin LDAP helper performing EXTERNAL or GSSAPI SASL binds.

    Unless *auth_type* is given explicitly, ldapi:// URIs default to
    EXTERNAL and everything else to GSSAPI.
    """

    def __init__(self, uri, auth_type=None):
        self.uri = uri
        if auth_type is not None:
            self.auth_type = auth_type
        elif uri.startswith('ldapi'):
            self.auth_type = 'EXTERNAL'
        else:
            self.auth_type = 'GSSAPI'
        self._basedn = None

    @property
    def basedn(self):
        """Default naming context, looked up lazily from the root DSE."""
        if self._basedn is None:
            res = self.connect().search_s('', ldap.SCOPE_BASE)
            self._basedn = (
                res[0][1]['defaultnamingcontext'][0].decode('utf-8'))
        return self._basedn

    def connect(self):
        """Open a connection and perform the SASL interactive bind."""
        conn = ldap_initialize(self.uri)
        if self.auth_type == 'EXTERNAL':
            tokens = ldap.sasl.external(None)
        elif self.auth_type == 'GSSAPI':
            tokens = ldap.sasl.sasl({}, 'GSSAPI')
        else:
            raise ValueError(
                'Invalid authentication type: %s' % self.auth_type)
        conn.sasl_interactive_bind_s('', tokens)
        return conn

    def build_filter(self, formatstr, args):
        """Format *formatstr* with LDAP-filter-escaped values from *args*."""
        escaped = {key: ldap.filter.escape_filter_chars(value)
                   for key, value in args.items()}
        return formatstr.format(**escaped)
| 1,486
|
Python
|
.py
| 40
| 28.275
| 77
| 0.597637
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|