Dataset schema (one record per source file):
id: int64 (0 to 458k) | file_name: string, length 4 to 119 | file_path: string, length 14 to 227 | content: string, length 24 to 9.96M | size: int64 (24 to 9.96M) | language: 1 distinct value | extension: 14 distinct values | total_lines: int64 (1 to 219k) | avg_line_length: float64 (2.52 to 4.63M) | max_line_length: int64 (5 to 9.91M) | alphanum_fraction: float64 (0 to 1) | repo_name: string, length 7 to 101 | repo_stars: int64 (100 to 139k) | repo_forks: int64 (0 to 26.4k) | repo_open_issues: int64 (0 to 2.27k) | repo_license: 12 distinct values | repo_extraction_date: 433 distinct values
id: 16,700 | file_name: pemfile.py | file_path: freeipa_freeipa/ipaserver/secrets/handlers/pemfile.py
#
# Copyright (C) 2019 IPA Project Contributors, see COPYING for license
#
"""Export / import PEM cert and key file as PKCS#12 data
"""
import base64
import json
import os
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipapython import ipautil
from . import common
def export_key(args, tmpdir):
"""Export cert and private from PEM files as PKCS#12 file.
The PKCS#12 file is encrypted with a password.
"""
pk12file = os.path.join(tmpdir, 'export.p12')
password = ipautil.ipa_generate_password()
pk12pwfile = os.path.join(tmpdir, 'passwd')
with open(pk12pwfile, 'w') as f:
f.write(password)
# OpenSSL does not support pkcs12 export of a cert without key
cmd = [
paths.OPENSSL, 'pkcs12', '-export',
'-in', args.certfile,
'-out', pk12file,
'-inkey', args.keyfile,
'-password', 'file:{pk12pwfile}'.format(pk12pwfile=pk12pwfile),
'-keypbe', 'AES-256-CBC',
'-certpbe', 'AES-256-CBC',
'-macalg', 'sha384',
]
fips_enabled = tasks.is_fips_enabled()
if fips_enabled:
cmd.append('-nomac')
ipautil.run(cmd)
with open(pk12file, 'rb') as f:
p12data = f.read()
data = {
'export password': password,
'pkcs12 data': p12data,
}
common.json_dump(data, args.exportfile)
def import_key(args, tmpdir):
"""Export key and certificate from a PKCS#12 file to key and cert files.
"""
data = json.load(args.importfile)
password = data['export password']
p12data = base64.b64decode(data['pkcs12 data'])
pk12pwfile = os.path.join(tmpdir, 'passwd')
with open(pk12pwfile, 'w') as f:
f.write(password)
pk12file = os.path.join(tmpdir, 'import.p12')
with open(pk12file, 'wb') as f:
f.write(p12data)
# get the certificate from the file
cmd = [
paths.OPENSSL, 'pkcs12',
'-in', pk12file,
'-clcerts', '-nokeys',
'-out', args.certfile,
'-password', 'file:{pk12pwfile}'.format(pk12pwfile=pk12pwfile),
]
fips_enabled = tasks.is_fips_enabled()
if fips_enabled:
cmd.append('-nomacver')
ipautil.run(cmd, umask=0o027)
# get the private key from the file
cmd = [
paths.OPENSSL, 'pkcs12',
'-in', pk12file,
'-nocerts', '-nodes',
'-out', args.keyfile,
'-password', 'file:{pk12pwfile}'.format(pk12pwfile=pk12pwfile),
]
if fips_enabled:
cmd.append('-nomacver')
ipautil.run(cmd, umask=0o027)
def default_parser():
parser = common.mkparser(
description='ipa-custodia PEM file handler'
)
parser.add_argument(
'--certfile',
help='path to PEM encoded cert file',
required=True
)
parser.add_argument(
        '--keyfile',
help='path to PEM encoded key file',
required=True
)
return parser
def ra_agent_parser():
parser = common.mkparser(
description='ipa-custodia RA agent cert handler'
)
parser.set_defaults(
certfile=paths.RA_AGENT_PEM,
keyfile=paths.RA_AGENT_KEY
)
return parser
def main(parser=None):
if parser is None:
parser = default_parser()
common.main(parser, export_key, import_key)
if __name__ == '__main__':
main()
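The export payload pairs the generated password with the raw PKCS#12 bytes; the bytes become base64 text via common.default_json (the helper appears further down in this dump). A minimal sketch of consuming such an export, assuming a file written via --export export.json (file names are illustrative):

import base64
import json

# Hypothetical consumer of the payload produced by export_key above.
with open('export.json') as f:  # path is an assumption
    data = json.load(f)
with open('restore.p12', 'wb') as f:
    # undo the base64 encoding applied by common.default_json
    f.write(base64.b64decode(data['pkcs12 data']))
print('PKCS#12 password:', data['export password'])
# The PEM pair can then be recovered with, e.g.:
#   openssl pkcs12 -in restore.p12 -nodes -password pass:<that password>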
size: 3,331 | language: Python | extension: .py | total_lines: 108 | avg_line_length: 24.833333 | max_line_length: 76 | alphanum_fraction: 0.631497 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,701 | file_name: nsswrappedcert.py | file_path: freeipa_freeipa/ipaserver/secrets/handlers/nsswrappedcert.py
#
# Copyright (C) 2019 IPA Project Contributors, see COPYING for license
#
"""Export and wrap key from NSS DB
"""
import os
from ipaplatform.paths import paths
from ipapython import ipautil
from ipapython.certdb import NSSDatabase
from . import common
def export_key(args, tmpdir):
"""Export key and certificate from the NSS DB
The private key is encrypted using key wrapping.
"""
wrapped_key_file = os.path.join(tmpdir, 'wrapped_key')
certificate_file = os.path.join(tmpdir, 'certificate')
ipautil.run([
paths.PKI,
'-d', args.nssdb_path,
'-C', args.nssdb_pwdfile,
'ca-authority-key-export',
'--wrap-nickname', args.wrap_nickname,
'--target-nickname', args.nickname,
'--algorithm', args.algorithm,
'-o', wrapped_key_file
])
nssdb = NSSDatabase(args.nssdb_path)
nssdb.run_certutil([
'-L',
'-n', args.nickname,
'-a',
'-o', certificate_file,
])
with open(wrapped_key_file, 'rb') as f:
wrapped_key = f.read()
with open(certificate_file, 'r') as f:
certificate = f.read()
data = {
'wrapped_key': wrapped_key,
'certificate': certificate
}
common.json_dump(data, args.exportfile)
def default_parser():
"""Generic interface
"""
parser = common.mkparser(
supports_import=False,
description='ipa-custodia NSS wrapped cert handler',
)
parser.add_argument(
'--nssdb',
dest='nssdb_path',
help='path to NSS DB',
required=True
)
parser.add_argument(
'--pwdfile',
dest='nssdb_pwdfile',
help='path to password file for NSS DB',
required=True
)
parser.add_argument(
'--wrap-nickname',
dest='wrap_nickname',
help='nick name of wrapping key',
required=True
)
parser.add_argument(
'--nickname',
dest='nickname',
help='nick name of target key',
required=True
)
return parser
def pki_tomcat_parser():
"""Hard-code Dogtag's NSS DB, its password file, and CA key for wrapping
"""
parser = common.mkparser(
supports_import=False,
description='ipa-custodia pki-tomcat NSS wrapped cert handler',
)
parser.add_argument(
'--nickname',
dest='nickname',
help='nick name of target key',
required=True
)
# Caller must specify a cipher. This gets passed on to
# the 'pki ca-authority-key-export' command (part of
# Dogtag) via its own --algorithm option.
parser.add_argument(
'--algorithm',
dest='algorithm',
help='OID of symmetric wrap algorithm',
required=True
)
parser.set_defaults(
nssdb_path=paths.PKI_TOMCAT_ALIAS_DIR,
nssdb_pwdfile=paths.PKI_TOMCAT_ALIAS_PWDFILE_TXT,
wrap_nickname='caSigningCert cert-pki-ca',
)
return parser
def main(parser=None):
if parser is None:
parser = default_parser()
common.main(parser, export_key, None)
if __name__ == '__main__':
main()
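For reference, the export document written above has exactly two fields; the wrapped key bytes are base64-encoded on the way out by common.default_json, while the certificate is already ASCII PEM. A sketch of the shape (values are placeholders):

# Shape of the JSON written by export_key above; values are placeholders.
{
    'certificate': '-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n',
    'wrapped_key': 'base64 of the symmetrically wrapped private key',
}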
size: 3,123 | language: Python | extension: .py | total_lines: 108 | avg_line_length: 22.527778 | max_line_length: 76 | alphanum_fraction: 0.619286 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,702 | file_name: dmldap.py | file_path: freeipa_freeipa/ipaserver/secrets/handlers/dmldap.py
#
# Copyright (C) 2019 IPA Project Contributors, see COPYING for license
#
"""Export / import Directory Manager password hash
"""
import json
import os
from ipalib import api
from ipalib import errors
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython.ipaldap import LDAPClient, realm_to_ldapi_uri
from . import common
CN_CONFIG = DN(('cn', 'config'))
ROOTPW = 'nsslapd-rootpw'
def export_key(args, tmpdir, conn):
entry = conn.get_entry(CN_CONFIG, [ROOTPW])
data = {
'dmhash': entry.single_value[ROOTPW],
}
common.json_dump(data, args.exportfile)
def import_key(args, tmpdir, conn):
data = json.load(args.importfile)
dmhash = data['dmhash'].encode('ascii')
entry = conn.get_entry(CN_CONFIG, [ROOTPW])
entry.single_value[ROOTPW] = dmhash
try:
conn.update_entry(entry)
except errors.EmptyModlist:
pass
def main():
parser = common.mkparser(
description='ipa-custodia LDAP DM hash handler'
)
if os.getegid() != 0:
parser.error("Must be run as root user.\n")
# create LDAP connection using LDAPI and EXTERNAL bind as root
if not api.isdone('bootstrap'):
api.bootstrap(confdir=paths.ETC_IPA, log=None)
realm = api.env.realm
ldap_uri = realm_to_ldapi_uri(realm)
conn = LDAPClient(ldap_uri=ldap_uri, no_schema=True)
try:
conn.external_bind()
except Exception as e:
parser.error("Failed to connect to {}: {}\n".format(ldap_uri, e))
with conn:
common.main(parser, export_key, import_key, conn=conn)
if __name__ == '__main__':
main()
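The LDAPI URI is derived from the realm name: realm_to_ldapi_uri percent-encodes the 389-ds socket path, which itself comes from ipaplatform.paths and is platform-specific. An illustration only, assuming the Fedora/RHEL socket location:

# Assumed Fedora/RHEL layout; the exact socket path is platform-defined.
# realm_to_ldapi_uri('EXAMPLE.COM')
# -> 'ldapi://%2Fvar%2Frun%2Fslapd-EXAMPLE-COM.socket'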
size: 1,625 | language: Python | extension: .py | total_lines: 50 | avg_line_length: 28.04 | max_line_length: 73 | alphanum_fraction: 0.685019 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,703 | file_name: nsscert.py | file_path: freeipa_freeipa/ipaserver/secrets/handlers/nsscert.py
#
# Copyright (C) 2019 IPA Project Contributors, see COPYING for license
#
"""Export / import cert and key from NSS DB as PKCS#12 data
"""
import base64
import json
import os
from ipaplatform.paths import paths
from ipapython import ipautil
from ipapython.certdb import NSSDatabase
from . import common
def export_key(args, tmpdir):
"""Export key and certificate from the NSS DB to a PKCS#12 file.
The PKCS#12 file is encrypted with a password.
"""
pk12file = os.path.join(tmpdir, 'export.p12')
password = ipautil.ipa_generate_password()
    pk12pwfile = os.path.join(tmpdir, 'passwd')
    with open(pk12pwfile, 'w') as f:
f.write(password)
nssdb = NSSDatabase(args.nssdb_path)
nssdb.run_pk12util([
"-o", pk12file,
"-n", args.nickname,
"-k", args.nssdb_pwdfile,
"-w", pk12pk12pwfile,
])
with open(pk12file, 'rb') as f:
p12data = f.read()
data = {
'export password': password,
'pkcs12 data': p12data,
}
common.json_dump(data, args.exportfile)
def import_key(args, tmpdir):
"""Import key and certificate from a PKCS#12 file to a NSS DB.
"""
data = json.load(args.importfile)
password = data['export password']
p12data = base64.b64decode(data['pkcs12 data'])
pk12pwfile = os.path.join(tmpdir, 'passwd')
with open(pk12pwfile, 'w') as f:
f.write(password)
pk12file = os.path.join(tmpdir, 'import.p12')
with open(pk12file, 'wb') as f:
f.write(p12data)
nssdb = NSSDatabase(args.nssdb_path)
nssdb.run_pk12util([
"-i", pk12file,
"-n", args.nickname,
"-k", args.nssdb_pwdfile,
"-w", pk12pwfile,
])
def default_parser():
"""Generic interface
"""
parser = common.mkparser(
description='ipa-custodia NSS cert handler'
)
parser.add_argument(
'--nssdb',
dest='nssdb_path',
help='path to NSS DB',
required=True
)
parser.add_argument(
'--pwdfile',
dest='nssdb_pwdfile',
help='path to password file for NSS DB',
required=True
)
parser.add_argument(
'--nickname',
help='nick name of certificate',
required=True
)
return parser
def pki_tomcat_parser():
"""Hard-code Dogtag's NSSDB and its password file
"""
parser = common.mkparser(
description='ipa-custodia pki-tomcat NSS cert handler'
)
parser.add_argument(
'--nickname',
help='nick name of certificate',
required=True
)
parser.set_defaults(
nssdb_path=paths.PKI_TOMCAT_ALIAS_DIR,
nssdb_pwdfile=paths.PKI_TOMCAT_ALIAS_PWDFILE_TXT,
)
return parser
def main(parser=None):
if parser is None:
parser = default_parser()
common.main(parser, export_key, import_key)
if __name__ == '__main__':
main()
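A hypothetical command-line round trip through the generic parser above; module invocation works because of the __main__ guard, while the NSS DB paths, password files, and nickname are assumptions:

# Export from one NSS DB, then import into another:
#   python -m ipaserver.secrets.handlers.nsscert \
#       --nssdb /path/to/srcdb --pwdfile /path/to/srcdb/pwdfile.txt \
#       --nickname Server-Cert --export /tmp/cert.json
#   python -m ipaserver.secrets.handlers.nsscert \
#       --nssdb /path/to/dstdb --pwdfile /path/to/dstdb/pwdfile.txt \
#       --nickname Server-Cert --import /tmp/cert.json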
size: 2,909 | language: Python | extension: .py | total_lines: 100 | avg_line_length: 23.27 | max_line_length: 71 | alphanum_fraction: 0.634733 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,704 | file_name: common.py | file_path: freeipa_freeipa/ipaserver/secrets/handlers/common.py
#
# Copyright (C) 2019 IPA Project Contributors, see COPYING for license
#
"""Common helpers for handlers
"""
import argparse
import base64
import json
import shutil
import tempfile
def default_json(obj):
"""JSON encoder default handler
"""
if isinstance(obj, (bytes, bytearray)):
return base64.b64encode(obj).decode('ascii')
raise TypeError(
"Object of type {} is not JSON serializable".format(type(obj))
)
def json_dump(data, exportfile):
"""Dump JSON to file
"""
json.dump(
data,
exportfile,
default=default_json,
separators=(',', ':'),
sort_keys=True
)
def mkparser(supports_import=True, **kwargs):
"""Create default parser for handler with export / import args
All commands support export to file or stdout. Most commands can also
import from a file or stdin. Export and import are mutually exclusive
options.
"""
parser = argparse.ArgumentParser(**kwargs)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--export',
help='JSON export file ("-" for stdout)',
dest='exportfile',
type=argparse.FileType('w')
)
if supports_import:
group.add_argument(
'--import',
help='JSON import file ("-" for stdin)',
dest='importfile',
type=argparse.FileType('r')
)
return parser
def main(parser, export_func, import_func=None, **kwargs):
"""Common main function for handlers
"""
args = parser.parse_args()
if args.exportfile is not None:
func = export_func
else:
func = import_func
tmpdir = tempfile.mkdtemp()
try:
func(args, tmpdir, **kwargs)
finally:
shutil.rmtree(tmpdir)
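A self-contained sketch of what json_dump does with bytes values: the default hook base64-encodes them, everything else serializes normally. sys.stdout stands in for the --export file handle:

import base64
import json
import sys

def demo_default(obj):
    # same behaviour as default_json above
    if isinstance(obj, (bytes, bytearray)):
        return base64.b64encode(obj).decode('ascii')
    raise TypeError(
        "Object of type {} is not JSON serializable".format(type(obj))
    )

data = {'export password': 'hunter2', 'pkcs12 data': b'\x30\x82\x01\x00'}
json.dump(data, sys.stdout, default=demo_default,
          separators=(',', ':'), sort_keys=True)
# prints: {"export password":"hunter2","pkcs12 data":"MIIBAA=="}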
size: 1,805 | language: Python | extension: .py | total_lines: 63 | avg_line_length: 22.761905 | max_line_length: 73 | alphanum_fraction: 0.640462 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,705 | file_name: root.py | file_path: freeipa_freeipa/ipaserver/custodia/root.py
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import json
from ipaserver.custodia.plugin import HTTPConsumer, PluginOption
from ipaserver.custodia.secrets import Secrets
class Root(HTTPConsumer):
store = PluginOption('store', None, None)
def __init__(self, config, section):
super(Root, self).__init__(config, section)
if self.store_name is not None:
self.add_sub('secrets', Secrets(config, section))
def GET(self, request, response):
msg = json.dumps({'message': "Quis custodiet ipsos custodes?"})
return msg.encode('utf-8')
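When this consumer is mounted at the server root, any GET yields the fixed JSON greeting. A hypothetical probe over a Unix socket (the socket path is an assumption; see server.py further down):

# curl --unix-socket /run/custodia.sock http://localhost/
# -> {"message": "Quis custodiet ipsos custodes?"}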
size: 652 | language: Python | extension: .py | total_lines: 14 | avg_line_length: 41.142857 | max_line_length: 71 | alphanum_fraction: 0.708861 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,706 | file_name: plugin.py | file_path: freeipa_freeipa/ipaserver/custodia/plugin.py
# Copyright (C) 2016 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import abc
import configparser
import grp
import inspect
import json
import pwd
import re
from jwcrypto.common import json_encode
import six
from .log import CustodiaLoggingAdapter, auditlog, getLogger
logger = getLogger(__name__)
class _Required:
__slots__ = ()
def __repr__(self):
return 'REQUIRED'
class INHERIT_GLOBAL: # noqa: N801
__slots__ = ('default',)
def __init__(self, default):
self.default = default
def __repr__(self):
return 'INHERIT_GLOBAL({})'.format(self.default)
REQUIRED = _Required()
class CustodiaException(Exception):
pass
class HTTPError(CustodiaException):
def __init__(self, code=None, message=None):
self.code = code if code is not None else 500
self.mesg = message
errstring = '%d: %s' % (self.code, self.mesg)
super(HTTPError, self).__init__(errstring)
class CSStoreError(CustodiaException):
pass
class CSStoreExists(CustodiaException):
pass
class CSStoreUnsupported(CustodiaException):
pass
class CSStoreDenied(CustodiaException):
pass
class OptionHandler:
"""Handler and parser for plugin options
"""
def __init__(self, parser, section):
self.parser = parser
self.section = section
# handler is reserved to look up the plugin class
self.seen = {'handler'}
def get(self, po):
"""Lookup value for a PluginOption instance
Args:
po: PluginOption
Returns: converted value
"""
name = po.name
typ = po.typ
default = po.default
handler = getattr(self, '_get_{}'.format(typ), None)
if handler is None:
raise ValueError(typ)
self.seen.add(name)
if not self.parser.has_option(self.section, name):
if default is REQUIRED:
raise NameError(self.section, name)
if isinstance(default, INHERIT_GLOBAL):
return handler('global', name, default.default)
# don't return default here, give the handler a chance to modify
# the default, e.g. pw_uid with default='root' returns 0.
return handler(self.section, name, default)
def check_surplus(self):
surplus = []
for name, _value in self.parser.items(self.section):
if (name not in self.seen and not
self.parser.has_option(configparser.DEFAULTSECT, name)):
surplus.append(name)
return surplus
def _get_int(self, section, name, default):
return self.parser.getint(section, name, fallback=default)
def _get_oct(self, section, name, default):
value = self.parser.get(section, name, fallback=default)
return int(value, 8)
def _get_hex(self, section, name, default):
value = self.parser.get(section, name, fallback=default)
return int(value, 16)
def _get_float(self, section, name, default):
return self.parser.getfloat(section, name, fallback=default)
def _get_bool(self, section, name, default):
return self.parser.getboolean(section, name, fallback=default)
def _get_regex(self, section, name, default):
value = self.parser.get(section, name, fallback=default)
if not value:
return None
else:
return re.compile(value)
def _get_str(self, section, name, default):
return self.parser.get(section, name, fallback=default)
def _split_string(self, value):
if ',' in value:
values = value.split(',')
else:
values = value.split(' ')
return list(v.strip() for v in values if v.strip())
def _get_str_set(self, section, name, default):
try:
value = self.parser.get(section, name)
except configparser.NoOptionError:
return default
if not value or not value.strip():
return None
else:
return set(self._split_string(value))
def _get_str_list(self, section, name, default):
try:
value = self.parser.get(section, name)
except configparser.NoOptionError:
return default
if not value or not value.strip():
return None
else:
return self._split_string(value)
def _get_store(self, section, name, default):
return self.parser.get(section, name, fallback=default)
def _get_pwd_uid(self, section, name, default):
value = self.parser.get(section, name, fallback=default)
try:
return int(value)
except ValueError:
return pwd.getpwnam(value).pw_uid
def _get_grp_gid(self, section, name, default):
value = self.parser.get(section, name, fallback=default)
try:
return int(value)
except ValueError:
return grp.getgrnam(value).gr_gid
def _get_json(self, section, name, default):
value = self.parser.get(section, name, fallback=default)
return json.loads(value)
class PluginOption:
"""Plugin option
code::
class MyPlugin(CustodiaPlugin):
number = PluginOption(int, REQUIRED, 'my value')
values = PluginOption('str_list', 'foo bar', 'a list of strings')
config::
[myplugin]
handler = MyPlugin
number = 1
values = egg spam python
**Supported value types**
*str*
plain string
*str_set*
set of comma-separated or space-separated strings
*str_list*
ordered list of comma-separated or space-separated strings
*int*
number (converted from base 10)
*hex*
number (converted from base 16)
*oct*
number (converted from base 8)
*float*
floating point number
*bool*
boolean (true: on, true, yes, 1; false: off, false, no, 0)
*regex*
regular expression string
*store*
        special value to refer to a store plugin
*pwd_uid*
numeric user id or user name
*grp_gid*
numeric group id or group name
*json*
JSON string
"""
__slots__ = ('name', 'typ', 'default', 'doc')
def __init__(self, typ, default, doc):
self.name = None
if typ in {str, int, float, bool, oct, hex}:
self.typ = typ.__name__
else:
self.typ = typ
self.default = default
self.doc = doc
def __repr__(self):
if self.default is REQUIRED:
msg = "<Required option {0.name} ({0.typ}): {0.doc}>"
else:
msg = ("<Option {0.name} ({0.typ}, default: '{0.default}'): "
"{0.doc}>")
return msg.format(self)
class CustodiaPluginMeta(abc.ABCMeta):
def __new__(cls, name, bases, namespace, **kwargs):
ncls = super(CustodiaPluginMeta, cls).__new__(
cls, name, bases, namespace, **kwargs)
sig = inspect.signature(ncls.__init__)
args = list(sig.parameters)
if args[1:3] != ['config', 'section']:
# old-style plugin class
ncls._options = None
return ncls
# new-style plugin class
# every plugin has a debug option. In case it is not set, the debug
# flag from [global] is inherited.
if not hasattr(ncls, 'debug'):
ncls.debug = PluginOption(bool, INHERIT_GLOBAL(False), '')
# get options
options = []
for name, value in inspect.getmembers(ncls):
if not isinstance(value, PluginOption):
continue
value.name = name
options.append(value)
ncls._options = tuple(options)
return ncls
@six.add_metaclass(CustodiaPluginMeta)
class CustodiaPlugin:
"""Abstract base class for all Custodia plugins
"""
_options = ()
def __init__(self, config, section=None):
origin, debug = self._configure(config, section)
self._auditlog = auditlog
self.section = section # plugin loader sets section for old plugins
self.origin = origin
self.logger = CustodiaLoggingAdapter(self, debug)
def audit_key_access(self, *args, **kwargs):
self._auditlog.key_access(self.origin, *args, **kwargs)
def audit_svc_access(self, *args, **kwargs):
self._auditlog.svc_access(self.origin, *args, **kwargs)
def _configure(self, config, section):
if section is not None and self._options is not None:
# new style configuration
opt = OptionHandler(config, section)
for option in self._options:
value = opt.get(option)
# special case for store
if option.typ == 'store':
if option.name != 'store':
raise ValueError(option.name)
self.store_name = value
self.store = None
else:
setattr(self, option.name, value)
surplus = opt.check_surplus()
if surplus:
raise ValueError('Surplus options in {}: {}'.format(
section, surplus))
origin = '%s-[%s]' % (type(self).__name__, section)
debug = self.debug # pylint: disable=no-member
else:
# old style configuration
if config is None:
config = {}
self.config = config
# special case for store
if 'store' in config:
self.store_name = self.config.get('store')
self.store = None
origin = config.get('facility_name', self.__class__.__name__)
debug = config.get('debug', 'false').lower() == 'true'
return origin, debug
def _attach_store(self, config, cfgparser, context):
"""Attach nested store
"""
if getattr(self, 'store', None) is not None:
# already attached
return
store_plugin = config['stores'].get(self.store_name)
if store_plugin is None:
raise ValueError(
"'{}' references non-existing store '{}'".format(
self.section, self.store_name))
self.store = store_plugin
store_plugin.finalize_init(config, cfgparser, context=self)
def finalize_init(self, config, cfgparser, context=None):
"""Two-phase initialization
Args:
config: server config dictionary
cfgparser: configparser instance
context: initialization context (None for global)
"""
if getattr(self, 'store_name', None) is not None:
self._attach_store(config, cfgparser, context)
class CSStore(CustodiaPlugin):
"""Base class for stores
"""
@abc.abstractmethod
def get(self, key):
pass
@abc.abstractmethod
def set(self, key, value, replace=False):
pass
# relax ABC for now, see https://github.com/latchset/custodia/issues/84
# @abc.abstractmethod
def span(self, key):
raise NotImplementedError
# @abc.abstractmethod
def list(self, keyfilter=None):
raise NotImplementedError
# @abc.abstractmethod
def cut(self, key):
raise NotImplementedError
class HTTPAuthorizer(CustodiaPlugin):
"""Base class for authorizers
"""
@abc.abstractmethod
def handle(self, request):
pass
class HTTPAuthenticator(CustodiaPlugin):
"""Base class for authenticators
"""
@abc.abstractmethod
def handle(self, request):
pass
DEFAULT_CTYPE = 'text/html; charset=utf-8'
SUPPORTED_COMMANDS = ['GET', 'PUT', 'POST', 'DELETE']
class HTTPConsumer(CustodiaPlugin):
"""Base class for consumers
"""
def __init__(self, config, section=None):
super(HTTPConsumer, self).__init__(config, section)
self.subs = dict()
self.root = self
def add_sub(self, name, sub):
self.subs[name] = sub
if hasattr(sub, 'root'):
sub.root = self.root
def _find_handler(self, request):
base = self
command = request.get('command', 'GET')
if command not in SUPPORTED_COMMANDS:
raise HTTPError(501)
trail = request.get('trail', None)
if trail is not None:
for comp in trail:
subs = getattr(base, 'subs', {})
if comp in subs:
base = subs[comp]
trail.pop(0)
else:
break
handler = getattr(base, command)
if handler is None:
raise HTTPError(400)
return handler
def handle(self, request):
handler = self._find_handler(request)
response = {'headers': dict()}
# Handle request
output = handler(request, response)
if output is None:
output = response.get('output')
ct = response['headers'].get('Content-Type')
if ct is None:
ct = response['headers']['Content-Type'] = DEFAULT_CTYPE
if 'application/json' in ct and isinstance(output, (dict, list)):
output = json_encode(output).encode('utf-8')
response['headers']['Content-Length'] = str(len(output))
response['output'] = output
if output is not None and not hasattr(output, 'read') \
and not isinstance(output, six.binary_type):
msg = "Handler {} returned unsupported type {} ({}):\n{!r}"
raise TypeError(msg.format(handler, type(output), ct, output))
if output is not None and 'Content-Length' not in response['headers']:
if hasattr(output, 'read'):
# LOG: warning file-type objects should set Content-Length
pass
else:
response['headers']['Content-Length'] = str(len(output))
return response
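A minimal sketch of a new-style plugin driven by the machinery above, runnable if appended to this module; the class name, section name, and option values are made up:

import configparser

class EchoStore(CSStore):
    # typed options resolved by OptionHandler when the plugin is constructed
    path = PluginOption(str, REQUIRED, 'backing file path')
    mode = PluginOption('oct', '600', 'file mode')

    def get(self, key):
        return None

    def set(self, key, value, replace=False):
        pass

cfg = configparser.ConfigParser()
cfg.read_string("""
[global]
debug = true

[store:echo]
handler = EchoStore
path = /tmp/echo.db
""")
store = EchoStore(cfg, 'store:echo')
print(store.path, oct(store.mode))  # /tmp/echo.db 0o600; debug inherited from [global]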
size: 14,004 | language: Python | extension: .py | total_lines: 372 | avg_line_length: 28.505376 | max_line_length: 78 | alphanum_fraction: 0.598004 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,707 | file_name: secrets.py | file_path: freeipa_freeipa/ipaserver/custodia/secrets.py
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import json
import os
from base64 import b64decode, b64encode
from ipaserver.custodia import log
from ipaserver.custodia.message.common import UnallowedMessage
from ipaserver.custodia.message.common import UnknownMessageType
from ipaserver.custodia.message.formats import Validator
from ipaserver.custodia.plugin import (
CSStoreDenied, CSStoreError, CSStoreExists, CSStoreUnsupported
)
from ipaserver.custodia.plugin import HTTPConsumer, HTTPError, PluginOption
class Secrets(HTTPConsumer):
allowed_keytypes = PluginOption('str_set', 'simple', None)
store = PluginOption('store', None, None)
def __init__(self, config, section):
super(Secrets, self).__init__(config, section)
self._validator = Validator(self.allowed_keytypes)
def _db_key(self, trail):
if len(trail) < 2:
self.logger.debug(
"Forbidden action: Operation only permitted within a "
"container")
raise HTTPError(403)
return os.path.join('keys', *trail)
def _db_container_key(self, default, trail):
f = None
if len(trail) > 1:
f = self._db_key(trail)
elif len(trail) == 1 and trail[0] != '':
self.logger.debug(
"Forbidden action: Wrong container path. Container names must "
"end with '/'")
raise HTTPError(403)
elif default is None:
self.logger.debug("Forbidden action: No default namespace")
raise HTTPError(403)
else:
# Use the default namespace
f = self._db_key([default, ''])
return f
def _parse(self, request, query, name):
return self._validator.parse(request, query, name)
def _parse_query(self, request, name):
# default to simple
query = request.get('query', '')
if len(query) == 0:
query = {'type': 'simple', 'value': ''}
return self._parse(request, query, name)
def _parse_bin_body(self, request, name):
body = request.get('body')
if body is None:
raise HTTPError(400)
value = b64encode(bytes(body)).decode('utf-8')
payload = {'type': 'simple', 'value': value}
return self._parse(request, payload, name)
def _parse_body(self, request, name):
body = request.get('body')
if body is None:
raise HTTPError(400)
value = json.loads(bytes(body).decode('utf-8'))
return self._parse(request, value, name)
def _parse_maybe_body(self, request, name):
body = request.get('body')
if body is None:
value = {'type': 'simple', 'value': ''}
else:
value = json.loads(bytes(body).decode('utf-8'))
return self._parse(request, value, name)
def _parent_exists(self, default, trail):
# check that the containers exist
basename = self._db_container_key(trail[0], trail[:-1] + [''])
try:
keylist = self.root.store.list(basename)
except CSStoreError:
raise HTTPError(500)
self.logger.debug('parent_exists: %s (%s, %r) -> %r',
basename, default, trail, keylist)
if keylist is not None:
return True
# create default namespace if it is the only missing piece
if len(trail) == 2 and default == trail[0]:
container = self._db_container_key(default, '')
self.root.store.span(container)
return True
return False
def _format_reply(self, request, response, handler, output):
reply = handler.reply(output)
# special case to allow *very* simple clients
if handler.msg_type == 'simple':
binary = False
accept = request.get('headers', {}).get('Accept', None)
if accept is not None:
types = accept.split(',')
for t in types:
if t.strip() == 'application/json':
binary = False
elif t.strip() == 'application/octet-stream':
binary = True
if binary:
response['headers'][
'Content-Type'] = 'application/octet-stream'
response['output'] = b64decode(reply['value'])
return
if reply is not None:
response['headers'][
'Content-Type'] = 'application/json; charset=utf-8'
response['output'] = reply
def GET(self, request, response):
trail = request.get('trail', [])
if len(trail) == 0 or trail[-1] == '':
self._list(trail, request, response)
else:
self._get_key(trail, request, response)
def PUT(self, request, response):
trail = request.get('trail', [])
if len(trail) == 0 or trail[-1] == '':
raise HTTPError(405)
else:
self._set_key(trail, request, response)
def DELETE(self, request, response):
trail = request.get('trail', [])
if len(trail) == 0:
raise HTTPError(405)
if trail[-1] == '':
self._destroy(trail, request, response)
else:
self._del_key(trail, request, response)
def POST(self, request, response):
trail = request.get('trail', [])
if len(trail) > 0 and trail[-1] == '':
self._create(trail, request, response)
else:
raise HTTPError(405)
def _list(self, trail, request, response):
try:
name = '/'.join(trail)
msg = self._parse_query(request, name)
except Exception as e:
raise HTTPError(406, str(e))
default = request.get('default_namespace', None)
basename = self._db_container_key(default, trail)
try:
keylist = self.root.store.list(basename)
self.logger.debug('list %s returned %r', basename, keylist)
if keylist is None:
raise HTTPError(404)
response['headers'][
'Content-Type'] = 'application/json; charset=utf-8'
response['output'] = msg.reply(keylist)
except CSStoreDenied:
self.logger.exception(
"List: Permission to perform this operation was denied")
raise HTTPError(403)
except CSStoreError:
self.logger.exception('List: Internal server error')
raise HTTPError(500)
except CSStoreUnsupported:
self.logger.exception('List: Unsupported operation')
raise HTTPError(501)
def _create(self, trail, request, response):
try:
name = '/'.join(trail)
msg = self._parse_maybe_body(request, name)
except Exception as e:
raise HTTPError(406, str(e))
default = request.get('default_namespace', None)
basename = self._db_container_key(None, trail)
try:
if len(trail) > 2:
ok = self._parent_exists(default, trail[:-1])
if not ok:
raise HTTPError(404)
self.root.store.span(basename)
except CSStoreDenied:
self.logger.exception(
"Create: Permission to perform this operation was denied")
raise HTTPError(403)
except CSStoreExists:
self.logger.debug('Create: Key already exists')
response['code'] = 200
return
except CSStoreError:
self.logger.exception('Create: Internal server error')
raise HTTPError(500)
except CSStoreUnsupported:
self.logger.exception('Create: Unsupported operation')
raise HTTPError(501)
output = msg.reply(None)
if output is not None:
response['headers'][
'Content-Type'] = 'application/json; charset=utf-8'
response['output'] = output
response['code'] = 201
def _destroy(self, trail, request, response):
try:
name = '/'.join(trail)
msg = self._parse_maybe_body(request, name)
except Exception as e:
raise HTTPError(406, str(e))
basename = self._db_container_key(None, trail)
try:
keylist = self.root.store.list(basename)
if keylist is None:
raise HTTPError(404)
if len(keylist) != 0:
raise HTTPError(409)
ret = self.root.store.cut(basename.rstrip('/'))
except CSStoreDenied:
self.logger.exception(
"Delete: Permission to perform this operation was denied")
raise HTTPError(403)
except CSStoreError:
self.logger.exception('Delete: Internal server error')
raise HTTPError(500)
except CSStoreUnsupported:
self.logger.exception('Delete: Unsupported operation')
raise HTTPError(501)
if ret is False:
raise HTTPError(404)
output = msg.reply(None)
if output is None:
response['code'] = 204
else:
response['headers'][
'Content-Type'] = 'application/json; charset=utf-8'
response['output'] = output
response['code'] = 200
def _client_name(self, request):
if 'remote_user' in request:
return request['remote_user']
elif 'creds' in request:
creds = request['creds']
return '<pid={pid:d} uid={uid:d} gid={gid:d}>'.format(**creds)
else:
return 'Unknown'
def _audit(self, ok, fail, fn, trail, request, response):
action = fail
client = self._client_name(request)
key = '/'.join(trail)
try:
fn(trail, request, response)
action = ok
finally:
self.audit_key_access(action, client, key)
def _get_key(self, trail, request, response):
self._audit(log.AUDIT_GET_ALLOWED, log.AUDIT_GET_DENIED,
self._int_get_key, trail, request, response)
def _int_get_key(self, trail, request, response):
try:
name = '/'.join(trail)
handler = self._parse_query(request, name)
except Exception as e:
raise HTTPError(406, str(e))
key = self._db_key(trail)
try:
output = self.root.store.get(key)
if output is None:
raise HTTPError(404)
elif len(output) == 0:
raise HTTPError(406)
self._format_reply(request, response, handler, output)
except CSStoreDenied:
self.logger.exception(
"Get: Permission to perform this operation was denied")
raise HTTPError(403)
except CSStoreError:
self.logger.exception('Get: Internal server error')
raise HTTPError(500)
except CSStoreUnsupported:
self.logger.exception('Get: Unsupported operation')
raise HTTPError(501)
def _set_key(self, trail, request, response):
self._audit(log.AUDIT_SET_ALLOWED, log.AUDIT_SET_DENIED,
self._int_set_key, trail, request, response)
def _int_set_key(self, trail, request, response):
try:
name = '/'.join(trail)
content_type = request.get('headers', {}).get('Content-Type', '')
content_type_value = content_type.split(';')[0].strip()
if content_type_value == 'application/octet-stream':
msg = self._parse_bin_body(request, name)
elif content_type_value == 'application/json':
msg = self._parse_body(request, name)
else:
raise ValueError('Invalid Content-Type')
except UnknownMessageType as e:
raise HTTPError(406, str(e))
except UnallowedMessage as e:
raise HTTPError(406, str(e))
except Exception as e:
raise HTTPError(400, str(e))
        # must call _db_key first, as access control is done there for now;
        # otherwise users would be able to probe containers in namespaces
        # they do not have access to.
key = self._db_key(trail)
try:
default = request.get('default_namespace', None)
ok = self._parent_exists(default, trail)
if not ok:
raise HTTPError(404)
ok = self.root.store.set(key, msg.payload)
except CSStoreDenied:
self.logger.exception(
"Set: Permission to perform this operation was denied")
raise HTTPError(403)
except CSStoreExists:
            self.logger.exception('Set: Key already exists')
raise HTTPError(409)
except CSStoreError:
self.logger.exception('Set: Internal Server Error')
raise HTTPError(500)
except CSStoreUnsupported:
self.logger.exception('Set: Unsupported operation')
raise HTTPError(501)
output = msg.reply(None)
if output is not None:
response['headers'][
'Content-Type'] = 'application/json; charset=utf-8'
response['output'] = output
response['code'] = 201
def _del_key(self, trail, request, response):
self._audit(log.AUDIT_DEL_ALLOWED, log.AUDIT_DEL_DENIED,
self._int_del_key, trail, request, response)
def _int_del_key(self, trail, request, response):
try:
name = '/'.join(trail)
msg = self._parse_maybe_body(request, name)
except Exception as e:
raise HTTPError(406, str(e))
key = self._db_key(trail)
try:
ret = self.root.store.cut(key)
except CSStoreDenied:
self.logger.exception(
"Delete: Permission to perform this operation was denied")
raise HTTPError(403)
except CSStoreError:
self.logger.exception('Delete: Internal Server Error')
raise HTTPError(500)
except CSStoreUnsupported:
self.logger.exception('Delete: Unsupported operation')
raise HTTPError(501)
if ret is False:
raise HTTPError(404)
output = msg.reply(None)
if output is None:
response['code'] = 204
else:
response['headers'][
'Content-Type'] = 'application/json; charset=utf-8'
response['output'] = output
response['code'] = 200
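The four verb handlers above implement a small REST convention keyed on whether the path ends in '/' (a container) or in a key name. A sketch of the routing, with illustrative paths:

# GET    .../container/     -> _list      (trail ends in '')
# GET    .../container/key  -> _get_key
# PUT    .../container/key  -> _set_key   (PUT on a container -> 405)
# POST   .../container/     -> _create    (POST on a key -> 405)
# DELETE .../container/     -> _destroy   (non-empty container -> 409)
# DELETE .../container/key  -> _del_key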
size: 14,709 | language: Python | extension: .py | total_lines: 354 | avg_line_length: 30.338983 | max_line_length: 79 | alphanum_fraction: 0.573655 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,708 | file_name: log.py | file_path: freeipa_freeipa/ipaserver/custodia/log.py
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import logging
import sys
import warnings
import six
LOGGING_FORMAT = "%(asctime)s - %(origin)-32s - %(message)s"
LOGGING_DATEFORMAT = "%Y-%m-%d %H:%M:%S"
class OriginContextFilter(logging.Filter):
"""Context filter to include 'origin' attribute in record
"""
def filter(self, record):
if not hasattr(record, 'origin'):
record.origin = record.name.split('.')[-1]
return True
class CustodiaFormatter(logging.Formatter):
def format(self, record):
# customize record.exc_text, Formatter.format() does not modify
# exc_text when it has been set before.
short_exc = False
if record.exc_info and not record.exc_text:
if getattr(record, "exc_fullstack", True):
record.exc_text = self.formatException(record.exc_info)
else:
short_exc = True
record.exc_text = u"{0.__name__}: {1}".format(
record.exc_info[0], record.exc_info[1]
)
result = super(CustodiaFormatter, self).format(record)
if short_exc:
# format() adds \n between message and exc_text
text, exc = result.rsplit(u'\n', 1)
return u"{0} ({1})".format(text, exc)
else:
return result
class CustodiaLoggingAdapter(logging.LoggerAdapter):
def __init__(self, plugin, debug):
logger = logging.getLogger(
'{0.__class__.__module__}.{0.__class__.__name__}'.format(plugin)
)
logger.setLevel(logging.DEBUG if debug else logging.INFO)
extra = {'origin': plugin.origin}
super(CustodiaLoggingAdapter, self).__init__(logger, extra=extra)
def exception(self, msg, *args, **kwargs):
"""Like standard exception() logger but only print stack in debug mode
"""
extra = kwargs.setdefault('extra', {})
extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG)
kwargs['exc_info'] = True
self.log(logging.ERROR, msg, *args, **kwargs)
def getLogger(name):
"""Create logger with custom exception() method
"""
def exception(self, msg, *args, **kwargs):
extra = kwargs.setdefault('extra', {})
extra['exc_fullstack'] = self.isEnabledFor(logging.DEBUG)
kwargs['exc_info'] = True
self.log(logging.ERROR, msg, *args, **kwargs)
logger = logging.getLogger(name)
logger.exception = six.create_bound_method(exception, logger)
return logger
def setup_logging(debug=False, auditfile=None, handler=None):
root_logger = logging.getLogger()
# default is stream handler to stderr
if handler is None:
handler = logging.StreamHandler(sys.stderr)
# remove handler instance from root handler to prevent multiple
# output handlers.
handler_cls = type(handler)
root_logger.handlers[:] = list(
h for h in root_logger.handlers if not isinstance(h, handler_cls)
)
# configure handler
handler.setFormatter(CustodiaFormatter(
fmt=LOGGING_FORMAT, datefmt=LOGGING_DATEFORMAT
))
handler.addFilter(OriginContextFilter())
root_logger.addHandler(handler)
# set logging level
custodia_logger = getLogger('custodia')
if debug:
custodia_logger.setLevel(logging.DEBUG)
custodia_logger.debug('Custodia debug logger enabled')
# If the global debug is enabled, turn debug on in all 'custodia.*'
# loggers
logdict = logging.Logger.manager.loggerDict
for name, obj in logdict.items():
if not isinstance(obj, logging.Logger):
continue
if name.startswith('custodia.'):
obj.setLevel(logging.DEBUG)
else:
custodia_logger.setLevel(logging.INFO)
# setup file handler for audit log
audit_logger = logging.getLogger('custodia.audit')
if auditfile is not None and len(audit_logger.handlers) == 0:
audit_fmt = logging.Formatter(LOGGING_FORMAT, LOGGING_DATEFORMAT)
audit_hdrl = logging.FileHandler(auditfile)
audit_hdrl.setFormatter(audit_fmt)
audit_logger.addHandler(audit_hdrl)
custodia_logger.debug('Custodia audit log: %s', auditfile)
AUDIT_NONE = 0
AUDIT_GET_ALLOWED = 1
AUDIT_GET_DENIED = 2
AUDIT_SET_ALLOWED = 3
AUDIT_SET_DENIED = 4
AUDIT_DEL_ALLOWED = 5
AUDIT_DEL_DENIED = 6
AUDIT_LAST = 7
AUDIT_SVC_NONE = 8
AUDIT_SVC_AUTH_PASS = 9
AUDIT_SVC_AUTH_FAIL = 10
AUDIT_SVC_AUTHZ_PASS = 11
AUDIT_SVC_AUTHZ_FAIL = 12
AUDIT_SVC_LAST = 13
AUDIT_MESSAGES = [
"AUDIT FAILURE",
"ALLOWED: '%(client)s' requested key '%(key)s'", # AUDIT_GET_ALLOWED
"DENIED: '%(client)s' requested key '%(key)s'", # AUDIT_GET_DENIED
"ALLOWED: '%(client)s' stored key '%(key)s'", # AUDIT_SET_ALLOWED
"DENIED: '%(client)s' stored key '%(key)s'", # AUDIT_SET_DENIED
"ALLOWED: '%(client)s' deleted key '%(key)s'", # AUDIT_DEL_ALLOWED
"DENIED: '%(client)s' deleted key '%(key)s'", # AUDIT_DEL_DENIED
"AUDIT FAILURE 7",
"AUDIT FAILURE 8",
"PASS: '%(cli)s' authenticated as '%(name)s'", # SVC_AUTH_PASS
"FAIL: '%(cli)s' authenticated as '%(name)s'", # SVC_AUTH_FAIL
"PASS: '%(cli)s' authorized for '%(name)s'", # SVC_AUTHZ_PASS
"FAIL: '%(cli)s' authorized for '%(name)s'", # SVC_AUTHZ_FAIL
"AUDIT FAILURE 13",
]
class AuditLog:
def __init__(self, logger):
self.logger = logger
def key_access(self, origin, action, client, keyname):
if action <= AUDIT_NONE or action >= AUDIT_LAST:
action = AUDIT_NONE
msg = AUDIT_MESSAGES[action]
args = {'client': client, 'key': keyname}
self.logger.info(msg, args, extra={'origin': origin})
def svc_access(self, origin, action, client, name):
if action <= AUDIT_SVC_NONE or action >= AUDIT_SVC_LAST:
action = AUDIT_NONE
msg = AUDIT_MESSAGES[action]
args = {'cli': client, 'name': name}
self.logger.info(msg, args, extra={'origin': origin})
auditlog = AuditLog(logging.getLogger('custodia.audit'))
class ProvisionalWarning(FutureWarning):
pass
def warn_provisional(modulename, stacklevel=3):
msg = ("Module '{}' is a provisional API. It may changed or get "
"removed in future releases.")
return warnings.warn(msg.format(modulename), ProvisionalWarning,
stacklevel=stacklevel)
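A runnable sketch of the short-traceback behaviour: with debug disabled, the patched exception() logs only the exception type and message, which CustodiaFormatter appends in parentheses (the timestamp below is illustrative):

setup_logging(debug=False)
demo_logger = getLogger('custodia.demo')
try:
    1 / 0
except ZeroDivisionError:
    demo_logger.exception('division failed')
# stderr: 2024-09-05 17:12:14 - demo ... - division failed (ZeroDivisionError: division by zero)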
size: 6,513 | language: Python | extension: .py | total_lines: 154 | avg_line_length: 35.292208 | max_line_length: 78 | alphanum_fraction: 0.643682 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,709 | file_name: authorizers.py | file_path: freeipa_freeipa/ipaserver/custodia/httpd/authorizers.py
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import os
from ipaserver.custodia import log
from ipaserver.custodia.plugin import HTTPAuthorizer
class SimplePathAuthz(HTTPAuthorizer):
# keep SimplePathAuthz an old-style plugin for now.
# KEMKeysStore and IPAKEMKeys haven't been ported.
def __init__(self, config):
super(SimplePathAuthz, self).__init__(config)
self.paths = []
if 'paths' in self.config:
self.paths = self.config['paths'].split()
def handle(self, request):
reqpath = path = request.get('path', '')
# if an authorized path does not end in /
# check if it matches fullpath for strict match
for authz in self.paths:
if authz.endswith('/'):
continue
if authz.endswith('.'):
# special case to match a path ending in /
authz = authz[:-1]
if authz == path:
self.audit_svc_access(log.AUDIT_SVC_AUTHZ_PASS,
request['client_id'], path)
return True
while path != '':
if path in self.paths:
self.audit_svc_access(log.AUDIT_SVC_AUTHZ_PASS,
request['client_id'], path)
return True
if path == '/':
path = ''
else:
path, _head = os.path.split(path)
self.logger.debug('No path in %s matched %s', self.paths, reqpath)
return None
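A runnable sketch of the path walk above, using the old-style dict configuration this class still expects; the paths and client id are made up:

authz = SimplePathAuthz({'paths': '/secrets /healthz'})
# '/secrets/ns/key' is walked up one component at a time until '/secrets' matches
print(authz.handle({'path': '/secrets/ns/key', 'client_id': 'demo'}))  # True
# nothing configured matches '/other', so the authorizer abstains
print(authz.handle({'path': '/other', 'client_id': 'demo'}))  # None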
size: 1,600 | language: Python | extension: .py | total_lines: 38 | avg_line_length: 30.342105 | max_line_length: 74 | alphanum_fraction: 0.561494 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,710 | file_name: authenticators.py | file_path: freeipa_freeipa/ipaserver/custodia/httpd/authenticators.py
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
from ipaserver.custodia import log
from ipaserver.custodia.plugin import HTTPAuthenticator, PluginOption
class SimpleCredsAuth(HTTPAuthenticator):
uid = PluginOption('pwd_uid', -1, "User id or name, -1 ignores user")
gid = PluginOption('grp_gid', -1, "Group id or name, -1 ignores group")
def handle(self, request):
creds = request.get('creds')
if creds is None:
self.logger.debug('SCA: Missing "creds" from request')
return False
uid = int(creds['uid'])
gid = int(creds['gid'])
uid_match = self.uid != -1 and self.uid == uid
gid_match = self.gid != -1 and self.gid == gid
if uid_match or gid_match:
self.audit_svc_access(log.AUDIT_SVC_AUTH_PASS,
request['client_id'],
"%d, %d" % (uid, gid))
return True
else:
self.audit_svc_access(log.AUDIT_SVC_AUTH_FAIL,
request['client_id'],
"%d, %d" % (uid, gid))
return False
class SimpleHeaderAuth(HTTPAuthenticator):
header = PluginOption(str, 'REMOTE_USER', "header name")
value = PluginOption('str_set', None,
"Comma-separated list of required values")
def handle(self, request):
if self.header not in request['headers']:
self.logger.debug('SHA: No "headers" in request')
return None
value = request['headers'][self.header]
if self.value is not None:
if value not in self.value:
self.audit_svc_access(log.AUDIT_SVC_AUTH_FAIL,
request['client_id'], value)
return False
self.audit_svc_access(log.AUDIT_SVC_AUTH_PASS,
request['client_id'], value)
request['remote_user'] = value
return True
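A runnable sketch of SimpleCredsAuth with a new-style ini section: only the gid is pinned, while the uid keeps its -1 default and is ignored (section name and creds are made up; 'creds' normally comes from SO_PEERCRED, see server.py below):

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("[auth:creds]\nhandler = SimpleCredsAuth\ngid = 0\n")
auth = SimpleCredsAuth(cfg, 'auth:creds')
# gid 0 matches the configured group, so the peer is accepted
print(auth.handle({'creds': {'uid': 48, 'gid': 0}, 'client_id': 'demo'}))   # True
print(auth.handle({'creds': {'uid': 48, 'gid': 48}, 'client_id': 'demo'}))  # False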
size: 2,053 | language: Python | extension: .py | total_lines: 44 | avg_line_length: 34 | max_line_length: 75 | alphanum_fraction: 0.563718 | repo: freeipa/freeipa (975 stars, 339 forks, 31 open issues) | license: GPL-3.0 | extracted: 9/5/2024, 5:12:14 PM (Europe/Amsterdam)

id: 16,711 | file_name: server.py | file_path: freeipa_freeipa/ipaserver/custodia/httpd/server.py
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import atexit
import errno
import os
import shutil
import socket
import struct
import sys
import warnings
from http.server import BaseHTTPRequestHandler
from socketserver import ForkingTCPServer, BaseServer
from urllib.parse import parse_qs, unquote, urlparse
import six
from ipaserver.custodia import log
from ipaserver.custodia.plugin import HTTPError
try:
from systemd import daemon as sd
except ImportError:
sd = None
if 'NOTIFY_SOCKET' in os.environ:
warnings.warn(
"NOTIFY_SOCKET env var is set but python-systemd bindings are "
"not available!",
category=RuntimeWarning
)
if 'LISTEN_FDS' in os.environ:
warnings.warn(
"LISTEN_FDS env var is set, but python-systemd bindings are"
"not available!",
category=RuntimeWarning
)
logger = log.getLogger(__name__)
SO_PEERCRED = getattr(socket, 'SO_PEERCRED', 17)
SO_PEERSEC = getattr(socket, 'SO_PEERSEC', 31)
SELINUX_CONTEXT_LEN = 256
MAX_REQUEST_SIZE = 10 * 1024 * 1024 # For now limit body to 10MiB
class ForkingHTTPServer(ForkingTCPServer):
"""
A forking HTTP Server.
Each request runs into a forked server so that the whole environment
is clean and isolated, and parallel requests cannot unintentionally
influence one another.
When a request is received it is parsed by the handler_class provided
at server initialization.
"""
server_string = "Custodia/0.1"
allow_reuse_address = True
socket_file = None
def __init__(self, server_address, handler_class, config,
bind_and_activate=True):
# pylint: disable=non-parent-init-called
# Init just BaseServer, TCPServer creates a socket.
BaseServer.__init__(self, server_address, handler_class)
if isinstance(server_address, socket.socket):
            # It's a bound and activated socket from systemd.
self.socket = server_address
bind_and_activate = False
else:
self.socket = socket.socket(self.address_family,
self.socket_type)
# copied from TCPServer
if bind_and_activate:
try:
self.server_bind()
self.server_activate()
except BaseException:
self.server_close()
raise
if self.socket.family == socket.AF_UNIX:
self.socket_file = self.socket.getsockname()
if 'consumers' not in config:
raise ValueError('Configuration does not provide any consumer')
self.config = config
if 'server_string' in self.config:
self.server_string = self.config['server_string']
self.auditlog = log.auditlog
class ForkingUnixHTTPServer(ForkingHTTPServer):
address_family = socket.AF_UNIX
def server_bind(self):
self.unlink()
# Remove on exit
atexit.register(self.unlink)
basedir = os.path.dirname(self.server_address)
if not os.path.isdir(basedir):
os.makedirs(basedir, mode=0o755)
ForkingHTTPServer.server_bind(self)
os.chmod(self.server_address, 0o666)
def unlink(self):
try:
os.unlink(self.server_address)
except OSError:
pass
class HTTPRequestHandler(BaseHTTPRequestHandler):
"""
This request handler is a slight modification of BaseHTTPRequestHandler
where the per-request handler is replaced.
When a request comes in it is parsed and the 'request' dictionary is
populated accordingly. Additionally a 'creds' structure is added to the
request.
The 'creds' structure contains the data retrieved via a call to
getsockopt with the SO_PEERCRED option. This retrieves via kernel assist
the uid,gid and pid of the process on the other side of the unix socket
on which the request has been made. This can be used for authentication
and/or authorization purposes.
The 'creds' structure is further augmented with a 'context' option
containing the Selinux Context string for the calling process, if
available.
    After the request is parsed the server's pipeline() function is invoked
    in order to handle it. The pipeline() should return a response object,
    where the return 'code', the 'output' and 'headers' may be found.
If no 'code' is present the request is assumed to be successful and a
'200 OK' status code will be sent back to the client.
The 'output' parameter can be a string or a file like object.
    The 'headers' object must be a dictionary where keys are header names.
    By default we assume HTTP/1.0.
"""
protocol_version = "HTTP/1.0"
def __init__(self, request, client_address, server):
self.requestline = ''
self.request_version = ''
self.command = ''
self.raw_requestline = None
self.close_connection = 0
self.path = None # quoted, raw path
self.path_chain = None # tuple of unquoted segments
self.query = None
self.url = None
self.body = None
self.loginuid = None
self._creds = False
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def version_string(self):
return self.server.server_string
def _get_loginuid(self, pid):
loginuid = None
# NOTE: Using proc to find the login uid is not reliable
# this is why login uid is fetched separately and not stored
# into 'creds', to avoid giving the false impression it can be
# used to perform access control decisions
try:
with open("/proc/%i/loginuid" % pid, "r") as f:
loginuid = int(f.read())
except IOError as e:
if e.errno != errno.ENOENT:
raise
if loginuid == -1:
loginuid = None
return loginuid
@property
def peer_creds(self):
if self._creds is not False:
return self._creds
# works only for unix sockets
if self.request.family != socket.AF_UNIX:
self._creds = None
return self._creds
# pid_t: signed int32, uid_t/gid_t: unsigned int32
fmt = 'iII'
creds = self.request.getsockopt(socket.SOL_SOCKET, SO_PEERCRED,
struct.calcsize(fmt))
pid, uid, gid = struct.unpack(fmt, creds)
try:
creds = self.request.getsockopt(socket.SOL_SOCKET, SO_PEERSEC,
SELINUX_CONTEXT_LEN)
context = creds.rstrip(b'\x00').decode('utf-8')
except Exception: # pylint: disable=broad-except
logger.debug("Couldn't retrieve SELinux Context", exc_info=True)
context = None
self._creds = {'pid': pid, 'uid': uid, 'gid': gid, 'context': context}
return self._creds
@property
def peer_info(self):
if self.peer_creds is not None:
return self._creds['pid']
elif self.request.family in {socket.AF_INET, socket.AF_INET6}:
return self.request.getpeername()
return None
@property
def peer_cert(self):
if not hasattr(self.request, 'getpeercert'):
return None
return self.request.getpeercert()
def parse_request(self):
if not BaseHTTPRequestHandler.parse_request(self):
return False
# grab the loginuid from `/proc` as soon as possible
creds = self.peer_creds
if creds is not None:
self.loginuid = self._get_loginuid(creds['pid'])
# after basic parsing also use urlparse to retrieve individual
# elements of a request.
url = urlparse(self.path)
# Yes, override path with the path part only
self.path = url.path
self.path_chain = self._parse_path(url)
# Create dict out of query
self.query = parse_qs(url.query)
        # keep the rest in the 'url' element in case someone needs it
self.url = url
return True
def _parse_path(self, url):
path_chain = []
for segment in url.path.split('/'):
# unquote URL path encoding
segment = unquote(segment)
path_chain.append(segment)
return tuple(path_chain)
def parse_body(self):
length = int(self.headers.get('content-length', 0))
if length > MAX_REQUEST_SIZE:
raise HTTPError(413)
if length == 0:
self.body = None
else:
self.body = self.rfile.read(length)
def handle_one_request(self):
if self.request.family == socket.AF_UNIX:
# Set a fake client address to make log functions happy
self.client_address = ['127.0.0.1', 0]
try:
if not self.server.config:
self.close_connection = 1
return
self.raw_requestline = self.rfile.readline(65537)
if not self.raw_requestline:
self.close_connection = 1
return
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
self.wfile.flush()
return
if not self.parse_request():
self.close_connection = 1
return
try:
self.parse_body()
except HTTPError as e:
self.send_error(e.code, e.mesg)
self.wfile.flush()
return
request = {'creds': self.peer_creds,
'client_cert': self.peer_cert,
'client_id': self.peer_info,
'command': self.command,
'path': self.path,
'path_chain': self.path_chain,
'query': self.query,
'url': self.url,
'version': self.request_version,
'headers': self.headers,
'body': self.body}
logger.debug(
"REQUEST: %s %s, query: %r, cred: %r, client_id: %s, "
"headers: %r, body: %r",
request['command'], request['path_chain'], request['query'],
request['creds'], request['client_id'],
dict(request['headers']), request['body']
)
try:
response = self.pipeline(self.server.config, request)
if response is None:
raise HTTPError(500)
except HTTPError as e:
self.send_error(e.code, e.mesg)
self.wfile.flush()
return
except socket.timeout as e:
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
except Exception as e: # pylint: disable=broad-except
self.log_error("Handler failed: %r", e, exc_info=True)
self.send_error(500)
self.wfile.flush()
return
self.send_response(response.get('code', 200))
for header, value in six.iteritems(response.get('headers', {})):
self.send_header(header, value)
self.end_headers()
output = response.get('output', None)
if hasattr(output, 'read'):
shutil.copyfileobj(output, self.wfile)
output.close()
elif output is not None:
self.wfile.write(output)
else:
self.close_connection = 1
self.wfile.flush()
return
except socket.timeout as e:
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
# pylint: disable=arguments-differ
def log_error(self, fmtstr, *args, **kwargs):
logger.error(fmtstr, *args, **kwargs)
def pipeline(self, config, request):
"""
The pipeline() function handles authentication and invocation of
the correct consumer based on the server configuration, that is
provided at initialization time.
When authentication is performed all the authenticators are
executed. If any returns False, authentication fails and a 403
error is raised. If none of them positively succeeds and they all
return None then also authentication fails and a 403 error is
raised. Authentication plugins can add attributes to the request
        object for use by authorization or other plugins.
        When authorization is performed, any positive result will cause the
operation to be accepted and any negative result will cause it to
fail. If no authorization plugin returns a positive result a 403
error is returned.
Once authentication and authorization are successful the pipeline
will parse the path component and find the consumer plugin that
handles the provided path walking up the path component by
component until a consumer is found.
Paths are walked up from the leaf to the root, so if two consumers
hang on the same tree, the one closer to the leaf will be used. If
        there is a trailing path when the consumer is selected then it will
        be stored in the request dictionary named 'trail'. The 'trail' is
an ordered list of the path components below the consumer entry
point.
"""
path_chain = request['path_chain']
if not path_chain or path_chain[0] != '':
# no path or not an absolute path
raise HTTPError(400)
# auth framework here
authers = config.get('authenticators')
if authers is None:
raise HTTPError(403)
valid_once = False
for auth in authers:
valid = authers[auth].handle(request)
if valid is False:
raise HTTPError(403)
elif valid is True:
valid_once = True
if valid_once is not True:
self.server.auditlog.svc_access(self.__class__.__name__,
log.AUDIT_SVC_AUTH_FAIL,
request['client_id'], 'No auth')
raise HTTPError(403)
# auhz framework here
authzers = config.get('authorizers')
if authzers is None:
raise HTTPError(403)
authz_ok = None
for authz in authzers:
valid = authzers[authz].handle(request)
if valid is True:
authz_ok = True
elif valid is False:
authz_ok = False
break
if authz_ok is not True:
self.server.auditlog.svc_access(self.__class__.__name__,
log.AUDIT_SVC_AUTHZ_FAIL,
request['client_id'],
path_chain)
raise HTTPError(403)
# Select consumer
trail = []
while path_chain:
if path_chain in config['consumers']:
con = config['consumers'][path_chain]
if len(trail) != 0:
request['trail'] = trail
return con.handle(request)
trail.insert(0, path_chain[-1])
path_chain = path_chain[:-1]
raise HTTPError(404)
class HTTPServer:
handler = HTTPRequestHandler
def __init__(self, srvurl, config):
url = urlparse(srvurl)
serverclass, address = self._get_serverclass(url)
if sd is not None:
address = self._get_systemd_socket(address)
self.httpd = serverclass(address, self.handler, config)
def _get_serverclass(self, url):
if url.scheme == 'http+unix':
# Unix socket
address = unquote(url.netloc)
if not address:
raise ValueError('Empty address {}'.format(url))
logger.info('Serving on Unix socket %s', address)
serverclass = ForkingUnixHTTPServer
elif url.scheme == 'http':
host, port = url.netloc.split(":")
address = (host, int(port))
logger.info('Serving on %s (HTTP)', url.netloc)
serverclass = ForkingHTTPServer
else:
raise ValueError('Unknown URL Scheme: %s' % url.scheme)
return serverclass, address
def _get_systemd_socket(self, address):
fds = sd.listen_fds()
if not fds:
return address
elif len(fds) > 1:
raise ValueError('Too many listening sockets', fds)
if isinstance(address, tuple):
port = address[1]
# systemd uses IPv6
if not sd.is_socket_inet(fds[0], family=socket.AF_INET6,
type=socket.SOCK_STREAM,
listening=True, port=port):
raise ValueError(
"FD {} is not TCP IPv6 socket on port {}".format(
fds[0], port
)
)
logger.info('Using systemd socket activation on port %i', port)
sock = socket.fromfd(fds[0], socket.AF_INET6, socket.SOCK_STREAM)
else:
if not sd.is_socket_unix(fds[0], socket.SOCK_STREAM,
listening=True, path=address):
raise ValueError(
"FD {} is not Unix stream socket on path {}".format(
fds[0], address
)
)
logger.info('Using systemd socket activation on path %s', address)
sock = socket.fromfd(fds[0], socket.AF_UNIX, socket.SOCK_STREAM)
if sys.version_info[0] < 3:
# Python 2.7's socket.fromfd() returns _socket.socket
sock = socket.socket(_sock=sock)
return sock
def get_socket(self):
return (self.httpd.socket, self.httpd.socket_file)
def serve(self):
if sd is not None and sd.booted():
sd.notify("READY=1")
return self.httpd.serve_forever()
| 18,530
|
Python
|
.py
| 438
| 30.769406
| 78
| 0.584864
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,712
|
kem.py
|
freeipa_freeipa/ipaserver/custodia/message/kem.py
|
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import os
import time
from jwcrypto.common import json_decode
from jwcrypto.common import json_encode
from jwcrypto.jwe import JWE
from jwcrypto.jwk import JWK
from jwcrypto.jws import JWS
from jwcrypto.jwt import JWT
from ipaserver.custodia.httpd.authorizers import SimplePathAuthz
from ipaserver.custodia.log import getLogger
from ipaserver.custodia.message.common import InvalidMessage
from ipaserver.custodia.message.common import MessageHandler
logger = getLogger(__name__)
KEY_USAGE_SIG = 0
KEY_USAGE_ENC = 1
KEY_USAGE_MAP = {KEY_USAGE_SIG: 'sig', KEY_USAGE_ENC: 'enc'}
class UnknownPublicKey(Exception):
def __init__(self, message=None):
logger.debug(message)
super(UnknownPublicKey, self).__init__(message)
class KEMKeysStore(SimplePathAuthz):
"""A KEM Keys Store.
This is a store that holds public keys of registered
clients allowed to use KEM messages. It takes the form
of an authorizer merely for the purpose of attaching
itself to a 'request' so that later on the KEM Parser
can fetch the appropriate key to verify/decrypt an
incoming request and make the payload available.
The KEM Parser will actually perform additional
authorization checks in this case.
    SimplePathAuthz is extended here as we want to attach the
store only to requests on paths we are configured to
manage.
"""
def __init__(self, config):
super(KEMKeysStore, self).__init__(config)
self._server_keys = None
self._alg = None
self._enc = None
def _db_key(self, kid):
return os.path.join('kemkeys', kid)
def handle(self, request):
inpath = super(KEMKeysStore, self).handle(request)
if inpath:
request['KEMKeysStore'] = self
return inpath
def find_key(self, kid, usage):
dbkey = self._db_key('%s/%s' % (KEY_USAGE_MAP[usage], kid))
pubkey = self.store.get(dbkey)
if pubkey is None:
raise UnknownPublicKey(kid)
return pubkey
@property
def server_keys(self):
if self._server_keys is None:
if 'server_keys' not in self.config:
raise UnknownPublicKey("Server Keys not defined")
skey = self.find_key(self.config['server_keys'], KEY_USAGE_SIG)
ekey = self.find_key(self.config['server_keys'], KEY_USAGE_ENC)
self._server_keys = [JWK(**(json_decode(skey))),
JWK(**(json_decode(ekey)))]
return self._server_keys
@property
def alg(self):
if self._alg is None:
alg = self.config.get('signing_algorithm', None)
if alg is None:
ktype = self.server_keys[KEY_USAGE_SIG]['kty']
if ktype == 'RSA':
alg = 'RS256'
elif ktype == 'EC':
alg = 'ES256'
else:
raise ValueError('Key type unsupported for signing')
self._alg = alg
return self._alg
def check_kem_claims(claims, name):
if 'sub' not in claims:
raise InvalidMessage('Missing subject in payload')
if claims['sub'] != name:
raise InvalidMessage('Key name %s does not match subject %s' % (
name, claims['sub']))
if 'exp' not in claims:
raise InvalidMessage('Missing expiration time in payload')
if claims['exp'] - (10 * 60) > int(time.time()):
raise InvalidMessage('Message expiration too far in the future')
if claims['exp'] < int(time.time()):
raise InvalidMessage('Message Expired')
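# --- Illustrative sketch (editor's note, not part of the original module) ---
# check_kem_claims() accepts tokens whose 'exp' lies between "now" and
# roughly "now + 10 minutes". A worked example with hypothetical claims:
#   now = int(time.time())
#   check_kem_claims({'sub': 'key1', 'exp': now + 5 * 60}, 'key1')  # passes
#   check_kem_claims({'sub': 'key1', 'exp': now - 1}, 'key1')       # Expired
#   check_kem_claims({'sub': 'key1', 'exp': now + 3600}, 'key1')    # too far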
class KEMHandler(MessageHandler):
"""Handles 'kem' messages"""
def __init__(self, request):
super(KEMHandler, self).__init__(request)
self.kkstore = self.req.get('KEMKeysStore', None)
if self.kkstore is None:
raise Exception('KEM KeyStore not configured')
self.client_keys = None
self.name = None
def _get_key(self, header, usage):
if 'kid' not in header:
raise InvalidMessage("Missing key identifier")
key = self.kkstore.find_key(header.get('kid'), usage)
if key is None:
            raise UnknownPublicKey(
                'Key not found [kid:%s]' % header.get('kid'))
return json_decode(key)
def parse(self, msg, name):
"""Parses the message.
We check that the message is properly formatted.
:param msg: a json-encoded value containing a JWS or JWE+JWS token
:raises InvalidMessage: if the message cannot be parsed or validated
:returns: A verified payload
"""
try:
jtok = JWT(jwt=msg)
except Exception as e:
raise InvalidMessage('Failed to parse message: %s' % str(e))
try:
token = jtok.token
if isinstance(token, JWE):
token.decrypt(self.kkstore.server_keys[KEY_USAGE_ENC])
# If an encrypted payload is received then there must be
# a nested signed payload to verify the provenance.
payload = token.payload.decode('utf-8')
token = JWS()
token.deserialize(payload)
elif isinstance(token, JWS):
pass
else:
raise TypeError("Invalid Token type: %s" % type(jtok))
# Retrieve client keys for later use
self.client_keys = [
JWK(**self._get_key(token.jose_header, KEY_USAGE_SIG)),
JWK(**self._get_key(token.jose_header, KEY_USAGE_ENC))]
# verify token and get payload
token.verify(self.client_keys[KEY_USAGE_SIG])
claims = json_decode(token.payload)
except Exception as e:
logger.debug('Failed to validate message', exc_info=True)
raise InvalidMessage('Failed to validate message: %s' % str(e))
check_kem_claims(claims, name)
self.name = name
self.payload = claims.get('value')
self.msg_type = 'kem'
return {'type': self.msg_type,
'value': {'kid': self.client_keys[KEY_USAGE_ENC].get('kid'),
'claims': claims}}
def reply(self, output):
if self.client_keys is None:
raise UnknownPublicKey("Peer key not defined")
ktype = self.client_keys[KEY_USAGE_ENC]['kty']
if ktype == 'RSA':
enc = ('RSA-OAEP', 'A256CBC-HS512')
else:
raise ValueError("'%s' type not supported yet" % ktype)
value = make_enc_kem(self.name, output,
self.kkstore.server_keys[KEY_USAGE_SIG],
self.kkstore.alg,
self.client_keys[1], enc)
return {'type': 'kem', 'value': value}
class KEMClient:
def __init__(self, server_keys, client_keys):
self.server_keys = server_keys
self.client_keys = client_keys
def make_request(self, name, value=None, alg="RS256", encalg=None):
if encalg is None:
return make_sig_kem(name, value,
self.client_keys[KEY_USAGE_SIG], alg)
else:
return make_enc_kem(name, value,
self.client_keys[KEY_USAGE_SIG], alg,
self.server_keys[KEY_USAGE_ENC], encalg)
def parse_reply(self, name, message):
claims = decode_enc_kem(message,
self.client_keys[KEY_USAGE_ENC],
self.server_keys[KEY_USAGE_SIG])
check_kem_claims(claims, name)
return claims['value']
def make_sig_kem(name, value, key, alg):
header = {'kid': key.get('kid'), 'alg': alg}
claims = {'sub': name, 'exp': int(time.time() + (5 * 60))}
if value is not None:
claims['value'] = value
jwt = JWT(header, claims)
jwt.make_signed_token(key)
return jwt.serialize(compact=True)
def make_enc_kem(name, value, sig_key, alg, enc_key, enc):
plaintext = make_sig_kem(name, value, sig_key, alg)
eprot = {'kid': enc_key.get('kid'), 'alg': enc[0], 'enc': enc[1]}
jwe = JWE(plaintext, json_encode(eprot))
jwe.add_recipient(enc_key)
return jwe.serialize(compact=True)
def decode_enc_kem(message, enc_key, sig_key):
jwe = JWT(jwt=message, key=enc_key)
jws = JWT(jwt=jwe.claims, key=sig_key)
return json_decode(jws.claims)
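# --- Illustrative sketch (editor's note, not part of the original module) ---
# End-to-end use of the helpers above: sign a value, wrap it in a JWE for
# the peer, then decrypt and verify on the other side. The ad-hoc keys and
# names are hypothetical; in Custodia they come from the KEMKeysStore.
def _kem_roundtrip_demo():
    sig_key = JWK.generate(kty='RSA', size=2048, kid='client-sig')
    enc_key = JWK.generate(kty='RSA', size=2048, kid='server-enc')
    token = make_enc_kem('key1', 'secret', sig_key, 'RS256',
                         enc_key, ('RSA-OAEP', 'A256CBC-HS512'))
    claims = decode_enc_kem(token, enc_key, sig_key)
    check_kem_claims(claims, 'key1')
    assert claims['value'] == 'secret'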
| 8,544
|
Python
|
.py
| 196
| 33.770408
| 76
| 0.606002
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,713
|
formats.py
|
freeipa_freeipa/ipaserver/custodia/message/formats.py
|
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
from ipaserver.custodia.message.common import InvalidMessage
from ipaserver.custodia.message.common import UnallowedMessage
from ipaserver.custodia.message.common import UnknownMessageType
from ipaserver.custodia.message.kem import KEMHandler
from ipaserver.custodia.message.simple import SimpleKey
default_types = ['simple', 'kem']
key_types = {'simple': SimpleKey,
'kem': KEMHandler}
class Validator:
"""Validates incoming messages."""
def __init__(self, allowed=None):
"""Creates a Validator object.
:param allowed: list of allowed message types (optional)
"""
self.allowed = allowed or default_types
self.types = key_types.copy()
def add_types(self, types):
self.types.update(types)
def parse(self, request, msg, name):
if not isinstance(msg, dict):
raise InvalidMessage('The message must be a dict')
if 'type' not in msg:
raise InvalidMessage('The type is missing')
if isinstance(msg['type'], list):
if len(msg['type']) != 1:
raise InvalidMessage('Type is multivalued: %s' % msg['type'])
msg_type = msg['type'][0]
else:
msg_type = msg['type']
if 'value' not in msg:
raise InvalidMessage('The value is missing')
if isinstance(msg['value'], list):
if len(msg['value']) != 1:
raise InvalidMessage('Value is multivalued: %s' % msg['value'])
msg_value = msg['value'][0]
else:
msg_value = msg['value']
if msg_type not in self.types:
raise UnknownMessageType("Type '%s' is unknown" % msg_type)
if msg_type not in self.allowed:
raise UnallowedMessage("Message type '%s' not allowed" % (
msg_type,))
handler = self.types[msg_type](request)
handler.parse(msg_value, name)
return handler
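# --- Illustrative sketch (editor's note, not part of the original module) ---
# Dispatching a 'simple' message through the Validator; note that a
# single-element list for 'type' or 'value' is unwrapped before dispatch.
# The empty request dict is hypothetical and sufficient for SimpleKey.
def _validator_demo():
    validator = Validator(allowed=['simple'])
    handler = validator.parse({}, {'type': ['simple'], 'value': 'hunter2'},
                              'key1')
    assert handler.payload == 'hunter2'
    assert handler.reply('hunter2') == {'type': 'simple', 'value': 'hunter2'}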
| 2,081
|
Python
|
.py
| 47
| 35
| 79
| 0.624194
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,714
|
simple.py
|
freeipa_freeipa/ipaserver/custodia/message/simple.py
|
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
from six import string_types
from ipaserver.custodia.message.common import InvalidMessage
from ipaserver.custodia.message.common import MessageHandler
class SimpleKey(MessageHandler):
"""Handles 'simple' messages"""
def parse(self, msg, name):
"""Parses a simple message
:param msg: the json-decoded value
:param name: the requested name
:raises UnknownMessageType: if the type is not 'simple'
:raises InvalidMessage: if the message cannot be parsed or validated
"""
# On requests we imply 'simple' if there is no input message
if msg is None:
return
if not isinstance(msg, string_types):
raise InvalidMessage("The 'value' attribute is not a string")
self.name = name
self.payload = msg
self.msg_type = 'simple'
def reply(self, output):
if output is None:
return None
if self.name.endswith('/'):
# directory listings are pass-through with simple messages
return output
return {'type': self.msg_type, 'value': output}
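# --- Illustrative sketch (editor's note, not part of the original module) ---
# Reply shaping: plain key replies are wrapped in {'type', 'value'}, while
# directory listings (names ending with '/') pass through unchanged. The
# empty request dict is hypothetical.
def _simplekey_demo():
    handler = SimpleKey({})
    handler.parse('payload', 'key1')
    assert handler.reply('v1') == {'type': 'simple', 'value': 'v1'}
    handler.parse('payload', 'dir/')
    assert handler.reply(['a', 'b']) == ['a', 'b']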
| 1,232
|
Python
|
.py
| 29
| 34.413793
| 76
| 0.667227
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,715
|
common.py
|
freeipa_freeipa/ipaserver/custodia/message/common.py
|
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
from ipaserver.custodia.log import getLogger
logger = getLogger(__name__)
class InvalidMessage(Exception):
"""Invalid Message.
This exception is raised when a message cannot be parsed
or validated.
"""
def __init__(self, message=None):
logger.debug(message)
super(InvalidMessage, self).__init__(message)
class UnknownMessageType(Exception):
"""Unknown Message Type.
This exception is raised when a message is of an unknown
type.
"""
def __init__(self, message=None):
logger.debug(message)
super(UnknownMessageType, self).__init__(message)
class UnallowedMessage(Exception):
"""Unallowed Message.
    This exception is raised when the message type is known but
    is not allowed.
"""
def __init__(self, message=None):
logger.debug(message)
super(UnallowedMessage, self).__init__(message)
class MessageHandler:
def __init__(self, request):
self.req = request
self.name = None
self.payload = None
self.msg_type = None
def parse(self, msg, name):
"""Parses the message.
:param req: the original request
:param msg: a decoded json string with the incoming message
:raises InvalidMessage: if the message cannot be parsed or validated
"""
raise NotImplementedError
def reply(self, output):
"""Generates a reply.
:param req: the original request
:param output: a Python object that can be converted to JSON
"""
raise NotImplementedError
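# --- Illustrative sketch (editor's note, not part of the original module) ---
# A plugin implements the two abstract methods above. A hypothetical echo
# handler, purely for illustration:
class _EchoHandlerDemo(MessageHandler):
    def parse(self, msg, name):
        self.name = name
        self.payload = msg
        self.msg_type = 'echo'
    def reply(self, output):
        return {'type': self.msg_type, 'value': output}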
| 1,691
|
Python
|
.py
| 47
| 29.425532
| 76
| 0.671596
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,716
|
__main__.py
|
freeipa_freeipa/ipaserver/custodia/server/__main__.py
|
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
from ipaserver.custodia.server import main
if __name__ == '__main__':
main()
| 193
|
Python
|
.py
| 5
| 36.4
| 70
| 0.731183
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,717
|
config.py
|
freeipa_freeipa/ipaserver/custodia/server/config.py
|
# Copyright (C) 2015-2017 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import configparser
import glob
import os
import socket
from urllib.parse import quote as url_escape
class CustodiaConfig:
CONFIG_SPECIALS = ['authenticators', 'authorizers', 'consumers', 'stores']
DEFAULT_PATHS = [
('libdir', '/var/lib/custodia/{instance}'),
('logdir', '/var/log/custodia/{instance}'),
('rundir', '/var/run/custodia/{instance}'),
('socketdir', '/var/run/custodia'),
]
def __init__(self, args):
self.args = args
self.config = {}
self.defaults = None
self.parser = None
def get_defaults(self):
configpath = self.args.configfile.name
instance = self.args.instance
defaults = {
            # Do not use getfqdn(). Internally it calls gethostbyaddr which
# might perform a DNS query.
'hostname': socket.gethostname(),
'configdir': os.path.dirname(configpath),
'confdpattern': os.path.join(configpath + '.d', '*.conf'),
'instance': instance if instance else '',
}
for name, path in self.DEFAULT_PATHS:
defaults[name] = os.path.abspath(path.format(**defaults))
return defaults
def create_parser(self):
parser = configparser.ConfigParser(
interpolation=configparser.ExtendedInterpolation(),
defaults=self.defaults
)
parser.optionxform = str
# add env
parser.add_section(u'ENV')
for k, v in os.environ.items():
if set(v).intersection('\r\n\x00'):
continue
parser.set(u'ENV', k, v.replace(u'$', u'$$'))
# default globals
parser.add_section(u'global')
parser.set(u'global', u'auditlog', u'${logdir}/audit.log')
parser.set(u'global', u'debug', u'false')
parser.set(u'global', u'umask', u'027')
parser.set(u'global', u'makedirs', u'false')
return parser
def read_configs(self):
with self.args.configfile as f:
self.parser.read_file(f)
configfiles = [self.args.configfile.name]
pattern = self.parser.get(u'DEFAULT', u'confdpattern')
if pattern:
confdfiles = glob.glob(pattern)
confdfiles.sort()
for confdfile in confdfiles:
with open(confdfile) as f:
self.parser.read_file(f)
configfiles.append(confdfile)
return configfiles
def makedirs(self):
for name, _path in self.DEFAULT_PATHS:
path = self.parser.get(u'DEFAULT', name)
parent = os.path.dirname(path)
# create parents according to umask
if not os.path.isdir(parent):
os.makedirs(parent)
# create final directory with restricted permissions
if not os.path.isdir(path):
os.mkdir(path, 0o700)
def populate_config(self):
config = self.config
for s in self.CONFIG_SPECIALS:
config[s] = {}
for opt, val in self.parser.items(u'global'):
if opt in self.CONFIG_SPECIALS:
raise ValueError('"%s" is an invalid '
'[global] option' % opt)
config[opt] = val
config['tls_verify_client'] = self.parser.getboolean(
'global', 'tls_verify_client', fallback=False)
config['debug'] = self.parser.getboolean(
'global', 'debug', fallback=False)
config['makedirs'] = self.parser.getboolean(
'global', 'makedirs', fallback=False)
if self.args.debug:
config['debug'] = self.args.debug
config['auditlog'] = os.path.abspath(config.get('auditlog'))
config['umask'] = int(config.get('umask', '027'), 8)
url = config.get('server_url')
sock = config.get('server_socket')
if url and sock:
raise ValueError(
"'server_url' and 'server_socket' are mutually exclusive.")
if not url and not sock:
            # no option given, use the default socket path
socketdir = self.parser.get(u'DEFAULT', u'socketdir')
name = self.args.instance if self.args.instance else 'custodia'
sock = os.path.join(socketdir, name + '.sock')
if sock:
server_socket = os.path.abspath(sock)
config['server_url'] = 'http+unix://{}/'.format(
url_escape(server_socket, ''))
def __call__(self):
self.defaults = self.get_defaults()
self.parser = self.create_parser()
self.config['configfiles'] = self.read_configs()
self.populate_config()
if self.config[u'makedirs']:
self.makedirs()
return self.parser, self.config
def parse_config(args):
ccfg = CustodiaConfig(args)
return ccfg()
def test(arglist):
from pprint import pprint
from .args import parse_args
args = parse_args(arglist)
parser, config = parse_config(args)
pprint(parser.items("DEFAULT"))
pprint(config)
if __name__ == '__main__':
test(['--instance=demo', './tests/empty.conf'])
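# --- Illustrative sketch (editor's note, not part of the original file) ---
# A hypothetical custodia.conf exercising the defaults wired up above;
# ${...} references are resolved by ExtendedInterpolation against the
# DEFAULT section (logdir, rundir, ...) populated in get_defaults():
#   [global]
#   debug = true
#   auditlog = ${logdir}/audit.log
#   server_socket = ${rundir}/custodia.sock
#   [store:sqlite]
#   handler = SqliteStore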
| 5,262
|
Python
|
.py
| 129
| 30.922481
| 78
| 0.589377
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,718
|
__init__.py
|
freeipa_freeipa/ipaserver/custodia/server/__init__.py
|
# Copyright (C) 2015 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import importlib
import os
import pkg_resources
import six
from ipaserver.custodia import log
from ipaserver.custodia.httpd.server import HTTPServer
from .args import default_argparser
from .args import parse_args as _parse_args
from .config import parse_config as _parse_config
logger = log.getLogger('custodia')
__all__ = ['default_argparser', 'main']
def attach_store(typename, plugins, stores):
for name, c in six.iteritems(plugins):
if getattr(c, 'store_name', None) is None:
continue
try:
c.store = stores[c.store_name]
except KeyError:
            raise ValueError('[%s%s] references nonexistent store '
                             '"%s"' % (typename, name, c.store_name))
def _load_plugin_class(menu, name):
"""Load Custodia plugin
Entry points are preferred over dotted import path.
"""
group = 'custodia.{}'.format(menu)
eps = list(pkg_resources.iter_entry_points(group, name))
if len(eps) > 1:
raise ValueError(
"Multiple entry points for {} {}: {}".format(menu, name, eps))
elif len(eps) == 1:
# backwards compatibility with old setuptools
ep = eps[0]
if hasattr(ep, 'resolve'):
return ep.resolve()
else:
return ep.load(require=False)
elif '.' in name:
# fall back to old style dotted name
module, classname = name.rsplit('.', 1)
m = importlib.import_module(module)
return getattr(m, classname)
else:
raise ValueError("{}: {} not found".format(menu, name))
def _create_plugin(cfgparser, section, menu):
if not cfgparser.has_option(section, 'handler'):
raise ValueError('Invalid section, missing "handler"')
handler_name = cfgparser.get(section, 'handler')
hconf = {'facility_name': section}
try:
handler = _load_plugin_class(menu, handler_name)
classname = handler.__name__
hconf['facility_name'] = '%s-[%s]' % (classname, section)
except Exception as e:
raise ValueError('Invalid format for "handler" option '
'[%r]: %s' % (e, handler_name))
if handler._options is not None:
# new-style plugin with parser and section
plugin = handler(cfgparser, section)
else:
# old-style plugin with config dict
hconf.update(cfgparser.items(section))
hconf.pop('handler')
plugin = handler(hconf)
plugin.section = section
return plugin
def _load_plugins(config, cfgparser):
"""Load and initialize plugins
"""
# set umask before any plugin gets a chance to create a file
os.umask(config['umask'])
for s in cfgparser.sections():
if s in {'ENV', 'global'}:
# ENV section is only used for interpolation
continue
if s.startswith('/'):
menu = 'consumers'
path_chain = s.split('/')
if path_chain[-1] == '':
path_chain = path_chain[:-1]
name = tuple(path_chain)
else:
if s.startswith('auth:'):
menu = 'authenticators'
name = s[5:]
elif s.startswith('authz:'):
menu = 'authorizers'
name = s[6:]
elif s.startswith('store:'):
menu = 'stores'
name = s[6:]
else:
raise ValueError('Invalid section name [%s].\n' % s)
try:
config[menu][name] = _create_plugin(cfgparser, s, menu)
except Exception as e:
logger.debug("Plugin '%s' failed to load.", name, exc_info=True)
raise RuntimeError(menu, name, e)
# 2nd initialization stage
for menu in ['authenticators', 'authorizers', 'consumers', 'stores']:
plugins = config[menu]
for name in sorted(plugins):
plugin = plugins[name]
plugin.finalize_init(config, cfgparser, context=None)
def main(argparser=None):
args = _parse_args(argparser=argparser)
# parse arguments and populate config with basic settings
cfgparser, config = _parse_config(args)
# initialize logging
log.setup_logging(config['debug'], config['auditlog'])
logger.info('Custodia instance %s', args.instance or '<main>')
logger.debug('Config file(s) %s loaded', config['configfiles'])
# load plugins after logging
_load_plugins(config, cfgparser)
# create and run server
httpd = HTTPServer(config['server_url'], config)
httpd.serve()
| 4,656
|
Python
|
.py
| 118
| 31.152542
| 76
| 0.613203
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,719
|
args.py
|
freeipa_freeipa/ipaserver/custodia/server/args.py
|
# Copyright (C) 2015-2017 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import argparse
import os
class AbsFileType(argparse.FileType):
"""argparse file type with absolute path
"""
def __call__(self, string):
if string != '-':
string = os.path.abspath(string)
return super(AbsFileType, self).__call__(string)
class ConfigfileAction(argparse.Action):
"""Default action handler for configfile
"""
default_path = '/etc/custodia/custodia.conf'
default_instance = '/etc/custodia/{instance}.conf'
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
if namespace.instance is not None:
values = self.default_instance.format(
instance=namespace.instance
)
else:
values = self.default_path
values = self.type(values)
setattr(namespace, self.dest, values)
def instance_name(string):
"""Check for valid instance name
"""
invalid = ':/@'
if set(string).intersection(invalid):
msg = 'Invalid instance name {}'.format(string)
raise argparse.ArgumentTypeError(msg)
return string
default_argparser = argparse.ArgumentParser(
prog='custodia',
description='Custodia server'
)
default_argparser.add_argument(
'--debug',
action='store_true',
help='Debug mode'
)
default_argparser.add_argument(
'--instance',
type=instance_name,
help='Instance name',
default=None
)
default_argparser.add_argument(
'configfile',
nargs='?',
action=ConfigfileAction,
type=AbsFileType('r'),
    help=('Path to custodia server config (default: '
          '/etc/custodia/{instance}.conf or /etc/custodia/custodia.conf)'),
)
def parse_args(args=None, argparser=None):
if argparser is None:
argparser = default_argparser
# namespace with default values
namespace = argparse.Namespace(
debug=False,
instance=None,
)
return argparser.parse_args(args, namespace)
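# --- Illustrative sketch (editor's note, not part of the original module) ---
# How ConfigfileAction resolves the config path (the file must exist and be
# readable, since AbsFileType('r') opens it):
#   parse_args(['--instance=demo']).configfile  # /etc/custodia/demo.conf
#   parse_args([]).configfile                   # /etc/custodia/custodia.conf
#   parse_args(['./my.conf']).configfile        # absolute path to ./my.conf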
| 2,096
|
Python
|
.py
| 66
| 25.606061
| 75
| 0.660218
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,720
|
odsmgr.py
|
freeipa_freeipa/ipaserver/dnssec/odsmgr.py
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
import dns.name
import re
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
from ipapython import ipa_log_manager, ipautil
from ipaserver.dnssec.opendnssec import tasks
logger = logging.getLogger(__name__)
# hack: zone object UUID is stored as path to imaginary zone file
ENTRYUUID_PREFIX = "/var/lib/ipa/dns/zone/entryUUID/"
ENTRYUUID_PREFIX_LEN = len(ENTRYUUID_PREFIX)
class ZoneListReader:
def __init__(self):
self.names = set() # dns.name
self.uuids = set() # UUID strings
self.mapping = dict() # {UUID: dns.name}
def _add_zone(self, name, zid):
"""Add zone & UUID to internal structures.
Zone with given name and UUID must not exist."""
# detect duplicate zone names
name = dns.name.from_text(name)
assert name not in self.names, \
'duplicate name (%s, %s) vs. %s' % (name, zid, self.mapping)
# duplicate non-None zid is not allowed
assert not zid or zid not in self.uuids, \
'duplicate UUID (%s, %s) vs. %s' % (name, zid, self.mapping)
self.names.add(name)
self.uuids.add(zid)
self.mapping[zid] = name
def _del_zone(self, name, zid):
"""Remove zone & UUID from internal structures.
Zone with given name and UUID must exist.
"""
name = dns.name.from_text(name)
assert zid is not None
assert name in self.names, \
'name (%s, %s) does not exist in %s' % (name, zid, self.mapping)
assert zid in self.uuids, \
'UUID (%s, %s) does not exist in %s' % (name, zid, self.mapping)
assert zid in self.mapping and name == self.mapping[zid], \
'pair {%s: %s} does not exist in %s' % (zid, name, self.mapping)
self.names.remove(name)
self.uuids.remove(zid)
del self.mapping[zid]
class ODSZoneListReader(ZoneListReader):
"""One-shot parser for ODS zonelist.xml."""
def __init__(self, zonelist_text):
super(ODSZoneListReader, self).__init__()
root = etree.fromstring(zonelist_text)
self._parse_zonelist(root)
def _parse_zonelist(self, root):
"""iterate over Zone elements with attribute 'name' and
add IPA zones to self.zones"""
        if root.tag != 'ZoneList':
raise ValueError(root.tag)
for zone_xml in root.findall('./Zone[@name]'):
name, zid = self._parse_ipa_zone(zone_xml)
self._add_zone(name, zid)
def _parse_ipa_zone(self, zone_xml):
"""Extract zone name, input adapter and detect IPA zones.
        IPA zones contain an Adapters/Input/Adapter element with
        attribute type = "File" and a value prefixed with ENTRYUUID_PREFIX.
Returns:
tuple (zone name, ID)
"""
name = zone_xml.get('name')
zids = []
for in_adapter in zone_xml.findall(
'./Adapters/Input/Adapter[@type="File"]'):
path = in_adapter.text
if path.startswith(ENTRYUUID_PREFIX):
# strip prefix from path
zids.append(path[ENTRYUUID_PREFIX_LEN:])
if len(zids) != 1:
raise ValueError('only IPA zones are supported: {}'.format(
etree.tostring(zone_xml)))
return name, zids[0]
class LDAPZoneListReader(ZoneListReader):
def __init__(self):
super(LDAPZoneListReader, self).__init__()
def process_ipa_zone(self, op, uuid, zone_ldap):
assert (op in ['add', 'del']), 'unsupported op %s' % op
assert uuid is not None
assert 'idnsname' in zone_ldap, \
'LDAP zone UUID %s without idnsName' % uuid
assert len(zone_ldap['idnsname']) == 1, \
'LDAP zone UUID %s with len(idnsname) != 1' % uuid
if op == 'add':
self._add_zone(zone_ldap['idnsname'][0], uuid)
elif op == 'del':
self._del_zone(zone_ldap['idnsname'][0], uuid)
class ODSMgr:
"""OpenDNSSEC zone manager. It does LDAP->ODS synchronization.
Zones with idnsSecInlineSigning attribute = TRUE in LDAP are added
or deleted from ODS as necessary. ODS->LDAP key synchronization
    has to be solved separately.
"""
def __init__(self):
self.zl_ldap = LDAPZoneListReader()
def ksmutil(self, params):
"""Call ods-ksmutil / ods-enforcer with parameters and return stdout.
Raises CalledProcessError if returncode != 0.
"""
result = tasks.run_ods_manager(params, capture_output=True)
return result.output
def get_ods_zonelist(self):
stdout = self.ksmutil(['zonelist', 'export'])
try:
reader = ODSZoneListReader(stdout)
except etree.ParseError:
# With OpenDNSSEC 2, the above command returns a message
# containing the zonelist filename instead of the XML text:
# "Exported zonelist to /etc/opendnssec/zonelist.xml successfully"
# extract the filename and read its content
pattern = re.compile(r'.* (/.*) .*')
matches = re.findall(pattern, stdout)
if matches:
with open(matches[0]) as f:
content = f.read()
reader = ODSZoneListReader(content)
return reader
def add_ods_zone(self, uuid, name):
zone_path = '%s%s' % (ENTRYUUID_PREFIX, uuid)
if name != dns.name.root:
name = name.relativize(dns.name.root)
cmd = ['zone', 'add', '--zone', str(name), '--input', zone_path]
output = None
try:
output = self.ksmutil(cmd)
except ipautil.CalledProcessError as e:
# Zone already exists in HSM
if e.returncode == 1 \
and str(e.output).endswith(str(name) + ' already exists!'):
# Just return
return
if output is not None:
logger.info('%s', output)
self.notify_enforcer()
def del_ods_zone(self, name):
# ods-ksmutil blows up if zone name has period at the end
if name != dns.name.root:
name = name.relativize(dns.name.root)
# detect if name is root zone
if name == dns.name.empty:
name = dns.name.root
cmd = ['zone', 'delete', '--zone', str(name)]
output = None
try:
output = self.ksmutil(cmd)
except ipautil.CalledProcessError as e:
# Zone already doesn't exist in HSM
if e.returncode == 1 \
and str(e.output).endswith(str(name) + ' not found!'):
# Just cleanup signer, no need to notify enforcer
self.cleanup_signer(name)
return
if output is not None:
logger.info('%s', output)
self.notify_enforcer()
self.cleanup_signer(name)
def notify_enforcer(self):
result = tasks.run_ods_notify(capture_output=True)
logger.info('%s', result.output)
def cleanup_signer(self, zone_name):
cmd = ['ods-signer', 'ldap-cleanup', str(zone_name)]
output = ipautil.run(cmd, capture_output=True)
logger.info('%s', output)
def ldap_event(self, op, uuid, attrs):
"""Record single LDAP event - zone addition or deletion.
Change is only recorded to memory.
        self.sync() has to be called to synchronize changes to ODS."""
assert op in ('add', 'del')
self.zl_ldap.process_ipa_zone(op, uuid, attrs)
logger.debug("LDAP zones: %s", self.zl_ldap.mapping)
def sync(self):
"""Synchronize list of zones in LDAP with ODS."""
zl_ods = self.get_ods_zonelist()
logger.debug("ODS zones: %s", zl_ods.mapping)
removed = self.diff_zl(zl_ods, self.zl_ldap)
logger.info("Zones removed from LDAP: %s", removed)
added = self.diff_zl(self.zl_ldap, zl_ods)
logger.info("Zones added to LDAP: %s", added)
for (uuid, name) in removed:
self.del_ods_zone(name)
for (uuid, name) in added:
self.add_ods_zone(uuid, name)
def diff_zl(self, s1, s2):
"""Compute zones present in s1 but not present in s2.
Returns: List of (uuid, name) tuples with zones present only in s1."""
s1_extra = s1.uuids - s2.uuids
removed = [(uuid, name) for (uuid, name) in s1.mapping.items()
if uuid in s1_extra]
return removed
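# --- Illustrative sketch (editor's note, not part of the original module) ---
# diff_zl() only relies on the .uuids set and .mapping dict, so stand-in
# reader objects are enough to demonstrate sync()'s add/remove decisions
# (all zone data below is hypothetical):
#   class _Reader:
#       def __init__(self, mapping):
#           self.mapping = mapping
#           self.uuids = set(mapping)
#   ldap = _Reader({'u1': 'a.test.', 'u2': 'b.test.'})
#   ods = _Reader({'u2': 'b.test.', 'u3': 'c.test.'})
#   ODSMgr().diff_zl(ldap, ods)  # [('u1', 'a.test.')]  -> zones to add
#   ODSMgr().diff_zl(ods, ldap)  # [('u3', 'c.test.')]  -> zones to remove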
if __name__ == '__main__':
ipa_log_manager.standard_logging_setup(debug=True)
ods = ODSMgr()
reader = ods.get_ods_zonelist()
logger.info('ODS zones: %s', reader.mapping)
| 8,827
|
Python
|
.py
| 203
| 34.157635
| 79
| 0.599464
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,721
|
_ods21.py
|
freeipa_freeipa/ipaserver/dnssec/_ods21.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
import os
import dateutil.tz
from ipaserver.dnssec._odsbase import AbstractODSDBConnection
from ipaserver.dnssec._odsbase import AbstractODSSignerConn
from ipaserver.dnssec._odsbase import ODS_SE_MAXLINE
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipapython import ipautil
CLIENT_OPC_STDOUT = 0
CLIENT_OPC_EXIT = 4
class ODSDBConnection(AbstractODSDBConnection):
def get_zones(self):
cur = self._db.execute("SELECT name from zone")
rows = cur.fetchall()
return [row['name'] for row in rows]
def get_zone_id(self, zone_name):
cur = self._db.execute(
"SELECT id FROM zone WHERE LOWER(name)=LOWER(?)",
(zone_name,))
rows = cur.fetchall()
return [row[0] for row in rows]
def get_keys_for_zone(self, zone_id):
cur = self._db.execute(
"SELECT hsmk.locator, hsmk.inception, hsmk.algorithm, "
"hsmk.role, hsmk.state "
"FROM hsmKey AS hsmk "
"JOIN keyData AS kd ON hsmk.id = kd.hsmKeyId "
"WHERE kd.zoneId = ?", (zone_id,))
for row in cur:
key = dict()
key['HSMkey_id'] = row['locator']
# The date is stored in UTC format but OpenDNSSEC 1.4 was
# returning a local tz format
tz = dateutil.tz.tzlocal()
key['generate'] = ipautil.datetime_from_utctimestamp(
row['inception'],
units=1).astimezone(tz).replace(tzinfo=None).isoformat(
sep=' ', timespec='seconds')
key['algorithm'] = row['algorithm']
key['publish'] = key['generate']
key['active'] = None
key['retire'] = None
key['dead'] = None
if row['role'] == 2:
key['keytype'] = 256
elif row['role'] == 1:
key['keytype'] = 257
key['state'] = row['state']
yield key
class ODSSignerConn(AbstractODSSignerConn):
def read_cmd(self):
msg = self._conn.recv(ODS_SE_MAXLINE)
_opc = int(msg[0])
        # parentheses matter: '+' binds tighter than '<<'
        msglen = (int(msg[1]) << 8) + int(msg[2])
cmd = msg[3:msglen - 1].strip()
return cmd
def send_reply_and_close(self, reply):
prefix = bytearray([CLIENT_OPC_STDOUT, len(reply) >> 8,
len(reply) & 255])
self._conn.sendall(prefix + reply)
        # 2nd message: CLIENT_OPC_EXIT opcode, 16-bit length 1, exit code 0
prefix = bytearray([CLIENT_OPC_EXIT, 0, 1, 0])
self._conn.sendall(prefix)
self._conn.close()
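# --- Illustrative sketch (editor's note, not part of the original module) ---
# The enforcer client protocol frames every message as
# [opcode, len_hi, len_lo, body...]; encoding and decoding the 16-bit
# length must mirror each other (note the parentheses in read_cmd()):
#   body = b'zones\n'
#   frame = bytes([CLIENT_OPC_STDOUT, len(body) >> 8, len(body) & 255]) + body
#   assert (frame[1] << 8) + frame[2] == len(body)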
class ODSTask():
def run_ods_setup(self):
"""Initialize a new kasp.db"""
cmd = [paths.ODS_ENFORCER_DB_SETUP]
return ipautil.run(cmd, stdin="y", runas=constants.ODS_USER)
def run_ods_notify(self, **kwargs):
"""Notify ods-enforcerd to reload its conf."""
cmd = [paths.ODS_ENFORCER, 'flush']
# run commands as ODS user
if os.geteuid() == 0:
kwargs['runas'] = constants.ODS_USER
return ipautil.run(cmd, **kwargs)
def run_ods_policy_import(self, **kwargs):
"""Run OpenDNSSEC manager command to import policy."""
cmd = [paths.ODS_ENFORCER, 'policy', 'import']
# run commands as ODS user
if os.geteuid() == 0:
kwargs['runas'] = constants.ODS_USER
ipautil.run(cmd, **kwargs)
def run_ods_manager(self, params, **kwargs):
"""Run OpenDNSSEC manager command (ksmutil, enforcer)
:param params: parameter for ODS command
:param kwargs: additional arguments for ipautil.run()
:return: result from ipautil.run()
"""
assert params[0] != 'setup'
cmd = [paths.ODS_ENFORCER]
cmd.extend(params)
# run commands as ODS user
if os.geteuid() == 0:
kwargs['runas'] = constants.ODS_USER
return ipautil.run(cmd, **kwargs)
| 4,031
|
Python
|
.py
| 99
| 31.494949
| 71
| 0.590026
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,722
|
_odsbase.py
|
freeipa_freeipa/ipaserver/dnssec/_odsbase.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
import six
import abc
import sqlite3
from ipaplatform.paths import paths
ODS_SE_MAXLINE = 1024 # from ODS common/config.h
@six.add_metaclass(abc.ABCMeta)
class AbstractODSDBConnection():
"""Abstract class representing the Connection to ODS database."""
def __init__(self):
"""Creates a connection to the kasp database."""
self._db = sqlite3.connect(paths.OPENDNSSEC_KASP_DB)
self._db.row_factory = sqlite3.Row
self._db.execute('BEGIN')
@abc.abstractmethod
def get_zones(self):
"""Returns a list of zone names."""
@abc.abstractmethod
def get_zone_id(self, zone_name):
"""Returns a list of zone ids for the given zone_name."""
@abc.abstractmethod
def get_keys_for_zone(self, zone_id):
"""Returns a list of keys for the given zone_id."""
def close(self):
"""Closes the connection to the kasp database."""
self._db.close()
@six.add_metaclass(abc.ABCMeta)
class AbstractODSSignerConn():
"""Abstract class representing the Connection to ods-signer."""
def __init__(self, conn):
"""Initializes the object with a socket conn."""
self._conn = conn
@abc.abstractmethod
def read_cmd(self):
"""Reads the next command on the connection."""
@abc.abstractmethod
def send_reply_and_close(self, reply):
"""Sends the reply on the connection."""
| 1,475
|
Python
|
.py
| 40
| 31.475
| 69
| 0.675334
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,723
|
temp.py
|
freeipa_freeipa/ipaserver/dnssec/temp.py
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import errno
import shutil
import tempfile
class TemporaryDirectory:
def __init__(self, root):
self.root = root
def __enter__(self):
self.name = tempfile.mkdtemp(dir=self.root)
return self.name
def __exit__(self, exc_type, exc_value, traceback):
try:
shutil.rmtree(self.name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
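# --- Illustrative sketch (editor's note, not part of the original module) ---
# Typical use: the directory is created under `root` on __enter__ and
# removed on __exit__; a concurrent removal (ENOENT) is tolerated.
#   with TemporaryDirectory('/tmp') as workdir:
#       with open(os.path.join(workdir, 'scratch'), 'w') as f:
#           f.write('temporary data')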
| 499
|
Python
|
.py
| 18
| 21.333333
| 66
| 0.62605
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,724
|
_ods14.py
|
freeipa_freeipa/ipaserver/dnssec/_ods14.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
import os
import socket
from ipapython import ipautil
from ipaserver.dnssec._odsbase import AbstractODSDBConnection
from ipaserver.dnssec._odsbase import AbstractODSSignerConn
from ipaserver.dnssec._odsbase import ODS_SE_MAXLINE
from ipaplatform.constants import constants
from ipaplatform.paths import paths
class ODSDBConnection(AbstractODSDBConnection):
def get_zones(self):
cur = self._db.execute("SELECT name from zones")
rows = cur.fetchall()
return [row['name'] for row in rows]
def get_zone_id(self, zone_name):
cur = self._db.execute(
"SELECT id FROM zones WHERE LOWER(name)=LOWER(?)",
(zone_name,))
rows = cur.fetchall()
return [row[0] for row in rows]
def get_keys_for_zone(self, zone_id):
cur = self._db.execute(
"SELECT kp.HSMkey_id, kp.generate, kp.algorithm, "
"dnsk.publish, dnsk.active, dnsk.retire, dnsk.dead, "
"dnsk.keytype, dnsk.state "
"FROM keypairs AS kp "
"JOIN dnsseckeys AS dnsk ON kp.id = dnsk.keypair_id "
"WHERE dnsk.zone_id = ?", (zone_id,))
for row in cur:
yield row
class ODSSignerConn(AbstractODSSignerConn):
def read_cmd(self):
cmd = self._conn.recv(ODS_SE_MAXLINE).strip()
return cmd
def send_reply_and_close(self, reply):
self._conn.send(reply + b'\n')
self._conn.shutdown(socket.SHUT_RDWR)
self._conn.close()
class ODSTask():
def run_ods_setup(self):
"""Initialize a new kasp.db"""
cmd = [paths.ODS_KSMUTIL, 'setup']
return ipautil.run(cmd, stdin="y", runas=constants.ODS_USER)
def run_ods_notify(self, **kwargs):
"""Notify ods-enforcerd to reload its conf."""
cmd = [paths.ODS_KSMUTIL, 'notify']
# run commands as ODS user
if os.geteuid() == 0:
kwargs['runas'] = constants.ODS_USER
return ipautil.run(cmd, **kwargs)
def run_ods_policy_import(self, **kwargs):
"""Run OpenDNSSEC manager command to import policy."""
# This step is needed with OpenDNSSEC 2.1 only
return
def run_ods_manager(self, params, **kwargs):
"""Run OpenDNSSEC manager command (ksmutil, enforcer)
:param params: parameter for ODS command
:param kwargs: additional arguments for ipautil.run()
:return: result from ipautil.run()
"""
assert params[0] != 'setup'
cmd = [paths.ODS_KSMUTIL]
cmd.extend(params)
# run commands as ODS user
if os.geteuid() == 0:
kwargs['runas'] = constants.ODS_USER
return ipautil.run(cmd, **kwargs)
| 2,772
|
Python
|
.py
| 69
| 32.391304
| 68
| 0.63511
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,725
|
localhsm.py
|
freeipa_freeipa/ipaserver/dnssec/localhsm.py
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
from collections.abc import MutableMapping
import os
from pprint import pprint
from ipalib.constants import SOFTHSM_DNSSEC_TOKEN_LABEL
from ipaplatform.paths import paths
from ipaserver import p11helper as _ipap11helper
from ipaserver.dnssec.abshsm import (attrs_name2id, attrs_id2name, AbstractHSM,
keytype_id2name, keytype_name2id,
ldap2p11helper_api_params)
from ipaserver.dnssec.ldapkeydb import str_hexlify
private_key_api_params = set(["label", "id", "data", "unwrapping_key",
"wrapping_mech", "key_type", "cka_always_authenticate", "cka_copyable",
"cka_decrypt", "cka_derive", "cka_extractable", "cka_modifiable",
"cka_private", "cka_sensitive", "cka_sign", "cka_sign_recover",
"cka_unwrap", "cka_wrap_with_trusted"])
public_key_api_params = set(["label", "id", "data", "cka_copyable",
"cka_derive", "cka_encrypt", "cka_modifiable", "cka_private",
"cka_trusted", "cka_verify", "cka_verify_recover", "cka_wrap"])
class Key(MutableMapping):
def __init__(self, p11, handle):
self.p11 = p11
self.handle = handle
# sanity check CKA_ID and CKA_LABEL
try:
cka_id = self.p11.get_attribute(handle, _ipap11helper.CKA_ID)
assert len(cka_id) != 0, 'ipk11id length should not be 0'
except _ipap11helper.NotFound:
raise _ipap11helper.NotFound('key without ipk11id: handle %s' % handle)
try:
cka_label = self.p11.get_attribute(handle, _ipap11helper.CKA_LABEL)
assert len(cka_label) != 0, 'ipk11label length should not be 0'
except _ipap11helper.NotFound:
raise _ipap11helper.NotFound(
'key without ipk11label: id 0x%s' % str_hexlify(cka_id))
def __getitem__(self, key):
key = key.lower()
try:
value = self.p11.get_attribute(self.handle, attrs_name2id[key])
if key == 'ipk11keytype':
value = keytype_id2name[value]
return value
except _ipap11helper.NotFound:
raise KeyError()
def __setitem__(self, key, value):
key = key.lower()
if key == 'ipk11keytype':
value = keytype_name2id[value]
return self.p11.set_attribute(self.handle, attrs_name2id[key], value)
def __delitem__(self, key):
raise _ipap11helper.P11HelperException('__delitem__ is not supported')
def __iter__(self):
"""generates list of ipa names of all attributes present in the object"""
for pkcs11_id, ipa_name in attrs_id2name.items():
try:
self.p11.get_attribute(self.handle, pkcs11_id)
except _ipap11helper.NotFound:
continue
yield ipa_name
def __len__(self):
cnt = 0
for _attr in self:
cnt += 1
return cnt
def __str__(self):
return str(dict(self))
def __repr__(self):
return self.__str__()
class LocalHSM(AbstractHSM):
def __init__(self, library, label, pin):
self.cache_replica_pubkeys = None
self.p11 = _ipap11helper.P11_Helper(label, pin, library)
def __del__(self):
self.p11.finalize()
def find_keys(self, **kwargs):
"""Return dict with Key objects matching given criteria.
CKA_ID is used as key so all matching objects have to have unique ID."""
# this is a hack for old p11-kit URI parser
# see https://bugs.freedesktop.org/show_bug.cgi?id=85057
if 'uri' in kwargs:
kwargs['uri'] = kwargs['uri'].replace('type=', 'object-type=')
handles = self.p11.find_keys(**kwargs)
keys = {}
for h in handles:
key = Key(self.p11, h)
o_id = key['ipk11id']
assert o_id not in keys, 'duplicate ipk11Id = 0x%s; keys = %s' % (
str_hexlify(o_id), keys)
keys[o_id] = key
return keys
@property
def replica_pubkeys(self):
return self._filter_replica_keys(
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PUBLIC_KEY))
@property
def replica_pubkeys_wrap(self):
return self._filter_replica_keys(
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PUBLIC_KEY,
cka_wrap=True))
@property
def master_keys(self):
"""Get all usable DNSSEC master keys"""
        keys = self.find_keys(objclass=_ipap11helper.KEY_CLASS_SECRET_KEY,
                              label=u'dnssec-master', cka_unwrap=True)
for key in keys.values():
prefix = 'dnssec-master'
assert key['ipk11label'] == prefix, \
'secret key ipk11id=0x%s ipk11label="%s" with ipk11UnWrap ' \
'= TRUE does not have "%s" key label' % (
str_hexlify(key['ipk11id']),
str(key['ipk11label']), prefix
)
return keys
@property
def active_master_key(self):
"""Get one active DNSSEC master key suitable for key wrapping"""
keys = self.find_keys(objclass=_ipap11helper.KEY_CLASS_SECRET_KEY,
label=u'dnssec-master', cka_wrap=True, cka_unwrap=True)
assert len(keys) > 0, "DNSSEC master key with UN/WRAP = TRUE not found"
return keys.popitem()[1]
@property
def zone_pubkeys(self):
return self._filter_zone_keys(
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PUBLIC_KEY))
@property
def zone_privkeys(self):
return self._filter_zone_keys(
self.find_keys(objclass=_ipap11helper.KEY_CLASS_PRIVATE_KEY))
def import_public_key(self, source, data):
params = ldap2p11helper_api_params(source)
# filter out params inappropriate for public keys
for par in set(params).difference(public_key_api_params):
del params[par]
params['data'] = data
h = self.p11.import_public_key(**params)
return Key(self.p11, h)
def import_private_key(self, source, data, unwrapping_key):
params = ldap2p11helper_api_params(source)
# filter out params inappropriate for private keys
for par in set(params).difference(private_key_api_params):
del params[par]
params['data'] = data
params['unwrapping_key'] = unwrapping_key.handle
h = self.p11.import_wrapped_private_key(**params)
return Key(self.p11, h)
if __name__ == '__main__':
if 'SOFTHSM2_CONF' not in os.environ:
os.environ['SOFTHSM2_CONF'] = paths.DNSSEC_SOFTHSM2_CONF
localhsm = LocalHSM(paths.LIBSOFTHSM2_SO, SOFTHSM_DNSSEC_TOKEN_LABEL,
open(paths.DNSSEC_SOFTHSM_PIN).read())
print('replica public keys: CKA_WRAP = TRUE')
print('====================================')
for pubkey_id, pubkey in localhsm.replica_pubkeys_wrap.items():
print(str_hexlify(pubkey_id))
pprint(pubkey)
print('')
print('replica public keys: all')
print('========================')
for pubkey_id, pubkey in localhsm.replica_pubkeys.items():
print(str_hexlify(pubkey_id))
pprint(pubkey)
print('')
print('master keys')
print('===========')
for mkey_id, mkey in localhsm.master_keys.items():
print(str_hexlify(mkey_id))
pprint(mkey)
print('')
print('zone public keys')
print('================')
for key_id, zkey in localhsm.zone_pubkeys.items():
print(str_hexlify(key_id))
pprint(zkey)
print('')
print('zone private keys')
print('=================')
for key_id, zkey in localhsm.zone_privkeys.items():
print(str_hexlify(key_id))
pprint(zkey)
| 7,848
|
Python
|
.py
| 181
| 34.353591
| 115
| 0.608661
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,726
|
ldapkeydb.py
|
freeipa_freeipa/ipaserver/dnssec/ldapkeydb.py
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
from binascii import hexlify
from collections.abc import MutableMapping
import logging
from pprint import pprint
import ipalib
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython import ipaldap
from ipapython import ipa_log_manager
from ipaserver.dnssec.abshsm import (
attrs_name2id,
AbstractHSM,
bool_attr_names,
populate_pkcs11_metadata)
from ipaserver import p11helper as _ipap11helper
import uuid
logger = logging.getLogger(__name__)
def uri_escape(val):
"""convert val to %-notation suitable for ID component in URI"""
if len(val) == 0:
raise ValueError("zero-length URI component detected")
hexval = str_hexlify(val)
out = '%'
out += '%'.join(hexval[i:i+2] for i in range(0, len(hexval), 2))
return out
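# --- Illustrative sketch (editor's note, not part of the original module) ---
# uri_escape() percent-encodes every byte for use as the id component of a
# pkcs11: URI, e.g. uri_escape(b'\x01\xab') == '%01%ab'.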
def ldap_bool(val):
if val == 'TRUE' or val is True:
return True
elif val == 'FALSE' or val is False:
return False
else:
raise ValueError('invalid LDAP boolean "%s"' % val)
def get_default_attrs(object_classes):
# object class -> default attribute values mapping
defaults = {
u'ipk11publickey': {
'ipk11copyable': True,
'ipk11derive': False,
'ipk11encrypt': False,
'ipk11local': True,
'ipk11modifiable': True,
'ipk11private': True,
'ipk11trusted': False,
'ipk11verify': True,
'ipk11verifyrecover': True,
'ipk11wrap': False
},
u'ipk11privatekey': {
'ipk11alwaysauthenticate': False,
'ipk11alwayssensitive': True,
'ipk11copyable': True,
'ipk11decrypt': False,
'ipk11derive': False,
'ipk11extractable': True,
'ipk11local': True,
'ipk11modifiable': True,
'ipk11neverextractable': False,
'ipk11private': True,
'ipk11sensitive': True,
'ipk11sign': True,
'ipk11signrecover': True,
'ipk11unwrap': False,
'ipk11wrapwithtrusted': False
},
u'ipk11secretkey': {
'ipk11alwaysauthenticate': False,
'ipk11alwayssensitive': True,
'ipk11copyable': True,
'ipk11decrypt': False,
'ipk11derive': False,
'ipk11encrypt': False,
'ipk11extractable': True,
'ipk11local': True,
'ipk11modifiable': True,
'ipk11neverextractable': False,
'ipk11private': True,
'ipk11sensitive': True,
'ipk11sign': False,
'ipk11trusted': False,
'ipk11unwrap': True,
'ipk11verify': False,
'ipk11wrap': True,
'ipk11wrapwithtrusted': False
}
}
# get set of supported object classes
present_clss = set()
for cls in object_classes:
present_clss.add(cls.lower())
present_clss.intersection_update(set(defaults.keys()))
if len(present_clss) <= 0:
raise ValueError(
"none of '%s' object classes are supported" % object_classes
)
result = {}
for cls in present_clss:
result.update(defaults[cls])
return result
def str_hexlify(data):
out = hexlify(data)
if isinstance(out, bytes):
out = out.decode('utf-8')
return out
class Key(MutableMapping):
"""abstraction to hide LDAP entry weirdnesses:
- non-normalized attribute names
- boolean attributes returned as strings
- planned entry deletion prevents subsequent use of the instance
"""
def __init__(self, entry, ldap, ldapkeydb):
self.entry = entry
self._delentry = None # indicates that object was deleted
self.ldap = ldap
self.ldapkeydb = ldapkeydb
def __assert_not_deleted(self):
assert self.entry and not self._delentry, (
"attempt to use to-be-deleted entry %s detected"
% self._delentry.dn)
def __getitem__(self, key):
self.__assert_not_deleted()
val = self.entry.single_value[key]
if key.lower() in bool_attr_names:
val = ldap_bool(val)
return val
def __setitem__(self, key, value):
self.__assert_not_deleted()
self.entry[key] = value
def __delitem__(self, key):
self.__assert_not_deleted()
del self.entry[key]
def __iter__(self):
"""generates list of ipa names of all PKCS#11 attributes present in the object"""
self.__assert_not_deleted()
for ipa_name in list(self.entry.keys()):
lowercase = ipa_name.lower()
if lowercase in attrs_name2id:
yield lowercase
def __len__(self):
self.__assert_not_deleted()
return len(self.entry)
def __repr__(self):
if self._delentry:
return 'deleted entry: %s' % repr(self._delentry)
sanitized = dict(self.entry)
for attr in ['ipaPrivateKey', 'ipaPublicKey', 'ipk11publickeyinfo']:
if attr in sanitized:
del sanitized[attr]
return repr(sanitized)
def _cleanup_key(self):
"""remove default values from LDAP entry"""
default_attrs = get_default_attrs(self.entry['objectclass'])
empty = object()
for attr, attr_val in default_attrs.items():
if self.get(attr, empty) == attr_val:
del self[attr]
def _update_key(self):
"""remove default values from LDAP entry and write back changes"""
if self._delentry:
self._delete_key()
return
self._cleanup_key()
try:
self.ldap.update_entry(self.entry)
except ipalib.errors.EmptyModlist:
pass
def _delete_key(self):
"""remove key metadata entry from LDAP
After calling this, the python object is no longer valid and all
subsequent method calls on it will fail.
"""
assert not self.entry, (
"Key._delete_key() called before Key.schedule_deletion()")
assert self._delentry, "Key._delete_key() called more than once"
logger.debug('deleting key id 0x%s DN %s from LDAP',
str_hexlify(self._delentry.single_value['ipk11id']),
self._delentry.dn)
self.ldap.delete_entry(self._delentry)
self._delentry = None
self.ldap = None
self.ldapkeydb = None
def schedule_deletion(self):
"""schedule key deletion from LDAP
Calling schedule_deletion() will make this object incompatible with
        a normal Key. After that the object must not be read or modified.
Key metadata will be actually deleted when LdapKeyDB.flush() is called.
"""
assert not self._delentry, (
"Key.schedule_deletion() called more than once")
self._delentry = self.entry
self.entry = None
class ReplicaKey(Key):
# TODO: object class assert
def __init__(self, entry, ldap, ldapkeydb):
super(ReplicaKey, self).__init__(entry, ldap, ldapkeydb)
class MasterKey(Key):
# TODO: object class assert
def __init__(self, entry, ldap, ldapkeydb):
super(MasterKey, self).__init__(entry, ldap, ldapkeydb)
@property
def wrapped_entries(self):
"""LDAP entires with wrapped data
One entry = one blob + ipaWrappingKey pointer to unwrapping key"""
keys = []
if 'ipaSecretKeyRef' not in self.entry:
return keys
for dn in self.entry['ipaSecretKeyRef']:
try:
obj = self.ldap.get_entry(dn)
keys.append(obj)
except ipalib.errors.NotFound:
continue
return keys
def add_wrapped_data(self, data, wrapping_mech, replica_key_id):
wrapping_key_uri = 'pkcs11:id=%s;type=public' \
% uri_escape(replica_key_id)
# TODO: replace this with 'autogenerate' to prevent collisions
uuid_rdn = DN('ipk11UniqueId=%s' % uuid.uuid1())
entry_dn = DN(uuid_rdn, self.ldapkeydb.base_dn)
entry = self.ldap.make_entry(entry_dn,
objectClass=['ipaSecretKeyObject', 'ipk11Object'],
ipaSecretKey=data,
ipaWrappingKey=wrapping_key_uri,
ipaWrappingMech=wrapping_mech)
logger.info('adding master key 0x%s wrapped with replica key 0x%s to '
'%s',
str_hexlify(self['ipk11id']),
str_hexlify(replica_key_id),
entry_dn)
self.ldap.add_entry(entry)
if 'ipaSecretKeyRef' not in self.entry:
self.entry['objectClass'] += ['ipaSecretKeyRefObject']
self.entry.setdefault('ipaSecretKeyRef', []).append(entry_dn)
class LdapKeyDB(AbstractHSM):
def __init__(self, ldap, base_dn):
self.ldap = ldap
self.base_dn = base_dn
self.cache_replica_pubkeys_wrap = None
self.cache_masterkeys = None
self.cache_zone_keypairs = None
def _get_key_dict(self, key_type, ldap_filter):
try:
objs = self.ldap.get_entries(base_dn=self.base_dn,
filter=ldap_filter)
except ipalib.errors.NotFound:
return {}
keys = {}
for o in objs:
# add default values not present in LDAP
key = key_type(o, self.ldap, self)
default_attrs = get_default_attrs(key.entry['objectclass'])
for attr, attr_val in default_attrs.items():
key.setdefault(attr, attr_val)
if 'ipk11id' not in key:
raise ValueError(
'key is missing ipk11Id in %s' % key.entry.dn
)
key_id = key['ipk11id']
if key_id in keys:
raise ValueError(
"duplicate ipk11Id=0x%s in '%s' and '%s'"
% (str_hexlify(key_id), key.entry.dn,
keys[key_id].entry.dn)
)
if 'ipk11label' not in key:
raise ValueError(
"key '%s' is missing ipk11Label" % key.entry.dn
)
if 'objectclass' not in key.entry:
raise ValueError(
"key '%s' is missing objectClass attribute"
% key.entry.dn
)
keys[key_id] = key
self._update_keys()
return keys
def _update_keys(self):
for cache in [self.cache_masterkeys, self.cache_replica_pubkeys_wrap,
self.cache_zone_keypairs]:
if cache:
for key in cache.values():
key._update_key()
def flush(self):
"""write back content of caches to LDAP"""
self._update_keys()
self.cache_masterkeys = None
self.cache_replica_pubkeys_wrap = None
self.cache_zone_keypairs = None
def _import_keys_metadata(self, source_keys):
"""import key metadata from Key-compatible objects
        metadata from multiple source keys can be imported into a single
        LDAP object
:param: source_keys is iterable of (Key object, PKCS#11 object class)"""
entry_dn = DN('ipk11UniqueId=autogenerate', self.base_dn)
entry = self.ldap.make_entry(entry_dn, objectClass=['ipk11Object'])
new_key = Key(entry, self.ldap, self)
for source_key, pkcs11_class in source_keys:
if pkcs11_class == _ipap11helper.KEY_CLASS_SECRET_KEY:
entry['objectClass'].append('ipk11SecretKey')
elif pkcs11_class == _ipap11helper.KEY_CLASS_PUBLIC_KEY:
entry['objectClass'].append('ipk11PublicKey')
elif pkcs11_class == _ipap11helper.KEY_CLASS_PRIVATE_KEY:
entry['objectClass'].append('ipk11PrivateKey')
else:
raise ValueError(
"unsupported object class '%s'" % pkcs11_class
)
populate_pkcs11_metadata(source_key, new_key)
new_key._cleanup_key()
return new_key
def import_master_key(self, mkey):
new_key = self._import_keys_metadata(
[(mkey, _ipap11helper.KEY_CLASS_SECRET_KEY)])
self.ldap.add_entry(new_key.entry)
logger.debug('imported master key metadata: %s', new_key.entry)
def import_zone_key(self, pubkey, pubkey_data, privkey,
privkey_wrapped_data, wrapping_mech, master_key_id):
new_key = self._import_keys_metadata(
[(pubkey, _ipap11helper.KEY_CLASS_PUBLIC_KEY),
(privkey, _ipap11helper.KEY_CLASS_PRIVATE_KEY)])
new_key.entry['objectClass'].append('ipaPrivateKeyObject')
new_key.entry['ipaPrivateKey'] = privkey_wrapped_data
new_key.entry['ipaWrappingKey'] = 'pkcs11:id=%s;type=secret-key' \
% uri_escape(master_key_id)
new_key.entry['ipaWrappingMech'] = wrapping_mech
new_key.entry['objectClass'].append('ipaPublicKeyObject')
new_key.entry['ipaPublicKey'] = pubkey_data
self.ldap.add_entry(new_key.entry)
logger.debug('imported zone key id: 0x%s',
str_hexlify(new_key['ipk11id']))
@property
def replica_pubkeys_wrap(self):
if self.cache_replica_pubkeys_wrap:
return self.cache_replica_pubkeys_wrap
keys = self._filter_replica_keys(
self._get_key_dict(
ReplicaKey,
'(&(objectClass=ipk11PublicKey)(ipk11Wrap=TRUE)'
'(objectClass=ipaPublicKeyObject))'
)
)
self.cache_replica_pubkeys_wrap = keys
return keys
@property
def master_keys(self):
if self.cache_masterkeys:
return self.cache_masterkeys
keys = self._get_key_dict(
MasterKey,
'(&(objectClass=ipk11SecretKey)'
'(|(ipk11UnWrap=TRUE)(!(ipk11UnWrap=*)))'
'(ipk11Label=dnssec-master))'
)
for key in keys.values():
prefix = 'dnssec-master'
if key['ipk11label'] != prefix:
raise ValueError(
"secret key dn='%s' ipk11id=0x%s ipk11label='%s' with "
"ipk11UnWrap = TRUE does not have '%s' key label'"
% (key.entry.dn, str_hexlify(key['ipk11id']),
str(key['ipk11label']), prefix)
)
self.cache_masterkeys = keys
return keys
@property
def zone_keypairs(self):
if self.cache_zone_keypairs:
return self.cache_zone_keypairs
self.cache_zone_keypairs = self._filter_zone_keys(
self._get_key_dict(Key,
'(&(objectClass=ipk11PrivateKey)(objectClass=ipaPrivateKeyObject)(objectClass=ipk11PublicKey)(objectClass=ipaPublicKeyObject))'))
return self.cache_zone_keypairs
if __name__ == '__main__':
# this is debugging mode
    # print information we think is useful to stdout
# other garbage goes via logger to stderr
ipa_log_manager.standard_logging_setup(debug=True)
# IPA framework initialization
# no logging to file
ipalib.api.bootstrap(in_server=True, log=None, confdir=paths.ETC_IPA)
ipalib.api.finalize()
# LDAP initialization
dns_dn = DN(ipalib.api.env.container_dns, ipalib.api.env.basedn)
ldap = ipaldap.LDAPClient(ipalib.api.env.ldap_uri)
logger.debug('Connecting to LDAP')
    # GSSAPI will be used; the user has to be kinited already
ldap.gssapi_bind()
logger.debug('Connected')
ldapkeydb = LdapKeyDB(ldap, DN(('cn', 'keys'),
('cn', 'sec'),
ipalib.api.env.container_dns,
ipalib.api.env.basedn))
print('replica public keys: CKA_WRAP = TRUE')
print('====================================')
for pubkey_id, pubkey in ldapkeydb.replica_pubkeys_wrap.items():
print(str_hexlify(pubkey_id))
pprint(pubkey)
print('')
print('master keys')
print('===========')
for mkey_id, mkey in ldapkeydb.master_keys.items():
print(str_hexlify(mkey_id))
pprint(mkey)
print('')
print('zone key pairs')
print('==============')
for key_id, key in ldapkeydb.zone_keypairs.items():
print(str_hexlify(key_id))
pprint(key)
| 16,641 | Python | .py | 412 | 30.036408 | 145 | 0.588447 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,727 | bindmgr.py | freeipa_freeipa/ipaserver/dnssec/bindmgr.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
from datetime import datetime
import logging
import dns.name
import errno
import os
import shutil
import stat
import six
import ipalib.constants
from ipapython.dn import DN
from ipapython import ipautil
from ipaplatform.constants import constants as platformconstants
from ipaplatform.paths import paths
from ipaserver.dnssec.temp import TemporaryDirectory
from ipaserver.install import installutils
logger = logging.getLogger(__name__)
time_bindfmt = '%Y%m%d%H%M%S'
# this daemon should run under ods:named user:group
# user has to be ods because ODSMgr.py sends a signal to ods-enforcerd
FILE_PERM = (stat.S_IRUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IWUSR)
DIR_PERM = (stat.S_IRWXU | stat.S_IRWXG)
class BINDMgr:
"""BIND key manager. It does LDAP->BIND key files synchronization.
One LDAP object with idnsSecKey object class will produce
single pair of BIND key files.
"""
def __init__(self, api):
self.api = api
self.ldap_keys = {}
self.modified_zones = set()
def notify_zone(self, zone):
cmd = ['rndc', 'sign', zone.to_text()]
result = ipautil.run(cmd, capture_output=True)
logger.info('%s', result.output_log)
def dn2zone_name(self, dn):
"""cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example"""
# verify that metadata object is under DNS sub-tree
dn = DN(dn)
container = DN(self.api.env.container_dns, self.api.env.basedn)
idx = dn.rfind(container)
assert idx != -1, 'Metadata object %s is not inside %s' % (dn, container)
assert len(dn[idx - 1]) == 1, 'Multi-valued RDN as zone name is not supported'
return dns.name.from_text(dn[idx - 1]['idnsname'])
def time_ldap2bindfmt(self, str_val):
if isinstance(str_val, bytes):
str_val = str_val.decode('utf-8')
dt = datetime.strptime(
str_val,
ipalib.constants.LDAP_GENERALIZED_TIME_FORMAT
)
return dt.strftime(time_bindfmt).encode('utf-8')
def dates2params(self, ldap_attrs):
"""Convert LDAP timestamps to list of parameters suitable
for dnssec-keyfromlabel utility"""
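        # Illustrative example (single publish timestamp; relies on dict
        # insertion order, i.e. Python 3.7+):
        #   {'idnsseckeypublish': [b'20140813162153Z']}
        # maps to
        #   ['-P', b'20140813162153', '-A', 'none', '-I', 'none', '-D', 'none']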
attr2param = {'idnsseckeypublish': '-P',
'idnsseckeyactivate': '-A',
'idnsseckeyinactive': '-I',
'idnsseckeydelete': '-D'}
params = []
for attr, param in attr2param.items():
params.append(param)
if attr in ldap_attrs:
assert len(ldap_attrs[attr]) == 1, 'Timestamp %s is expected to be single-valued' % attr
params.append(self.time_ldap2bindfmt(ldap_attrs[attr][0]))
else:
params.append('none')
return params
def ldap_event(self, op, uuid, attrs):
"""Record single LDAP event - key addition, deletion or modification.
Change is only recorded to memory.
self.sync() has to be called to synchronize change to BIND."""
assert op in ('add', 'del', 'mod')
zone = self.dn2zone_name(attrs['dn'])
self.modified_zones.add(zone)
zone_keys = self.ldap_keys.setdefault(zone, {})
if op == 'add':
logger.info('Key metadata %s added to zone %s',
attrs['dn'], zone)
zone_keys[uuid] = attrs
elif op == 'del':
logger.info('Key metadata %s deleted from zone %s',
attrs['dn'], zone)
zone_keys.pop(uuid)
elif op == 'mod':
logger.info('Key metadata %s updated in zone %s',
attrs['dn'], zone)
zone_keys[uuid] = attrs
def install_key(self, zone, uuid, attrs, workdir):
"""Run dnssec-keyfromlabel on given LDAP object.
:returns: base file name of output files, e.g. Kaaa.test.+008+19719
"""
logger.info('attrs: %s', attrs)
assert attrs.get('idnsseckeyzone', [b'FALSE'])[0] == b'TRUE', \
b'object %s is not a DNS zone key' % attrs['dn']
uri = b"%s;pin-source=%s" % (
attrs['idnsSecKeyRef'][0],
paths.DNSSEC_SOFTHSM_PIN.encode('utf-8')
)
cmd = [
paths.DNSSEC_KEYFROMLABEL,
'-E', 'pkcs11',
'-K', workdir,
'-a', attrs['idnsSecAlgorithm'][0],
'-l', uri
]
cmd.extend(self.dates2params(attrs))
if attrs.get('idnsSecKeySep', [b'FALSE'])[0].upper() == b'TRUE':
cmd.extend(['-f', 'KSK'])
if attrs.get('idnsSecKeyRevoke', [b'FALSE'])[0].upper() == b'TRUE':
cmd.extend(['-R', datetime.now().strftime(time_bindfmt)])
if platformconstants.NAMED_OPENSSL_ENGINE is not None:
cmd.extend(['-E', platformconstants.NAMED_OPENSSL_ENGINE])
cmd.append(zone.to_text())
installutils.check_entropy()
        # keys have to be readable by ODS & named
result = ipautil.run(cmd, capture_output=True)
basename = result.output.strip()
private_fn = "%s/%s.private" % (workdir, basename)
os.chmod(private_fn, FILE_PERM)
# this is useful mainly for debugging
with open("%s/%s.uuid" % (workdir, basename), 'w') as uuid_file:
uuid_file.write(uuid)
with open("%s/%s.dn" % (workdir, basename), 'w') as dn_file:
dn_file.write(attrs['dn'])
def get_zone_dir_name(self, zone):
"""Escape zone name to form suitable for file-system.
This method has to be equivalent to zr_get_zone_path()
in bind-dyndb-ldap/zone_register.c."""
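        # Illustrative example: the zone "my zone.test." is escaped to the
        # directory name "my%20zone.test".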
if zone == dns.name.root:
return "@"
# strip final (empty) label
zone = zone.relativize(dns.name.root)
escaped = []
for label in zone:
for char in label:
if six.PY3:
char = chr(char)
if char.isalnum() or char in "-_":
escaped.append(char.lower())
else:
escaped.append("%%%02X" % ord(char))
escaped.append('.')
# strip trailing period
return ''.join(escaped[:-1])
def sync_zone(self, zone):
logger.info('Synchronizing zone %s', zone)
zone_path = os.path.join(paths.BIND_LDAP_DNS_ZONE_WORKDIR,
self.get_zone_dir_name(zone))
try:
os.mkdir(zone_path, 0o770)
except FileExistsError:
pass
# fix HSM permissions
# TODO: move out
for prefix, dirs, files in os.walk(paths.DNSSEC_TOKENS_DIR, topdown=True):
for name in dirs:
fpath = os.path.join(prefix, name)
logger.debug('Fixing directory permissions: %s', fpath)
os.chmod(fpath, DIR_PERM | stat.S_ISGID)
for name in files:
fpath = os.path.join(prefix, name)
logger.debug('Fixing file permissions: %s', fpath)
os.chmod(fpath, FILE_PERM)
# TODO: move out
with TemporaryDirectory(zone_path) as tempdir:
for uuid, attrs in self.ldap_keys[zone].items():
self.install_key(zone, uuid, attrs, tempdir)
# keys were generated in a temporary directory, swap directories
target_dir = "%s/keys" % zone_path
try:
shutil.rmtree(target_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
shutil.move(tempdir, target_dir)
os.chmod(target_dir, DIR_PERM)
self.notify_zone(zone)
def sync(self, dnssec_zones):
"""Synchronize list of zones in LDAP with BIND.
dnssec_zones lists zones which should be processed. All other zones
will be ignored even though they were modified using ldap_event().
This filter is useful in cases where LDAP contains DNS zones which
have old metadata objects and DNSSEC disabled. Such zones must be
ignored to prevent errors while calling dnssec-keyfromlabel or rndc.
"""
logger.debug('Key metadata in LDAP: %s', self.ldap_keys)
logger.debug('Zones modified but skipped during bindmgr.sync: %s',
self.modified_zones - dnssec_zones)
for zone in self.modified_zones.intersection(dnssec_zones):
self.sync_zone(zone)
self.modified_zones = set()
def diff_zl(self, s1, s2):
"""Compute zones present in s1 but not present in s2.
Returns: List of (uuid, name) tuples with zones present only in s1."""
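        # Assumption for illustration: s1 and s2 are zone list objects that
        # expose a set of UUIDs (.uuids) and a uuid -> zone name dict
        # (.mapping), as the ODS zone list readers do.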
s1_extra = s1.uuids - s2.uuids
removed = [(uuid, name) for (uuid, name) in s1.mapping.items()
if uuid in s1_extra]
return removed
| 8,934 | Python | .py | 202 | 34.212871 | 116 | 0.596709 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,728 | syncrepl.py | freeipa_freeipa/ipaserver/dnssec/syncrepl.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
"""
This script implements a syncrepl consumer which syncs data from server
to a local dict.
"""
import logging
import ldap
from ldap.cidict import cidict
from ldap.ldapobject import ReconnectLDAPObject
from ldap.syncrepl import SyncreplConsumer
logger = logging.getLogger(__name__)
class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
"""
Syncrepl Consumer interface
"""
def __init__(self, *args, **kwargs):
# Initialise the LDAP Connection first
ldap.ldapobject.ReconnectLDAPObject.__init__(self, *args, **kwargs)
# Now prepare the data store
self.__data = cidict()
self.__data['uuids'] = cidict()
# We need this for later internal use
self.__presentUUIDs = cidict()
def close_db(self):
# This is useless for dict
pass
def syncrepl_get_cookie(self):
if 'cookie' in self.__data:
cookie = self.__data['cookie']
logger.debug('Current cookie is: %s', cookie)
return cookie
else:
logger.debug('Current cookie is: None (not received yet)')
return None
def syncrepl_set_cookie(self, cookie):
logger.debug('New cookie is: %s', cookie)
self.__data['cookie'] = cookie
def syncrepl_entry(self, dn, attrs, uuid):
attributes = cidict(attrs)
# First we determine the type of change we have here
# (and store away the previous data for later if needed)
previous_attributes = cidict()
if uuid in self.__data['uuids']:
change_type = 'modify'
previous_attributes = self.__data['uuids'][uuid]
else:
change_type = 'add'
# Now we store our knowledge of the existence of this entry
# (including the DN as an attribute for convenience)
attributes['dn'] = dn
self.__data['uuids'][uuid] = attributes
# Debugging
logger.debug('Detected %s of entry: %s %s', change_type, dn, uuid)
if change_type == 'modify':
self.application_sync(uuid, dn, attributes, previous_attributes)
else:
self.application_add(uuid, dn, attributes)
def syncrepl_delete(self, uuids):
# Make sure we know about the UUID being deleted, just in case...
uuids = [uuid for uuid in uuids if uuid in self.__data['uuids']]
# Delete all the UUID values we know of
for uuid in uuids:
attributes = self.__data['uuids'][uuid]
dn = attributes['dn']
logger.debug('Detected deletion of entry: %s %s', dn, uuid)
self.application_del(uuid, dn, attributes)
del self.__data['uuids'][uuid]
def syncrepl_present(self, uuids, refreshDeletes=False):
# If we have not been given any UUID values,
# then we have received all the present controls...
if uuids is None:
# We only do things if refreshDeletes is false
# as the syncrepl extension will call syncrepl_delete instead
# when it detects a delete notice
if refreshDeletes is False:
deletedEntries = [uuid for uuid in self.__data['uuids'].keys()
if uuid not in self.__presentUUIDs]
self.syncrepl_delete(deletedEntries)
# Phase is now completed, reset the list
self.__presentUUIDs = {}
else:
# Note down all the UUIDs we have been sent
for uuid in uuids:
self.__presentUUIDs[uuid] = True
def application_add(self, uuid, dn, attributes):
logger.info('Performing application add for: %s %s', dn, uuid)
logger.debug('New attributes: %s', attributes)
return True
def application_sync(self, uuid, dn, attributes, previous_attributes):
logger.info('Performing application sync for: %s %s', dn, uuid)
logger.debug('Old attributes: %s', previous_attributes)
logger.debug('New attributes: %s', attributes)
return True
def application_del(self, uuid, dn, previous_attributes):
logger.info('Performing application delete for: %s %s', dn, uuid)
logger.debug('Old attributes: %s', previous_attributes)
return True
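# A minimal usage sketch (hypothetical URI and base DN), driving the consumer
# with the standard python-ldap syncrepl calls:
#
#   conn = SyncReplConsumer('ldap://ldap.example.test')
#   msgid = conn.syncrepl_search('dc=example,dc=test', ldap.SCOPE_SUBTREE,
#                                mode='refreshAndPersist')
#   while conn.syncrepl_poll(all=1, msgid=msgid):
#       pass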
| 4,368 | Python | .py | 100 | 34.83 | 78 | 0.625676 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,729 | keysyncer.py | freeipa_freeipa/ipaserver/dnssec/keysyncer.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
import ldap.dn
import os
import dns.name
from ipaplatform.paths import paths
from ipapython import ipautil
from ipaserver.dnssec.syncrepl import SyncReplConsumer
from ipaserver.dnssec.odsmgr import ODSMgr
from ipaserver.dnssec.bindmgr import BINDMgr
logger = logging.getLogger(__name__)
SIGNING_ATTR = 'idnsSecInlineSigning'
OBJCLASS_ATTR = 'objectClass'
class KeySyncer(SyncReplConsumer):
def __init__(self, *args, **kwargs):
# hack
self.api = kwargs['ipa_api']
del kwargs['ipa_api']
# DNSSEC master should have OpenDNSSEC installed
# TODO: Is this the best way?
if os.environ.get('ISMASTER', '0') == '1':
self.ismaster = True
self.odsmgr = ODSMgr()
else:
self.ismaster = False
self.bindmgr = BINDMgr(self.api)
self.init_done = False
self.dnssec_zones = set()
SyncReplConsumer.__init__(self, *args, **kwargs)
def _get_objclass(self, attrs):
"""Get object class.
        The given set of attributes has to contain exactly one supported
        object class.
"""
supported_objclasses = {b'idnszone', b'idnsseckey', b'ipk11publickey'}
present_objclasses = set(
o.lower() for o in attrs[OBJCLASS_ATTR]
).intersection(
supported_objclasses
)
assert len(present_objclasses) == 1, attrs[OBJCLASS_ATTR]
return present_objclasses.pop()
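    # _get_objclass() example: attrs[OBJCLASS_ATTR] == [b'top', b'idnsZone']
    # yields b'idnszone'.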
def __get_signing_attr(self, attrs):
"""Get SIGNING_ATTR from dictionary with LDAP zone attributes.
Returned value is normalized to TRUE or FALSE, defaults to FALSE."""
values = attrs.get(SIGNING_ATTR, [b'FALSE'])
assert len(values) == 1, '%s is expected to be single-valued' \
% SIGNING_ATTR
return values[0].upper()
def __is_dnssec_enabled(self, attrs):
"""Test if LDAP DNS zone with given attributes is DNSSEC enabled."""
return self.__get_signing_attr(attrs) == b'TRUE'
def __is_replica_pubkey(self, attrs):
vals = attrs.get('ipk11label', [])
if len(vals) != 1:
return False
return vals[0].startswith(b'dnssec-replica:')
def application_add(self, uuid, dn, attributes):
objclass = self._get_objclass(attributes)
if objclass == b'idnszone':
self.zone_add(uuid, dn, attributes)
elif objclass == b'idnsseckey':
self.key_meta_add(uuid, dn, attributes)
elif objclass == b'ipk11publickey' and \
self.__is_replica_pubkey(attributes):
self.hsm_master_sync()
def application_del(self, uuid, dn, previous_attributes):
objclass = self._get_objclass(previous_attributes)
if objclass == b'idnszone':
self.zone_del(uuid, dn, previous_attributes)
elif objclass == b'idnsseckey':
self.key_meta_del(uuid, dn, previous_attributes)
elif objclass == b'ipk11publickey' and \
self.__is_replica_pubkey(previous_attributes):
self.hsm_master_sync()
def application_sync(self, uuid, dn, attributes, previous_attributes):
objclass = self._get_objclass(previous_attributes)
if objclass == b'idnszone':
olddn = ldap.dn.str2dn(previous_attributes['dn'])
newdn = ldap.dn.str2dn(attributes['dn'])
assert olddn == newdn, 'modrdn operation is not supported'
oldval = self.__get_signing_attr(previous_attributes)
newval = self.__get_signing_attr(attributes)
if oldval != newval:
if self.__is_dnssec_enabled(attributes):
self.zone_add(uuid, olddn, attributes)
else:
self.zone_del(uuid, olddn, previous_attributes)
elif objclass == b'idnsseckey':
self.key_metadata_sync(uuid, dn, previous_attributes, attributes)
elif objclass == b'ipk11publickey' and \
self.__is_replica_pubkey(attributes):
self.hsm_master_sync()
def syncrepl_refreshdone(self):
        logger.info('Initial LDAP dump is done, synchronizing with ODS and '
'BIND')
self.init_done = True
self.ods_sync()
self.hsm_replica_sync()
self.hsm_master_sync()
self.bindmgr.sync(self.dnssec_zones)
# idnsSecKey wrapper
# Assumption: metadata points to the same key blob all the time,
# i.e. it is not necessary to re-download blobs because of change in DNSSEC
# metadata - DNSSEC flags or timestamps.
def key_meta_add(self, uuid, dn, newattrs):
self.hsm_replica_sync()
self.bindmgr.ldap_event('add', uuid, newattrs)
self.bindmgr_sync(self.dnssec_zones)
def key_meta_del(self, uuid, dn, oldattrs):
self.bindmgr.ldap_event('del', uuid, oldattrs)
self.bindmgr_sync(self.dnssec_zones)
self.hsm_replica_sync()
def key_metadata_sync(self, uuid, dn, oldattrs, newattrs):
self.bindmgr.ldap_event('mod', uuid, newattrs)
self.bindmgr_sync(self.dnssec_zones)
def bindmgr_sync(self, dnssec_zones):
if self.init_done:
self.bindmgr.sync(dnssec_zones)
# idnsZone wrapper
def zone_add(self, uuid, dn, newattrs):
zone = dns.name.from_text(newattrs['idnsname'][0])
if self.__is_dnssec_enabled(newattrs):
self.dnssec_zones.add(zone)
else:
self.dnssec_zones.discard(zone)
if not self.ismaster:
return
if self.__is_dnssec_enabled(newattrs):
self.odsmgr.ldap_event('add', uuid, newattrs)
self.ods_sync()
def zone_del(self, uuid, dn, oldattrs):
zone = dns.name.from_text(oldattrs['idnsname'][0])
self.dnssec_zones.discard(zone)
if not self.ismaster:
return
if self.__is_dnssec_enabled(oldattrs):
self.odsmgr.ldap_event('del', uuid, oldattrs)
self.ods_sync()
def ods_sync(self):
if not self.ismaster:
return
if self.init_done:
self.odsmgr.sync()
# triggered by modification to idnsSecKey objects
def hsm_replica_sync(self):
"""Download keys from LDAP to local HSM."""
if self.ismaster:
return
if not self.init_done:
return
ipautil.run([paths.IPA_DNSKEYSYNCD_REPLICA])
# triggered by modification to ipk11PublicKey objects
def hsm_master_sync(self):
"""Download replica keys from LDAP to local HSM
& upload master and zone keys to LDAP."""
if not self.ismaster:
return
if not self.init_done:
return
ipautil.run([paths.ODS_SIGNER, 'ipa-hsm-update'])
| 6,887 | Python | .py | 163 | 33.159509 | 79 | 0.627524 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,730 | abshsm.py | freeipa_freeipa/ipaserver/dnssec/abshsm.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
from ipaserver import p11helper as _ipap11helper
logger = logging.getLogger(__name__)
attrs_id2name = {
#_ipap11helper.CKA_ALLOWED_MECHANISMS: 'ipk11allowedmechanisms',
_ipap11helper.CKA_ALWAYS_AUTHENTICATE: 'ipk11alwaysauthenticate',
_ipap11helper.CKA_ALWAYS_SENSITIVE: 'ipk11alwayssensitive',
#_ipap11helper.CKA_CHECK_VALUE: 'ipk11checkvalue',
_ipap11helper.CKA_COPYABLE: 'ipk11copyable',
_ipap11helper.CKA_DECRYPT: 'ipk11decrypt',
_ipap11helper.CKA_DERIVE: 'ipk11derive',
#_ipap11helper.CKA_DESTROYABLE: 'ipk11destroyable',
_ipap11helper.CKA_ENCRYPT: 'ipk11encrypt',
#_ipap11helper.CKA_END_DATE: 'ipk11enddate',
_ipap11helper.CKA_EXTRACTABLE: 'ipk11extractable',
_ipap11helper.CKA_ID: 'ipk11id',
#_ipap11helper.CKA_KEY_GEN_MECHANISM: 'ipk11keygenmechanism',
_ipap11helper.CKA_KEY_TYPE: 'ipk11keytype',
_ipap11helper.CKA_LABEL: 'ipk11label',
_ipap11helper.CKA_LOCAL: 'ipk11local',
_ipap11helper.CKA_MODIFIABLE: 'ipk11modifiable',
_ipap11helper.CKA_NEVER_EXTRACTABLE: 'ipk11neverextractable',
_ipap11helper.CKA_PRIVATE: 'ipk11private',
#_ipap11helper.CKA_PUBLIC_KEY_INFO: 'ipapublickey',
#_ipap11helper.CKA_PUBLIC_KEY_INFO: 'ipk11publickeyinfo',
_ipap11helper.CKA_SENSITIVE: 'ipk11sensitive',
_ipap11helper.CKA_SIGN: 'ipk11sign',
_ipap11helper.CKA_SIGN_RECOVER: 'ipk11signrecover',
#_ipap11helper.CKA_START_DATE: 'ipk11startdate',
#_ipap11helper.CKA_SUBJECT: 'ipk11subject',
_ipap11helper.CKA_TRUSTED: 'ipk11trusted',
_ipap11helper.CKA_UNWRAP: 'ipk11unwrap',
#_ipap11helper.CKA_UNWRAP_TEMPLATE: 'ipk11unwraptemplate',
_ipap11helper.CKA_VERIFY: 'ipk11verify',
_ipap11helper.CKA_VERIFY_RECOVER: 'ipk11verifyrecover',
_ipap11helper.CKA_WRAP: 'ipk11wrap',
#_ipap11helper.CKA_WRAP_TEMPLATE: 'ipk11wraptemplate',
_ipap11helper.CKA_WRAP_WITH_TRUSTED: 'ipk11wrapwithtrusted',
}
attrs_name2id = {v: k for k, v in attrs_id2name.items()}
# attribute:
# http://www.freeipa.org/page/V4/PKCS11_in_LDAP/Schema#ipk11KeyType
#
# mapping table:
# http://www.freeipa.org/page/V4/PKCS11_in_LDAP/Schema#CK_MECHANISM_TYPE
keytype_name2id = {
"rsa": _ipap11helper.KEY_TYPE_RSA,
"aes": _ipap11helper.KEY_TYPE_AES,
}
keytype_id2name = {v: k for k, v in keytype_name2id.items()}
wrappingmech_name2id = {
"rsaPkcs": _ipap11helper.MECH_RSA_PKCS,
"rsaPkcsOaep": _ipap11helper.MECH_RSA_PKCS_OAEP,
"aesKeyWrap": _ipap11helper.MECH_AES_KEY_WRAP,
"aesKeyWrapPad": _ipap11helper.MECH_AES_KEY_WRAP_PAD
}
wrappingmech_id2name = {v: k for k, v in wrappingmech_name2id.items()}
bool_attr_names = set([
'ipk11alwaysauthenticate',
'ipk11alwayssensitive',
'ipk11copyable',
'ipk11decrypt',
'ipk11derive',
'ipk11encrypt',
'ipk11extractable',
'ipk11local',
'ipk11modifiable',
'ipk11neverextractable',
'ipk11private',
'ipk11sensitive',
'ipk11sign',
'ipk11signrecover',
'ipk11trusted',
'ipk11unwrap',
'ipk11verify',
'ipk11verifyrecover',
'ipk11wrap',
'ipk11wrapwithtrusted',
])
modifiable_attrs_id2name = {
_ipap11helper.CKA_DECRYPT: 'ipk11decrypt',
_ipap11helper.CKA_DERIVE: 'ipk11derive',
_ipap11helper.CKA_ENCRYPT: 'ipk11encrypt',
_ipap11helper.CKA_EXTRACTABLE: 'ipk11extractable',
_ipap11helper.CKA_ID: 'ipk11id',
_ipap11helper.CKA_LABEL: 'ipk11label',
_ipap11helper.CKA_SENSITIVE: 'ipk11sensitive',
_ipap11helper.CKA_SIGN: 'ipk11sign',
_ipap11helper.CKA_SIGN_RECOVER: 'ipk11signrecover',
_ipap11helper.CKA_UNWRAP: 'ipk11unwrap',
_ipap11helper.CKA_VERIFY: 'ipk11verify',
_ipap11helper.CKA_VERIFY_RECOVER: 'ipk11verifyrecover',
_ipap11helper.CKA_WRAP: 'ipk11wrap',
}
modifiable_attrs_name2id = {v: k for k, v in modifiable_attrs_id2name.items()}
def sync_pkcs11_metadata(name, source, target):
"""sync ipk11 metadata from source object to target object"""
# iterate over list of modifiable PKCS#11 attributes - this prevents us
# from attempting to set read-only attributes like CKA_LOCAL
for attr in modifiable_attrs_name2id:
if attr in source:
if source[attr] != target[attr]:
logger.debug('%s: Updating attribute %s from "%s" to "%s"',
name,
attr,
repr(source[attr]),
repr(target[attr]))
target[attr] = source[attr]
def populate_pkcs11_metadata(source, target):
"""populate all ipk11 metadata attributes in target object from source object"""
for attr in attrs_name2id:
if attr in source:
target[attr] = source[attr]
def ldap2p11helper_api_params(ldap_key):
"""prepare dict with metadata parameters suitable for key unwrapping"""
unwrap_params = {}
# some attributes are just renamed
direct_param_map = {
"ipk11label": "label",
"ipk11id": "id",
"ipk11copyable": "cka_copyable",
"ipk11decrypt": "cka_decrypt",
"ipk11derive": "cka_derive",
"ipk11encrypt": "cka_encrypt",
"ipk11extractable": "cka_extractable",
"ipk11modifiable": "cka_modifiable",
"ipk11private": "cka_private",
"ipk11sensitive": "cka_sensitive",
"ipk11sign": "cka_sign",
"ipk11unwrap": "cka_unwrap",
"ipk11verify": "cka_verify",
"ipk11wrap": "cka_wrap",
"ipk11wrapwithtrusted": "cka_wrap_with_trusted"
}
for ldap_name, p11h_name in direct_param_map.items():
if ldap_name in ldap_key:
unwrap_params[p11h_name] = ldap_key[ldap_name]
    # and some others need conversion
indirect_param_map = {
"ipk11keytype": ("key_type", keytype_name2id),
"ipawrappingmech": ("wrapping_mech", wrappingmech_name2id),
}
for ldap_name, rules in indirect_param_map.items():
p11h_name, mapping = rules
if ldap_name in ldap_key:
unwrap_params[p11h_name] = mapping[ldap_key[ldap_name]]
return unwrap_params
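# Illustrative example:
#   {'ipk11label': u'zone key', 'ipk11keytype': 'rsa', 'ipk11wrap': True}
# becomes
#   {'label': u'zone key', 'key_type': _ipap11helper.KEY_TYPE_RSA,
#    'cka_wrap': True}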
class AbstractHSM:
def _filter_replica_keys(self, all_keys):
replica_keys = {}
for key_id, key in all_keys.items():
if not key['ipk11label'].startswith('dnssec-replica:'):
continue
replica_keys[key_id] = key
return replica_keys
def _filter_zone_keys(self, all_keys):
zone_keys = {}
for key_id, key in all_keys.items():
if key['ipk11label'] == u'dnssec-master' \
or key['ipk11label'].startswith('dnssec-replica:'):
continue
zone_keys[key_id] = key
return zone_keys
| 6,891 | Python | .py | 166 | 34.277108 | 84 | 0.665124 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,731 | opendnssec.py | freeipa_freeipa/ipaserver/dnssec/opendnssec.py |
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
import os
from ipaplatform.paths import paths
# pylint: disable=unused-import
if paths.ODS_KSMUTIL is not None and os.path.exists(paths.ODS_KSMUTIL):
from ._ods14 import ODSDBConnection, ODSSignerConn, ODSTask
else:
from ._ods21 import ODSDBConnection, ODSSignerConn, ODSTask
tasks = ODSTask()
| 376 | Python | .py | 11 | 32.181818 | 71 | 0.798343 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,732 | __init__.py | freeipa_freeipa/ipaserver/advise/__init__.py |
# Authors: Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Base subpackage for ipa-advise related code.
"""
| 821 | Python | .py | 21 | 38.047619 | 71 | 0.772215 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,733 | base.py | freeipa_freeipa/ipaserver/advise/base.py |
# Authors: Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
from contextlib import contextmanager
import logging
import os
from textwrap import wrap
from ipalib import api
from ipalib.plugable import Plugin, API
from ipalib.errors import ValidationError
from ipaplatform.paths import paths
from ipapython import admintool
from ipapython.ipa_log_manager import Filter
from ipaserver.install import installutils
"""
To add configuration instructions for a new use case, define a new class that
inherits from Advice class.
You should create a plugin file for it in ipaserver/advise/plugins folder.
The class can run any arbitrary code or IPA command via api.Command['command']()
calls. It needs to override get_info() method, which returns the formatted
advice string.
Important! Do not forget to register the class with the API.
>>> @register()
>>> class sample_advice(Advice):
>>> description = 'Instructions for machine with SSSD 1.0 setup.'
Description provided shows itself as a header and in the list of all advices
currently available via ipa-advise.
Optionally, you can require root privileges for your plugin:
>>> require_root = True
The following method should be implemented in your plugin:
>>> def get_info():
>>> self.log.debug('Entering execute() method')
>>> self.log.comment('Providing useful advice just for you')
>>> self.log.command('yum update sssd -y')
As you can see, Advice's log has 3 different levels. Debug lines are printed
out with a '# DEBUG:' prefix if --verbose was used. Comment lines use
a '# ' prefix and command lines are printed raw.
Please note that comments are automatically wrapped after 70 characters.
Use wrapped=False option to force the unwrapped line in the comment.
>>> self.log.comment("This line should not be wrapped", wrapped=False)
As a result, you can redirect the advice's output directly to a script file.
# ipa-advise sample-advice > script.sh
# ./script.sh
"""
DEFAULT_INDENTATION_INCREMENT = 2
class _IndentationTracker:
"""
A simple wrapper that tracks the indentation level of the generated bash
commands
"""
def __init__(self, spaces_per_indent=0):
if spaces_per_indent <= 0:
raise ValueError(
"Indentation increments cannot be zero or negative")
self.spaces_per_indent = spaces_per_indent
self._indentation_stack = []
self._total_indentation_level = 0
@property
def indentation_string(self):
"""
return a string containing number of spaces corresponding to
indentation level
"""
return " " * self._total_indentation_level
def indent(self):
"""
track a single indentation of the generated code
"""
self._indentation_stack.append(self.spaces_per_indent)
self._recompute_indentation_level()
def _recompute_indentation_level(self):
"""
Track total indentation level of the generated code
"""
self._total_indentation_level = sum(self._indentation_stack)
def dedent(self):
"""
track a single dedentation of the generated code
dedents that would result in zero or negative indentation level will be
ignored
"""
try:
self._indentation_stack.pop()
except IndexError:
# can not dedent any further
pass
self._recompute_indentation_level()
class CompoundStatement:
"""
Wrapper around indented blocks of Bash statements.
Override `begin_statement` and `end_statement` methods to issue
opening/closing commands using the passed in _AdviceOutput instance
"""
def __init__(self, advice_output):
self.advice_output = advice_output
def __enter__(self):
self.begin_statement()
self.advice_output.indent()
def begin_statement(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self.advice_output.dedent()
self.end_statement()
def end_statement(self):
pass
class IfBranch(CompoundStatement):
"""
    Base wrapper around the `if` branch. The closing statement is empty, so
    it leaves a trailing block that can be closed off or continued by else
    branches
"""
def __init__(self, advice_output, conditional):
super(IfBranch, self).__init__(advice_output)
self.conditional = conditional
def begin_statement(self):
self.advice_output.command('if {}'.format(self.conditional))
self.advice_output.command('then')
class ElseIfBranch(CompoundStatement):
"""
Wrapper for `else if <CONDITIONAL>`
"""
def __init__(self, advice_output, alternative_conditional):
super(ElseIfBranch, self).__init__(advice_output)
self.alternative_conditional = alternative_conditional
def begin_statement(self):
command = 'else if {}'.format(self.alternative_conditional)
self.advice_output.command(command)
class ElseBranch(CompoundStatement):
"""
Wrapper for final `else` block
"""
def begin_statement(self):
self.advice_output.command('else')
def end_statement(self):
self.advice_output.command('fi')
class UnbranchedIfStatement(IfBranch):
"""
Plain `if` without branches
"""
def end_statement(self):
self.advice_output.command('fi')
class ForLoop(CompoundStatement):
"""
Wrapper around the for loop
"""
def __init__(self, advice_output, loop_variable, iterable):
super(ForLoop, self).__init__(advice_output)
self.loop_variable = loop_variable
self.iterable = iterable
def begin_statement(self):
self.advice_output.command(
'for {} in {}'.format(self.loop_variable, self.iterable))
self.advice_output.command('do')
def end_statement(self):
self.advice_output.command('done')
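# For illustration, when paired with the _AdviceOutput class below, the
# compound statement wrappers render indented shell code, e.g.:
#
#   out = _AdviceOutput()
#   with ForLoop(out, 'f', '$FILES'):
#       out.command('echo "$f"')
#   # out.content == ['for f in $FILES', 'do', '  echo "$f"', 'done']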
class _AdviceOutput:
def __init__(self):
self.content = []
self.prefix = '# '
self.options = None
self.pkgmgr_detected = False
self._indentation_tracker = _IndentationTracker(
spaces_per_indent=DEFAULT_INDENTATION_INCREMENT)
def indent(self):
"""
Indent the statements by one level
"""
self._indentation_tracker.indent()
def dedent(self):
"""
Dedent the statements by one level
"""
self._indentation_tracker.dedent()
@contextmanager
def indented_block(self):
self.indent()
try:
yield
finally:
self.dedent()
def comment(self, line, wrapped=True):
if wrapped:
self.append_wrapped_and_indented_comment(line)
else:
self.append_comment(line)
def append_wrapped_and_indented_comment(self, line, character_limit=70):
"""
append wrapped and indented comment to the output
"""
for wrapped_indented_line in wrap(
self.indent_statement(line), character_limit):
self.append_comment(wrapped_indented_line)
def append_comment(self, line):
self.append_statement(self.prefix + line)
def append_statement(self, statement):
"""
Append a line to the generated content indenting it by tracked number
of spaces
"""
self.content.append(self.indent_statement(statement))
def indent_statement(self, statement):
return '{indent}{statement}'.format(
indent=self._indentation_tracker.indentation_string,
statement=statement)
def debug(self, line):
if self.options.verbose:
self.comment('DEBUG: ' + line)
def command(self, line):
self.append_statement(line)
def echo_error(self, error_message):
self.command(self._format_error(error_message))
def _format_error(self, error_message):
return 'echo "{}" >&2'.format(error_message)
def exit_on_failed_command(self, command_to_run,
error_message_lines):
self.command(command_to_run)
self.exit_on_predicate(
'[ "$?" -ne "0" ]',
error_message_lines)
def exit_on_nonroot_euid(self):
self.exit_on_predicate(
'[ "$(id -u)" -ne "0" ]',
["This script has to be run as root user"]
)
def exit_on_predicate(self, predicate, error_message_lines):
with self.unbranched_if(predicate):
for error_message_line in error_message_lines:
self.command(self._format_error(error_message_line))
self.command('exit 1')
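    # e.g. exit_on_predicate('[ -z "$X" ]', ['X is empty']) emits:
    #   if [ -z "$X" ]
    #   then
    #     echo "X is empty" >&2
    #     exit 1
    #   fi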
def detect_pkgmgr(self):
self.commands_on_predicate(
'which yum >/dev/null',
commands_to_run_when_true=['PKGMGR=yum'],
commands_to_run_when_false=['PKGMGR=dnf']
)
self.pkgmgr_detected = True
def install_packages(self, names, error_message_lines):
assert isinstance(names, list)
self.detect_pkgmgr()
self.command('rpm -qi {} > /dev/null'.format(' '.join(names)))
self.commands_on_predicate(
'[ "$?" -ne "0" ]',
['$PKGMGR install -y {}'.format(' '.join(names))]
)
self.exit_on_predicate(
'[ "$?" -ne "0" ]',
error_message_lines
)
def remove_package(self, name, error_message_lines):
# remove only supports one package name
assert ' ' not in name
self.detect_pkgmgr()
self.command('rpm -qi {} > /dev/null'.format(name))
self.commands_on_predicate(
'[ "$?" -eq "0" ]',
['$PKGMGR remove -y {} || exit 1'.format(name)]
)
self.exit_on_predicate(
'[ "$?" -ne "0" ]',
error_message_lines
)
@contextmanager
def unbranched_if(self, predicate):
with self._compound_statement(UnbranchedIfStatement, predicate):
yield
@contextmanager
def _compound_statement(self, statement_cls, *args):
with statement_cls(self, *args):
yield
def commands_on_predicate(self, predicate, commands_to_run_when_true,
commands_to_run_when_false=None):
if commands_to_run_when_false is not None:
if_statement = self.if_branch
else:
if_statement = self.unbranched_if
with if_statement(predicate):
for command_to_run_when_true in commands_to_run_when_true:
self.command(
command_to_run_when_true)
if commands_to_run_when_false is not None:
with self.else_branch():
for command_to_run_when_false in commands_to_run_when_false:
self.command(command_to_run_when_false)
@contextmanager
def if_branch(self, predicate):
with self._compound_statement(IfBranch, predicate):
yield
@contextmanager
def else_branch(self):
with self._compound_statement(ElseBranch):
yield
@contextmanager
def else_if_branch(self, predicate):
with self._compound_statement(ElseIfBranch, predicate):
yield
@contextmanager
def for_loop(self, loop_variable, iterable):
with self._compound_statement(ForLoop, loop_variable, iterable):
yield
class Advice(Plugin):
"""
Base class for advices, plugins for ipa-advise.
"""
options = None
require_root = False
description = ''
def __init__(self, api):
super(Advice, self).__init__(api)
self.log = _AdviceOutput()
def set_options(self, options):
self.options = options
self.log.options = options
def get_info(self):
"""
This method should be overridden by child Advices.
Returns a string with instructions.
"""
raise NotImplementedError
class AdviseAPI(API):
bases = (Advice,)
@property
def packages(self):
import ipaserver.advise.plugins
return (ipaserver.advise.plugins,)
advise_api = AdviseAPI()
class IpaAdvise(admintool.AdminTool):
"""
    Admin tool that, given the system's configuration, provides instructions
    on how to configure the system for various use cases.
"""
command_name = 'ipa-advise'
usage = "%prog ADVICE"
description = "Provides configuration advice for various use cases. To "\
"see the list of possible ADVICEs, run ipa-advise without "\
"any arguments."
def __init__(self, options, args):
super(IpaAdvise, self).__init__(options, args)
@classmethod
def add_options(cls, parser):
super(IpaAdvise, cls).add_options(parser, debug_option=True)
def validate_options(self):
super(IpaAdvise, self).validate_options(needs_root=False)
installutils.check_server_configuration()
if len(self.args) > 1:
# pylint: disable=raising-bad-type, #4772
raise self.option_parser.error("You can only provide one "
"positional argument.")
def log_success(self):
pass
def print_config_list(self):
self.print_header('List of available advices')
max_keyword_len = max(
(len(advice.name) for advice in advise_api.Advice))
for advice in advise_api.Advice:
description = getattr(advice, 'description', '')
keyword = advice.name.replace('_', '-')
# Compute the number of spaces needed for the table to be aligned
offset = max_keyword_len - len(keyword)
prefix = " {key} {off}: ".format(key=keyword, off=' ' * offset)
wrapped_description = wrap(description, 80 - len(prefix))
# Print the first line with the prefix (keyword)
print(prefix + wrapped_description[0])
# Print the rest wrapped behind the colon
for line in wrapped_description[1:]:
print("{off}{line}".format(off=' ' * len(prefix), line=line))
def print_header(self, header, print_shell=False):
header_size = len(header)
prefix = ''
if print_shell:
prefix = '# '
print('#!/bin/sh')
# Do not print out empty header
if header_size > 0:
print((prefix + '-' * 70))
for line in wrap(header, 70):
print((prefix + line))
print((prefix + '-' * 70))
def print_advice(self, keyword):
advice = getattr(advise_api.Advice, keyword, None)
# Ensure that Configuration class for given --setup option value exists
if advice is None:
raise ValidationError(
name="advice",
error="No instructions are available for '{con}'. "
"See the list of available configuration "
"by invoking the ipa-advise command with no argument."
.format(con=keyword.replace('_', '-')))
# Check whether root privileges are needed
        if advice.require_root and os.geteuid() != 0:
raise admintool.ScriptError(
'Must be root to get advice for {adv}'
.format(adv=keyword.replace('_', '-')), 1)
# Print out nicely formatted header
self.print_header(advice.description, print_shell=True)
# Set options so that plugin can use verbose/quiet options
advice.set_options(self.options)
# Print out the actual advice
api.Backend.rpcclient.connect()
advice.get_info()
api.Backend.rpcclient.disconnect()
for line in advice.log.content:
print(line)
def run(self):
super(IpaAdvise, self).run()
api.bootstrap(in_server=False,
context='cli',
confdir=paths.ETC_IPA)
api.finalize()
advise_api.bootstrap(in_server=False,
context='cli',
confdir=paths.ETC_IPA)
advise_api.finalize()
if not self.options.verbose:
# Do not print connection information by default
logger_name = r'ipalib\.rpc'
root_logger = logging.getLogger()
root_logger.addFilter(Filter(logger_name, logging.WARNING))
# With no argument, print the list out and exit
if not self.args:
self.print_config_list()
return
else:
keyword = self.args[0].replace('-', '_')
self.print_advice(keyword)
| 17,433 | Python | .py | 439 | 31.366743 | 80 | 0.632926 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,734 | smart_card_auth.py | freeipa_freeipa/ipaserver/advise/plugins/smart_card_auth.py |
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import sys
from ipalib.plugable import Registry
from ipaplatform import services
from ipaplatform.paths import paths
from ipaserver.advise.base import Advice
from ipaserver.install.httpinstance import OCSP_ENABLED, OCSP_DIRECTIVE
register = Registry()
class common_smart_card_auth_config(Advice):
"""
Common steps required to properly configure both server and client for
smart card auth
"""
systemwide_nssdb = paths.NSS_DB_DIR
smart_card_ca_certs_variable_name = "SC_CA_CERTS"
single_ca_cert_variable_name = 'ca_cert'
def check_ccache_not_empty(self):
self.log.comment('Check whether the credential cache is not empty')
self.log.exit_on_failed_command(
'klist',
[
"Credential cache is empty",
'Use kinit as privileged user to obtain Kerberos credentials'
])
def check_and_set_ca_cert_paths(self):
ca_paths_variable = self.smart_card_ca_certs_variable_name
single_ca_path_variable = self.single_ca_cert_variable_name
self.log.command("{}=$@".format(ca_paths_variable))
self.log.exit_on_predicate(
'[ -z "${}" ]'.format(ca_paths_variable),
['You need to provide one or more paths to the PEM files '
'containing CAs signing the Smart Cards']
)
with self.log.for_loop(single_ca_path_variable,
'${}'.format(ca_paths_variable)):
self.log.exit_on_predicate(
'[ ! -f "${}" ]'.format(single_ca_path_variable),
['Invalid CA certificate filename: ${}'.format(
single_ca_path_variable),
'Please check that the path exists and is a valid file']
)
def upload_smartcard_ca_certificates_to_systemwide_db(self):
# Newer version of sssd use OpenSSL and read the CA certs
# from /etc/sssd/pki/sssd_auth_ca_db.pem
self.log.command('mkdir -p /etc/sssd/pki')
with self.log.for_loop(
self.single_ca_cert_variable_name,
'${}'.format(self.smart_card_ca_certs_variable_name)):
self.log.command(
'certutil -d {} -A -i ${} -n "Smart Card CA $(uuidgen)" '
'-t CT,C,C'.format(
self.systemwide_nssdb, self.single_ca_cert_variable_name
)
)
self.log.command(
'cat ${} >> /etc/sssd/pki/sssd_auth_ca_db.pem'.format(
self.single_ca_cert_variable_name
)
)
def install_smart_card_signing_ca_certs(self):
with self.log.for_loop(
self.single_ca_cert_variable_name,
'${}'.format(self.smart_card_ca_certs_variable_name)):
self.log.exit_on_failed_command(
'ipa-cacert-manage install ${} -t CT,C,C'.format(
self.single_ca_cert_variable_name
),
['Failed to install external CA certificate to IPA']
)
def update_ipa_ca_certificate_store(self):
self.log.exit_on_failed_command(
'ipa-certupdate',
['Failed to update IPA CA certificate database']
)
@register()
class config_server_for_smart_card_auth(common_smart_card_auth_config):
"""
Configures smart card authentication via Kerberos (PKINIT) and for WebUI
"""
description = ("Instructions for enabling Smart Card authentication on "
" a single IPA server. Includes Apache configuration, "
"enabling PKINIT on KDC and configuring WebUI to accept "
"Smart Card auth requests. To enable the feature in the "
"whole topology you have to run the script on each master")
ssl_conf = paths.HTTPD_SSL_CONF
ssl_ocsp_directive = OCSP_DIRECTIVE
kdc_service_name = services.knownservices.krb5kdc.systemd_name
httpd_service_name = services.knownservices.httpd.systemd_name
def get_info(self):
self.log.exit_on_nonroot_euid()
self.check_and_set_ca_cert_paths()
self.check_ccache_not_empty()
self.check_hostname_is_in_masters()
self.resolve_ipaca_records()
self.enable_ssl_ocsp()
self.restart_httpd()
self.record_httpd_ocsp_status()
self.check_and_enable_pkinit()
self.enable_ok_to_auth_as_delegate_on_http_principal()
self.allow_httpd_ifp()
self.upload_smartcard_ca_certificates_to_systemwide_db()
self.install_smart_card_signing_ca_certs()
self.update_ipa_ca_certificate_store()
self.restart_kdc()
def check_hostname_is_in_masters(self):
self.log.comment('Check whether the host is IPA master')
self.log.exit_on_failed_command(
'ipa server-find $(hostname -f)',
["This script can be run on IPA master only"])
def resolve_ipaca_records(self):
ipa_domain_name = self.api.env.domain
self.log.comment('make sure bind-utils are installed so that we can '
'dig for ipa-ca records')
self.log.install_packages(
['bind-utils'],
['Failed to install bind-utils']
)
self.log.comment('make sure ipa-ca records are resolvable, '
'otherwise error out and instruct')
self.log.comment('the user to update the DNS infrastructure')
self.log.command('ipaca_records=$(dig +short '
'ipa-ca.{})'.format(ipa_domain_name))
self.log.exit_on_predicate(
'[ -z "$ipaca_records" ]',
[
                f'Cannot resolve ipa-ca records for {ipa_domain_name}',
'Please make sure to update your DNS infrastructure with ',
'ipa-ca record pointing to IP addresses of IPA CA masters'
])
def enable_ssl_ocsp(self):
self.log.comment('look for the OCSP directive in ssl.conf')
self.log.comment(' if it is present, switch it on')
self.log.comment(
'if it is absent, append it to the end of VirtualHost section')
predicate = self._interpolate_ocsp_directive_file_into_command(
"grep -q '{directive} ' {filename}")
self.log.commands_on_predicate(
predicate,
[
self._interpolate_ocsp_directive_file_into_command(
"sed -i.ipabkp -r "
"'s/^#*[[:space:]]*{directive}[[:space:]]+(on|off)$"
"/{directive} on/' {filename}")
],
commands_to_run_when_false=[
self._interpolate_ocsp_directive_file_into_command(
r"sed -i.ipabkp '/<\/VirtualHost>/i {directive} on' "
r"{filename}")
]
)
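    # The emitted script either flips a commented-out or disabled OCSP
    # directive to "on" in place, or appends "<directive> on" just before
    # </VirtualHost>; the actual directive name comes from OCSP_DIRECTIVE.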
def _interpolate_ocsp_directive_file_into_command(self, fmt_line):
return self._format_command(
fmt_line, self.ssl_ocsp_directive, self.ssl_conf)
def _format_command(self, fmt_line, directive, filename):
return fmt_line.format(directive=directive, filename=filename)
def restart_httpd(self):
self.log.comment('finally restart apache')
self.log.command(
'systemctl restart {}'.format(self.httpd_service_name)
)
def record_httpd_ocsp_status(self):
self.log.comment('store the OCSP upgrade state')
self.log.command(
"{} -c 'from ipaserver.install import sysupgrade; "
"sysupgrade.set_upgrade_state(\"httpd\", "
"\"{}\", True)'".format(sys.executable, OCSP_ENABLED))
def check_and_enable_pkinit(self):
self.log.comment('check whether PKINIT is configured on the master')
with self.log.if_branch(
"ipa-pkinit-manage status | grep -q 'enabled'"):
self.log.command('echo "PKINIT already enabled"')
with self.log.else_branch():
self.log.exit_on_failed_command(
'ipa-pkinit-manage enable',
['Failed to issue PKINIT certificates to local KDC'])
def enable_ok_to_auth_as_delegate_on_http_principal(self):
self.log.comment('Enable OK-AS-DELEGATE flag on the HTTP principal')
self.log.comment('This enables smart card login to WebUI')
self.log.command(
'output=$(ipa service-mod HTTP/$(hostname -f) '
'--ok-to-auth-as-delegate=True 2>&1)')
self.log.exit_on_predicate(
'[ "$?" -ne "0" -a '
'-z "$(echo $output | grep \'no modifications\')" ]',
["Failed to set OK_AS_AUTH_AS_DELEGATE flag on HTTP principal"]
)
def allow_httpd_ifp(self):
self.log.comment('Allow Apache to access SSSD IFP')
self.log.exit_on_failed_command(
'{} -c "import SSSDConfig; '
'from ipaclient.install.client import sssd_enable_ifp; '
'from ipaplatform.paths import paths; '
'c = SSSDConfig.SSSDConfig(); '
'c.import_config(); '
'sssd_enable_ifp(c, allow_httpd=True); '
'c.write(paths.SSSD_CONF)"'.format(sys.executable),
['Failed to modify SSSD config']
)
self.log.comment('Restart sssd')
self.log.command('systemctl restart sssd')
def restart_kdc(self):
self.log.exit_on_failed_command(
'systemctl restart {}'.format(self.kdc_service_name),
['Failed to restart KDC. Please restart the service manually.']
)
@register()
class config_client_for_smart_card_auth(common_smart_card_auth_config):
"""
Configures smart card authentication on IPA client
"""
description = ("Instructions for enabling Smart Card authentication on "
" a single IPA client. Configures Smart Card daemon, "
"set the system-wide trust store and configures SSSD to "
"allow smart card logins to desktop")
opensc_module_name = "OpenSC"
pkcs11_shared_lib = '/usr/lib64/opensc-pkcs11.so'
smart_card_service_file = 'pcscd.service'
smart_card_socket = 'pcscd.socket'
def get_info(self):
self.log.exit_on_nonroot_euid()
self.check_and_set_ca_cert_paths()
self.check_ccache_not_empty()
self.check_and_remove_pam_pkcs11()
self.install_opensc_and_dconf_packages()
self.install_krb5_client_dependencies()
self.start_enable_smartcard_daemon()
self.add_pkcs11_module_to_systemwide_db()
self.upload_smartcard_ca_certificates_to_systemwide_db()
self.update_ipa_ca_certificate_store()
self.run_authselect_to_configure_smart_card_auth()
self.configure_pam_cert_auth()
self.restart_sssd()
def check_and_remove_pam_pkcs11(self):
self.log.remove_package(
'pam_pkcs11',
['Could not remove pam_pkcs11 package']
)
def install_opensc_and_dconf_packages(self):
self.log.comment(
'authconfig often complains about missing dconf, '
'install it explicitly')
self.log.install_packages(
[self.opensc_module_name.lower(), 'dconf'],
['Could not install OpenSC package']
)
def install_krb5_client_dependencies(self):
self.log.install_packages(
['krb5-pkinit-openssl'],
['Failed to install Kerberos client PKINIT extensions.']
)
def start_enable_smartcard_daemon(self):
self.log.command(
'systemctl start {service} {socket} '
'&& systemctl enable {service} {socket}'.format(
service=self.smart_card_service_file,
socket=self.smart_card_socket))
def add_pkcs11_module_to_systemwide_db(self):
module_name = self.opensc_module_name
nssdb = self.systemwide_nssdb
shared_lib = self.pkcs11_shared_lib
self.log.commands_on_predicate(
'modutil -dbdir {nssdb} -list | grep -q {module_name} || '
'p11-kit list-modules | grep -i {module_name} -q'.format(
nssdb=nssdb, module_name=module_name),
[
'echo "{} PKCS#11 module already configured"'.format(
module_name)
],
commands_to_run_when_false=[
'echo "" | modutil -dbdir {} -add "{}" -libfile {}'.format(
nssdb, module_name, shared_lib),
]
)
def run_authselect_to_configure_smart_card_auth(self):
# In order to be compatible with all clients, we check first
# if the client supports authselect.
# Otherwise authconfig will be used.
self.log.comment('Use either authselect or authconfig to enable '
'Smart Card authentication')
self.log.commands_on_predicate(
'[ -f {} ]'.format(paths.AUTHSELECT),
['AUTHCMD="authselect enable-feature with-smartcard"'],
['AUTHCMD="authconfig --enablesssd --enablesssdauth '
'--enablesmartcard --smartcardmodule=sssd --smartcardaction=1 '
'--updateall"']
)
self.log.exit_on_failed_command(
'$AUTHCMD',
[
'Failed to configure Smart Card authentication in SSSD'
]
)
def configure_pam_cert_auth(self):
self.log.comment('Set pam_cert_auth=True in /etc/sssd/sssd.conf')
self.log.comment('This step is required only when authselect is used')
# If the advise command is run on RHEL7 or fedora but the client
# is rhel8, python3 executable may be in a different location
# Find the right python path first
self.log.command("python3 --version >/dev/null 2>&1")
self.log.commands_on_predicate(
'[ "$?" -eq 0 ]',
['PYTHON3CMD=python3'],
['PYTHON3CMD=/usr/libexec/platform-python']
)
self.log.commands_on_predicate(
'[ -f {} ]'.format(paths.AUTHSELECT),
["${PYTHON3CMD} -c 'from SSSDConfig import SSSDConfig; "
"c = SSSDConfig(); c.import_config(); "
"c.set(\"pam\", \"pam_cert_auth\", \"True\"); "
"c.write()'"])
def restart_sssd(self):
self.log.command('systemctl restart sssd.service')
| 14,496 | Python | .py | 315 | 35.069841 | 78 | 0.597991 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

| 16,735 | legacy_clients.py | freeipa_freeipa/ipaserver/advise/plugins/legacy_clients.py |
# Authors: Ana Krivokapic <akrivoka@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import os
from ipalib import api
from ipalib.plugable import Registry
from ipaplatform.paths import paths
from ipaserver.advise.base import Advice
from ipapython.ipautil import template_file
register = Registry()
CACERTDIR_REHASH_URL = ('https://pagure.io/authconfig/raw/master/f/'
'cacertdir_rehash')
class config_base_legacy_client(Advice):
def get_uri_and_base(self):
uri = 'ldap://%s' % api.env.host
base = 'cn=compat,%s' % api.env.basedn
return uri, base
def check_compat_plugin(self):
compat_is_enabled = api.Command['compat_is_enabled']()['result']
if not compat_is_enabled:
self.log.comment(
'Schema Compatibility plugin has not been configured '
'on this server. To configure it, run '
'"ipa-adtrust-install --enable-compat"\n'
)
def configure_ca_cert(self):
self.log.comment('Please note that this script assumes '
'/etc/openldap/cacerts as the default CA certificate '
'location. If this value is different on your system '
'the script needs to be modified accordingly.\n')
self.log.comment('Download the CA certificate of the IPA server')
self.log.command('mkdir -p -m 755 /etc/openldap/cacerts')
self.log.command('curl http://%s/ipa/config/ca.crt -o '
'/etc/openldap/cacerts/ipa.crt\n' % api.env.host)
self.log.comment('Generate hashes for the openldap library')
self.log.command('command -v cacertdir_rehash')
self.log.command('if [ $? -ne 0 ] ; then')
self.log.command(' curl "%s" -o cacertdir_rehash ;' %
CACERTDIR_REHASH_URL)
self.log.command(' chmod 755 ./cacertdir_rehash ;')
self.log.command(' ./cacertdir_rehash /etc/openldap/cacerts/ ;')
self.log.command('else')
self.log.command(' cacertdir_rehash /etc/openldap/cacerts/ ;')
self.log.command('fi\n')
def configure_and_start_sssd(self):
uri, base = self.get_uri_and_base()
template = os.path.join(
paths.USR_SHARE_IPA_DIR,
'advise',
'legacy',
'sssd.conf.template'
)
sssd_conf = template_file(template, dict(URI=uri, BASE=base))
self.log.comment('Configure SSSD')
self.log.command('cat > /etc/sssd/sssd.conf << EOF \n'
'%s\nEOF' % sssd_conf)
self.log.command('chmod 0600 /etc/sssd/sssd.conf\n')
self.log.comment('Start SSSD')
self.log.command('service sssd start')
@register()
class config_redhat_sssd_before_1_9(config_base_legacy_client):
"""
Legacy client configuration for Red Hat based systems, using SSSD.
"""
description = ('Instructions for configuring a system with an old version '
                   'of SSSD (1.5-1.8) as an IPA client. This set of '
'instructions is targeted for platforms that include '
'the authconfig utility, which are all Red Hat based '
'platforms.')
def get_info(self):
self.check_compat_plugin()
self.log.comment('Install required packages via yum')
self.log.command('yum install -y sssd authconfig curl openssl\n')
self.configure_ca_cert()
        self.log.comment('Use authconfig to configure nsswitch.conf '
'and the PAM stack')
self.log.command('authconfig --updateall --enablesssd '
'--enablesssdauth\n')
self.configure_and_start_sssd()
def configure_ca_cert(self):
self.log.comment('NOTE: IPA certificate uses the SHA-256 hash '
'function. SHA-256 was introduced in RHEL5.2. '
'Therefore, clients older than RHEL5.2 will not be '
'able to interoperate with IPA server 3.x.')
super(config_redhat_sssd_before_1_9, self).configure_ca_cert()
@register()
class config_generic_linux_sssd_before_1_9(config_base_legacy_client):
"""
Legacy client configuration for non Red Hat based linux systems,
using SSSD.
"""
description = ('Instructions for configuring a system with an old version '
                   'of SSSD (1.5-1.8) as an IPA client. This set of '
'instructions is targeted for linux systems that do not '
'include the authconfig utility.')
def get_info(self):
self.check_compat_plugin()
with open(os.path.join(
paths.USR_SHARE_IPA_DIR,
'advise',
'legacy',
'pam.conf.sssd.template')) as fd:
pam_conf = fd.read()
self.log.comment('Install required packages using your system\'s '
'package manager. E.g:')
self.log.command('apt-get -y install sssd curl openssl\n')
self.configure_ca_cert()
self.log.comment('Configure nsswitch.conf. Append sss to the lines '
                         'beginning with passwd and group.')
self.log.command('grep "^passwd.*sss" /etc/nsswitch.conf')
self.log.command('if [ $? -ne 0 ] ; then sed -i '
'\'/^passwd/s|$| sss|\' /etc/nsswitch.conf ; fi')
self.log.command('grep "^group.*sss" /etc/nsswitch.conf')
self.log.command('if [ $? -ne 0 ] ; then sed -i '
'\'/^group/s|$| sss|\' /etc/nsswitch.conf ; fi\n')
self.log.comment('Configure PAM. Configuring the PAM stack differs on '
'particular distributions. The resulting PAM stack '
'should look like this:')
self.log.command('cat > /etc/pam.conf << EOF \n'
'%s\nEOF\n' % pam_conf)
self.configure_and_start_sssd()
def configure_ca_cert(self):
super(config_generic_linux_sssd_before_1_9, self).configure_ca_cert()
self.log.comment('Configure ldap.conf. Set the value of '
'TLS_CACERTDIR to /etc/openldap/cacerts. Make sure '
'that the location of ldap.conf file matches your '
'system\'s configuration.')
self.log.command('echo "TLS_CACERTDIR /etc/openldap/cacerts" >> '
'/etc/ldap/ldap.conf\n')
@register()
class config_redhat_nss_pam_ldapd(config_base_legacy_client):
"""
Legacy client configuration for Red Hat based systems,
using nss-pam-ldapd.
"""
description = ('Instructions for configuring a system with nss-pam-ldapd '
                   'as an IPA client. This set of instructions is targeted '
'for platforms that include the authconfig utility, which '
'are all Red Hat based platforms.')
def get_info(self):
uri, base = self.get_uri_and_base()
self.check_compat_plugin()
self.log.comment('Install required packages via yum')
self.log.command('yum install -y curl openssl nss-pam-ldapd pam_ldap '
'authconfig\n')
self.configure_ca_cert()
        self.log.comment('Use authconfig to configure nsswitch.conf '
'and the PAM stack')
self.log.command('authconfig --updateall --enableldap --enableldaptls '
'--enableldapauth --ldapserver=%s --ldapbasedn=%s\n'
% (uri, base))
def configure_ca_cert(self):
self.log.comment('NOTE: IPA certificate uses the SHA-256 hash '
'function. SHA-256 was introduced in RHEL5.2. '
'Therefore, clients older than RHEL5.2 will not be '
'able to interoperate with IPA server 3.x.')
super(config_redhat_nss_pam_ldapd, self).configure_ca_cert()
@register()
class config_generic_linux_nss_pam_ldapd(config_base_legacy_client):
"""
Legacy client configuration for non Red Hat based linux systems,
using nss-pam-ldapd.
"""
description = ('Instructions for configuring a system with nss-pam-ldapd. '
'This set of instructions is targeted for linux systems '
'that do not include the authconfig utility.')
def get_info(self):
uri, base = self.get_uri_and_base()
self.check_compat_plugin()
with open(os.path.join(
paths.USR_SHARE_IPA_DIR,
'advise',
'legacy',
'pam.conf.nss_pam_ldapd.template')) as fd:
pam_conf = fd.read()
nslcd_conf = 'uri %s\nbase %s' % (uri, base)
self.log.comment('Install required packages using your system\'s '
'package manager. E.g:')
self.log.command('apt-get -y install curl openssl libnss-ldapd '
'libpam-ldapd nslcd\n')
self.configure_ca_cert()
self.log.comment('Configure nsswitch.conf. Append ldap to the lines '
                         'beginning with passwd and group.')
self.log.command('grep "^passwd.*ldap" /etc/nsswitch.conf')
self.log.command('if [ $? -ne 0 ] ; then sed -i '
'\'/^passwd/s|$| ldap|\' /etc/nsswitch.conf ; fi')
self.log.command('grep "^group.*ldap" /etc/nsswitch.conf')
self.log.command('if [ $? -ne 0 ] ; then sed -i '
'\'/^group/s|$| ldap|\' /etc/nsswitch.conf ; fi\n')
self.log.comment('Configure PAM. Configuring the PAM stack differs on '
'particular distributions. The resulting PAM stack '
'should look like this:')
self.log.command('cat > /etc/pam.conf << EOF \n'
'%s\nEOF\n' % pam_conf)
self.log.comment('Configure nslcd.conf:')
self.log.command('cat > /etc/nslcd.conf << EOF \n'
'%s\nEOF\n' % nslcd_conf)
self.log.comment('Configure pam_ldap.conf:')
self.log.command('cat > /etc/pam_ldap.conf << EOF \n'
'%s\nEOF\n' % nslcd_conf)
self.log.comment('Stop nscd and restart nslcd')
self.log.command('service nscd stop && service nslcd restart')
def configure_ca_cert(self):
super(config_generic_linux_nss_pam_ldapd, self).configure_ca_cert()
self.log.comment('Configure ldap.conf. Set the value of '
'TLS_CACERTDIR to /etc/openldap/cacerts. Make sure '
'that the location of ldap.conf file matches your '
'system\'s configuration.')
self.log.command('echo "TLS_CACERTDIR /etc/openldap/cacerts" >> '
'/etc/ldap/ldap.conf\n')
@register()
class config_freebsd_nss_pam_ldapd(config_base_legacy_client):
"""
Legacy client configuration for FreeBSD, using nss-pam-ldapd.
"""
description = ('Instructions for configuring a FreeBSD system with '
'nss-pam-ldapd. ')
def get_info(self):
uri, base = self.get_uri_and_base()
cacrt = '/usr/local/etc/ipa.crt'
self.check_compat_plugin()
with open(os.path.join(
paths.USR_SHARE_IPA_DIR,
'advise',
'legacy',
'pam_conf_sshd.template')) as fd:
pam_conf = fd.read()
self.log.comment('Install required packages')
self.log.command('pkg_add -r nss-pam-ldapd curl\n')
self.configure_ca_cert(cacrt)
self.log.comment('Configure nsswitch.conf')
self.log.command('sed -i \'\' -e \'s/^passwd:/passwd: files ldap/\' '
'/etc/nsswitch.conf')
self.log.command('sed -i \'\' -e \'s/^group:/group: files ldap/\' '
'/etc/nsswitch.conf\n')
self.log.comment('Configure PAM stack for the sshd service')
self.log.command('cat > /etc/pam.d/sshd << EOF \n'
'%s\nEOF\n' % pam_conf)
self.log.comment('Add automated start of nslcd to /etc/rc.conf')
self.log.command('echo \'nslcd_enable="YES"\nnslcd_debug="NO"\' >> '
'/etc/rc.conf')
self.log.comment('Configure nslcd.conf:')
self.log.command('echo "uid nslcd\n'
'gid nslcd\n'
'uri %s\n'
'base %s\n'
'scope sub\n'
'base group cn=groups,%s\n'
'base passwd cn=users,%s\n'
'base shadow cn=users,%s\n'
'ssl start_tls\n'
'tls_cacertfile %s\n" > /usr/local/etc/nslcd.conf'
% ((uri,) + (base,)*4 + (cacrt,)))
self.log.comment('Configure ldap.conf:')
self.log.command('echo "uri %s\nbase %s\nssl start_tls\ntls_cacert %s"'
'> /usr/local/etc/ldap.conf' % (uri, base, cacrt))
self.log.comment('Restart nslcd')
self.log.command('/usr/local/etc/rc.d/nslcd restart')
def configure_ca_cert(self, cacrt):
self.log.comment('Download the CA certificate of the IPA server')
self.log.command('curl -k https://%s/ipa/config/ca.crt > '
'%s' % (api.env.host, cacrt))
@register()
class config_redhat_nss_ldap(config_base_legacy_client):
"""
Legacy client configuration for Red Hat based systems,
using nss-ldap.
"""
description = ('Instructions for configuring a system with nss-ldap '
                   'as an IPA client. This set of instructions is targeted '
'for platforms that include the authconfig utility, which '
'are all Red Hat based platforms.')
def get_info(self):
uri, base = self.get_uri_and_base()
self.check_compat_plugin()
self.log.comment('Install required packages via yum')
self.log.command('yum install -y curl openssl nss_ldap '
'authconfig\n')
self.configure_ca_cert()
        self.log.comment('Use authconfig to configure nsswitch.conf '
'and the PAM stack')
self.log.command('authconfig --updateall --enableldap --enableldaptls '
'--enableldapauth --ldapserver=%s --ldapbasedn=%s\n'
% (uri, base))
def configure_ca_cert(self):
self.log.comment('NOTE: IPA certificate uses the SHA-256 hash '
'function. SHA-256 was introduced in RHEL5.2. '
'Therefore, clients older than RHEL5.2 will not be '
'able to interoperate with IPA server 3.x.')
super(config_redhat_nss_ldap, self).configure_ca_cert()
| 15,707
|
Python
|
.py
| 306
| 38.905229
| 79
| 0.581066
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,736
|
admins_sudo.py
|
freeipa_freeipa/ipaserver/advise/plugins/admins_sudo.py
|
#
# Copyright (C) 2018 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
from ipalib.plugable import Registry
from ipaserver.advise.base import Advice
register = Registry()
@register()
class enable_admins_sudo(Advice):
"""
Configures HBAC and SUDO for members of the admins group
"""
description = ("Instructions for enabling HBAC and unauthenticated "
"SUDO for members of the admins group.")
def check_ccache_not_empty(self):
self.log.comment('Check whether the credential cache is not empty')
self.log.exit_on_failed_command(
'klist',
[
"Credential cache is empty",
'Use kinit as privileged user to obtain Kerberos credentials'
])
def create_hbac_rule(self):
self.log.comment('Create the HBAC rule for sudo')
self.log.exit_on_failed_command(
'err=$(ipa hbacrule-add --hostcat=all --desc "Allow admins '
'to run sudo on all hosts" admins_sudo 2>&1)',
['Failed to add hbac rule: ${err}'])
self.log.command('ipa hbacrule-add-user --groups=admins admins_sudo')
self.log.command(
'ipa hbacrule-add-service --hbacsvcs=sudo admins_sudo'
)
def create_sudo_rule(self):
self.log.comment('Create the SUDO rule for the admins group')
self.log.exit_on_failed_command(
'err=$(ipa sudorule-add --desc "Allow admins to run any command '
'on any host" --hostcat=all --cmdcat=all admins_all '
'2>&1)',
['Failed to add sudo rule: ${err}'])
self.log.command('ipa sudorule-add-user --groups=admins admins_all')
def get_info(self):
self.check_ccache_not_empty()
with self.log.if_branch(
'ipa hbacrule-show admins_sudo > /dev/null 2>&1'):
self.log.command('echo HBAC rule admins_sudo already exists')
with self.log.else_branch():
self.create_hbac_rule()
with self.log.if_branch(
'ipa sudorule-show admins_all > /dev/null 2>&1'):
self.log.command('echo SUDO rule admins_all already exists')
with self.log.else_branch():
self.create_sudo_rule()
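# Illustration only: the if_branch()/else_branch() context managers above are
# expected to emit shell along these lines (exact formatting depends on the
# Advice log implementation):
#
#   if ipa hbacrule-show admins_sudo > /dev/null 2>&1
#   then
#       echo HBAC rule admins_sudo already exists
#   else
#       ... create the HBAC rule ...
#   fi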
| 2,285
|
Python
|
.py
| 52
| 34.75
| 77
| 0.621962
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,737
|
__init__.py
|
freeipa_freeipa/ipaserver/advise/plugins/__init__.py
|
# Authors: Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Provides a separate api for ipa-advise plugins.
"""
| 824
|
Python
|
.py
| 21
| 38.190476
| 71
| 0.77182
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,738
|
ipa_backup.py
|
freeipa_freeipa/ipaserver/install/ipa_backup.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, print_function
import logging
import optparse # pylint: disable=deprecated-module
import os
import shutil
import sys
import tempfile
import time
import six
from ipaplatform.paths import paths
from ipaplatform import services
from ipalib import api, errors
from ipapython import version
from ipapython.ipautil import run, write_tmp_file
from ipapython import admintool, certdb
from ipapython.dn import DN
from ipaserver.install.replication import wait_for_task
from ipaserver.install import installutils
from ipapython import ipaldap
from ipaplatform.constants import constants
from ipaplatform.tasks import tasks
from lib389.cli_ctl.dblib import run_dbscan
# pylint: disable=import-error
if six.PY3:
# The SafeConfigParser class has been renamed to ConfigParser in Py3
from configparser import ConfigParser as SafeConfigParser
else:
from ConfigParser import SafeConfigParser
# pylint: enable=import-error
ISO8601_DATETIME_FMT = '%Y-%m-%dT%H:%M:%S'
logger = logging.getLogger(__name__)
"""
A test GnuPG key can be generated like this:
# cat >keygen <<EOF
%echo Generating a standard key
Key-Type: RSA
Key-Length: 2048
Name-Real: IPA Backup
Name-Comment: IPA Backup
Name-Email: root@example.com
Expire-Date: 0
Passphrase: SecretPassPhrase42
%commit
%echo done
EOF
# export GNUPGHOME=/root/backup
# mkdir -p $GNUPGHOME
# gpg2 --batch --gen-key keygen
# gpg2 --list-secret-keys
"""
def encrypt_file(filename, remove_original=True):
source = filename
dest = filename + '.gpg'
args = [
paths.GPG2,
'--batch',
'--default-recipient-self',
'--output', dest,
'--encrypt', source,
]
result = run(args, raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError('gpg failed: %s' % result.error_log)
if remove_original:
os.unlink(source)
return dest
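# Illustration only: a backup encrypted by encrypt_file() can later be
# decrypted with the same keyring, e.g. (paths follow the sample keygen
# recipe above; GnuPG will request the key passphrase via its agent):
#
#   # export GNUPGHOME=/root/backup
#   # gpg2 --batch --output ipa-full.tar --decrypt ipa-full.tar.gpg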
class Backup(admintool.AdminTool):
command_name = 'ipa-backup'
log_file_name = paths.IPABACKUP_LOG
usage = "%prog [options]"
description = "Back up IPA files and databases."
dirs = (paths.IPA_HTML_DIR,
paths.ROOT_PKI,
paths.PKI_TOMCAT,
paths.SYSCONFIG_PKI,
paths.VAR_LIB_PKI_DIR,
paths.SYSRESTORE,
paths.IPA_CLIENT_SYSRESTORE,
paths.IPA_DNSSEC_DIR,
paths.SSSD_PUBCONF_KRB5_INCLUDE_D_DIR,
paths.AUTHCONFIG_LAST,
paths.VAR_LIB_CERTMONGER_DIR,
paths.VAR_LIB_IPA,
paths.VAR_RUN_DIRSRV_DIR,
paths.DIRSRV_LOCK_DIR,
)
files = (
paths.NAMED_CONF,
paths.NAMED_CUSTOM_CONF,
paths.NAMED_CUSTOM_OPTIONS_CONF,
paths.NAMED_LOGGING_OPTIONS_CONF,
paths.NAMED_KEYTAB,
paths.RESOLV_CONF,
paths.SYSCONFIG_PKI_TOMCAT,
paths.SYSCONFIG_DIRSRV,
paths.SYSCONFIG_KRB5KDC_DIR,
paths.SYSCONFIG_IPA_DNSKEYSYNCD,
paths.SYSCONFIG_IPA_ODS_EXPORTER,
paths.SYSCONFIG_NAMED,
paths.SYSCONFIG_ODS,
paths.ETC_SYSCONFIG_AUTHCONFIG,
paths.IPA_NSSDB_PWDFILE_TXT,
paths.IPA_P11_KIT,
paths.SYSTEMWIDE_IPA_CA_CRT,
paths.NSSWITCH_CONF,
paths.KRB5_KEYTAB,
paths.SSSD_CONF,
paths.OPENLDAP_LDAP_CONF,
paths.LIMITS_CONF,
paths.HTTPD_PASSWORD_CONF,
paths.HTTP_KEYTAB,
paths.HTTPD_IPA_KDCPROXY_CONF,
paths.HTTPD_IPA_PKI_PROXY_CONF,
paths.HTTPD_IPA_REWRITE_CONF,
paths.HTTPD_SSL_CONF,
paths.HTTPD_SSL_SITE_CONF,
paths.HTTPD_CERT_FILE,
paths.HTTPD_KEY_FILE,
paths.HTTPD_IPA_CONF,
paths.SSHD_CONFIG,
paths.SSHD_IPA_CONFIG,
paths.SSH_CONFIG,
paths.KRB5_CONF,
paths.KDC_CA_BUNDLE_PEM,
paths.CA_BUNDLE_PEM,
paths.IPA_CA_CRT,
paths.IPA_DEFAULT_CONF,
paths.DS_KEYTAB,
paths.CHRONY_CONF,
paths.SMB_CONF,
paths.SAMBA_KEYTAB,
paths.DOGTAG_ADMIN_P12,
paths.RA_AGENT_PEM,
paths.RA_AGENT_KEY,
paths.CACERT_P12,
paths.KRACERT_P12,
paths.KRB5KDC_KDC_CONF,
paths.KDC_CERT,
paths.KDC_KEY,
paths.CACERT_PEM,
paths.SYSTEMD_IPA_SERVICE,
paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF,
paths.SYSTEMD_SSSD_SERVICE,
paths.SYSTEMD_CERTMONGER_SERVICE,
paths.SYSTEMD_PKI_TOMCAT_SERVICE,
paths.SVC_LIST_FILE,
paths.OPENDNSSEC_CONF_FILE,
paths.OPENDNSSEC_KASP_FILE,
paths.OPENDNSSEC_ZONELIST_FILE,
paths.OPENDNSSEC_KASP_DB,
paths.DNSSEC_OPENSSL_CONF,
paths.DNSSEC_SOFTHSM2_CONF,
paths.DNSSEC_SOFTHSM_PIN_SO,
paths.IPA_ODS_EXPORTER_KEYTAB,
paths.IPA_DNSKEYSYNCD_KEYTAB,
paths.IPA_CUSTODIA_KEYS,
paths.IPA_CUSTODIA_CONF,
paths.GSSPROXY_CONF,
paths.HOSTS,
paths.SYSTEMD_PKI_TOMCAT_IPA_CONF,
paths.NETWORK_MANAGER_IPA_CONF,
paths.SYSTEMD_RESOLVED_IPA_CONF,
) + tuple(
os.path.join(paths.IPA_NSSDB_DIR, file)
for file in (certdb.NSS_DBM_FILES + certdb.NSS_SQL_FILES)
) + tasks.get_pkcs11_modules()
    logs = (
paths.VAR_LOG_PKI_DIR,
paths.VAR_LOG_HTTPD_DIR,
paths.IPASERVER_INSTALL_LOG,
paths.IPASERVER_ADTRUST_INSTALL_LOG,
paths.IPASERVER_DNS_INSTALL_LOG,
paths.IPASERVER_KRA_INSTALL_LOG,
paths.IPAREPLICA_INSTALL_LOG,
paths.IPAREPLICA_CONNCHECK_LOG,
paths.IPAREPLICA_CA_INSTALL_LOG,
paths.KADMIND_LOG,
paths.MESSAGES,
paths.IPACLIENT_INSTALL_LOG,
paths.LOG_SECURE,
paths.IPASERVER_UNINSTALL_LOG,
paths.IPACLIENT_UNINSTALL_LOG,
paths.NAMED_RUN,
)
    required_dirs = (
paths.TOMCAT_TOPLEVEL_DIR,
paths.TOMCAT_CA_DIR,
paths.TOMCAT_SIGNEDAUDIT_DIR,
paths.TOMCAT_CA_ARCHIVE_DIR,
paths.TOMCAT_KRA_DIR,
paths.TOMCAT_KRA_SIGNEDAUDIT_DIR,
paths.TOMCAT_KRA_ARCHIVE_DIR,
)
def __init__(self, options, args):
super(Backup, self).__init__(options, args)
self._conn = None
self.files = list(self.files)
self.dirs = list(self.dirs)
self.logs = list(self.logs)
@classmethod
def add_options(cls, parser):
super(Backup, cls).add_options(parser, debug_option=True)
parser.add_option(
"--gpg-keyring", dest="gpg_keyring",
help=optparse.SUPPRESS_HELP)
parser.add_option(
"--gpg", dest="gpg", action="store_true",
default=False, help="Encrypt the backup")
parser.add_option(
"--data", dest="data_only", action="store_true",
default=False, help="Backup only the data")
parser.add_option(
"--logs", dest="logs", action="store_true",
default=False, help="Include log files in backup")
parser.add_option(
"--online", dest="online", action="store_true",
default=False,
help="Perform the LDAP backups online, for data only.")
parser.add_option(
"--disable-role-check", dest="rolecheck", action="store_false",
default=True,
help="Perform the backup even if this host does not have all "
"the roles used in the cluster. This is not recommended."
)
def setup_logging(self, log_file_mode='a'):
super(Backup, self).setup_logging(log_file_mode='a')
def validate_options(self):
options = self.options
super(Backup, self).validate_options(needs_root=True)
installutils.check_server_configuration()
if options.gpg_keyring is not None:
print(
"--gpg-keyring is no longer supported, use GNUPGHOME "
"environment variable to use a custom GnuPG2 directory.",
file=sys.stderr
)
options.gpg = True
if options.online and not options.data_only:
self.option_parser.error("You cannot specify --online "
"without --data")
if options.gpg:
tmpfd = write_tmp_file('encryptme')
newfile = encrypt_file(tmpfd.name, False)
os.unlink(newfile)
if options.data_only and options.logs:
self.option_parser.error("You cannot specify --data "
"with --logs")
def run(self):
options = self.options
super(Backup, self).run()
api.bootstrap(in_server=True, context='backup', confdir=paths.ETC_IPA)
api.finalize()
logger.info("Preparing backup on %s", api.env.host)
self.top_dir = tempfile.mkdtemp("ipa")
constants.DS_USER.chown(self.top_dir)
os.chmod(self.top_dir, 0o750)
self.dir = os.path.join(self.top_dir, "ipa")
os.mkdir(self.dir, 0o750)
constants.DS_USER.chown(self.dir)
self.tarfile = None
self.header = os.path.join(self.top_dir, 'header')
try:
dirsrv = services.knownservices.dirsrv
self.add_instance_specific_data()
# We need the dirsrv running to get the list of services
dirsrv.start(capture_output=False)
self.get_connection()
self.check_roles(raiseonerr=options.rolecheck)
self.create_header(options.data_only)
if options.data_only:
if not options.online:
logger.info('Stopping Directory Server')
dirsrv.stop(capture_output=False)
else:
logger.info('Stopping IPA services')
run([paths.IPACTL, 'stop'])
instance = ipaldap.realm_to_serverid(api.env.realm)
if os.path.exists(paths.VAR_LIB_SLAPD_INSTANCE_DIR_TEMPLATE %
instance):
# Check existence of ipaca backend
dbpath = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
(instance, ""))
output = run_dbscan(['-L', dbpath])
if 'ipaca/' in output:
self.db2ldif(instance, 'ipaca', online=options.online)
self.db2ldif(instance, 'userRoot', online=options.online)
self.db2bak(instance, online=options.online)
if not options.data_only:
# create backup of auth configuration
auth_backup_path = os.path.join(paths.VAR_LIB_IPA, 'auth_backup')
tasks.backup_auth_configuration(auth_backup_path)
self.file_backup(options)
if options.data_only:
if not options.online:
logger.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
                logger.info('Starting IPA services')
run([paths.IPACTL, 'start'])
# Compress after services are restarted to minimize
# the unavailability window
if not options.data_only:
self.compress_file_backup()
self.finalize_backup(options.data_only, options.gpg,
options.gpg_keyring)
finally:
shutil.rmtree(self.top_dir)
def check_roles(self, raiseonerr=True):
"""Check that locally-installed roles match the globally used ones.
Specifically: make sure no role used in the cluster is absent
from the local replica ipa-backup is running on.
"""
locally_installed_roles = set()
globally_used_roles = set()
# We need to cover the following roles:
# * DNS: filter="(|(cn=DNS)(cn=DNSKeySync))"
# * CA: filter="(cn=CA)"
# * KRA: filter="(cn=KRA)"
# * AD Trust Controller: filter="(cn=ADTRUST)"
# Note:
# We do not need to worry about AD Trust Agents as Trust
# Controllers are Trust Agents themselves and contain extra,
# necessary Samba configuration. So either the cluster has no
        # AD Trust bits installed, or it should be backed up on a Trust
# Controller, not a Trust Agent.
role_names = {
'CA', 'DNS', 'DNSKeySync', 'KRA', 'ADTRUST'
}
search_base = DN(api.env.container_masters, api.env.basedn)
attrs_list = ['ipaconfigstring', 'cn']
for role in role_names:
search_filter = '(cn=%s)' % role
try:
masters = dict()
result = self._conn.get_entries(
search_base,
filter=search_filter,
attrs_list=attrs_list,
scope=self._conn.SCOPE_SUBTREE
)
masters[role] = {e.dn[1]['cn'] for e in result}
if api.env.host in masters[role]:
locally_installed_roles.add(role)
                if masters[role]:
globally_used_roles.add(role)
except errors.EmptyResult:
pass
if locally_installed_roles == globally_used_roles:
logger.info(
"Local roles match globally used roles, proceeding."
)
else:
if raiseonerr:
raise admintool.ScriptError(
'Error: Local roles %s do not match globally used '
'roles %s. A backup done on this host would not be '
'complete enough to restore a fully functional, '
'identical cluster.' % (
', '.join(sorted(locally_installed_roles)),
', '.join(sorted(globally_used_roles))
)
)
else:
msg = (
'Warning: Local roles %s do not match globally used roles '
'%s. A backup done on this host would not be complete '
'enough to restore a fully functional, identical cluster. '
'Proceeding as role check was explicitly disabled.' % (
', '.join(sorted(locally_installed_roles)),
', '.join(sorted(globally_used_roles))
)
)
logger.info(msg)
def add_instance_specific_data(self):
'''
Add instance-specific files and directories.
        NOTE: some of the paths added here may not exist on this system;
        nonexistent paths are filtered out later, in file_backup().
'''
serverid = ipaldap.realm_to_serverid(api.env.realm)
for dir in [paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid,
paths.VAR_LIB_DIRSRV_INSTANCE_SCRIPTS_TEMPLATE % serverid,
paths.VAR_LIB_SLAPD_INSTANCE_DIR_TEMPLATE % serverid]:
if os.path.exists(dir):
self.dirs.append(dir)
for file in (
paths.SYSCONFIG_DIRSRV_INSTANCE % serverid,
paths.ETC_TMPFILESD_DIRSRV % serverid,
paths.SLAPD_INSTANCE_SYSTEMD_IPA_ENV_TEMPLATE % serverid,
):
if os.path.exists(file):
self.files.append(file)
self.files.append(
paths.HTTPD_PASSWD_FILE_FMT.format(host=api.env.host)
)
self.logs.append(paths.VAR_LOG_DIRSRV_INSTANCE_TEMPLATE % serverid)
def get_connection(self):
'''
Create an ldapi connection and bind to it using autobind as root.
'''
if self._conn is not None:
return self._conn
self._conn = ipaldap.LDAPClient.from_realm(api.env.realm)
try:
self._conn.external_bind()
except Exception as e:
logger.error("Unable to bind to LDAP server %s: %s",
self._conn.ldap_uri, e)
return self._conn
def db2ldif(self, instance, backend, online=True):
'''
Create a LDIF backup of the data in this instance.
If executed online create a task and wait for it to complete.
For SELinux reasons this writes out to the 389-ds backup location
and we move it.
'''
logger.info('Backing up %s in %s to LDIF', backend, instance)
cn = 'export_{}_{}'.format(
backend, time.strftime('%Y_%m_%d_%H_%M_%S'))
dn = DN(('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config'))
ldifname = '%s-%s.ldif' % (instance, backend)
ldiffile = os.path.join(
paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance,
ldifname)
if online:
conn = self.get_connection()
ent = conn.make_entry(
dn,
{
'objectClass': ['top', 'extensibleObject'],
'cn': [cn],
'nsInstance': [backend],
'nsFilename': [ldiffile],
'nsUseOneFile': ['true'],
'nsExportReplica': ['true'],
}
)
try:
conn.add_entry(ent)
except Exception as e:
raise admintool.ScriptError(
'Unable to add LDIF task: %s' % e
)
logger.info("Waiting for LDIF to finish")
            if wait_for_task(conn, dn) != 0:
                raise admintool.ScriptError(
                    'LDIF export task failed. '
                    'Check file systems\' free space.'
                )
else:
args = [paths.DSCTL,
instance,
'db2ldif',
'--replication',
backend,
ldiffile]
result = run(args, raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError(
'db2ldif failed: %s '
'Check if destination directory %s has enough space.'
% (result.error_log, os.path.dirname(ldiffile))
)
# Move the LDIF backup to our location
try:
shutil.move(ldiffile, os.path.join(self.dir, ldifname))
except (IOError, OSError) as e:
raise admintool.ScriptError(
'Unable to move LDIF: %s '
'Check if destination directory %s has enough space.'
% (e, os.path.dirname(ldiffile))
)
except Exception as e:
raise admintool.ScriptError(
'Unexpected error: %s' % e
)
def db2bak(self, instance, online=True):
'''
Create a BAK backup of the data and changelog in this instance.
If executed online create a task and wait for it to complete.
'''
logger.info('Backing up %s', instance)
cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))
bakdir = os.path.join(paths.SLAPD_INSTANCE_BACKUP_DIR_TEMPLATE % (instance, instance))
if online:
conn = self.get_connection()
ent = conn.make_entry(
dn,
{
'objectClass': ['top', 'extensibleObject'],
'cn': [cn],
'nsInstance': ['userRoot'],
'nsArchiveDir': [bakdir],
'nsDatabaseType': ['ldbm database'],
}
)
try:
conn.add_entry(ent)
except Exception as e:
raise admintool.ScriptError(
                    'Unable to add backup task: %s' % e
)
logger.info("Waiting for BAK to finish")
            if wait_for_task(conn, dn) != 0:
raise admintool.ScriptError(
'BAK online task failed. Check file systems\' free space.'
)
else:
args = [paths.DSCTL,
instance,
'db2bak',
bakdir]
result = run(args, raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError(
'db2bak failed: %s '
'Check if destination directory %s has enough space.'
% (result.error_log, bakdir)
)
try:
shutil.move(bakdir, self.dir)
except (IOError, OSError) as e:
raise admintool.ScriptError(
'Unable to move BAK: %s '
'Check if destination directory %s has enough space.'
% (e, bakdir)
)
except Exception as e:
raise admintool.ScriptError(
'Unexpected error: %s' % e
)
def file_backup(self, options):
def verify_directories(dirs):
return [s for s in dirs if s and os.path.exists(s)]
self.tarfile = os.path.join(self.dir, 'files.tar')
logger.info("Backing up files")
args = ['tar',
'--exclude=%s' % paths.IPA_BACKUP_DIR,
'--xattrs',
'--selinux',
'-cf',
self.tarfile
]
args.extend(verify_directories(self.dirs))
args.extend(verify_directories(self.files))
if options.logs:
args.extend(verify_directories(self.logs))
result = run(args, raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError('tar returned non-zero code %d: %s' %
(result.returncode, result.error_log))
# Backup the necessary directory structure. This is a separate
# call since we are using the '--no-recursion' flag to store
# the directory structure only, no files.
        existing_required_dirs = verify_directories(self.required_dirs)
        if existing_required_dirs:
args = ['tar',
'--exclude=%s' % paths.IPA_BACKUP_DIR,
'--xattrs',
'--selinux',
'--no-recursion',
'-rf', # -r appends to an existing archive
self.tarfile,
]
            args.extend(existing_required_dirs)
result = run(args, raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError(
'tar returned non-zero code %d '
'when adding directory structure: %s' %
(result.returncode, result.error_log))
def compress_file_backup(self):
# Compress the archive. This is done separately, since 'tar' cannot
# append to a compressed archive.
if self.tarfile:
result = run([paths.GZIP, self.tarfile], raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError(
'gzip returned non-zero code %d '
'when compressing the backup: %s' %
(result.returncode, result.error_log))
# Rename the archive back to files.tar to preserve compatibility
os.rename(os.path.join(self.dir, 'files.tar.gz'), self.tarfile)
def create_header(self, data_only):
'''
Create the backup file header that contains the meta data about
this particular backup.
'''
config = SafeConfigParser()
config.add_section("ipa")
if data_only:
config.set('ipa', 'type', 'DATA')
else:
config.set('ipa', 'type', 'FULL')
config.set(
'ipa', 'time', time.strftime(ISO8601_DATETIME_FMT, time.gmtime())
)
config.set('ipa', 'host', api.env.host)
config.set('ipa', 'ipa_version', str(version.VERSION))
config.set('ipa', 'version', '1')
dn = DN(('cn', api.env.host), api.env.container_masters,
api.env.basedn)
services_cns = []
try:
conn = self.get_connection()
            svc_entries = conn.get_entries(dn, conn.SCOPE_ONELEVEL)
except errors.NetworkError:
logger.critical(
"Unable to obtain list of master services, continuing anyway")
except Exception as e:
logger.error("Failed to read services from '%s': %s",
conn.ldap_uri, e)
else:
            services_cns = [s.single_value['cn'] for s in svc_entries]
config.set('ipa', 'services', ','.join(services_cns))
with open(self.header, 'w') as fd:
config.write(fd)
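    # Illustration only: the resulting header is a small INI file, roughly
    # (all values below are examples, not real data):
    #
    #   [ipa]
    #   type = FULL
    #   time = 2024-09-05T15:12:14
    #   host = server.example.com
    #   ipa_version = 4.12.0
    #   version = 1
    #   services = CA,DNS,KRA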
def finalize_backup(self, data_only=False, encrypt=False, keyring=None):
'''
Create the final location of the backup files and move the files
we've backed up there, optionally encrypting them.
This is done in a couple of steps. We have a directory that
contains the tarball of the files, a directory that contains
the db2bak output and an LDIF.
These, along with the header, are moved into a new subdirectory
in paths.IPA_BACKUP_DIR (/var/lib/ipa/backup).
'''
if data_only:
backup_dir = os.path.join(
paths.IPA_BACKUP_DIR,
time.strftime('ipa-data-%Y-%m-%d-%H-%M-%S')
)
filename = os.path.join(backup_dir, "ipa-data.tar")
else:
backup_dir = os.path.join(
paths.IPA_BACKUP_DIR,
time.strftime('ipa-full-%Y-%m-%d-%H-%M-%S')
)
filename = os.path.join(backup_dir, "ipa-full.tar")
try:
os.mkdir(backup_dir, 0o700)
except (OSError, IOError) as e:
raise admintool.ScriptError(
'Could not create backup directory: %s' % e
)
except Exception as e:
raise admintool.ScriptError(
'Unexpected error: %s' % e
)
args = [
'tar', '--xattrs', '--selinux', '-czf', filename, '.'
]
result = run(args, raiseonerr=False, cwd=self.dir)
if result.returncode != 0:
raise admintool.ScriptError(
'tar returned non-zero code %s: %s' %
(result.returncode, result.error_log)
)
if encrypt:
logger.info('Encrypting %s', filename)
filename = encrypt_file(filename)
try:
shutil.move(self.header, backup_dir)
except (IOError, OSError) as e:
raise admintool.ScriptError(
'Could not create or move data to backup directory %s: %s' %
(backup_dir, e)
)
except Exception as e:
raise admintool.ScriptError(
'Unexpected error: %s' % e
)
logger.info('Backed up to %s', backup_dir)
| 27,619
|
Python
|
.py
| 690
| 28.686957
| 94
| 0.569415
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,739
|
ipa_server_certinstall.py
|
freeipa_freeipa/ipaserver/install/ipa_server_certinstall.py
|
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
# Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2007-2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import os
import os.path
import tempfile
import optparse # pylint: disable=deprecated-module
from ipalib import x509
from ipalib.install import certmonger
from ipaplatform.paths import paths
from ipapython import admintool, dogtag
from ipapython.certdb import NSSDatabase, get_ca_nickname
from ipapython.dn import DN
from ipapython import ipaldap
from ipalib import api, errors
from ipaserver.install import certs, dsinstance, installutils, krbinstance
from ipaserver.install import cainstance
class ServerCertInstall(admintool.AdminTool):
command_name = 'ipa-server-certinstall'
usage = "%prog <-d|-w|-k> [options] <file> ..."
description = "Install new SSL server certificates."
@classmethod
def add_options(cls, parser):
super(ServerCertInstall, cls).add_options(parser)
parser.add_option(
"-d", "--dirsrv",
dest="dirsrv", action="store_true", default=False,
help="install certificate for the directory server")
parser.add_option(
"-w", "--http",
dest="http", action="store_true", default=False,
help="install certificate for the http server")
parser.add_option(
"-k", "--kdc",
dest="kdc", action="store_true", default=False,
help="install PKINIT certificate for the KDC")
parser.add_option(
"--pin",
dest="pin", metavar="PIN", sensitive=True,
help="The password of the PKCS#12 file")
parser.add_option(
"--dirsrv_pin", "--http_pin",
dest="pin",
help=optparse.SUPPRESS_HELP)
parser.add_option(
"--cert-name",
dest="cert_name", metavar="NAME",
help="Name of the certificate to install")
parser.add_option(
"-p", "--dirman-password",
dest="dirman_password",
help="Directory Manager password")
def validate_options(self):
super(ServerCertInstall, self).validate_options(needs_root=True)
installutils.check_server_configuration()
if not any((self.options.dirsrv, self.options.http, self.options.kdc)):
self.option_parser.error(
"you must specify dirsrv, http and/or kdc")
if not self.args:
self.option_parser.error("you must provide certificate filename")
def ask_for_options(self):
super(ServerCertInstall, self).ask_for_options()
if not self.options.dirman_password:
self.options.dirman_password = installutils.read_password(
"Directory Manager", confirm=False, validate=False, retry=False)
if self.options.dirman_password is None:
raise admintool.ScriptError(
"Directory Manager password required")
if self.options.pin is None:
self.options.pin = installutils.read_password(
"Enter private key unlock",
confirm=False, validate=False, retry=False)
if self.options.pin is None:
raise admintool.ScriptError(
"Private key unlock password required")
def validate_http_cert(self):
if dogtag.acme_status():
cert, unused, _unused = self.load_pkcs12(
ca_chain_fname=paths.IPA_CA_CRT,
host_name=api.env.host
)
cainstance.check_ipa_ca_san(cert)
def run(self):
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
api.Backend.ldap2.connect(bind_pw=self.options.dirman_password)
if self.options.http:
self.validate_http_cert()
if self.options.dirsrv:
self.install_dirsrv_cert()
if self.options.http:
self.replace_http_cert()
if self.options.kdc:
self.replace_kdc_cert()
print(
"Please restart ipa services after installing certificate "
"(ipactl restart)")
api.Backend.ldap2.disconnect()
def install_dirsrv_cert(self):
serverid = ipaldap.realm_to_serverid(api.env.realm)
dirname = dsinstance.config_dirname(serverid)
conn = api.Backend.ldap2
entry = conn.get_entry(DN(('cn', 'RSA'), ('cn', 'encryption'),
('cn', 'config')),
['nssslpersonalityssl'])
old_cert = entry.single_value['nssslpersonalityssl']
server_cert = self.import_cert(dirname, self.options.pin,
old_cert, 'ldap/%s' % api.env.host,
'restart_dirsrv %s' % serverid)
entry['nssslpersonalityssl'] = [server_cert]
try:
conn.update_entry(entry)
except errors.EmptyModlist:
pass
def replace_http_cert(self):
"""
Replace the current HTTP cert-key pair with another one
from a PKCS#12 file
"""
# pass in `host_name` to perform
# `NSSDatabase.verify_server_cert_validity()``
cert, key, ca_cert = self.load_pkcs12(
ca_chain_fname=paths.IPA_CA_CRT,
host_name=api.env.host
)
key_passwd_path = paths.HTTPD_PASSWD_FILE_FMT.format(host=api.env.host)
req_id = self.replace_key_cert_files(
cert, key,
cert_fname=paths.HTTPD_CERT_FILE,
key_fname=paths.HTTPD_KEY_FILE,
ca_cert=ca_cert,
passwd_fname=key_passwd_path,
cmgr_post_command='restart_httpd')
if req_id is not None:
certmonger.add_principal(
req_id, 'HTTP/{host}'.format(host=api.env.host))
certmonger.add_subject(req_id, str(DN(cert.subject)))
def replace_kdc_cert(self):
# pass in `realm` to perform `NSSDatabase.verify_kdc_cert_validity()`
cert, key, ca_cert = self.load_pkcs12(
ca_chain_fname=paths.CA_BUNDLE_PEM, realm_name=api.env.realm)
self.replace_key_cert_files(
cert, key, paths.KDC_CERT, paths.KDC_KEY, ca_cert,
profile="KDCs_PKINIT_Certs"
)
krb = krbinstance.KrbInstance()
krb.init_info(
realm_name=api.env.realm,
host_name=api.env.host,
)
krb.pkinit_enable()
def load_pkcs12(self, ca_chain_fname=paths.IPA_CA_CRT, **kwargs):
# Note that the "installutils.load_pkcs12" is quite a complex function
# which performs some checking based on its kwargs:
# host_name performs NSSDatabase.verify_server_cert_validity()
# realm performs NSSDatabase.verify_kdc_cert_validity()
pkcs12_file, pin, ca_cert = installutils.load_pkcs12(
cert_files=self.args,
key_password=self.options.pin,
key_nickname=self.options.cert_name,
ca_cert_files=[ca_chain_fname],
**kwargs)
# Check that the ca_cert is known and trusted
with tempfile.NamedTemporaryFile() as temp:
certs.install_pem_from_p12(pkcs12_file.name, pin, temp.name)
cert = x509.load_certificate_from_file(temp.name)
with tempfile.NamedTemporaryFile("rb") as temp:
certs.install_key_from_p12(pkcs12_file.name, pin, temp.name)
key = x509.load_pem_private_key(
temp.read(), None, backend=x509.default_backend())
return cert, key, ca_cert
def replace_key_cert_files(
self, cert, key, cert_fname, key_fname, ca_cert, passwd_fname=None,
profile=None, cmgr_post_command=None
):
try:
ca_enabled = api.Command.ca_is_enabled()['result']
if ca_enabled:
certmonger.stop_tracking(certfile=cert_fname)
pkey_passwd = None
if passwd_fname is not None:
with open(passwd_fname, 'rb') as f:
pkey_passwd = f.read()
x509.write_certificate(cert, cert_fname)
x509.write_pem_private_key(key, key_fname, pkey_passwd)
if ca_enabled:
# Start tracking only if the cert was issued by IPA CA
# Retrieve IPA CA
cdb = certs.CertDB(api.env.realm, nssdir=paths.IPA_NSSDB_DIR)
ipa_ca_cert = cdb.get_cert_from_db(
get_ca_nickname(api.env.realm))
# And compare with the CA which signed this certificate
if ca_cert == ipa_ca_cert:
req_id = certmonger.start_tracking(
(cert_fname, key_fname),
pinfile=passwd_fname,
storage='FILE',
post_command=cmgr_post_command
)
return req_id
except RuntimeError as e:
raise admintool.ScriptError(str(e))
return None
def check_chain(self, pkcs12_filename, pkcs12_pin, nssdb):
# create a temp nssdb
with NSSDatabase() as tempnssdb:
tempnssdb.create_db()
# import the PKCS12 file, then delete all CA certificates
# this leaves only the server certs in the temp db
tempnssdb.import_pkcs12(pkcs12_filename, pkcs12_pin)
for nickname, flags in tempnssdb.list_certs():
if not flags.has_key:
while tempnssdb.has_nickname(nickname):
tempnssdb.delete_cert(nickname)
# import all the CA certs from nssdb into the temp db
for nickname, flags in nssdb.list_certs():
if not flags.has_key:
cert = nssdb.get_cert_from_db(nickname)
tempnssdb.add_cert(cert, nickname, flags)
# now get the server certs from tempnssdb and check their validity
try:
for nick, flags in tempnssdb.find_server_certs():
tempnssdb.verify_server_cert_validity(nick, api.env.host)
except ValueError as e:
raise admintool.ScriptError(
"Peer's certificate issuer is not trusted (%s). "
"Please run ipa-cacert-manage install and ipa-certupdate "
"to install the CA certificate." % str(e))
def import_cert(self, dirname, pkcs12_passwd, old_cert, principal, command):
pkcs12_file, pin, ca_cert = installutils.load_pkcs12(
cert_files=self.args,
key_password=pkcs12_passwd,
key_nickname=self.options.cert_name,
ca_cert_files=[paths.IPA_CA_CRT],
host_name=api.env.host)
dirname = os.path.normpath(dirname)
cdb = certs.CertDB(api.env.realm, nssdir=dirname)
# Check that the ca_cert is known and trusted
self.check_chain(pkcs12_file.name, pin, cdb)
try:
ca_enabled = api.Command.ca_is_enabled()['result']
if ca_enabled:
cdb.untrack_server_cert(old_cert)
cdb.delete_cert(old_cert)
prevs = cdb.find_server_certs()
cdb.import_pkcs12(pkcs12_file.name, pin)
news = cdb.find_server_certs()
server_certs = [item for item in news if item not in prevs]
server_cert = server_certs[0][0]
if ca_enabled:
# Start tracking only if the cert was issued by IPA CA
# Retrieve IPA CA
ipa_ca_cert = cdb.get_cert_from_db(
get_ca_nickname(api.env.realm))
# And compare with the CA which signed this certificate
if ca_cert == ipa_ca_cert:
cdb.track_server_cert(server_cert,
principal,
cdb.passwd_fname,
command)
except RuntimeError as e:
raise admintool.ScriptError(str(e))
return server_cert
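# Illustrative invocations (flags as defined in add_options above; file names
# and PINs are examples; the tool prompts for the Directory Manager password
# unless -p is given):
#
#   ipa-server-certinstall -w --pin Secret123 /root/httpd.p12
#   ipa-server-certinstall -d --pin Secret123 /root/dirsrv.p12
#   ipa-server-certinstall -k --pin Secret123 /root/kdc.p12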
| 12,904
|
Python
|
.py
| 280
| 34.25
| 80
| 0.597089
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,740
|
sysupgrade.py
|
freeipa_freeipa/ipaserver/install/sysupgrade.py
|
# Authors: Martin Kosek <mkosek@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import logging
import os
import os.path
from ipalib.install import sysrestore
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
STATEFILE_FILE = 'sysupgrade.state'
_sstore = None
def _load_sstore():
global _sstore
if _sstore is None:
_sstore = sysrestore.StateFile(paths.STATEFILE_DIR, STATEFILE_FILE)
def get_upgrade_state(module, state):
_load_sstore()
return _sstore.get_state(module, state)
def set_upgrade_state(module, state, value):
_load_sstore()
_sstore.backup_state(module, state, value)
def remove_upgrade_state(module, state):
_load_sstore()
_sstore.delete_state(module, state)
def remove_upgrade_file():
try:
os.remove(os.path.join(paths.STATEFILE_DIR, STATEFILE_FILE))
except Exception as e:
logger.debug('Cannot remove sysupgrade state file: %s', e)
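# Illustrative usage from an upgrade step (module and state names here are
# hypothetical):
#
#   if not sysupgrade.get_upgrade_state('dns', 'forwarders_fixed'):
#       ...  # perform the one-time fix
#       sysupgrade.set_upgrade_state('dns', 'forwarders_fixed', True)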
| 1,670
|
Python
|
.py
| 45
| 34.444444
| 75
| 0.753408
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,741
|
ipa_subids.py
|
freeipa_freeipa/ipaserver/install/ipa_subids.py
|
#
# Copyright (C) 2021 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib import api
from ipalib import errors
from ipalib.facts import is_ipa_configured
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool, ScriptError
from ipapython.dn import DN
from ipapython.version import API_VERSION
logger = logging.getLogger(__name__)
class IPASubids(AdminTool):
command_name = "ipa-subids"
usage = "%prog [--group GROUP|--all-users]"
description = "Mass-assign subordinate ids to users"
@classmethod
def add_options(cls, parser):
super(IPASubids, cls).add_options(parser, debug_option=True)
parser.add_option(
"--group",
dest="group",
action="store",
default=None,
help="Updates members of a user group.",
)
parser.add_option(
"--all-users",
dest="all_users",
action="store_true",
default=False,
help="Update all users.",
)
parser.add_option(
"--filter",
dest="user_filter",
action="store",
default="(!(nsaccountlock=TRUE))",
help="Additional raw LDAP filter (default: active users).",
)
parser.add_option(
"--dry-run",
dest="dry_run",
action="store_true",
default=False,
help="Dry run mode.",
)
def validate_options(self, needs_root=False):
super().validate_options(needs_root=True)
opt = self.safe_options
if opt.all_users and opt.group:
raise ScriptError("--group and --all-users are mutually exclusive")
if not opt.all_users and not opt.group:
raise ScriptError("Either --group or --all-users required")
def get_group_info(self):
assert api.isdone("finalize")
group = self.safe_options.group
if group is None:
return None
try:
result = api.Command.group_show(group, no_members=True)
return result["result"]
except errors.NotFound:
raise ScriptError(f"Unknown users group '{group}'.")
def make_filter(self, groupinfo, user_filter):
filters = [
# only users with posixAccount
"(objectClass=posixAccount)",
# without subordinate ids
f"(!(memberOf=*,cn=subids,cn=accounts,{api.env.basedn}))",
]
if groupinfo is not None:
filters.append(
self.ldap2.make_filter({"memberof": groupinfo["dn"]})
)
if user_filter:
filters.append(user_filter)
return self.ldap2.combine_filters(filters, self.ldap2.MATCH_ALL)
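    # Illustration only (hypothetical base DN and group name): with
    # "--group editors" the combined filter resembles
    #   (&(objectClass=posixAccount)
    #     (!(memberOf=*,cn=subids,cn=accounts,dc=example,dc=com))
    #     (memberof=cn=editors,cn=groups,cn=accounts,dc=example,dc=com)
    #     (!(nsaccountlock=TRUE)))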
def search_users(self, filters):
users_dn = DN(api.env.container_user, api.env.basedn)
attrs = ["objectclass", "uid"]
logger.debug("basedn: %s", users_dn)
logger.debug("attrs: %s", attrs)
logger.debug("filter: %s", filters)
try:
entries = self.ldap2.get_entries(
base_dn=users_dn,
filter=filters,
attrs_list=attrs,
)
except errors.NotFound:
logger.debug("No entries found")
return []
else:
return entries
def run(self):
if not is_ipa_configured():
print("IPA is not configured.")
return 2
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
api.Backend.ldap2.connect()
self.ldap2 = api.Backend.ldap2
subid_generate = api.Command.subid_generate
dry_run = self.safe_options.dry_run
group_info = self.get_group_info()
filters = self.make_filter(
group_info, self.safe_options.user_filter
)
entries = self.search_users(filters)
        total = len(entries)
        logger.info("Found %i user(s) without subordinate ids", total)
for i, entry in enumerate(entries, start=1):
logger.info(
" Processing user '%s' (%i/%i)",
entry.single_value["uid"],
i,
total
)
if not dry_run:
# TODO: check for duplicate entry (race condition)
# TODO: log new subid
subid_generate(
ipaowner=entry.single_value["uid"],
version=API_VERSION
)
if dry_run:
logger.info("Dry run mode, no user was modified")
else:
logger.info("Updated %s user(s)", total)
return 0
if __name__ == "__main__":
IPASubids.run_cli()
| 4,789
|
Python
|
.py
| 134
| 25.470149
| 79
| 0.566803
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,742
|
ipa_crlgen_manage.py
|
freeipa_freeipa/ipaserver/install/ipa_crlgen_manage.py
|
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import os
import logging
from cryptography.hazmat.backends import default_backend
from cryptography import x509
from ipalib import api
from ipalib.errors import NetworkError
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool
from ipaserver.install import cainstance
from ipaserver.install import installutils
logger = logging.getLogger(__name__)
class CRLGenManage(AdminTool):
command_name = "ipa-crlgen-manage"
usage = "%prog <enable|disable|status>"
description = "Manage CRL Generation Master."
def validate_options(self):
super(CRLGenManage, self).validate_options(needs_root=True)
installutils.check_server_configuration()
option_parser = self.option_parser
if not self.args:
option_parser.error("action not specified")
elif len(self.args) > 1:
option_parser.error("too many arguments")
action = self.args[0]
if action not in {'enable', 'disable', 'status'}:
option_parser.error("unrecognized action '{}'".format(action))
def run(self):
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
try:
api.Backend.ldap2.connect()
except NetworkError as e:
logger.debug("Unable to connect to the local instance: %s", e)
raise RuntimeError("IPA must be running, please run ipactl start")
ca = cainstance.CAInstance(api.env.realm)
try:
action = self.args[0]
if action == 'enable':
self.enable(ca)
elif action == 'disable':
self.disable(ca)
elif action == 'status':
self.status(ca)
finally:
api.Backend.ldap2.disconnect()
return 0
def check_local_ca_instance(self, raiseOnErr=False):
if not api.Command.ca_is_enabled()['result'] or \
not cainstance.is_ca_installed_locally():
if raiseOnErr:
raise RuntimeError("Dogtag CA is not installed. "
"Please install a CA first with the "
"`ipa-ca-install` command.")
else:
logger.warning(
"Warning: Dogtag CA is not installed on this server.")
return False
return True
def enable(self, ca):
# When the local node is not a CA, raise an Exception
self.check_local_ca_instance(raiseOnErr=True)
ca.setup_crlgen(True)
logger.info("CRL generation enabled on the local host. "
"Please make sure to have only a single CRL generation "
"master.")
def disable(self, ca):
# When the local node is not a CA, nothing to do
if not self.check_local_ca_instance():
return
ca.setup_crlgen(False)
logger.info("CRL generation disabled on the local host. "
"Please make sure to configure CRL generation on another "
"master with %s enable", self.command_name)
def status(self, ca):
# When the local node is not a CA, return "disabled"
if not self.check_local_ca_instance():
print("CRL generation: disabled")
return
# Local node is a CA, check its configuration
if ca.is_crlgen_enabled():
print("CRL generation: enabled")
try:
crl_filename = os.path.join(paths.PKI_CA_PUBLISH_DIR,
'MasterCRL.bin')
with open(crl_filename, 'rb') as f:
crl = x509.load_der_x509_crl(f.read(), default_backend())
print("Last CRL update: {}".format(crl.last_update))
for ext in crl.extensions:
if ext.oid == x509.oid.ExtensionOID.CRL_NUMBER:
print("Last CRL Number: {}".format(
ext.value.crl_number))
except IOError:
logger.error("Unable to find last CRL")
else:
print("CRL generation: disabled")
| 4,305
|
Python
|
.py
| 99
| 31.89899
| 78
| 0.591593
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,743
|
ipa_migrate.py
|
freeipa_freeipa/ipaserver/install/ipa_migrate.py
|
# ipa-migrate
#
# IPA to IPA migration tool
#
# Copyright (C) 2023 FreeIPA Contributors see COPYING for license
#
# PYTHON_ARGCOMPLETE_OK
import argcomplete
import argparse
import base64
import datetime
import getpass
import ldap
import ldif
import logging
import os
import socket
import subprocess
import sys
import time
from cryptography import x509 as crypto_x509
from ldap.controls import SimplePagedResultsControl
from ipalib import api, errors
from ipalib.facts import is_ipa_configured
from ipalib.x509 import IPACertificate
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython.ipaldap import LDAPClient, LDAPEntry, realm_to_ldapi_uri
from ipapython.ipa_log_manager import standard_logging_setup
from ipaserver.install.ipa_migrate_constants import (
DS_CONFIG, DB_OBJECTS, DS_INDEXES, BIND_DN, LOG_FILE_NAME,
STRIP_OP_ATTRS, STRIP_ATTRS, STRIP_OC, PROD_ATTRS,
DNA_REGEN_VAL, DNA_REGEN_ATTRS, IGNORE_ATTRS,
DB_EXCLUDE_TREES, POLICY_OP_ATTRS
)
"""
Migration design
==============================================================================
Design Features
- Migration consists of three areas: schema, config, and database
- Allow online (LDAP) or offline (LDIF file) migration. Can mix and match
LDIFs with LDAP, but the LDIF needs to come from the same server where the
online data is retrieved
- Why use LDIF files instead of over the network LDAP?
- Large databases could take a long time to process, and connections
    could get closed, timed out, etc.
- Also allows a "backup" to be used
    for the migration (assuming you have LDIFs of the schema,
config (dse.ldif) and DB (userroot))
- There are options to skip schema or config migration (not sure if this is
needed, but the functionality is present)
- Config and Database migrations uses a "map" object. This map object also
contains the "Summary Report" data items (labels and counts)
- With over LDAP or LDIF, all entries are converted to a common format for
consistent processing.
Schema Migration
--------------------------------
- Option to completely overwrite schema on local server with whatever schema
is on remote server
- Process each attr/objectclass individually. If the "name" exists we do NOT
attempt to migrate it. It is skipped, unless the "overwrite" option is set.
We track stats on what attrs/objectclasses are migrated and skipped
Config Migration
--------------------------------
- Uses a "map" object (DS_CONFIG) to categorize the type of config entry we
wish to migrate.
- Each config type in the map contains attributes (single-valued and
  multi-valued) that we care about. We cannot rely on schema because the core
  config settings are unfortunately not in the schema.
Database Migration
--------------------------------
- Uses a map object (DB_OBJECTS) to identify entries and keep track of what is
updated
- First we skip entries that are in the excluded list
- The entry "type" is determined by DB_OBJECTS mapping. If the type is
unknown then the entry is skipped.
- Skip remote server CA certificate
  - There might be a case to keep these, but not as the main CA. TODO: discuss
- Skip the remote "computer"
- Then the remote entry is cleaned --> clean_entry()
- Remove attributes from the ignore/strip list
- Replace suffix/realm/hostname/domainname in all attribute values and DN
  - Remove objectclasses from groups (STRIP_OC list). This might not be needed
- userCertificate is removed -> if issued by IPA
- Remove unused objectclasses
- The entry is then checked if it exists on local server. If it does not exist
it is added, otherwise we compare the remote and local entries and update the
local entry --> update_local_entry()
- Normalize attribute names to match the case of the local server's attrs
- Loop over remote entry attributes
- Skipping attrs from the "ignore list"
- Check the migration mode (prod-mode & stage-mode)
- If prod-mode, we migrate SIDs and DNA ranges
- If stage-mode, SIDs and DNA are skipped, and dna attributes
(uidNumber, gidNumber) are reset to the magic value
- Check if attribute is defined in the mappings "special_attrs" list
- If it is a special attribute then handle attribute comparison
according to the special definition (e.g. list) and update the
local entry. Then move on to next attribute...
- If the attribute is not "special" then we simply compare attribute to
attribute.
- If requested, DNA values are reset (magic regen) at this stage
      - If the attribute being updated is "single-valued" then "replace" the
        value. If it's "multi-valued" then "append" the different value.
Other
--------------------------------
There is a lot of normalization going on because we use dictionaries for
attribute names, but attribute names are case-insensitive. We have to
normalize them during comparison, yet we need to keep the original case when
we go to update the local entry. So in some cases we normalize to all
lowercase, but when updating the local entry we normalize the case of the
remote attributes to match the local entry's attribute case.
What's next
--------------------------------
- Ask trivino to skip teardown on CI so migration data can be gathered
- Some users/customers add their own entries to the db; more info is needed,
  as those entries will not be migrated by default
- After a password change it does not look like the admin password can be
  reset (kinit_as_user -> for admin, somewhere in the IPA API)
- Write a test based on the "integration" tests (new class)
"""
logger = logging.getLogger(__name__)
# Audit stats
stats = {
# Schema
'schema_attrs_added': 0,
'schema_attrs_skipped': 0,
'schema_oc_added': 0,
'schema_oc_skipped': 0,
'schema_processed': 0,
# Config
'config_processed': 0,
'config_migrated': 0,
# general
'conflicts': 0, # LDIF log entries
'ignored_errors': 0, # force option
# db
'reset_range': 0,
'custom': 0,
'total_db_entries': 0,
'total_db_migrated': 0,
}
#
# Generic helper functions
#
def normalize_attr(entry_attrs, attr):
"""
Convert all the entry attributes that match "attr" to same case as "attr"
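    Illustrative (hypothetical) usage:
        attrs = {'objectclass': [b'top', b'person']}
        normalize_attr(attrs, 'objectClass')
        # attrs == {'objectClass': [b'top', b'person']}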
"""
vals = []
nattr = attr.lower()
for key_attr in entry_attrs:
if key_attr.lower() == nattr:
vals = entry_attrs[key_attr].copy()
del entry_attrs[key_attr]
break
if vals:
entry_attrs[attr] = vals
def ensure_str(val):
"""
    Convert all non-binary values to strings for easier comparison
"""
if val is not None and type(val) is not str:
try:
result = val.decode('utf-8')
except UnicodeDecodeError:
# binary value, must return it as is
result = val
return result
return val
def ensure_list_str(val):
return [ensure_str(v) for v in val]
def decode_attr_vals(entry_attrs):
"""
Decode all attribute values for easier processing
"""
decoded_attrs = {}
for attr in entry_attrs:
vals = ensure_list_str(entry_attrs[attr])
decoded_attrs[attr] = vals
return decoded_attrs
def get_ldif_attr_val(attr, val):
"""
Convert an attribute value to text we can use in an LDIF file.
Return the LDIF format of the attribute value pair
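    Illustrative (hypothetical) results:
        get_ldif_attr_val('cn', 'users')       -> "cn: users\n"
        get_ldif_attr_val('blob', b'\xff\xfe') -> "blob:: //4=\n"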
"""
if type(val) is str:
return f"{attr}: {val}\n"
try:
val = val.decode('utf-8')
return f"{attr}: {val}\n"
except UnicodeDecodeError:
val = base64.b64encode(val)
val = val.decode('utf-8')
return f"{attr}:: {val}\n"
def get_ldif_records(ldif_file, decode=True):
"""
Returns a list of all the parsed records. Only run this on small LDIF
files as all the entries go into memory (cn=config/cn=schema is ok).
"""
content = ldif.LDIFRecordList(open(ldif_file, "r"))
content.parse()
# LDIF entries look like [(dn, entry), (dn, entry), ...]
# dn = Str
# entry = {ATTR: [b'value', b'value', ...], ATTR: [], ...}
if decode:
entries = []
for dn, entry in content.all_records:
entries.append((dn, decode_attr_vals(entry)))
return entries
else:
# Return binary string as LDIFRecordList intended
return content.all_records
def print_progress(msg):
sys.stdout.write('\r')
sys.stdout.write(msg)
sys.stdout.flush()
#
# ldif.LDIFParser requires that we create our own handler for parsing records
# We need to set our class so we can call the IPAMigrate functions
#
class LDIFParser(ldif.LDIFParser):
mc = None
get_realm = False
def set_class(self, obj):
# Sets the IPAMigrate class
self.mc = obj
def look_for_realm(self):
self.get_realm = True
# Override handle() to do our specific migration work
def handle(self, dn, entry):
if self.mc is None:
return
if self.get_realm:
# Get the realm from krb container
if DN(("cn", "kerberos"), self.mc.remote_suffix) in DN(dn):
# check objectclass krbrealmcontainer
oc_attr = 'objectClass'
if 'objectclass' in entry:
oc_attr = 'objectclass'
if 'krbrealmcontainer' in ensure_list_str(entry[oc_attr]):
self.mc.remote_realm = ensure_str(entry['cn'][0])
self.mc.log_debug("Found remote realm from ldif: "
f"{self.mc.remote_realm}")
else:
entry_attrs = decode_attr_vals(entry)
self.mc.process_db_entry(entry_dn=dn, entry_attrs=entry_attrs)
#
# Migrate IPA to IPA Class
#
class IPAMigrate():
command_name = "ipa-migrate"
mode = None
args = None
bindpw = None
remote_suffix = None
local_suffix = None
log_file_name = LOG_FILE_NAME
log_file_mode = "a" # or "w" TBD
local_conn = None
remote_conn = None
log = logger
ldif_writer = None
realm = None
remote_realm = None
dryrun = False
dryrun_record = None
post_notes = [
'You will have to manually migrate IDM related configuration files. '
'Here are some, but not all, of the configuration files to look into:'
'\n - /etc/ipa/*'
'\n - /etc/sssd/sssd.conf'
'\n - /etc/named.conf'
'\n - /etc/named/*'
'\n - ...',
'SSSD should be restarted after a successful migration',
]
#
# Argument Options (will be impacted by AdminTool)
#
# AdminTool uses optParse which is deprecated. I'd like to see IPA migrate
# over to argParse instead of reverting this code to optParse (we will see)
#
def add_options(self, parser):
parser.add_argument('mode', choices=['prod-mode', 'stage-mode'],
help='Migration mode. Choose from "prod-mode" '
'for a production server, or "stage-mode" '
'for a staged server. In "prod-mode" '
'everything will be migrated including the '
'current user sids and DNA ranges. In '
'"stage-mode" sids, dna ranges, etc are '
'not migrated')
parser.add_argument('hostname',
help='The FQDN hostname of the remote IPA '
'server to migrate to this local IPA '
'instance')
parser.add_argument('-v', '--verbose', help='Verbose output',
action='store_true', default=False, dest='verbose')
parser.add_argument('-D', '--bind-dn',
help='The Bind DN to authenticate to the remote '
f'server. The default is "{BIND_DN}"',
default=BIND_DN)
parser.add_argument('-w', '--bind-pw',
help='Password for the Bind DN. If a password '
'is not provided then the user will be '
'prompted to enter it',
default=None)
parser.add_argument('-j', '--bind-pw-file',
help='A text file containing the clear text '
'password for the Bind DN', default=None)
parser.add_argument('-Z', '--cacertfile',
help='File containing a CA Certificate that the '
'remote server trusts',
default=None)
parser.add_argument('-s', '--subtree', action='append', default=[],
help='Adds an additional custom database '
'subtree to include in the migration.')
parser.add_argument('-l', '--log-file',
help='The log file for recording the migration '
f'effort. The default is "{LOG_FILE_NAME}"',
default=LOG_FILE_NAME)
"""
parser.add_argument('-u', '--conflict-ldif-file', # TODO needed?
help='An LDIF file containing conflict entries, '
'or entries that need special attention '
'before they can be migrated. The default '
f'file is "{CONFLICT_FILE_NAME}"',
default=CONFLICT_FILE_NAME)
"""
parser.add_argument('-S', '--skip-schema',
help='Do not migrate schema',
action='store_true', default=False)
parser.add_argument('-C', '--skip-config',
help='Do not migrate the DS configuration '
'(dse.ldif/cn=config)',
action='store_true', default=False)
parser.add_argument('-B', '--migrate-dns',
help='Migrate the DNS records',
action='store_true', default=False)
parser.add_argument('-x', '--dryrun',
help='Go through the migration process but do '
'not write any data to the new IPA server',
action='store_true', default=False)
parser.add_argument('-o', '--dryrun-record',
help='This option does the same thing as '
'"--dryrun", but it will record the changes '
'to an LDIF file')
parser.add_argument('-F', '--force',
help='Ignore errors and continue with migration',
action='store_true', default=False)
parser.add_argument('-q', '--quiet',
help='Only display errors during the migration',
action='store_true', default=False)
parser.add_argument('-O', '--schema-overwrite',
help='Overwrite any matching schema definitions.',
action='store_true', default=False)
parser.add_argument('-r', '--reset-range',
help='Reset the ID range for migrated '
'users/groups. In "stage-mode" this is '
'done automatically',
action='store_true', default=False)
parser.add_argument('-f', '--db-ldif',
help='LDIF file containing the entire backend. '
'If omitted the tool will query the remote '
'IPA server.',
default=None)
parser.add_argument('-m', '--schema-ldif',
help='LDIF file containing the schema. '
'If omitted the tool will query the remote '
'IPA server',
default=None)
parser.add_argument('-g', '--config-ldif',
help='LDIF file containing the cn=config DIT. '
'If omitted the tool will query the remote '
'IPA server',
default=None)
parser.add_argument('-n', '--no-prompt',
help='Do not prompt for confirmation before '
'starting migration. Use at your own risk!',
action='store_true', default=False)
argcomplete.autocomplete(parser)
self.args = parser.parse_args()
def handle_error(self, msg, err=1):
self.log_error(msg)
sys.exit(err)
def validate_options(self):
        # Check LDIF files are real
if self.args.db_ldif is not None:
if not os.path.isfile(self.args.db_ldif):
self.handle_error('The DB LDIF file does not exist')
if self.args.schema_ldif is not None:
if not os.path.isfile(self.args.schema_ldif):
self.handle_error('The Schema LDIF file does not exist')
if self.args.config_ldif is not None:
if not os.path.isfile(self.args.config_ldif):
self.handle_error('The Config LDIF file does not exist')
if self.args.db_ldif is None \
or (self.args.schema_ldif is None and not self.args.skip_schema) \
or (self.args.config_ldif is None and not self.args.skip_config):
            # We need a password to get all of the data from the remote server
self.get_passwd()
# Check custom subtrees
for subtree in self.args.subtree:
try:
DN(subtree)
except Exception:
self.handle_error('Invalid DN used in "subtree" '
f'option: {subtree}')
# Can we write to our LDIF file?
if self.args.dryrun_record is not None:
            try:
                # Probe that the file can be created/written, then close it
                with open(self.args.dryrun_record, "w") as f:
                    f.writable()
            except FileNotFoundError:
                self.handle_error('Cannot write to the dryrun ldif file')
# Validate hostname, must be FQDN and not an IP
hostname_value = self.args.hostname
if hostname_value[-1] == ".":
# strip trailing dot
hostname_value = hostname_value[:-1]
if '.' not in hostname_value:
self.handle_error(
f"Hostname '{hostname_value}' must be the FQDN of the "
"remote server")
# Remove all the dots, if it's a number it's an IP not a FQDN
hostname_value = hostname_value.replace('.', '')
if hostname_value.isnumeric() or ':' in hostname_value:
# might be an IP, still not allowed
self.handle_error(
f"Hostname '{self.args.hostname}' must be the FQDN of the "
"remote server")
# Set the mode
self.mode = self.args.mode
#
# Logging functions (will be replaced by AdminTool)
# Make sure when this ^^^ happens that we can still set "verbose" to True
# We don't want to lose logging all levels to the file
#
def setup_logging(self):
"""
AdminTool currently uses deprecated optparse, so we can not use its
logger since this tool is using argparse. So mimic its logger setup
"""
root_logger = logging.getLogger()
for handler in root_logger.handlers:
if (isinstance(handler, logging.StreamHandler)
and handler.stream is sys.stderr):
root_logger.removeHandler(handler)
break
if self.args.verbose:
console_format = '%(name)s: %(levelname)s: %(message)s'
debug = True
else:
console_format = '%(message)s'
debug = False
# Verbose is set to True so we log everything to the migration log file
standard_logging_setup(
self.args.log_file, console_format=console_format,
filemode=self.log_file_mode, debug=debug, verbose=True)
def log_info(self, msg):
''' write to log and stdout (unless it's quiet) '''
if self.args.quiet:
self.log.debug(msg)
else:
self.log.info(msg)
def log_debug(self, msg):
# log only to the log file
if self.args.verbose:
self.log.info(msg)
else:
self.log.debug(msg)
def log_error(self, msg):
''' write to log and stdout '''
self.log.error(msg)
#
# Helper functions
#
def attr_is_operational(self, attr):
schema = self.local_conn.schema
attr_obj = schema.get_obj(ldap.schema.AttributeType, attr)
if attr_obj is not None:
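            # python-ldap AttributeType.usage == 1 means directoryOperation,
            # i.e. an operational attribute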
if attr_obj.usage == 1:
return True
return False
def replace_suffix(self, entry_dn):
"""
Replace the base DN in an entry DN
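        Illustrative (hypothetical) example, assuming
        remote_suffix=dc=remote,dc=test and local_suffix=dc=local,dc=test:
            'uid=tuser,cn=users,cn=accounts,dc=remote,dc=test'
            -> 'uid=tuser,cn=users,cn=accounts,dc=local,dc=test'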
"""
dn = DN(entry_dn)
if self.remote_suffix in dn:
dn_len = len(dn)
old_len = len(self.remote_suffix)
offset = dn_len - old_len
dn = dn[:offset] # Strip old base DN
dn = dn + self.local_suffix # Add new base DN
return str(dn)
else:
# This entry DN is not in scope
return entry_dn
def replace_suffix_value(self, val):
"""
Take an attribute value and replace the old suffix with the new one
"""
# Skip bytes
if isinstance(val, bytes):
return val
try:
dn = DN(val)
# Value is a DN
return self.replace_suffix(str(dn))
except ValueError:
# Not a DN. Maybe aci or filter? Try replacing substring
val = val.replace(str(self.remote_suffix), str(self.local_suffix))
return val
def replace_suffix_values(self, vals):
"""
Replace suffix values in a list
"""
return [self.replace_suffix_value(v) for v in vals]
def normalize_vals(self, vals):
"""
If the value is a DN, normalize it
"""
new_vals = []
for val in vals:
try:
dn = DN(val)
# Value is a DN
new_vals.append(str(dn))
except (ValueError, TypeError):
# Not a DN
new_vals.append(val)
return new_vals
def write_update_to_ldif(self, entry, add_entry=False):
"""
        Take an LDAPEntry and write its modlist (or add op) to LDIF format so
        it can be processed by ldapmodify
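        Illustrative (hypothetical) modify record this produces:
            dn: uid=tuser,cn=users,cn=accounts,dc=local,dc=test
            changetype: modify
            replace: loginShell
            loginShell: /bin/bash
            -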
"""
if self.args.dryrun_record is None:
return
ldif_entry = f"dn: {str(entry.dn)}\n"
if add_entry:
ldif_entry += "changetype: add\n"
for attr in entry:
vals = entry[attr]
for val in vals:
ldif_entry += get_ldif_attr_val(attr, val)
ldif_entry += "\n" # end of entry
else:
ldif_entry += "changetype: modify\n"
mods = entry.generate_modlist()
for mod in mods:
mod_type = mod[0]
attr = mod[1]
vals = mod[2]
if mod_type == ldap.MOD_ADD:
action = "add"
elif mod_type == ldap.MOD_DELETE:
action = "delete"
else:
action = "replace"
ldif_entry += f"{action}: {attr}\n"
for val in list(vals or []):
ldif_entry += get_ldif_attr_val(attr, val)
ldif_entry += "-\n"
ldif_entry += "\n"
self.dryrun_record.write(ldif_entry)
def write_conflict(self, dn, attrs):
"""
Write an entry that needs special attention to an LDIF for later
review. Maybe we add a "post" section of the tool to evaluate it
after migration as part of the migration, or a separate option to
do it later, or both, or just let the admin manually do it?
Currently this function/feature is not used...
"""
if self.ldif_writer is None:
            self.ldif_writer = ldif.LDIFWriter(
                open(self.args.conflict_ldif_file, "w"))
self.ldif_writer.unparse(dn, attrs)
stats['conflicts'] += 1
def log_stats(self, object_dict):
"""
        Print a migration stat with consistent formatting
"""
indent = 28
logged_something = False
for key in object_dict:
stat_label = object_dict[key]['label']
line = f" - {stat_label}:"
if len(line) >= indent:
padding = 2
else:
padding = indent - len(line)
line = line + (" " * padding)
if self.args.verbose or object_dict[key]['count'] > 0:
self.log_info(f"{line}{object_dict[key]['count']}")
logged_something = True
return logged_something
def display_stats(self, elapsed_time):
"""
Display the summary report of the migration
"""
self.log_info('Migration complete!')
if self.dryrun:
self.log_info('\nDry Run Summary:')
else:
self.log_info('\nSummary:')
self.log_info('=' * 79)
# Basic info
title = 'General Information'
self.log_info("\n" + title)
self.log_info('-' * len(title))
self.log_info(f" - Remote Host: {self.args.hostname}")
self.log_info(" - Migration Duration: "
f"{str(datetime.timedelta(seconds=elapsed_time))}")
self.log_info(f" - Migration Log: {self.args.log_file}")
# self.log_info(" - Conflict LDIF File: "
# f"{self.args.conflict_ldif_file} (entries: "
# f"{stats['conflicts']})")
if self.args.dryrun_record is not None:
self.log_info(" - Dryrun LDIF file: "
f"{self.args.dryrun_record}")
self.log_info(f" - Remote Host: {self.args.hostname}")
self.log_info(f" - Remote Domain: {self.remote_domain}")
self.log_info(f" - Local Host: {self.local_hostname}")
self.log_info(f" - Local Domain: {self.local_domain}")
self.log_info(f" - Remote Suffix: {self.remote_suffix}")
self.log_info(f" - Local Suffix: {self.local_suffix}")
self.log_info(f" - Remote Realm: {self.remote_realm}")
self.log_info(f" - Local Realm: {self.realm}")
for subtree in self.args.subtree:
self.log_info(f" - Custom Subtree: {subtree}")
if self.args.force is not False:
self.log_info(" - Ignored Errors: "
f"{stats['ignored_errors']}")
self.log_info(" - Schema Analyzed: "
f"{stats['schema_processed']} definitions")
self.log_info(" - Config Analyzed: "
f"{stats['config_migrated']} entries")
self.log_info(" - Database Anaylzed: "
f"{stats['total_db_entries']} entries")
# Schema
total_schema = stats['schema_attrs_added'] + stats['schema_oc_added']
title = ('\nSchema Migration (migrated '
f"{total_schema} definitions)")
self.log_info(title)
self.log_info('-' * (len(title) - 1))
self.log_info(" - Attributes: "
f"{stats['schema_attrs_added']}")
self.log_info(" - Objectclasses: "
f"{stats['schema_oc_added']}")
# Configuration
title = ('\nDS Configuration Migration (migrated '
f"{stats['config_migrated']} entries)")
self.log_info(title)
self.log_info('-' * (len(title) - 1))
logged_something = self.log_stats(DS_CONFIG)
if self.args.verbose:
logged_something = True
if not self.log_stats(DS_INDEXES) and not logged_something:
self.log_info(" - No updates")
# Database
title = ("\nDatabase Migration (migrated "
f"{stats['total_db_migrated']} entries)")
self.log_info(title)
self.log_info('-' * (len(title) - 1))
logged_something = False
        if self.args.verbose or self.args.reset_range:
            self.log_info(
                f" - DNA Range Resets: {stats['reset_range']}")
            logged_something = True
        if len(self.args.subtree) > 0:
            self.log_info(
                f" - Custom entries: {stats['custom']}")
            logged_something = True
if not self.log_stats(DB_OBJECTS) and not logged_something:
self.log_info(" - No updates")
# Display any followup notes
title = (f"\nAction Items ({len(self.post_notes)} items)")
self.log_info(title)
self.log_info('-' * (len(title) - 1))
for note in self.post_notes:
self.log_info(' - ' + note)
# The end of the summary
self.log_info('=' * 79)
def connect_to_remote_ds(self):
"""
Connect to the remote DS and store the conn in the class
"""
ldapuri = f"ldap://{self.args.hostname}"
insecure_bind = False
if self.args.cacertfile is not None:
# Start TLS connection (START_TLS)
try:
ds_conn = LDAPClient(ldapuri, cacert=self.args.cacertfile,
start_tls=True)
except ValueError:
# Most likely invalid certificate
self.handle_error(
"Failed to connect to remote server: "
"CA certificate is invalid"
)
except (
ldap.LDAPError,
errors.NetworkError,
errors.DatabaseError,
IOError
) as e:
self.handle_error(
f"Failed to connect to remote server: {str(e)}"
)
else:
# LDAP (insecure)
ds_conn = LDAPClient(ldapuri)
insecure_bind = True
try:
ds_conn.simple_bind(DN(self.args.bind_dn), self.bindpw,
insecure_bind=insecure_bind)
except (
errors.NetworkError,
errors.ACIError,
errors.DatabaseError
) as e:
self.handle_error(f"Failed to bind to remote server: {str(e)}")
# All set, stash the remote connection
self.bindpw = None
self.remote_conn = ds_conn
def connect_to_local_ds(self):
"""
Connect to the local DS over ldapi
"""
try:
ds_conn = LDAPClient(self.ldapiuri, force_schema_updates=True)
ds_conn.external_bind()
ds_conn._get_schema()
except (ldap.SERVER_DOWN, ldap.CONNECT_ERROR, errors.NetworkError):
self.handle_error(
"Local server is not running, or is unreachable.")
except ldap.LDAPError as e:
self.handle_error(
f"Failed to bind to local server: {str(e)}")
# All set, stash the local conn
self.local_conn = ds_conn
def get_remote_realm(self):
"""
Get the remote realm from cn=REALM,cn=kerberos,$SUFFIX
"""
if self.args.db_ldif is not None:
ldifParser = LDIFParser(open(self.args.db_ldif, "r"))
ldifParser.set_class(self)
ldifParser.look_for_realm()
self.log_debug('Getting realm from LDIF file ...')
ldifParser.parse_entry_records()
if self.remote_realm is None:
self.handle_error("Unable to find realm from remote LDIF",
err=2)
self.log_debug('Done getting realm from LDIF file')
else:
krb_entry = self.remote_conn.get_entries(
DN(f"cn=kerberos,{self.remote_suffix}"),
filter="objectclass=krbrealmcontainer")
if len(krb_entry) == 1:
self.remote_realm = ensure_str(krb_entry[0]['cn'][0])
self.log_debug("Found realm from remote server: "
f"{self.remote_realm}")
else:
if len(krb_entry) == 0:
self.handle_error("Failed to find remote realm", err=2)
else:
                    # Found too many realms (should not be possible)
self.handle_error("Found multiple realms, can not proceed",
err=2)
def get_passwd(self):
"""
Get/set the migration password. Check usage arg & pw file, and if not
found prompt user for it.
"""
if self.args.bind_pw is not None:
self.bindpw = self.args.bind_pw
else:
if self.args.bind_pw_file is not None:
# Read password from file
try:
                    with open(self.args.bind_pw_file, "r") as f:
                        self.bindpw = f.readline().rstrip()
except EnvironmentError as e:
self.handle_error(
"Failed to open password file: " + str(e))
else:
# Prompt for password
while self.bindpw is None or self.bindpw == "":
self.bindpw = getpass.getpass(
f'Enter the password for {self.args.bind_dn}: ')
def get_base_dn(self, remote=False):
"""
Search the Root DSE for the default naming context
"""
if not remote:
# Get the local server's base dn
conn = self.local_conn
if conn is None:
self.handle_error(
'There is no connection to the local server')
else:
# Get base DN from remote server. Check online or by LDIF
conn = self.remote_conn
if conn is None:
if self.args.db_ldif is not None:
# Get the base DN from DB ldif itself
                    with open(self.args.db_ldif, "r") as ldif_file:
                        for line in ldif_file:
# The first DN should be the root node
if line.startswith('dn: '):
return DN(line.replace('dn: ', ''))
self.handle_error('The db ldif file does not appear to '
'be a valid ldif file')
else:
self.handle_error('There is no connection to the remote '
'server or an LDIF file to process')
# We have our connection to the server, get the base dn from root DSE
try:
if remote:
server_type = "remote"
else:
server_type = "local"
entry = conn.get_entry(DN(""),
attrs_list=['namingcontexts',
'defaultnamingcontext'])
if 'defaultnamingcontext' in entry:
suffix = entry['defaultnamingcontext'][0]
suffix_entry = conn.get_entry(DN(suffix), attrs_list=['info'])
if 'info' not in suffix_entry or \
'IPA V2' not in suffix_entry['info'][0]:
self.handle_error(f'The {server_type} server does not '
'appear to be an IPA server', err=2)
return DN(suffix)
else:
for suffix in entry['namingcontexts']:
# Ignore o=ipaca and cn=changelog
if suffix.lower() != "o=ipaca" and \
suffix.lower() != "cn=changelog":
try:
suffix_entry = conn.get_entry(DN(suffix),
attrs_list=['info'])
if 'info' not in suffix_entry or \
'IPA V2' not in suffix_entry['info'][0]:
self.handle_error(f'The {server_type} server '
'does not appear to be '
'an IPA server', err=2)
return DN(suffix)
except (IndexError, KeyError) as e:
self.handle_error(
"Failed to find naming context: " + str(e))
# If we got here there is no userroot
self.handle_error(
"Failed to get database base DN as it does not exist")
except ldap.LDAPError as e:
self.handle_error(
"Failed to search Root DSE on remote server: " + str(e))
return None
def return_type(self, db_item):
""" Check our migration mode and return None if this entry should be
skipped
"""
if db_item[1]['mode'] == "production" and self.mode != "prod-mode":
# Production only type, but we are not in production mode
return None
# This entry can be migrated
return db_item[0]
def get_entry_type(self, entry_dn, entry_attrs):
"""
Get the type of entry from its objectclasses and DN
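        Illustrative (hypothetical) usage:
            get_entry_type('uid=tuser,cn=users,cn=accounts,dc=local,dc=test',
                           {'objectClass': ['top', 'posixaccount']})
            -> the matching DB_OBJECTS key, or 'custom' for a custom
               subtree, or None if the entry is not recognized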
"""
oc_attr = 'objectClass'
if 'objectclass' in entry_attrs:
oc_attr = 'objectclass'
for oc in entry_attrs[oc_attr]:
oc = oc.lower()
for db_item in DB_OBJECTS.items():
db_obj = db_item[1]
obj_ocs = db_obj['oc']
# Do the suffix and realm substitution
obj_subtree = db_obj['subtree'].replace(
'$SUFFIX', str(self.remote_suffix))
obj_subtree = obj_subtree.replace('$REALM', self.realm)
if len(obj_ocs) > 0:
for obj_oc in obj_ocs:
if oc == obj_oc:
# OC matches, check if we have a subtree to check
if 'not_oc' in db_obj:
# We have to filter out entries that have a
# not_oc
ocs = [x.lower() for x in entry_attrs[oc_attr]]
for not_oc in db_obj['not_oc']:
if not_oc in ocs:
return None
if obj_subtree is not None:
if obj_subtree[0] == ",":
# Match child entries
obj_subtree = obj_subtree[1:]
if DN(obj_subtree) != DN(entry_dn) and \
DN(obj_subtree) in DN(entry_dn):
return self.return_type(db_item)
else:
# Match DN exactly
if DN(obj_subtree) == DN(entry_dn):
return self.return_type(db_item)
else:
return self.return_type(db_item)
else:
if obj_subtree[0] == ",":
# Match child entries
obj_subtree = obj_subtree[1:]
if DN(obj_subtree) != DN(entry_dn) and \
DN(obj_subtree) in DN(entry_dn):
return self.return_type(db_item)
else:
# Match DN exactly
if DN(obj_subtree) == DN(entry_dn):
return self.return_type(db_item)
# Check custom subtrees
for subtree in self.args.subtree:
if DN(subtree) == DN(entry_dn) or \
DN(subtree) in DN(entry_dn):
return 'custom'
# We don't know this entry, so we can ignore it
return None
#
# DB Migration
#
def get_cert_issuer(self, cert_value):
cert = crypto_x509.load_der_x509_certificate(cert_value)
ipacert = IPACertificate(cert)
issuer = str(DN(ipacert.issuer))
return issuer
def remove_usercert(self, entry_dn, cert_values):
"""
If the usercertificate was issued by IPA then mark it to be removed,
otherwise we keep it
"""
remove_vals = []
for cert_val in cert_values:
issuer = self.get_cert_issuer(cert_val)
REALM_LIST = [self.realm, self.remote_realm]
cert_removed = False
for realm in REALM_LIST:
if issuer == f"CN=Certificate Authority,O={realm}":
# This is an IPA issued cert, remove it
remove_vals.append(cert_val)
self.log_debug("Removed IPA issued userCertificate "
f"from: {entry_dn}")
cert_removed = True
break
if not cert_removed:
self.log_debug("Keeping userCertificate issued by "
f"'{issuer}' in entry: {entry_dn}")
# Now remove the values from cert_vals
for val in remove_vals:
cert_values.remove(val)
return len(cert_values) == 0
def convert_value(self, val, dns=False):
"""
Replace suffix, hostname, domain, and realm from a string
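        Illustrative (hypothetical) conversion, assuming remote host
        r.remote.test, realm REMOTE.TEST, and local host l.local.test:
            'ldap://r.remote.test/dc=remote,dc=test'
            -> 'ldap://l.local.test/dc=local,dc=test'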
"""
if isinstance(val, bytes) or isinstance(val, DN):
return val
# Replace base DN
val = self.replace_suffix_value(val)
# For DNS DN we only replace suffix
if dns:
return val
# Replace host
if self.args.hostname in val:
val = val.replace(self.args.hostname, self.local_hostname)
# Replace domain
if self.remote_domain in val and self.local_domain not in val:
val = val.replace(self.remote_domain, self.local_domain)
# Replace realm
val = val.replace(self.remote_realm, self.realm)
return val
def convert_values(self, values, dns=False):
"""
Replace suffix, hostname, domain, and realm in a list
"""
new_values = []
for val in values:
new_values.append(self.convert_value(val, dns))
# normalize DN values
return self.normalize_vals(new_values)
def get_ldapentry_attr_vals(self, entry, attr):
"""
Get the raw attribute values from IPA's LDAPEntry
"""
vals = []
attr_vals = entry.raw.get(attr)
for val in attr_vals:
if isinstance(val, bytes):
vals.append(ensure_str(val))
elif not isinstance(val, str):
val = str(val)
vals.append(ensure_str(val))
else:
# Just a string
vals.append(val)
return self.normalize_vals(vals)
def build_ldap_entry(self, dn, attrs):
"""
Take a DN and some attributes and build an LDAPEntry. Used when
adding entries to the local server
"""
entry = LDAPEntry(self.local_conn, DN(dn))
range_reset = False
for attr, values in attrs.items():
if (self.args.reset_range or self.mode == "stage-mode") and \
attr.lower() in DNA_REGEN_ATTRS:
# Set the magic regen value
values = [DNA_REGEN_VAL]
self.log_debug(f"Resetting DNA range for new entry: {dn}")
range_reset = True
entry[attr] = values
if range_reset:
stats['reset_range'] += 1
return entry
def attr_is_required(self, attr, entry):
"""
Check if an attribute is required in this entry
"""
entry_oc = entry['objectClass']
for oc in entry_oc:
required_attrs = self.local_conn.get_allowed_attributes(
[oc], raise_on_unknown=False, attributes="must")
if attr.lower() in required_attrs:
return True
return False
def clean_entry(self, entry_dn, entry_type, entry_attrs):
"""
Clean up the entry from the remote server
- Remove attributes from the ignore/strip list
- Reset suffix in all attributes
- If REALM was changed reset it to the new value
- Remove objectclasses from groups (STRIP_OC list)
- userCertificate is removed if issued by IPA
- Remove unused objectclasses
"""
# Don't clean DNS entries
if entry_type.startswith("dns"):
return entry_attrs
# Set the attrs we want to remove
remove_list = []
remove_attrs = STRIP_ATTRS + STRIP_OP_ATTRS
if self.args.mode != "prod-mode":
remove_attrs += PROD_ATTRS
# Need to remove the remote host member from the ipaserver host group
remove_member = False
if entry_type == "host_groups" and \
entry_dn.startswith("cn=ipaservers,"):
            # We need to remove any members that match the old host
remove_member = True
# Walk the entry normalizing and marking attrs to remove as needed
for attr in entry_attrs:
if attr.lower() in remove_attrs:
remove_list.append(attr)
continue
# remove remote server hostgroup member
if remove_member and attr == "member":
new_vals = []
for val in entry_attrs[attr]:
if val.startswith("fqdn=" + self.local_hostname):
new_vals.append(val)
else:
self.log_debug(
f"Skipping remote host '{val}' from '{entry_dn}'")
remove_member = False
entry_attrs[attr] = new_vals
# Replace suffix/realm/host/domain in all values
entry_attrs[attr] = self.convert_values(entry_attrs[attr])
# Check userCertificate issuer and remove IPA CA certs
if attr.lower() == "usercertificate" and \
self.remove_usercert(entry_dn, entry_attrs[attr]):
# This cert was issued by IPA, remove it
remove_list.append(attr)
# Cleanup up entry attributes
for remove_attr in remove_list:
del entry_attrs[remove_attr]
# Normalize the objectclass name -> objectClass
normalize_attr(entry_attrs, 'objectClass')
# Cleanup objectclasses from groups (users too?)
if entry_type == "group":
for oc in ensure_list_str(entry_attrs['objectClass']):
if oc.lower() in STRIP_OC:
                    entry_attrs['objectClass'].remove(oc)
# Cleanup unused objectclasses. We removed some attributes, so there
# might be objectclasses we don't need
entry_oc = ensure_list_str(entry_attrs['objectClass'])
for oc in entry_oc:
found = False
required_attrs = self.local_conn.get_allowed_attributes(
[oc], raise_on_unknown=False, attributes="must")
if len(required_attrs) == 0:
# This objectclass does not require any attributes, move on
continue
for attr in required_attrs:
for entry_attr in entry_attrs:
if entry_attr.lower().startswith(attr.lower()):
# The startswith approach allows for attr extensions
found = True
break
if found:
break
if not found:
# Ok, there were no attributes that require this objectclass
entry_attrs['objectClass'].remove(oc)
return entry_attrs
def update_local_entry(self, entry_type, local_dn, local_entry,
remote_attrs):
"""
Go through the remote entry (which has already been cleaned up) and
convert remote attribute names to the same case as the local entry.
Then create the mod list
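        Illustrative (hypothetical) merge of a 'list' special attribute,
        where a single comma-separated value is merged item by item:
            remote: exampleAttr: valueA,valueB
            local:  exampleAttr: valueA
            result: exampleAttr: valueA,valueB  (missing item appended)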
"""
entry_updated = False
range_reset = False
# Reset the remote attribute name to match the same case as the local
# attributes.
for remote_attr in list(remote_attrs):
for local_attr in local_entry:
if local_attr.lower() == remote_attr.lower() and \
local_attr != remote_attr:
# The case is different, reset remote to match local
vals = remote_attrs[remote_attr].copy()
del remote_attrs[remote_attr]
remote_attrs[local_attr] = vals
# For non-admin users we need to strip krb attributes so userpassword
# can be migrated
if entry_type == "users":
updated = False
for attr in DB_OBJECTS['users']['strip_attrs']:
if attr in local_entry:
del local_entry[attr]
updated = True
if updated:
self.write_update_to_ldif(local_entry)
self.local_conn.update_entry(local_entry)
local_entry = self.local_conn.get_entry(DN(local_dn),
attrs_list=['*', '+'])
# Loop over the remote entry, and add whatever attr and/or value is
# missing from the local entry
for attr in remote_attrs:
if attr.lower() in IGNORE_ATTRS:
# We are not merging this attribute, just move on unless..
if self.mode == "prod-mode":
if attr.lower() not in PROD_ATTRS:
# We are in production mode, but this attr can still be
# skipped
continue
else:
continue
if entry_type == "admin" and attr.lower() == "userpassword":
# Can not modify userpassword on admin, skip it
self.post_notes.append(
"The admin password is not migrated from the remote "
"server. Reset it manually if needed.")
continue
if attr in local_entry:
# Check if we have special attributes to process.
# These attributes need their values handled in a special way.
# The attrs are a tuple of attr name and type. Based on the
# type of the attribute we will handle the value comparision
# differently.
if 'special_attrs' in DB_OBJECTS[entry_type]:
goto_next_attr = False
for sp_attr in DB_OBJECTS[entry_type]['special_attrs']:
if attr.lower() == sp_attr[0]:
local_attr_vals = self.get_ldapentry_attr_vals(
local_entry, attr)
if 'list' == sp_attr[1]:
# These attributes are single valued. Split
# them up into parts and compare
remote_items = remote_attrs[attr][0].lower() \
.split(',')
local_items = local_attr_vals[0].lower() \
.split(',')
# Track what is missing
new_items = []
for remote_item in remote_items:
if remote_item not in local_items:
new_items.append(remote_item)
# Add the missing values to the current value
# (preserves case of the original value)
old_value = local_entry[attr][0]
for item in new_items:
local_entry[attr][0] += f",{item}"
if len(new_items) > 0:
entry_updated = True
self.log_debug("Entry is different and "
"will be updated: "
f"'{local_dn}' attribute "
f"'{attr}' old value "
f"'{old_value}' "
"new value "
f"'{local_entry[attr][0]}'")
elif 'single' == sp_attr[1]:
# The attribute is defined as multivalued, but
# we really need to treat it as single valued
self.log_debug("Entry is different and will "
f"be updated: '{local_dn}' "
f"attribute '{attr}' replaced "
"with val "
f"'{remote_attrs[attr][0]}' "
"old value: "
f"{local_entry[attr][0]}")
local_entry[attr][0] = remote_attrs[attr][0]
goto_next_attr = True
break
if goto_next_attr:
continue
# merge values
for val in remote_attrs[attr]:
local_attr_vals = self.get_ldapentry_attr_vals(local_entry,
attr)
if val not in local_attr_vals:
# Check if we should reset the DNA range for this entry
if (
self.args.reset_range
or self.mode == "stage-mode"
) and attr.lower() in DNA_REGEN_ATTRS:
# Skip dna attributes from managed entries
if 'mepManagedBy' in local_entry:
break
# Ok, set the magic regen value
local_entry[attr] = [DNA_REGEN_VAL]
self.log_debug("Resetting the DNA range for: "
f"{local_dn}")
range_reset = True
elif self.local_conn.get_attribute_single_value(attr):
# Must "replace" single valued attribute
local_entry[attr] = remote_attrs[attr]
self.log_debug("Entry is different and will be "
f"updated: '{local_dn}' attribute "
f"'{attr}' replaced with val "
f"'{val}' old value: "
f"{str(local_attr_vals)}")
else:
# Ok, "append" multivalued attribute value
local_entry[attr].append(val)
self.log_debug("Entry is different and will be "
f"updated: '{local_dn}' attribute "
f"'{attr}' add val '{val}' not "
f"in {str(local_attr_vals)}")
entry_updated = True
else:
# Attribute does not exist in the local entry, copy the
# entire attribute/valueset over
local_entry[attr] = remote_attrs[attr]
entry_updated = True
# Remove attributes in the local entry that do not exist in the
# remote entry
remove_attrs = []
for attr in local_entry:
if (self.attr_is_operational(attr)
and attr.lower() not in POLICY_OP_ATTRS) or \
attr.lower() in IGNORE_ATTRS or \
attr.lower() in STRIP_ATTRS or \
attr.lower() == "usercertificate":
# This is an attribute that we do not want to remove
continue
if attr not in remote_attrs and \
not self.attr_is_required(attr, local_entry):
# Mark this attribute for deletion
remove_attrs.append(attr)
entry_updated = True
# Remove attributes
for remove_attr in remove_attrs:
self.log_debug("Entry is different and will be updated: "
f"'{local_dn}' attribute '{remove_attr}' "
"is being removed")
del local_entry[remove_attr]
if range_reset:
stats['reset_range'] += 1
# return updated local entry
if entry_updated:
return local_entry
else:
return None
def process_db_entry(self, entry_dn, entry_attrs):
"""
Process chunks of remote entries from a paged results search
entry_dn = the remote entry DN
entry_attrs = the remote entry's attributes stored in a dict
Identify entry type
Process entry (removing/change attr/val/schema)
Compare processed remote entry with local entry, merge/overwrite?
Add/replace local entry
...
"""
stats['total_db_entries'] += 1
if stats['total_db_entries'] % 1000 == 0:
print_progress(
f"Processed {stats['total_db_entries']} entries... ")
# First just skip entries we are excluding
for exclude_dn in DB_EXCLUDE_TREES:
exclude_dn = exclude_dn.replace("$SUFFIX",
str(self.remote_suffix))
if DN(exclude_dn) in DN(entry_dn):
return
# Determine entry type: user, group, hbac, etc
entry_type = self.get_entry_type(entry_dn, entry_attrs)
if entry_type is None:
# We are not interested in this entry
return
if entry_type.startswith("dns") and not self.args.migrate_dns:
# Ok skipping dns
return
if entry_type == 'certificate':
# Ok we need to skip remote CA Cert (in all cases? TODO)
if 'cACertificate;binary' in entry_attrs:
issuer = self.get_cert_issuer(
entry_attrs['cACertificate;binary'][0])
if issuer == f"CN=Certificate Authority,O={self.remote_realm}":
self.log_debug("Skipping remote certificate entry: "
f"'{entry_dn}' Issuer: {issuer}")
return
if entry_type == "computer":
            if entry_attrs['fqdn'][0] == self.args.hostname:
# We do not migrate the remote computer
return
# Cleanup the remote entry before merging/adding
remote_attrs = self.clean_entry(entry_dn, entry_type, entry_attrs)
# First we need to convert dn to match local server
local_dn = self.convert_value(str(entry_dn),
dns=entry_type.startswith("dns"))
#
# Based on the entry type do additional work
#
# For entries with alternate identifying needs we need to rebuild the
# local dn. Typically this is for entries that use ipaUniqueId as the
# RDN attr
if entry_type != "custom" and 'alt_id' in DB_OBJECTS[entry_type]:
attr = DB_OBJECTS[entry_type]['alt_id']['attr']
base = DB_OBJECTS[entry_type]['alt_id']['base']
srch_filter = f'({attr}={entry_attrs[attr][0]})'
if DB_OBJECTS[entry_type]['alt_id']['isDN'] is True:
# Convert the filter to match the local suffix
srch_filter = self.replace_suffix_value(srch_filter)
srch_base = base + str(self.local_suffix)
try:
entries = self.local_conn.get_entries(DN(srch_base),
filter=srch_filter)
if len(entries) == 1:
local_dn = entries[0].dn
elif len(entries) == 0:
# Not found, no problem just proceed and we will add it
pass
else:
# Found too many entries - should not happen
self.log_error('Found too many local matching entries '
f'for "{local_dn}"')
if self.args.force:
stats['ignored_errors'] += 1
return
else:
sys.exit(1)
except errors.EmptyResult:
# Not found, no problem just proceed and we will add it later
pass
except (errors.NetworkError, errors.DatabaseError) as e:
self.log_error('Failed to find a local matching entry for '
f'"{local_dn}" error: {str(e)}')
if self.args.force:
stats['ignored_errors'] += 1
return
else:
sys.exit(1)
# See if the entry exists on the local server
try:
local_entry = self.local_conn.get_entry(DN(local_dn),
attrs_list=['*', '+'])
# Merge the two entry's attributes
local_entry = self.update_local_entry(entry_type,
local_dn,
local_entry,
remote_attrs)
if local_entry is None:
return
if self.dryrun:
self.write_update_to_ldif(local_entry)
if entry_type == "custom":
stats['custom'] += 1
else:
DB_OBJECTS[entry_type]['count'] += 1
stats['total_db_migrated'] += 1
return
# Update the local entry
try:
self.local_conn.update_entry(local_entry)
if entry_type == "custom":
stats['custom'] += 1
else:
DB_OBJECTS[entry_type]['count'] += 1
except errors.ExecutionError as e:
self.log_error(f'Failed to update "{local_dn}" error: '
f'{str(e)}')
if self.args.force:
stats['ignored_errors'] += 1
return
else:
sys.exit(1)
except errors.NotFound:
# Entry does not exist on the local server, add it
try:
add_entry = self.build_ldap_entry(local_dn, remote_attrs)
if self.dryrun:
self.log_debug(f"Add db entry '{local_dn} - {entry_type}'")
self.write_update_to_ldif(add_entry, add_entry=True)
if entry_type == "custom":
stats['custom'] += 1
else:
DB_OBJECTS[entry_type]['count'] += 1
stats['total_db_migrated'] += 1
return
self.local_conn.add_entry(add_entry)
if entry_type == "custom":
stats['custom'] += 1
else:
DB_OBJECTS[entry_type]['count'] += 1
self.log_debug(f"Added entry: {local_dn}")
except errors.ExecutionError as e:
self.log_error(f'Failed to add "{local_dn}" error: {str(e)}')
if self.args.force:
stats['ignored_errors'] += 1
return
else:
sys.exit(1)
stats['total_db_migrated'] += 1
def processDBOffline(self):
"""
        Call our LDIFParser to go through each LDIF entry one at a time to
        avoid loading the entire LDIF into memory
"""
ldifParser = LDIFParser(open(self.args.db_ldif, "r"),
ignored_attr_types=STRIP_OP_ATTRS)
ldifParser.set_class(self)
ldifParser.parse_entry_records()
def processDBOnline(self):
"""
Search UserRoot using a Paged Result search. This prevents loading
too many entries into memory at one time
"""
results_done = False
paged_ctrl = SimplePagedResultsControl(True, size=500, cookie='')
controls = [paged_ctrl]
req_pr_ctrl = controls[0]
db_filter = ("(objectclass=*)")
# Start the paged results search
try:
remote_msgid = self.remote_conn.conn.search_ext(
str(self.remote_suffix),
ldap.SCOPE_SUBTREE,
db_filter,
['*', 'nsaccountlock'],
serverctrls=controls)
except ldap.LDAPError as e:
self.log_error(f"Failed to get remote entries: {str(e)}")
sys.exit(1)
while not results_done:
            try:
                if not results_done:
                    rtype, db_data, db_msgid, db_ctrls = \
                        self.remote_conn.conn.result3(remote_msgid)
                    if self.args.verbose:
                        self.log_debug("Database search succeeded: "
                                       f"type {rtype} msgid {db_msgid}")
            except ldap.LDAPError as e:
                self.handle_error("Database search failed: "
                                  f"{str(e)} msgid {remote_msgid}")
#
# Process this chunk of remote entries
#
for entry in db_data:
entry_dn = entry[0]
entry_attrs = decode_attr_vals(entry[1])
self.process_db_entry(entry_dn, entry_attrs)
# Get the next batch of entries
dbctrls = [
c
for c in db_ctrls
if c.controlType == SimplePagedResultsControl.controlType
]
if dbctrls and dbctrls[0].cookie:
try:
req_pr_ctrl.cookie = dbctrls[0].cookie
controls = [req_pr_ctrl]
remote_msgid = self.remote_conn.conn.search_ext(
str(self.remote_suffix),
ldap.SCOPE_SUBTREE,
db_filter,
['*', 'nsaccountlock'],
serverctrls=controls)
except ldap.LDAPError as e:
self.handle_error("Problem searching the remote server: "
f"{str(e)}")
else:
results_done = True
def migrateDB(self):
"""
Used paged search for online method to avoid large memory footprint
"""
self.log_info("Migrating database ... (this may take a while)")
if self.args.db_ldif is not None:
self.processDBOffline()
else:
self.processDBOnline()
print_progress(f"Processed {stats['total_db_entries']} entries.\n")
#
# Schema Migration
#
def migrateSchema(self):
"""
Add any missing schema definitions to this server
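        Illustrative (hypothetical) schema definition and the name token
        this code compares via attr_val.split()[3]:
            ( 2.16.840.1.99999.1 NAME 'exampleAttr' SYNTAX ... )
            split()[3] -> "'exampleAttr'" (compared lower-cased)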
"""
self.log_info("Migrating schema ...")
if self.args.schema_ldif is not None:
self.log_debug("Getting schema from LDIF file ...")
schema_entry = get_ldif_records(self.args.schema_ldif)
# Grab attribute list
normalize_attr(schema_entry[0][1], 'attributeTypes')
attributes = schema_entry[0][1]['attributeTypes']
# Grab objectclass list
normalize_attr(schema_entry[0][1], 'objectClasses')
objectclasses = schema_entry[0][1]['objectClasses']
else:
# Query the remote server for its schema
self.log_debug("Getting schema from the remote server ...")
schema = self.remote_conn._get_schema()
schema_entry = schema.ldap_entry()
# Grab attribute list
normalize_attr(schema_entry, 'attributeTypes')
attributes = ensure_list_str(schema_entry['attributeTypes'])
# Grab objectclass list
normalize_attr(schema_entry, 'objectClasses')
objectclasses = ensure_list_str(schema_entry['objectClasses'])
self.log_debug(f"Retrieved {len(attributes)} attributes and "
f"{len(objectclasses)} objectClasses")
# Loop over attributes and objectclasses and count them
schema = self.local_conn.schema
local_schema = schema.ldap_entry()
for schema_type in [(attributes, "attributeTypes"),
(objectclasses, "objectClasses")]:
for attr_val in schema_type[0]:
stats['schema_processed'] += 1
if not self.args.schema_overwrite:
# Check if this attribute exists in the local server,
# if so skip it.
remote_name = attr_val.split()[3].lower()
skip_value = False
# Loop over all the attributes and check for a match
normalize_attr(local_schema, schema_type[1])
for local_val in ensure_list_str(
local_schema[schema_type[1]]):
local_name = local_val.split()[3].lower()
if local_name == remote_name:
# Found a match, skip it
skip_value = True
break
if skip_value:
if schema_type[1] == "attributeTypes":
stats['schema_attrs_skipped'] += 1
else:
stats['schema_oc_skipped'] += 1
continue
try:
if self.dryrun:
self.log_debug("Schema add "
f"{schema_type[1]}: {attr_val}")
if schema_type[1] == "attributeTypes":
stats['schema_attrs_added'] += 1
else:
stats['schema_oc_added'] += 1
# Write schema update to ldif file
if self.dryrun_record is not None:
schema_update = "dn: cn=schema\n"
schema_update += "changetype: modify\n"
schema_update += f"add: {schema_type[1]}\n"
schema_update += f"{schema_type[1]}: attr_val\n\n"
self.dryrun_record.write(schema_update)
continue
self.local_conn.conn.modify_ext_s(
"cn=schema", [(
ldap.MOD_ADD,
schema_type[1],
bytes(attr_val, 'utf-8')
)]
)
if schema_type[1] == "attributeTypes":
stats['schema_attrs_added'] += 1
else:
stats['schema_oc_added'] += 1
self.log_debug(
f"Added schema - {schema_type[1]}: {attr_val}")
except ldap.TYPE_OR_VALUE_EXISTS:
# Error 16 - this attribute already exists, move on
if schema_type[1] == "attributeTypes":
stats['schema_attrs_skipped'] += 1
else:
stats['schema_oc_skipped'] += 1
except ldap.LDAPError as e:
if self.args.force:
self.log_debug(
"Skipping schema value that triggered an "
f"error: '{attr_val}' - {str(e)}")
if schema_type[1] == "attributeTypes":
stats['schema_attrs_skipped'] += 1
else:
stats['schema_oc_skipped'] += 1
stats['ignored_errors'] += 1
else:
self.handle_error("Failed to add schema value: "
f"'{attr_val}' - {str(e)}")
# Flush the schema cache
self.local_conn._flush_schema()
self.log_debug(f"Migrated {stats['schema_attrs_added']} attributes "
f"and {stats['schema_oc_added']} objectClasses")
self.log_debug(f"Skipped {stats['schema_attrs_skipped']} attributes "
f"and {stats['schema_oc_skipped']} objectClasses")
#
# Configuration Migration
#
def process_config_entry(self, dn, remote_attrs, ds_config,
add_missing=False):
"""
Get the local entry, and check the attributes in ds_config
for any differences and apply them
"""
all_attrs = ds_config['attrs'] + ds_config['multivalued']
updated_entry = False
try:
local_entry = self.local_conn.get_entry(DN(dn))
for check_attr in all_attrs:
# Because the attribute case could be different we have to do
# all these "for" loops to properly check and properly update
# the local entry
for remote_attr in remote_attrs:
if remote_attr.lower() == check_attr.lower():
# The remote entry has this attribute, proceed
attr_exists = False
for local_attr in local_entry:
if check_attr.lower() == local_attr.lower():
# The local entry also has this attr, proceed
attr_exists = True
remote_vals = self.convert_values(
remote_attrs[remote_attr])
local_vals = self.normalize_vals(
local_entry[local_attr])
for rval in remote_vals:
# Check values
if rval not in local_vals:
updated_entry = True
if check_attr in ds_config[
'multivalued'
]:
# Append value
local_entry[local_attr].append(
rval)
self.log_debug("Config setting "
f"{local_attr}' "
"added value "
f"'{rval}'"
f" in '{dn}'")
else:
# Replace attr value
old_vals = local_entry[
local_attr
]
local_entry[local_attr] = \
remote_vals
val = remote_vals[0]
self.log_debug("Config setting '"
f"{local_attr}' "
"replaced "
f"'{str(old_vals)}'"
f" with '{val}'"
f" in '{dn}'")
break
if not attr_exists:
# local entry is missing this attribute, add it
remote_vals = self.convert_values(
remote_attrs[remote_attr])
local_entry[remote_attr] = remote_vals
self.log_debug("Config setting '"
f"{remote_attr}' "
"added: '{remote_vals}'"
f" under '{dn}'")
if updated_entry:
if not self.dryrun:
try:
self.local_conn.update_entry(local_entry)
except Exception as e:
if not self.args.force:
self.handle_error(
f"Error updating local entry: {str(e)}")
else:
self.log_error(
f"Error updating local entry: {str(e)}")
stats['ignored_errors'] += 1
self.write_update_to_ldif(local_entry)
ds_config['count'] += 1
stats['config_migrated'] += 1
except errors.NotFound:
# This entry does not exist in the local server
if add_missing:
# Add the missing entry
add_entry = self.build_ldap_entry(dn, remote_attrs)
if not self.dryrun:
self.local_conn.add_entry(add_entry)
self.write_update_to_ldif(add_entry, add_entry=True)
ds_config['count'] += 1
stats['config_migrated'] += 1
self.log_debug(f"Added config entry: {dn}")
def migrateConfig(self):
"""
Process and migrate settings and entries from cn=config(dse.ldif)
"""
self.log_info("Migrating configuration ...")
remote_dse = []
if self.args.config_ldif is not None:
self.log_debug("Getting config from LDIF file ...")
dse_entries = get_ldif_records(self.args.config_ldif)
for entry in dse_entries:
if str(entry[0]) == '':
continue
remote_dse.append({
'dn': entry[0],
'attrs': entry[1]
})
else:
self.log_debug("Getting config from the remote server ...")
config_entries = self.remote_conn.get_entries(DN("cn=config"))
for entry in config_entries:
attrs = {}
for attr in entry:
attrs[attr] = self.get_ldapentry_attr_vals(entry, attr)
remote_dse.append({
'dn': str(entry.dn),
'attrs': attrs,
})
# Now we have a uniform representation of the remote dse, start
# processing the entries
for entry in remote_dse:
for dse_item in DS_CONFIG.items():
if dse_item[0] == "dna" and self.mode == "stage-mode":
# Do not migrate DNA ranges in staging mode
continue
dse = dse_item[1]
for dn in dse['dn']:
if DN(dn) == DN(entry['dn']):
# We found an entry to migrate
self.process_config_entry(
dn, entry['attrs'], dse)
stats['config_processed'] += 1
# Now do indexes/attr encryption (need to process child entries
# compared to DS_CONFIG entries)
for dse_item in DS_INDEXES.items():
dse = dse_item[1]
if dse['dn'] in entry['dn'].lower():
# We found an index/encrypted attr to migrate
self.process_config_entry(
entry['dn'], entry['attrs'], dse,
add_missing=True)
stats['config_processed'] += 1
#
# Migration
#
def do_migration(self):
"""
Get the data and convert it all to LDIF files which we will parse later
"""
start_time = time.time()
# Log header with all the config settings
self.log_debug('=' * 80)
self.log_info('IPA to IPA migration starting ...')
self.log_debug('Migration options:')
for arg in vars(self.args):
narg = arg.replace('_', '-')
if narg != "bind-pw":
self.log_debug(f'--{narg}={getattr(self.args, arg)}')
# Initialize our connections
self.connect_to_local_ds()
if ((self.args.config_ldif is None and not self.args.skip_config)
or (self.args.schema_ldif is None
and not self.args.skip_schema)
or self.args.db_ldif is None):
# Need to query remote DS so lets connect to it
self.connect_to_remote_ds()
# Check if schema checking is disabled on remote server
local_config = self.local_conn.get_entry(DN("cn=config"),
['nsslapd-schemacheck'])
if self.remote_conn is not None:
remote_config = self.remote_conn.get_entry(
DN("cn=config"), ['nsslapd-schemacheck'])
if remote_config['nsslapd-schemacheck'][0].lower() == "off" and \
local_config['nsslapd-schemacheck'][0].lower() == "on":
self.log_info("WARNING - Schema checking is disabled on the "
"remote server, but it is enabled on the local "
"server. This could cause failures when "
"migrating the database.")
# Get the suffixes for each server
self.local_suffix = self.get_base_dn()
self.remote_suffix = self.get_base_dn(remote=True)
# Make sure local IPA server is in migration mode
if not self.dryrun:
config_dn = f"cn=ipaconfig,cn=etc,{self.local_suffix}"
ldap = api.Backend.ldap2
config = ldap.get_entry(DN(config_dn), ['ipaMigrationEnabled'])
if not config['ipaMigrationEnabled'][0]:
config['ipaMigrationEnabled'] = ["TRUE"]
ldap.update_entry(config)
self.post_notes.append("The local server has been put into "
"migration mode. Once all migration "
"tasks are done you will have to take "
"the server out of migration mode.")
else:
self.post_notes.append("The local server is in migration "
"mode. Once all migration tasks are "
"done you will have to take the "
"server out of migration mode.")
# Get the remote domain
domain_parts = self.args.hostname.split(".")[1:]
self.remote_domain = '.'.join(domain_parts)
# Get the remote realm
self.get_remote_realm()
# Open dryrun ldif file
if self.args.dryrun_record is not None:
self.dryrun_record = open(self.args.dryrun_record, "w")
if self.args.skip_schema:
self.log_info("Skipping schema migration")
else:
# Do the schema
self.migrateSchema()
if self.args.skip_config:
self.log_info("Skipping configuration migration")
else:
# Do the DS config
self.migrateConfig()
# Do the Database
self.migrateDB()
# Close dryrun ldif file
if self.dryrun_record is not None:
self.dryrun_record.close()
#
# Do the remaining 1% ...
#
# Run ipa-server-upgrade
self.log_info("Running ipa-server-upgrade ... "
"(this may take a while)")
if self.dryrun:
self.log_info("Skipping ipa-server-upgrade in dryrun mode.")
else:
popen = subprocess.Popen(["/usr/sbin/ipa-server-upgrade"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
self.log_debug(stdout_line.rstrip())
for stdout_line in iter(popen.stderr.readline, ""):
self.log_debug(stdout_line.rstrip())
popen.stdout.close()
return_code = popen.wait()
if return_code:
self.log_error(f"ipa-server-upgrade failed: {return_code}")
self.post_notes.append("ipa-server-upgrade failed, "
"needs investigation")
# Run SIDGEN task
self.log_info("Running SIDGEN task ...")
if self.dryrun:
self.log_info("Skipping SIDGEN task in dryrun mode.")
else:
try:
cmd = ["/usr/bin/ipa config-mod --enable-sid --add-sids"]
result = subprocess.run(cmd, shell=True, check=True,
capture_output=True, text=True)
self.log_debug("SIDGEN task:\n" + result.stdout)
except subprocess.CalledProcessError as e:
self.log_error("SIDGEN task failed: " + str(e))
self.post_notes.append("SIDGEN task failed, "
"needs investigation.")
# TODO handle the LDIF conflict entries? (not used yet)
# Wrap it up with the summary report
self.display_stats(round(time.time() - start_time))
def run(self):
"""
Run the IPA to IPA migration tool
"""
# Validate user and setup
if not is_ipa_configured():
self.handle_error('IPA is not configured', err=2)
if os.getegid() != 0:
self.handle_error(f'Must be root to run {self.command_name}')
# Setup the arguments
desc = 'IPA to IPA Migration Tool'
parser = argparse.ArgumentParser(description=desc, allow_abbrev=True)
self.add_options(parser)
self.validate_options()
# Check for dryrun mode
if self.args.dryrun or self.args.dryrun_record is not None:
self.dryrun = True
# Prompt for confirmation
if not self.args.no_prompt and not self.dryrun:
print('Warning - the migration process is irreversible! Make '
'sure you have a backup of the local IPA server before '
'doing the migration')
answer = input('To proceed type "yes": ')
if answer.lower() != "yes":
self.handle_error('Aborting migration.')
print("Initializing ...")
# Init the API
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
try:
api.finalize()
except Exception as e:
self.handle_error(f'Problem with IPA installation: {str(e)}',
err=2)
print("Connecting to local server ...")
api.Backend.ldap2.connect()
self.ldapiuri = realm_to_ldapi_uri(api.env.realm)
self.realm = api.env.realm
self.api = api
self.local_hostname = socket.getfqdn()
domain_parts = self.local_hostname.split(".")[1:]
self.local_domain = '.'.join(domain_parts)
# Check that we have kerberos credentials
try:
subprocess.run(["/usr/bin/ipa server-show "
+ self.local_hostname],
capture_output=True,
shell=True, check=True)
except subprocess.CalledProcessError:
self.handle_error("Did not receive Kerberos credentials")
# Setup our logging
self.setup_logging()
# Let's do the migration
self.do_migration()
| 89,968
|
Python
|
.py
| 1,940
| 30.581959
| 79
| 0.50587
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,744
|
ipa_pkinit_manage.py
|
freeipa_freeipa/ipaserver/install/ipa_pkinit_manage.py
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import logging
from ipalib import api
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool
from ipaserver.install import installutils
from ipaserver.install.krbinstance import KrbInstance, is_pkinit_enabled
logger = logging.getLogger(__name__)
class PKINITManage(AdminTool):
command_name = "ipa-pkinit-manage"
usage = "%prog <enable|disable|status>"
description = "Manage PKINIT."
def validate_options(self):
super(PKINITManage, self).validate_options(needs_root=True)
installutils.check_server_configuration()
option_parser = self.option_parser
if not self.args:
option_parser.error("action not specified")
elif len(self.args) > 1:
option_parser.error("too many arguments")
action = self.args[0]
if action not in {'enable', 'disable', 'status'}:
option_parser.error("unrecognized action '{}'".format(action))
def run(self):
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
api.Backend.ldap2.connect()
try:
action = self.args[0]
if action == 'enable':
self.enable()
elif action == 'disable':
self.disable()
elif action == 'status':
self.status()
finally:
api.Backend.ldap2.disconnect()
return 0
def _setup(self, setup_pkinit):
config = api.Command.config_show()['result']
ca_enabled = api.Command.ca_is_enabled()['result']
krb = KrbInstance()
krb.init_info(
realm_name=api.env.realm,
host_name=api.env.host,
setup_pkinit=setup_pkinit,
subject_base=config['ipacertificatesubjectbase'][0],
)
if bool(is_pkinit_enabled()) is not bool(setup_pkinit):
try:
krb.stop_tracking_certs()
except RuntimeError as e:
if ca_enabled:
logger.warning(
"Failed to stop tracking certificates: %s", e)
# remove the cert and key
krb.delete_pkinit_cert()
krb.enable_ssl()
if setup_pkinit:
if not is_pkinit_enabled():
krb.setup_pkinit()
krb.pkinit_enable()
else:
krb.pkinit_disable()
def enable(self):
if not api.Command.ca_is_enabled()['result']:
logger.error("Cannot enable PKINIT in CA-less deployment")
logger.error("Use ipa-server-certinstall to install KDC "
"certificate manually")
raise RuntimeError("Cannot enable PKINIT in CA-less deployment")
self._setup(True)
def disable(self):
self._setup(False)
def status(self):
if is_pkinit_enabled():
print("PKINIT is enabled")
else:
print("PKINIT is disabled")
| 3,090 | Python | .py | 81 | 28.123457 | 76 | 0.599933 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,745 | conncheck.py | freeipa_freeipa/ipaserver/install/conncheck.py |
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Connection check module
"""
from ipalib.install import service
from ipalib.install.service import enroll_only, replica_install_only
from ipapython.install.core import knob
class ConnCheckInterface(service.ServiceAdminInstallInterface):
"""
    Interface common to all installers that perform a connection check
    against the remote master.
"""
skip_conncheck = knob(
None,
description="skip connection check to remote master",
)
skip_conncheck = enroll_only(skip_conncheck)
skip_conncheck = replica_install_only(skip_conncheck)
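# Illustrative note (an assumption about where the knob surfaces, not a
# documented guarantee): because the two wrappers compose, the option is
# only exposed by installers that both enroll and install a replica, e.g.
#
#   # ipa-replica-install --skip-conncheck ...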
| 644 | Python | .py | 20 | 28.55 | 76 | 0.764136 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,746 | acmeinstance.py | freeipa_freeipa/ipaserver/install/acmeinstance.py |
#
# Copyright (C) 2024 FreeIPA Contributors see COPYING for license
#
import logging
from ipaserver.install.dogtaginstance import DogtagInstance
logger = logging.getLogger(__name__)
class ACMEInstance(DogtagInstance):
"""
ACME is deployed automatically with a CA subsystem but it is the
responsibility of IPA to uninstall the service.
    This is mostly a placeholder for the uninstaller. We could
    eventually move the ACME installation routines into this class,
    but that might result in an extra PKI restart, which would be slow.
"""
def __init__(self, realm=None, host_name=None):
super(ACMEInstance, self).__init__(
realm=realm,
subsystem="ACME",
service_desc="ACME server",
host_name=host_name
)
def uninstall(self):
DogtagInstance.uninstall(self)
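# Illustrative usage sketch (assumed call site; uninstallers in this
# package are normally driven by the server uninstaller with the API
# already initialized):
#
#   acme = ACMEInstance(realm="EXAMPLE.TEST", host_name="ipa.example.test")
#   acme.uninstall()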
| 872 | Python | .py | 24 | 30.375 | 68 | 0.700357 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,747 | opendnssecinstance.py | freeipa_freeipa/ipaserver/install/opendnssecinstance.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
import os
import stat
import shutil
from subprocess import CalledProcessError
from ipalib.install import sysrestore
from ipaserver.dnssec.opendnssec import tasks
from ipaserver.install import service
from ipaserver.masters import ENABLED_SERVICE
from ipapython.dn import DN
from ipapython import directivesetter
from ipapython import ipautil
from ipaplatform import services
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipalib import errors, api
from ipaserver import p11helper
from ipalib.constants import SOFTHSM_DNSSEC_TOKEN_LABEL
logger = logging.getLogger(__name__)
KEYMASTER = u'dnssecKeyMaster'
def get_dnssec_key_masters(conn):
"""
    This method can be used only with admin connections; common users do
    not have permission to access the content of service containers.
:return: list of active dnssec key masters
"""
assert conn is not None
# please check ipalib/dns.py:dnssec_installed() method too, if you do
# any modifications here
dn = DN(api.env.container_masters, api.env.basedn)
filter_attrs = {
u'cn': u'DNSSEC',
u'objectclass': u'ipaConfigObject',
u'ipaConfigString': [KEYMASTER, ENABLED_SERVICE],
}
only_masters_f = conn.make_filter(filter_attrs, rules=conn.MATCH_ALL)
try:
entries = conn.find_entries(filter=only_masters_f, base_dn=dn)
except errors.NotFound:
return []
keymasters_list = []
for entry in entries[0]:
keymasters_list.append(str(entry.dn[1].value))
return keymasters_list
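# Illustrative sketch: with rules=MATCH_ALL the attributes above are
# ANDed, so the search is roughly equivalent to this raw LDAP filter
# (the enabledService literal is an assumption based on the
# ENABLED_SERVICE constant; escaping is handled by make_filter()):
#
#   (&(cn=DNSSEC)(objectclass=ipaConfigObject)
#     (ipaConfigString=dnssecKeyMaster)(ipaConfigString=enabledService))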
class OpenDNSSECInstance(service.Service):
def __init__(self, fstore=None):
service.Service.__init__(
self, "ods-enforcerd",
service_desc="OpenDNSSEC enforcer daemon",
)
self.conf_file_dict = {
'SOFTHSM_LIB': paths.LIBSOFTHSM2_SO,
'TOKEN_LABEL': SOFTHSM_DNSSEC_TOKEN_LABEL,
'KASP_DB': paths.OPENDNSSEC_KASP_DB,
'ODS_USER': constants.ODS_USER,
'ODS_GROUP': constants.ODS_GROUP,
}
self.kasp_file_dict = {}
self.extra_config = [KEYMASTER]
if fstore:
self.fstore = fstore
else:
self.fstore = sysrestore.FileStore(paths.SYSRESTORE)
suffix = ipautil.dn_attribute_property('_suffix')
def get_masters(self):
return get_dnssec_key_masters(api.Backend.ldap2)
def create_instance(self, fqdn, realm_name, generate_master_key=True,
kasp_db_file=None):
if self.get_state("enabled") is None:
self.backup_state("enabled", self.is_enabled())
if self.get_state("running") is None:
self.backup_state("running", self.is_running())
self.fqdn = fqdn
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(self.realm)
self.kasp_db_file = kasp_db_file
try:
self.stop()
except Exception:
pass
self.step("setting up configuration files", self.__setup_conf_files)
self.step("setting up ownership and file mode bits", self.__setup_ownership_file_modes)
if generate_master_key:
self.step("generating master key", self.__generate_master_key)
self.step("setting up OpenDNSSEC", self.__setup_dnssec)
self.step("setting up ipa-dnskeysyncd", self.__setup_dnskeysyncd)
self.step("starting OpenDNSSEC enforcer", self.__start)
self.step("configuring OpenDNSSEC enforcer to start on boot", self.__enable)
self.start_creation()
def __enable(self):
try:
self.ldap_configure('DNSSEC', self.fqdn, None,
self.suffix, self.extra_config)
except errors.DuplicateEntry:
logger.error("DNSSEC service already exists")
# add the KEYMASTER identifier into ipaConfigString
# this is needed for the re-enabled DNSSEC master
dn = DN(('cn', 'DNSSEC'), ('cn', self.fqdn), api.env.container_masters,
api.env.basedn)
try:
entry = api.Backend.ldap2.get_entry(dn, ['ipaConfigString'])
except errors.NotFound as e:
logger.error(
"DNSSEC service entry not found in the LDAP (%s)", e)
else:
config = entry.setdefault('ipaConfigString', [])
if KEYMASTER not in config:
config.append(KEYMASTER)
api.Backend.ldap2.update_entry(entry)
def __setup_conf_files(self):
if not self.fstore.has_file(paths.OPENDNSSEC_CONF_FILE):
self.fstore.backup_file(paths.OPENDNSSEC_CONF_FILE)
if not self.fstore.has_file(paths.OPENDNSSEC_KASP_FILE):
self.fstore.backup_file(paths.OPENDNSSEC_KASP_FILE)
if not self.fstore.has_file(paths.OPENDNSSEC_ZONELIST_FILE):
self.fstore.backup_file(paths.OPENDNSSEC_ZONELIST_FILE)
pin_fd = open(paths.DNSSEC_SOFTHSM_PIN, "r")
pin = pin_fd.read()
pin_fd.close()
# add pin to template
sub_conf_dict = self.conf_file_dict
sub_conf_dict['PIN'] = pin
if paths.ODS_KSMUTIL is not None and os.path.exists(paths.ODS_KSMUTIL):
# OpenDNSSEC 1.4
sub_conf_dict['INTERVAL'] = '<Interval>PT3600S</Interval>'
else:
# OpenDNSSEC 2.x
sub_conf_dict['INTERVAL'] = '<!-- Interval not used in 2x -->'
ods_conf_txt = ipautil.template_file(
os.path.join(paths.USR_SHARE_IPA_DIR, "opendnssec_conf.template"),
sub_conf_dict)
ods_conf_fd = open(paths.OPENDNSSEC_CONF_FILE, 'w')
ods_conf_fd.seek(0)
ods_conf_fd.truncate(0)
ods_conf_fd.write(ods_conf_txt)
ods_conf_fd.close()
ods_kasp_txt = ipautil.template_file(
os.path.join(paths.USR_SHARE_IPA_DIR, "opendnssec_kasp.template"),
self.kasp_file_dict)
ods_kasp_fd = open(paths.OPENDNSSEC_KASP_FILE, 'w')
ods_kasp_fd.seek(0)
ods_kasp_fd.truncate(0)
ods_kasp_fd.write(ods_kasp_txt)
ods_kasp_fd.close()
if not self.fstore.has_file(paths.SYSCONFIG_ODS):
self.fstore.backup_file(paths.SYSCONFIG_ODS)
if not os.path.isfile(paths.SYSCONFIG_ODS):
# create file, it's not shipped on Debian
with open(paths.SYSCONFIG_ODS, 'a') as f:
os.fchmod(f.fileno(), 0o644)
directivesetter.set_directive(paths.SYSCONFIG_ODS,
'SOFTHSM2_CONF',
paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
def __setup_ownership_file_modes(self):
assert constants.ODS_USER.uid is not None
assert constants.ODS_GROUP.gid is not None
# workarounds for packaging bugs in opendnssec-1.4.5-2.fc20.x86_64
# https://bugzilla.redhat.com/show_bug.cgi?id=1098188
for (root, dirs, files) in os.walk(paths.ETC_OPENDNSSEC_DIR):
for directory in dirs:
dir_path = os.path.join(root, directory)
os.chmod(dir_path, 0o770)
# chown to root:ods
os.chown(dir_path, 0, constants.ODS_GROUP.gid)
for filename in files:
file_path = os.path.join(root, filename)
os.chmod(file_path, 0o660)
# chown to root:ods
os.chown(file_path, 0, constants.ODS_GROUP.gid)
for (root, dirs, files) in os.walk(paths.VAR_OPENDNSSEC_DIR):
for directory in dirs:
dir_path = os.path.join(root, directory)
os.chmod(dir_path, 0o770)
# chown to ods:ods
constants.ODS_USER.chown(dir_path, gid=constants.ODS_GROUP.gid)
for filename in files:
file_path = os.path.join(root, filename)
os.chmod(file_path, 0o660)
# chown to ods:ods
constants.ODS_USER.chown(file_path,
gid=constants.ODS_GROUP.gid)
def __generate_master_key(self):
with open(paths.DNSSEC_SOFTHSM_PIN, "r") as f:
pin = f.read()
os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
p11 = p11helper.P11_Helper(
SOFTHSM_DNSSEC_TOKEN_LABEL, pin, paths.LIBSOFTHSM2_SO)
try:
# generate master key
logger.debug("Creating master key")
p11helper.generate_master_key(p11)
# change tokens mod/owner
logger.debug("Changing ownership of token files")
for (root, dirs, files) in os.walk(paths.DNSSEC_TOKENS_DIR):
for directory in dirs:
dir_path = os.path.join(root, directory)
os.chmod(dir_path, 0o770 | stat.S_ISGID)
# chown to ods:named
constants.ODS_USER.chown(dir_path,
gid=constants.NAMED_GROUP.gid)
for filename in files:
file_path = os.path.join(root, filename)
os.chmod(file_path, 0o660 | stat.S_ISGID)
# chown to ods:named
constants.ODS_USER.chown(file_path,
gid=constants.NAMED_GROUP.gid)
finally:
p11.finalize()
def __setup_dnssec(self):
# run once only
if self.get_state("kasp_db_configured") and not self.kasp_db_file:
logger.debug("Already configured, skipping step")
return
self.backup_state("kasp_db_configured", True)
if not self.fstore.has_file(paths.OPENDNSSEC_KASP_DB):
self.fstore.backup_file(paths.OPENDNSSEC_KASP_DB)
if self.kasp_db_file:
            # copy the user-specified kasp.db to the proper location and
            # set proper privileges
shutil.copy(self.kasp_db_file, paths.OPENDNSSEC_KASP_DB)
constants.ODS_USER.chown(paths.OPENDNSSEC_KASP_DB,
gid=constants.ODS_GROUP.gid)
os.chmod(paths.OPENDNSSEC_KASP_DB, 0o660)
else:
# initialize new kasp.db
tasks.run_ods_setup()
def __setup_dnskeysyncd(self):
        # set up dnskeysyncd - this server is the DNSSEC master
directivesetter.set_directive(paths.SYSCONFIG_IPA_DNSKEYSYNCD,
'ISMASTER',
'1',
quotes=False, separator='=')
def __start(self):
self.restart() # needed to reload conf files
tasks.run_ods_policy_import()
if self.kasp_db_file:
# regenerate zonelist.xml
result = tasks.run_ods_manager(
['zonelist', 'export'], capture_output=True
)
with open(paths.OPENDNSSEC_ZONELIST_FILE, 'w') as f:
f.write(result.output)
constants.ODS_USER.chown(f.fileno(),
gid=constants.ODS_GROUP.gid)
os.fchmod(f.fileno(), 0o660)
def uninstall(self):
if not self.is_configured():
return
self.print_msg("Unconfiguring %s" % self.service_name)
running = self.restore_state("running")
enabled = self.restore_state("enabled")
# stop DNSSEC services before backing up kasp.db
try:
self.stop()
except Exception:
pass
ods_exporter = services.service('ipa-ods-exporter', api)
try:
ods_exporter.stop()
except Exception:
pass
        # remove the directive from ipa-dnskeysyncd, this server is no
        # longer the DNSSEC master
directivesetter.set_directive(paths.SYSCONFIG_IPA_DNSKEYSYNCD,
'ISMASTER', None,
quotes=False, separator='=')
restore_list = [paths.OPENDNSSEC_CONF_FILE, paths.OPENDNSSEC_KASP_FILE,
paths.SYSCONFIG_ODS, paths.OPENDNSSEC_ZONELIST_FILE]
if os.path.isfile(paths.OPENDNSSEC_KASP_DB):
# force to export data
cmd = [paths.IPA_ODS_EXPORTER, 'ipa-full-update']
try:
self.print_msg("Exporting DNSSEC data before uninstallation")
ipautil.run(cmd, runas=constants.ODS_USER)
except CalledProcessError:
logger.error("DNSSEC data export failed")
try:
shutil.copy(paths.OPENDNSSEC_KASP_DB,
paths.IPA_KASP_DB_BACKUP)
except IOError as e:
logger.error(
"Unable to backup OpenDNSSEC database %s, "
"restore will be skipped: %s", paths.OPENDNSSEC_KASP_DB, e)
else:
logger.info("OpenDNSSEC database backed up in %s",
paths.IPA_KASP_DB_BACKUP)
# restore OpenDNSSEC's KASP DB only if backup succeeded
# removing the file without backup could totally break DNSSEC
restore_list.append(paths.OPENDNSSEC_KASP_DB)
for f in restore_list:
try:
self.fstore.restore_file(f)
except ValueError as error:
logger.debug("%s", error)
self.restore_state("kasp_db_configured") # just eat state
# disabled by default, by ldap_configure()
if enabled:
self.enable()
if running:
self.restart()
ipautil.remove_file(paths.DNSSEC_ENGINE_SOCK)
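# --- Illustrative helper sketch (not part of the module) ---
# The recursive chmod/chown pattern used by __setup_ownership_file_modes()
# above, reduced to a generic, self-contained function; uid/gid values are
# caller-supplied instead of coming from platform constants.
import os

def set_tree_perms(root, uid, gid, dir_mode=0o770, file_mode=0o660):
    """Apply mode and ownership to every entry below *root*."""
    for base, dirs, files in os.walk(root):
        for name in dirs:
            path = os.path.join(base, name)
            os.chmod(path, dir_mode)
            os.chown(path, uid, gid)
        for name in files:
            path = os.path.join(base, name)
            os.chmod(path, file_mode)
            os.chown(path, uid, gid)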
| 13,849 | Python | .py | 306 | 33.068627 | 95 | 0.591098 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,748 | bindinstance.py | freeipa_freeipa/ipaserver/install/bindinstance.py |
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import print_function
import logging
import tempfile
import os
import netaddr
import re
import shutil
import sys
import time
import ldap
import six
from dns.exception import DNSException
from ipaserver.dns_data_management import (
IPASystemRecords,
IPADomainIsNotManagedByIPAError,
)
from ipaserver.install import installutils
from ipaserver.install import service
from ipaserver.install import sysupgrade
from ipaserver.masters import get_masters
from ipapython import ipaldap
from ipapython import ipautil
from ipapython import dnsutil
from ipapython.dnsutil import DNSName
from ipapython.dn import DN
from ipapython.admintool import ScriptError
import ipalib
from ipalib import api, errors
from ipalib.constants import IPA_CA_RECORD
from ipalib.install import dnsforwarders
from ipaplatform import services
from ipaplatform.tasks import tasks
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipalib.util import (validate_zonemgr_str, normalize_zonemgr,
get_dns_forward_zone_update_policy,
get_dns_reverse_zone_update_policy,
normalize_zone, get_reverse_zone_default,
zone_is_reverse, validate_dnssec_global_forwarder,
DNSSECSignatureMissingError, EDNS0UnsupportedError,
UnresolvableRecordError)
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
named_conf_section_ipa_start_re = re.compile(r'\s*dyndb\s+"ipa"\s+"[^"]+"\s+{')
named_conf_section_options_start_re = re.compile(r'\s*options\s+{')
named_conf_section_end_re = re.compile('};')
named_conf_arg_ipa_re = re.compile(
r'(?P<indent>\s*)(?P<name>\S+)\s"(?P<value>[^"]+)";')
named_conf_arg_options_re = re.compile(
r'(?P<indent>\s*)(?P<name>\S+)\s+"(?P<value>[^"]+)"\s*;')
named_conf_arg_ipa_template = "%(indent)s%(name)s \"%(value)s\";\n"
named_conf_arg_options_template = "%(indent)s%(name)s \"%(value)s\";\n"
# non string args for options section
named_conf_arg_options_re_nonstr = re.compile(
r'(?P<indent>\s*)(?P<name>\S+)\s+(?P<value>[^"]+)\s*;')
named_conf_arg_options_template_nonstr = "%(indent)s%(name)s %(value)s;\n"
# include directive
named_conf_include_re = re.compile(r'\s*include\s+"(?P<path>.*)"\s*;')
named_conf_include_template = "include \"%(path)s\";\n"
NAMED_SECTION_OPTIONS = "options"
NAMED_SECTION_IPA = "ipa"
def create_reverse():
return ipautil.user_input(
"Do you want to search for missing reverse zones?",
True
)
def named_conf_exists():
"""
Checks that named.conf exists AND that it contains IPA-related config.
"""
try:
with open(paths.NAMED_CONF, 'r') as named_fd:
lines = named_fd.readlines()
except IOError:
return False
for line in lines:
if named_conf_section_ipa_start_re.match(line):
return True
return False
def named_conf_get_directive(name, section=NAMED_SECTION_IPA, str_val=True):
"""Get a configuration option in bind-dyndb-ldap section of named.conf
:str_val - set to True if directive value is string
(only for NAMED_SECTION_OPTIONS)
"""
if section == NAMED_SECTION_IPA:
named_conf_section_start_re = named_conf_section_ipa_start_re
named_conf_arg_re = named_conf_arg_ipa_re
elif section == NAMED_SECTION_OPTIONS:
named_conf_section_start_re = named_conf_section_options_start_re
if str_val:
named_conf_arg_re = named_conf_arg_options_re
else:
named_conf_arg_re = named_conf_arg_options_re_nonstr
else:
raise NotImplementedError('Section "%s" is not supported' % section)
with open(paths.NAMED_CONF, 'r') as f:
target_section = False
for line in f:
if named_conf_section_start_re.match(line):
target_section = True
continue
if named_conf_section_end_re.match(line):
if target_section:
break
if target_section:
match = named_conf_arg_re.match(line)
if match and name == match.group('name'):
return match.group('value')
return None
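# Illustrative usage sketch (hypothetical values; the real result depends
# on the local named.conf):
#
#   # string-valued option in the ipa (dyndb) section:
#   uri = named_conf_get_directive("uri", NAMED_SECTION_IPA)
#   # non-string option in the options section, e.g. `dnssec-validation no;`
#   val = named_conf_get_directive(
#       "dnssec-validation", NAMED_SECTION_OPTIONS, str_val=False)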
def named_conf_set_directive(name, value, section=NAMED_SECTION_IPA,
str_val=True):
"""
    Set a configuration option in the bind-dyndb-ldap section of named.conf.
    When a configuration option with the given name does not exist, it
    is added at the end of the ipa section in named.conf.
If the value is set to None, the configuration option is removed
from named.conf.
    :str_val - set to True if the directive value is a string
(only for NAMED_SECTION_OPTIONS)
"""
new_lines = []
if section == NAMED_SECTION_IPA:
named_conf_section_start_re = named_conf_section_ipa_start_re
named_conf_arg_re = named_conf_arg_ipa_re
named_conf_arg_template = named_conf_arg_ipa_template
elif section == NAMED_SECTION_OPTIONS:
named_conf_section_start_re = named_conf_section_options_start_re
if str_val:
named_conf_arg_re = named_conf_arg_options_re
named_conf_arg_template = named_conf_arg_options_template
else:
named_conf_arg_re = named_conf_arg_options_re_nonstr
named_conf_arg_template = named_conf_arg_options_template_nonstr
else:
raise NotImplementedError('Section "%s" is not supported' % section)
with open(paths.NAMED_CONF, 'r') as f:
target_section = False
matched = False
last_indent = "\t"
for line in f:
if named_conf_section_start_re.match(line):
target_section = True
if named_conf_section_end_re.match(line):
if target_section and not matched and \
value is not None:
# create a new conf
new_conf = named_conf_arg_template \
% dict(indent=last_indent,
name=name,
value=value)
new_lines.append(new_conf)
target_section = False
if target_section and not matched:
match = named_conf_arg_re.match(line)
if match:
last_indent = match.group('indent')
if name == match.group('name'):
matched = True
if value is not None:
if not isinstance(value, str):
value = str(value)
new_conf = named_conf_arg_template \
% dict(indent=last_indent,
name=name,
value=value)
new_lines.append(new_conf)
continue
new_lines.append(line)
# write new configuration
with open(paths.NAMED_CONF, 'w') as f:
f.write("".join(new_lines))
def named_conf_include_exists(path):
"""
Check if include exists in named.conf
:param path: path in include directive
:return: True if include exists, else False
"""
with open(paths.NAMED_CONF, 'r') as f:
for line in f:
match = named_conf_include_re.match(line)
if match and path == match.group('path'):
return True
return False
def named_conf_add_include(path):
"""
    Append an include directive at the end of the file.
    :param path: path to be inserted into the include directive
"""
with open(paths.NAMED_CONF, 'a') as f:
f.write(named_conf_include_template % {'path': path})
def dns_container_exists(suffix):
"""
Test whether the dns container exists.
"""
assert isinstance(suffix, DN)
return api.Backend.ldap2.entry_exists(DN(('cn', 'dns'), suffix))
def dns_zone_exists(name, api=api):
try:
zone = api.Command.dnszone_show(unicode(name))
except ipalib.errors.NotFound:
return False
if len(zone) == 0:
return False
else:
return True
def get_reverse_record_name(zone, ip_address):
ip = netaddr.IPAddress(ip_address)
rev = '.' + normalize_zone(zone)
fullrev = '.' + normalize_zone(ip.reverse_dns)
if not fullrev.endswith(rev):
raise ValueError("IP address does not match reverse zone")
return fullrev[1:-len(rev)]
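# Worked example (addresses from the RFC 5737 documentation range): for
# "192.0.2.10" the full reverse name is "10.2.0.192.in-addr.arpa."; with
# zone "2.0.192.in-addr.arpa" the zone suffix is stripped and only the
# relative label remains:
#
#   >>> get_reverse_record_name("2.0.192.in-addr.arpa", "192.0.2.10")
#   '10'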
def verify_reverse_zone(zone, ip_address):
try:
get_reverse_record_name(zone, ip_address)
except ValueError:
return False
return True
def find_reverse_zone(ip_address, api=api):
ip = netaddr.IPAddress(ip_address)
zone = normalize_zone(ip.reverse_dns)
while len(zone) > 0:
if dns_zone_exists(zone, api):
return zone
zone = zone.partition('.')[2]
return None
def read_reverse_zone(default, ip_address, allow_zone_overlap=False):
while True:
zone = ipautil.user_input("Please specify the reverse zone name", default=default)
if not zone:
return None
if not verify_reverse_zone(zone, ip_address):
logger.error("Invalid reverse zone %s for IP address %s",
zone, ip_address)
continue
if not allow_zone_overlap:
try:
dnsutil.check_zone_overlap(zone, raise_on_error=False)
except dnsutil.DNSZoneAlreadyExists as e:
logger.error("Reverse zone %s will not be used: %s",
zone, e)
continue
break
return normalize_zone(zone)
def get_auto_reverse_zones(ip_addresses, allow_zone_overlap=False):
auto_zones = []
for ip in ip_addresses:
try:
dnsutil.resolve_address(str(ip))
except DNSException:
pass
else:
            # PTR exists, there is no reason to create a reverse zone
logger.info("Reverse record for IP address %s already exists", ip)
continue
default_reverse = get_reverse_zone_default(ip)
if not allow_zone_overlap:
try:
dnsutil.check_zone_overlap(default_reverse)
except ValueError as e:
logger.info("Reverse zone %s for IP address %s already exists",
default_reverse, ip)
logger.debug('%s', e)
continue
auto_zones.append((ip, default_reverse))
return auto_zones
def add_zone(name, zonemgr=None, dns_backup=None, ns_hostname=None,
update_policy=None, force=False, skip_overlap_check=False,
api=api):
# always normalize zones
name = normalize_zone(name)
if update_policy is None:
if zone_is_reverse(name):
update_policy = get_dns_reverse_zone_update_policy(api.env.realm, name)
else:
update_policy = get_dns_forward_zone_update_policy(api.env.realm)
if not zonemgr:
zonemgr = 'hostmaster.%s' % name
if ns_hostname:
ns_hostname = normalize_zone(ns_hostname)
ns_hostname = unicode(ns_hostname)
try:
api.Command.dnszone_add(unicode(name),
idnssoamname=ns_hostname,
idnssoarname=unicode(zonemgr),
idnsallowdynupdate=True,
idnsupdatepolicy=unicode(update_policy),
idnsallowquery=u'any',
idnsallowtransfer=u'none',
skip_overlap_check=skip_overlap_check,
force=force)
except (errors.DuplicateEntry, errors.EmptyModlist):
pass
def add_rr(zone, name, type, rdata, dns_backup=None, api=api, **kwargs):
addkw = {'%srecord' % str(type.lower()): unicode(rdata)}
addkw.update(kwargs)
try:
api.Command.dnsrecord_add(unicode(zone), unicode(name), **addkw)
except (errors.DuplicateEntry, errors.EmptyModlist):
pass
if dns_backup:
dns_backup.add(zone, type, name, rdata)
def add_fwd_rr(zone, host, ip_address, api=api):
addr = netaddr.IPAddress(ip_address)
if addr.version == 4:
add_rr(zone, host, "A", ip_address, None, api)
elif addr.version == 6:
add_rr(zone, host, "AAAA", ip_address, None, api)
def add_ptr_rr(zone, ip_address, fqdn, dns_backup=None, api=api):
name = get_reverse_record_name(zone, ip_address)
add_rr(zone, name, "PTR", normalize_zone(fqdn), dns_backup, api)
def add_ns_rr(zone, hostname, dns_backup=None, force=True, api=api):
hostname = normalize_zone(hostname)
add_rr(zone, "@", "NS", hostname, dns_backup=dns_backup,
force=force, api=api)
def del_rr(zone, name, type, rdata, api=api):
delkw = { '%srecord' % str(type.lower()) : unicode(rdata) }
try:
api.Command.dnsrecord_del(unicode(zone), unicode(name), **delkw)
except (errors.NotFound, errors.AttrValueNotFound, errors.EmptyModlist):
pass
def del_fwd_rr(zone, host, ip_address, api=api):
addr = netaddr.IPAddress(ip_address)
if addr.version == 4:
del_rr(zone, host, "A", ip_address, api=api)
elif addr.version == 6:
del_rr(zone, host, "AAAA", ip_address, api=api)
def del_ns_rr(zone, name, rdata, api=api):
del_rr(zone, name, 'NS', rdata, api=api)
def get_rr(zone, name, type, api=api):
rectype = '%srecord' % unicode(type.lower())
ret = api.Command.dnsrecord_find(unicode(zone), unicode(name))
if ret['count'] > 0:
for r in ret['result']:
if rectype in r:
return r[rectype]
return []
def get_fwd_rr(zone, host, api=api):
return [x for t in ("A", "AAAA") for x in get_rr(zone, host, t, api)]
def zonemgr_callback(option, opt_str, value, parser):
"""
Properly validate and convert --zonemgr Option to IA5String
"""
if value is not None:
# validate the value first
if six.PY3:
try:
validate_zonemgr_str(value)
except ValueError as e:
parser.error("invalid zonemgr: {}".format(e))
else:
try:
# IDNA support requires unicode
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'utf-8'
# value is of a string type in both py2 and py3
if not isinstance(value, unicode):
value = value.decode(encoding)
validate_zonemgr_str(value)
except ValueError as e:
# FIXME we can do this in better way
# https://fedorahosted.org/freeipa/ticket/4804
# decode to proper stderr encoding
stderr_encoding = getattr(sys.stderr, 'encoding', None)
if stderr_encoding is None:
stderr_encoding = 'utf-8'
error = unicode(e).encode(stderr_encoding)
parser.error(b"invalid zonemgr: " + error)
parser.values.zonemgr = value
def check_reverse_zones(ip_addresses, reverse_zones, options, unattended,
search_reverse_zones=False):
checked_reverse_zones = []
if (not options.no_reverse and not reverse_zones
and not options.auto_reverse):
if unattended:
options.no_reverse = True
else:
options.no_reverse = not create_reverse()
# shortcut
if options.no_reverse:
return []
# verify zones passed in options
for rz in reverse_zones:
        # check whether the zone is managed by someone else
if not options.allow_zone_overlap:
try:
dnsutil.check_zone_overlap(rz)
except ValueError as e:
msg = "Reverse zone %s will not be used: %s" % (rz, e)
if unattended:
raise ScriptError(msg)
else:
logger.warning('%s', msg)
continue
checked_reverse_zones.append(normalize_zone(rz))
    # check that there is a reverse zone for every IP
ips_missing_reverse = []
for ip in ip_addresses:
if search_reverse_zones and find_reverse_zone(str(ip)):
# reverse zone is already in LDAP
continue
for rz in checked_reverse_zones:
if verify_reverse_zone(rz, ip):
# reverse zone was entered by user
break
else:
ips_missing_reverse.append(ip)
    # create a reverse zone for each IP address that does not have one
for (ip, rz) in get_auto_reverse_zones(ips_missing_reverse,
options.allow_zone_overlap):
if options.auto_reverse:
logger.info("Reverse zone %s will be created", rz)
checked_reverse_zones.append(rz)
elif unattended:
logger.warning("Missing reverse record for IP address %s", ip)
else:
if ipautil.user_input("Do you want to create reverse zone for IP "
"%s" % ip, True):
rz = read_reverse_zone(rz, str(ip), options.allow_zone_overlap)
checked_reverse_zones.append(rz)
return checked_reverse_zones
def check_forwarders(dns_forwarders):
print("Checking DNS forwarders, please wait ...")
forwarders_dnssec_valid = True
for forwarder in dns_forwarders:
logger.debug("Checking DNS server: %s", forwarder)
try:
validate_dnssec_global_forwarder(forwarder)
except DNSSECSignatureMissingError as e:
forwarders_dnssec_valid = False
logger.warning("DNS server %s does not support DNSSEC: %s",
forwarder, e)
logger.warning(
"Please fix forwarder configuration to enable DNSSEC "
"support.\n"
)
print("DNS server %s: %s" % (forwarder, e))
print("Please fix forwarder configuration to enable DNSSEC support.")
except EDNS0UnsupportedError as e:
forwarders_dnssec_valid = False
logger.warning("DNS server %s does not support ENDS0 "
"(RFC 6891): %s", forwarder, e)
logger.warning("Please fix forwarder configuration. "
"DNSSEC support cannot be enabled without EDNS0")
print(("WARNING: DNS server %s does not support EDNS0 "
"(RFC 6891): %s" % (forwarder, e)))
except UnresolvableRecordError as e:
logger.error("DNS server %s: %s", forwarder, e)
raise RuntimeError("DNS server %s: %s" % (forwarder, e))
return forwarders_dnssec_valid
def remove_master_dns_records(hostname, realm):
bind = BindInstance()
bind.remove_master_dns_records(hostname, realm, realm.lower())
bind.remove_server_ns_records(hostname)
def ensure_dnsserver_container_exists(ldap, api_instance, logger=logger):
"""
Create cn=servers,cn=dns,$SUFFIX container. If logger is not None, emit a
message that the container already exists when DuplicateEntry is raised
"""
entry = ldap.make_entry(
DN(api_instance.env.container_dnsservers, api_instance.env.basedn),
{
u'objectclass': [u'top', u'nsContainer'],
u'cn': [u'servers']
}
)
try:
ldap.add_entry(entry)
except errors.DuplicateEntry:
logger.debug('cn=servers,cn=dns container already exists')
class DnsBackup:
def __init__(self, service):
self.service = service
self.zones = {}
def add(self, zone, record_type, host, rdata):
"""
Backup a DNS record in the file store so it can later be removed.
"""
if zone not in self.zones:
zone_id = len(self.zones)
self.zones[zone] = (zone_id, 0)
self.service.backup_state("dns_zone_%s" % zone_id, zone)
(zone_id, record_id) = self.zones[zone]
self.service.backup_state("dns_record_%s_%s" % (zone_id, record_id),
"%s %s %s" % (record_type, host, rdata))
self.zones[zone] = (zone_id, record_id + 1)
def clear_records(self, have_ldap):
"""
Remove all records from the file store. If we are connected to
ldap, we will also remove them there.
"""
i = 0
while True:
zone = self.service.restore_state("dns_zone_%s" % i)
if not zone:
return
j = 0
while True:
dns_record = self.service.restore_state("dns_record_%s_%s" % (i, j))
if not dns_record:
break
if have_ldap:
type, host, rdata = dns_record.split(" ", 2)
try:
delkw = { '%srecord' % str(type.lower()) : unicode(rdata) }
api.Command.dnsrecord_del(unicode(zone), unicode(host), **delkw)
except Exception:
pass
j += 1
i += 1
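# Illustrative sketch (hypothetical data): after backing up two records
# in one zone, the file store holds sequentially numbered keys such as
#
#   dns_zone_0     -> "example.test."
#   dns_record_0_0 -> "A ipa 192.0.2.1"
#   dns_record_0_1 -> "TXT _kerberos EXAMPLE.TEST"
#
# clear_records() walks zone ids and record ids upward until a lookup
# returns nothing, deleting each record over LDAP when connected.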
class BindInstance(service.Service):
def __init__(self, fstore=None, api=api):
super(BindInstance, self).__init__(
"named",
service_desc="DNS",
fstore=fstore,
api=api,
service_user=constants.NAMED_USER,
service_prefix=u'DNS',
keytab=paths.NAMED_KEYTAB
)
self.dns_backup = DnsBackup(self)
self.domain = None
self.host = None
self.ip_addresses = ()
self.forwarders = ()
self.forward_policy = None
self.zonemgr = None
self.no_dnssec_validation = False
self.sub_dict = None
self.reverse_zones = ()
self.named_conflict = services.service('named-conflict', api)
suffix = ipautil.dn_attribute_property('_suffix')
def setup(self, fqdn, ip_addresses, realm_name, domain_name, forwarders,
forward_policy, reverse_zones, zonemgr=None,
no_dnssec_validation=False):
"""Setup bindinstance for installation
"""
self.setup_templating(
fqdn=fqdn,
realm_name=realm_name,
domain_name=domain_name,
no_dnssec_validation=no_dnssec_validation
)
self.ip_addresses = ip_addresses
self.forwarders = forwarders
self.forward_policy = forward_policy
self.reverse_zones = reverse_zones
if not zonemgr:
self.zonemgr = 'hostmaster.%s' % normalize_zone(self.domain)
else:
self.zonemgr = normalize_zonemgr(zonemgr)
def setup_templating(
self, fqdn, realm_name, domain_name, no_dnssec_validation=None
):
"""Setup bindinstance for templating
"""
self.fqdn = fqdn
self.realm = realm_name
self.domain = domain_name
self.host = fqdn.split(".")[0]
self.suffix = ipautil.realm_to_suffix(self.realm)
self.no_dnssec_validation = no_dnssec_validation
self._setup_sub_dict()
@property
def host_domain(self):
return self.fqdn.split(".", 1)[1]
@property
def first_instance(self):
return not dns_container_exists(self.suffix)
@property
def host_in_rr(self):
        # when a host is not in the default domain, it needs to be referred
        # to by FQDN rather than by a domain-relative host name
if not self.host_in_default_domain():
return normalize_zone(self.fqdn)
return self.host
def host_in_default_domain(self):
return normalize_zone(self.host_domain) == normalize_zone(self.domain)
def create_file_with_system_records(self):
system_records = IPASystemRecords(self.api, all_servers=True)
text = u'\n'.join(
IPASystemRecords.records_list_from_zone(
system_records.get_base_records()
)
)
with tempfile.NamedTemporaryFile(
mode="w", prefix="ipa.system.records.",
suffix=".db", delete=False
) as f:
f.write(text)
print("Please add records in this file to your DNS system:",
f.name)
def create_instance(self):
try:
self.stop()
except Exception:
pass
for ip_address in self.ip_addresses:
if installutils.record_in_hosts(str(ip_address), self.fqdn) is None:
installutils.add_record_to_hosts(str(ip_address), self.fqdn)
# Make sure generate-rndc-key.sh runs before named restart
self.step("generating rndc key file", self.__generate_rndc_key)
if self.first_instance:
self.step("adding DNS container", self.__setup_dns_container)
if not dns_zone_exists(self.domain, self.api):
self.step("setting up our zone", self.__setup_zone)
if self.reverse_zones:
self.step("setting up reverse zone", self.__setup_reverse_zone)
self.step("setting up our own record", self.__add_self)
if self.first_instance:
self.step("setting up records for other masters", self.__add_others)
# all zones must be created before this step
self.step("adding NS record to the zones", self.__add_self_ns)
# The service entry is used for LDAPI autobind. The keytab is no
# longer used to authenticate the server. The server still needs
# the keytab to handle incoming nsupdate requests with TSIG.
self.step("setting up kerberos principal", self.__setup_principal)
self.step("setting up LDAPI autobind", self.setup_autobind)
self.step("setting up named.conf", self.setup_named_conf)
self.step("setting up server configuration",
self.__setup_server_configuration)
# named has to be started after softhsm initialization
# self.step("restarting named", self.__start)
self.step("configuring named to start on boot", self.switch_service)
self.step(
"changing resolv.conf to point to ourselves",
self.setup_resolv_conf
)
self.start_creation()
def start_named(self):
self.print_msg("Restarting named")
self.__start()
def __start(self):
try:
self.restart()
except Exception as e:
logger.error("Named service failed to start (%s)", e)
print("named service failed to start")
def switch_service(self):
self.mask_conflict()
self.__enable()
def __enable(self):
        # We do not let the system start IPA components on its own.
        # Instead we rely on the IPA init script to start only enabled
        # components as found in our LDAP configuration tree
try:
self.ldap_configure('DNS', self.fqdn, None, self.suffix)
except errors.DuplicateEntry:
# service already exists (forced DNS reinstall)
# don't crash, just report error
logger.error("DNS service already exists")
def mask_conflict(self):
# disable named-conflict (either named or named-pkcs11)
try:
self.named_conflict.stop()
except Exception as e:
logger.debug("Unable to stop %s (%s)",
self.named_conflict.systemd_name, e)
try:
self.named_conflict.mask()
except Exception as e:
logger.debug("Unable to mask %s (%s)",
self.named_conflict.systemd_name, e)
def _get_dnssec_validation(self):
"""get dnssec-validation value
1) command line overwrite --no-dnssec-validation
2) setting dnssec-enabled or dnssec-validation from named.conf
3) "yes" by default
Note: The dnssec-enabled is deprecated and defaults to "yes". If the
setting is "no", then it is migrated as "dnssec-validation no".
"""
dnssec_validation = "yes"
if self.no_dnssec_validation:
            # command line override
            logger.debug(
                "dnssec-validation 'no' command line override"
            )
dnssec_validation = "no"
elif os.path.isfile(paths.NAMED_CONF):
# get prev_ value from /etc/named.conf
prev_dnssec_validation = named_conf_get_directive(
"dnssec-validation",
NAMED_SECTION_OPTIONS,
str_val=False
)
prev_dnssec_enable = named_conf_get_directive(
"dnssec-enable",
NAMED_SECTION_OPTIONS,
str_val=False
)
if prev_dnssec_validation == "no" or prev_dnssec_enable == "no":
logger.debug(
"Setting dnssec-validation 'no' from existing %s",
paths.NAMED_CONF
)
logger.debug(
"dnssec-enabled was %s (None is yes)", prev_dnssec_enable
)
logger.debug(
"dnssec-validation was %s", prev_dnssec_validation
)
dnssec_validation = "no"
assert dnssec_validation in {"yes", "no"}
logger.info("dnssec-validation %s", dnssec_validation)
return dnssec_validation
def _setup_sub_dict(self):
if paths.NAMED_CRYPTO_POLICY_FILE is not None:
crypto_policy = 'include "{}";'.format(
paths.NAMED_CRYPTO_POLICY_FILE
)
else:
crypto_policy = "// not available"
self.sub_dict = dict(
FQDN=self.fqdn,
SERVER_ID=ipaldap.realm_to_serverid(self.realm),
SUFFIX=self.suffix,
MANAGED_KEYS_DIR=paths.NAMED_MANAGED_KEYS_DIR,
ROOT_KEY=paths.NAMED_ROOT_KEY,
NAMED_KEYTAB=self.keytab,
RFC1912_ZONES=paths.NAMED_RFC1912_ZONES,
NAMED_PID=paths.NAMED_PID,
NAMED_VAR_DIR=paths.NAMED_VAR_DIR,
BIND_LDAP_SO=paths.BIND_LDAP_SO,
INCLUDE_CRYPTO_POLICY=crypto_policy,
NAMED_CONF=paths.NAMED_CONF,
NAMED_CUSTOM_CONF=paths.NAMED_CUSTOM_CONF,
NAMED_CUSTOM_OPTIONS_CONF=paths.NAMED_CUSTOM_OPTIONS_CONF,
NAMED_LOGGING_OPTIONS_CONF=paths.NAMED_LOGGING_OPTIONS_CONF,
NAMED_DATA_DIR=constants.NAMED_DATA_DIR,
NAMED_ZONE_COMMENT=constants.NAMED_ZONE_COMMENT,
NAMED_DNSSEC_VALIDATION=self._get_dnssec_validation(),
)
def __setup_dns_container(self):
self._ldap_mod("dns.ldif", self.sub_dict)
self.__fix_dns_privilege_members()
def __fix_dns_privilege_members(self):
ldap = self.api.Backend.ldap2
cn = 'Update PBAC memberOf %s' % time.time()
task_dn = DN(('cn', cn), ('cn', 'memberof task'), ('cn', 'tasks'),
('cn', 'config'))
basedn = DN(self.api.env.container_privilege, self.api.env.basedn)
entry = ldap.make_entry(
task_dn,
objectclass=['top', 'extensibleObject'],
cn=[cn],
basedn=[basedn],
filter=['(objectclass=*)'],
ttl=[10])
ldap.add_entry(entry)
start_time = time.time()
while True:
try:
task = ldap.get_entry(task_dn)
except errors.NotFound:
break
if 'nstaskexitcode' in task:
break
time.sleep(1)
if time.time() > (start_time + 60):
raise errors.TaskTimeout(task='memberof', task_dn=task_dn)
def __setup_zone(self):
# Always use force=True as named is not set up yet
add_zone(self.domain, self.zonemgr, dns_backup=self.dns_backup,
ns_hostname=self.api.env.host, force=True,
skip_overlap_check=True, api=self.api)
add_rr(self.domain, "_kerberos", "TXT", self.realm, api=self.api)
def __add_self_ns(self):
# add NS record to all zones
ns_hostname = normalize_zone(self.api.env.host)
result = self.api.Command.dnszone_find()
for zone in result['result']:
zone = unicode(zone['idnsname'][0]) # we need unicode due to backup
logger.debug("adding self NS to zone %s apex", zone)
add_ns_rr(zone, ns_hostname, self.dns_backup, force=True,
api=self.api)
def __setup_reverse_zone(self):
# Always use force=True as named is not set up yet
for reverse_zone in self.reverse_zones:
add_zone(reverse_zone, self.zonemgr, ns_hostname=self.api.env.host,
dns_backup=self.dns_backup, force=True,
skip_overlap_check=True, api=self.api)
def __add_master_records(self, fqdn, addrs):
host, zone = fqdn.split(".", 1)
# Add forward and reverse records to self
for addr in addrs:
# Check first if the zone is a master zone
# (if it is a forward zone, dns_zone_exists will return False)
if dns_zone_exists(zone, api=self.api):
add_fwd_rr(zone, host, addr, self.api)
else:
logger.debug("Skip adding record %s to a zone %s "
"not managed by IPA", addr, zone)
reverse_zone = find_reverse_zone(addr, self.api)
if reverse_zone:
add_ptr_rr(reverse_zone, addr, fqdn, None, api=self.api)
def __add_self(self):
self.__add_master_records(self.fqdn, self.ip_addresses)
def __add_others(self):
entries = api.Backend.ldap2.get_entries(
DN(api.env.container_masters, self.suffix),
api.Backend.ldap2.SCOPE_ONELEVEL, None, ['dn'])
for entry in entries:
fqdn = entry.dn[0]['cn']
if fqdn == self.fqdn:
continue
addrs = installutils.resolve_ip_addresses_nss(fqdn)
logger.debug("Adding DNS records for master %s", fqdn)
self.__add_master_records(fqdn, addrs)
def __setup_principal(self):
installutils.kadmin_addprinc(self.principal)
# Store the keytab on disk
self.fstore.backup_file(self.keytab)
installutils.create_keytab(self.keytab, self.principal)
p = self.move_service(self.principal)
if p is None:
# the service has already been moved, perhaps we're doing a DNS reinstall
dns_principal = DN(('krbprincipalname', self.principal),
('cn', 'services'), ('cn', 'accounts'), self.suffix)
else:
dns_principal = p
# Make sure access is strictly reserved to the named user
self.service_user.chown(self.keytab)
os.chmod(self.keytab, 0o400)
        # modify the principal so that it is marked as an IPA service and
        # can host the memberof attribute, then add it to the DNS Servers
        # role group; this way the DNS server is allowed to perform
        # DNS updates
dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'), ('cn', 'pbac'), self.suffix)
mod = [(ldap.MOD_ADD, 'member', dns_principal)]
try:
api.Backend.ldap2.modify_s(dns_group, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
logger.critical("Could not modify principal's %s entry: %s",
dns_principal, str(e))
raise
        # the bind-dyndb-ldap persistent search feature requires a
        # connection free of both size and time limits
mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),
(ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]
try:
api.Backend.ldap2.modify_s(dns_principal, mod)
except Exception as e:
logger.critical("Could not set principal's %s LDAP limits: %s",
dns_principal, str(e))
raise
def setup_autobind(self):
self.add_autobind_entry(
constants.NAMED_USER, constants.NAMED_GROUP, self.principal
)
def setup_named_conf(self, backup=False):
"""Create, update, or migrate named configuration files
        The method is used by the installer and the upgrade process. The
        named.conf file is backed up the first time and overwritten on
        every call. The user-specific config files are created once and
        not modified in subsequent calls.
        The "dnssec-validation" option is migrated.
:returns: True if any config file was modified, else False
"""
# files are owned by root:named and are readable by user and group
uid = 0
gid = constants.NAMED_GROUP.gid
mode = 0o640
changed = False
if not self.fstore.has_file(paths.NAMED_CONF):
self.fstore.backup_file(paths.NAMED_CONF)
# named.conf
txt = ipautil.template_file(
os.path.join(paths.NAMED_CONF_SRC), self.sub_dict
)
with open(paths.NAMED_CONF) as f:
old_txt = f.read()
if txt == old_txt:
logger.debug("%s is unmodified", paths.NAMED_CONF)
else:
if backup:
if not os.path.isfile(paths.NAMED_CONF_BAK):
shutil.copyfile(paths.NAMED_CONF, paths.NAMED_CONF_BAK)
logger.info("created backup %s", paths.NAMED_CONF_BAK)
else:
logger.warning(
"backup %s already exists", paths.NAMED_CONF_BAK
)
with open(paths.NAMED_CONF, "w") as f:
os.fchmod(f.fileno(), mode)
os.fchown(f.fileno(), uid, gid)
f.write(txt)
logger.info("created new %s", paths.NAMED_CONF)
changed = True
# user configurations
user_configs = (
(paths.NAMED_CUSTOM_CONF_SRC, paths.NAMED_CUSTOM_CONF),
(
paths.NAMED_CUSTOM_OPTIONS_CONF_SRC,
paths.NAMED_CUSTOM_OPTIONS_CONF
),
(
paths.NAMED_LOGGING_OPTIONS_CONF_SRC,
paths.NAMED_LOGGING_OPTIONS_CONF,
),
)
for src, dest in user_configs:
if not os.path.exists(dest):
txt = ipautil.template_file(src, self.sub_dict)
with open(dest, "w") as f:
os.fchmod(f.fileno(), mode)
os.fchown(f.fileno(), uid, gid)
f.write(txt)
logger.info("created named user config '%s'", dest)
changed = True
else:
logger.info("named user config '%s' already exists", dest)
return changed
def __setup_server_configuration(self):
ensure_dnsserver_container_exists(api.Backend.ldap2, self.api)
try:
self.api.Command.dnsserver_add(
self.fqdn, idnssoamname=DNSName(self.fqdn).make_absolute(),
)
except errors.DuplicateEntry:
# probably reinstallation of DNS
pass
try:
self.api.Command.dnsserver_mod(
self.fqdn,
idnsforwarders=[unicode(f) for f in self.forwarders],
idnsforwardpolicy=unicode(self.forward_policy)
)
except errors.EmptyModlist:
pass
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
def setup_resolv_conf(self):
searchdomains = [self.domain]
nameservers = set()
resolve1_enabled = dnsforwarders.detect_resolve1_resolv_conf()
for ip_address in self.ip_addresses:
if ip_address.version == 4:
nameservers.add("127.0.0.1")
elif ip_address.version == 6:
nameservers.add("::1")
try:
tasks.configure_dns_resolver(
sorted(nameservers), searchdomains,
resolve1_enabled=resolve1_enabled, fstore=self.fstore
)
except IOError as e:
logger.error('Could not update DNS config: %s', e)
else:
            # python-dns might have a global resolver cached; we have to
            # re-initialize it because resolv.conf has changed
dnsutil.reset_default_resolver()
def __generate_rndc_key(self):
installutils.check_entropy()
ipautil.run([paths.GENERATE_RNDC_KEY])
def add_master_dns_records(self, fqdn, ip_addresses, realm_name, domain_name,
reverse_zones):
self.fqdn = fqdn
self.ip_addresses = ip_addresses
self.realm = realm_name
self.domain = domain_name
self.host = fqdn.split(".")[0]
self.suffix = ipautil.realm_to_suffix(self.realm)
self.reverse_zones = reverse_zones
self.zonemgr = 'hostmaster.%s' % self.domain
self.__add_self()
def remove_ipa_ca_cnames(self, domain_name):
# get ipa-ca CNAMEs
try:
cnames = get_rr(domain_name, IPA_CA_RECORD, "CNAME", api=self.api)
except errors.NotFound:
# zone does not exists
cnames = None
if not cnames:
return
logger.info('Removing IPA CA CNAME records')
# create CNAME to FQDN mapping
cname_fqdn = {}
for cname in cnames:
if cname.endswith('.'):
fqdn = cname[:-1]
else:
fqdn = '%s.%s' % (cname, domain_name)
cname_fqdn[cname] = fqdn
# get FQDNs of all IPA masters
try:
masters = set(get_masters(self.api.Backend.ldap2))
except errors.NotFound:
masters = set()
# check if all CNAMEs point to IPA masters
for cname in cnames:
fqdn = cname_fqdn[cname]
if fqdn not in masters:
logger.warning(
"Cannot remove IPA CA CNAME please remove them manually "
"if necessary")
return
# delete all CNAMEs
for cname in cnames:
del_rr(domain_name, IPA_CA_RECORD, "CNAME", cname, api=self.api)
def remove_master_dns_records(self, fqdn, realm_name, domain_name):
host, zone = fqdn.split(".", 1)
self.host = host
self.fqdn = fqdn
self.domain = domain_name
if not dns_zone_exists(zone, api=self.api):
# Zone may be a forward zone, skip update
return
areclist = get_fwd_rr(zone, host, api=self.api)
for rdata in areclist:
del_fwd_rr(zone, host, rdata, api=self.api)
rzone = find_reverse_zone(rdata)
if rzone is not None:
record = get_reverse_record_name(rzone, rdata)
del_rr(rzone, record, "PTR", normalize_zone(fqdn),
api=self.api)
self.update_system_records()
def remove_server_ns_records(self, fqdn):
"""
Remove all NS records pointing to this server
"""
ldap = self.api.Backend.ldap2
ns_rdata = normalize_zone(fqdn)
# find all NS records pointing to this server
search_kw = {}
search_kw['nsrecord'] = ns_rdata
attr_filter = ldap.make_filter(search_kw, rules=ldap.MATCH_ALL)
attributes = ['idnsname', 'objectclass']
dn = DN(self.api.env.container_dns, self.api.env.basedn)
entries, _truncated = ldap.find_entries(
attr_filter, attributes, base_dn=dn)
# remove records
if entries:
logger.debug("Removing all NS records pointing to %s:", ns_rdata)
for entry in entries:
if 'idnszone' in entry['objectclass']:
# zone record
zone = entry.single_value['idnsname']
logger.debug("zone record %s", zone)
del_ns_rr(zone, u'@', ns_rdata, api=self.api)
else:
zone = entry.dn[1].value # get zone from DN
record = entry.single_value['idnsname']
logger.debug("record %s in zone %s", record, zone)
del_ns_rr(zone, record, ns_rdata, api=self.api)
def update_system_records(self):
self.print_msg("Updating DNS system records")
system_records = IPASystemRecords(self.api)
try:
(
(_ipa_rec, failed_ipa_rec),
(_loc_rec, failed_loc_rec)
) = system_records.update_dns_records()
except IPADomainIsNotManagedByIPAError:
logger.error(
"IPA domain is not managed by IPA, please update records "
"manually")
else:
if failed_ipa_rec or failed_loc_rec:
logger.error("Update of following records failed:")
for attr in (failed_ipa_rec, failed_loc_rec):
for rname, node, error in attr:
for record in IPASystemRecords.records_list_from_node(
rname, node
):
logger.error("%s (%s)", record, error)
def check_global_configuration(self):
"""
        Check the global DNS configuration in the LDAP server and inform the
        user when it is set, since it overrides the options configured in
        named.conf.
"""
result = self.api.Command.dnsconfig_show()
global_conf_set = any(
param.name in result['result'] for param in
self.api.Object['dnsconfig'].params() if
u'virtual_attribute' not in param.flags
)
if not global_conf_set:
print("Global DNS configuration in LDAP server is empty")
print("You can use 'dnsconfig-mod' command to set global DNS options that")
print("would override settings in local named.conf files")
return
print("Global DNS configuration in LDAP server is not empty")
print("The following configuration options override local settings in named.conf:")
print("")
textui = ipalib.cli.textui(self.api)
self.api.Command.dnsconfig_show.output_for_cli(textui, result, None,
reverse=False)
def is_configured(self):
"""
Override the default logic querying StateFile for configuration status
        and instead check whether named.conf was already modified by the
        IPA installer.
"""
return named_conf_exists()
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring %s" % self.service_name)
self.dns_backup.clear_records(self.api.Backend.ldap2.isconnected())
try:
self.fstore.restore_file(paths.NAMED_CONF)
except ValueError as error:
logger.debug('%s', error)
try:
tasks.unconfigure_dns_resolver(fstore=self.fstore)
except Exception:
logger.exception("Failed to unconfigure DNS resolver")
ipautil.rmtree(paths.BIND_LDAP_DNS_IPA_WORKDIR)
self.disable()
self.stop()
self.named_conflict.unmask()
ipautil.remove_file(paths.NAMED_CONF_BAK)
ipautil.remove_file(paths.NAMED_CUSTOM_CONF)
ipautil.remove_file(paths.NAMED_CUSTOM_OPTIONS_CONF)
ipautil.remove_file(paths.NAMED_LOGGING_OPTIONS_CONF)
ipautil.remove_file(paths.RNDC_KEY)
ipautil.remove_file(
os.path.join(paths.NAMED_VAR_DIR, "_default.tsigkeys")
)
try:
while self.fstore.restore_file(self.keytab):
pass
except ValueError:
pass
ipautil.remove_keytab(self.keytab)
ipautil.remove_ccache(run_as=self.service_user)
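# --- Illustrative helper sketch (not part of the module) ---
# The write-with-ownership pattern used by setup_named_conf() above,
# reduced to a self-contained function: mode and ownership are applied on
# the open descriptor before the content lands on disk.
import os

def write_owned(path, text, uid, gid, mode=0o640):
    """Write *text* to *path* with the given mode and ownership."""
    with open(path, "w") as f:
        os.fchmod(f.fileno(), mode)
        os.fchown(f.fileno(), uid, gid)
        f.write(text)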
| 49,131 | Python | .py | 1,149 | 31.720627 | 96 | 0.589713 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,749 | dsinstance.py | freeipa_freeipa/ipaserver/install/dsinstance.py |
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
# Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import logging
import shutil
import os
import tempfile
import fnmatch
from lib389 import DirSrv
from lib389.idm.ipadomain import IpaDomain
from lib389.instance.options import General2Base, Slapd2Base
from lib389.instance.remove import remove_ds_instance as lib389_remove_ds
from lib389.instance.setup import SetupDs
from lib389.utils import get_default_db_lib
from ipalib import x509
from ipalib.install import certmonger, certstore
from ipapython.certdb import (IPA_CA_TRUST_FLAGS,
EXTERNAL_CA_TRUST_FLAGS,
TrustFlags)
from ipapython import ipautil, ipaldap
from ipapython import dogtag
from ipaserver.install import service
from ipaserver.install import installutils
from ipaserver.install import certs
from ipaserver.install import replication
from ipaserver.install import sysupgrade
from ipaserver.install import upgradeinstance
from ipaserver.install import ldapupdate
from ipalib import api
from ipalib import errors
from ipalib import constants
from ipaplatform.constants import constants as platformconstants
from ipaplatform.tasks import tasks
from ipapython.dn import DN
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
DS_USER = platformconstants.DS_USER
DS_GROUP = platformconstants.DS_GROUP
IPA_SCHEMA_FILES = ("60kerberos.ldif",
"60samba.ldif",
"60ipaconfig.ldif",
"60basev2.ldif",
"60basev3.ldif",
"60basev4.ldif",
"60ipapk11.ldif",
"60ipadns.ldif",
"60certificate-profiles.ldif",
"61kerberos-ipav3.ldif",
"65ipacertstore.ldif",
"65ipasudo.ldif",
"70ipaotp.ldif",
"70topology.ldif",
"71idviews.ldif",
"72domainlevels.ldif",
"73certmap.ldif",
"15rfc2307bis.ldif",
"15rfc4876.ldif")
ALL_SCHEMA_FILES = IPA_SCHEMA_FILES + ("05rfc2247.ldif", )
DS_INSTANCE_PREFIX = 'slapd-'
def find_server_root():
if os.path.isdir(paths.USR_LIB_DIRSRV_64):
return paths.USR_LIB_DIRSRV_64
else:
return paths.USR_LIB_DIRSRV
def config_dirname(serverid):
return (paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid) + "/"
def schema_dirname(serverid):
return config_dirname(serverid) + "/schema/"
def remove_ds_instance(serverid):
"""Call the lib389 api to remove the instance. Because of the
design of the api, there is no "force" command. Provided a marker
file exists, it will attempt the removal, and the marker is the *last*
    file to be removed, i.e. just run this multiple times until it works
    (if you even need multiple runs at all).
"""
logger.debug("Attempting to remove instance %s", serverid)
# Alloc the local instance by name (no creds needed!)
ds = DirSrv(verbose=True, external_log=logger)
ds.local_simple_allocate(serverid)
# Remove it
lib389_remove_ds(ds)
logger.debug("Instance removed correctly.")
def get_ds_instances():
'''
Return a sorted list of all 389ds instances.
If the instance name ends with '.removed' it is ignored. This
matches 389ds behavior.
'''
dirsrv_instance_dir = paths.ETC_DIRSRV
instances = []
for basename in os.listdir(dirsrv_instance_dir):
pathname = os.path.join(dirsrv_instance_dir, basename)
# Must be a directory
if os.path.isdir(pathname):
# Must start with prefix and not end with .removed
if (basename.startswith(DS_INSTANCE_PREFIX) and
not basename.endswith('.removed')):
# Strip off prefix
instance = basename[len(DS_INSTANCE_PREFIX):]
# Must be non-empty
if instance:
instances.append(instance)
instances.sort()
return instances
def check_ports():
"""
    Check whether the Directory Server ports are open.
    Returns a tuple with two booleans, one for the insecure port 389 and
    one for the secure port 636. True means that the port is free, False
    means that the port is taken.
"""
ds_unsecure = not ipautil.host_port_open(None, 389)
ds_secure = not ipautil.host_port_open(None, 636)
return (ds_unsecure, ds_secure)
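# Illustrative usage sketch: the tuple reads (port 389 free, port 636
# free), so an installer can bail out early when either port is taken.
#
#   ds_unsecure, ds_secure = check_ports()
#   if not ds_unsecure or not ds_secure:
#       raise ScriptError("IPA requires ports 389 and 636")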
def is_ds_running(server_id=''):
return services.knownservices.dirsrv.is_running(instance_name=server_id)
def get_domain_level(api=api):
dn = DN(('cn', 'Domain Level'),
('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
with ipaldap.LDAPClient.from_realm(api.env.realm) as conn:
conn.external_bind()
try:
entry = conn.get_entry(dn, ['ipaDomainLevel'])
except errors.NotFound:
return constants.DOMAIN_LEVEL_0
else:
return int(entry.single_value['ipaDomainLevel'])
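

# Sketch only (helper name is an assumption): a caller could branch on the
# domain level, e.g. to decide whether Directory Manager credentials are
# needed for replication setup (compare __setup_replica() further down).
def _is_domain_level_0():
    return get_domain_level() == constants.DOMAIN_LEVEL_0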
def get_all_external_schema_files(root):
"""Get all schema files"""
f = []
for path, _subdirs, files in os.walk(root):
for name in files:
if fnmatch.fnmatch(name, "*.ldif"):
f.append(os.path.join(path, name))
return sorted(f)
class DsInstance(service.Service):
def __init__(self, realm_name=None, domain_name=None, fstore=None,
domainlevel=None, config_ldif=None):
super(DsInstance, self).__init__(
"dirsrv",
service_desc="directory server",
fstore=fstore,
service_prefix=u'ldap',
keytab=paths.DS_KEYTAB,
service_user=DS_USER,
realm_name=realm_name
)
self.nickname = 'Server-Cert'
self.sub_dict = None
self.domain = domain_name
self.master_fqdn = None
self.pkcs12_info = None
self.cacert_name = None
self.ca_is_configured = True
self.cert = None
self.idstart = None
self.idmax = None
self.ca_subject = None
self.subject_base = None
self.open_ports = []
self.run_init_memberof = True
self.config_ldif = config_ldif # updates for dse.ldif
self.domainlevel = domainlevel
if realm_name:
self.suffix = ipautil.realm_to_suffix(self.realm)
self.serverid = ipaldap.realm_to_serverid(self.realm)
if self.domain is None:
self.domain = self.realm.lower()
self.__setup_sub_dict()
else:
self.suffix = DN()
self.serverid = None
subject_base = ipautil.dn_attribute_property('_subject_base')
def __common_setup(self):
self.step("creating directory server instance", self.__create_instance)
if get_default_db_lib() == 'bdb':
self.step("tune ldbm plugin", self.__tune_ldbm)
if self.config_ldif is not None:
self.step("stopping directory server", self.__stop_instance)
self.step(
"updating configuration in dse.ldif", self.__update_dse_ldif
)
self.step("starting directory server", self.__start_instance)
self.step("adding default schema", self.__add_default_schemas)
self.step("enabling memberof plugin", self.__add_memberof_module)
self.step("enabling winsync plugin", self.__add_winsync_module)
self.step("configure password logging", self.__password_logging)
self.step("configuring replication version plugin", self.__config_version_module)
self.step("enabling IPA enrollment plugin", self.__add_enrollment_module)
self.step("configuring uniqueness plugin", self.__set_unique_attrs)
self.step("configuring uuid plugin", self.__config_uuid_module)
self.step("configuring modrdn plugin", self.__config_modrdn_module)
self.step("configuring DNS plugin", self.__config_dns_module)
self.step("enabling entryUSN plugin", self.__enable_entryusn)
self.step("configuring lockout plugin", self.__config_lockout_module)
self.step("configuring graceperiod plugin",
self.config_graceperiod_module)
self.step("configuring topology plugin", self.__config_topology_module)
self.step("creating indices", self.__create_indices)
self.step("enabling referential integrity plugin", self.__add_referint_module)
self.step("configuring certmap.conf", self.__certmap_conf)
self.step("configure new location for managed entries", self.__repoint_managed_entries)
self.step("configure dirsrv ccache and keytab",
self.configure_systemd_ipa_env)
self.step("enabling SASL mapping fallback",
self.__enable_sasl_mapping_fallback)
def __common_post_setup(self):
self.step("initializing group membership", self.init_memberof)
self.step("adding master entry", self.__add_master_entry)
self.step("initializing domain level", self.__set_domain_level)
self.step("configuring Posix uid/gid generation",
self.__config_uidgid_gen)
self.step("adding replication acis", self.__add_replication_acis)
self.step("activating sidgen plugin", self._add_sidgen_plugin)
self.step("activating extdom plugin", self._add_extdom_plugin)
self.step("configuring directory to start on boot", self.__enable)
# restart to enable plugins
# speeds up creation of DNA plugin entries in cn=dna,cn=ipa,cn=etc
self.step("restarting directory server", self.__restart_instance)
def init_info(self, realm_name, fqdn, domain_name, dm_password,
subject_base, ca_subject,
idstart, idmax, pkcs12_info, ca_file=None,
setup_pkinit=False):
self.realm = realm_name.upper()
self.serverid = ipaldap.realm_to_serverid(self.realm)
self.suffix = ipautil.realm_to_suffix(self.realm)
self.fqdn = fqdn
self.dm_password = dm_password
self.domain = domain_name
self.subject_base = subject_base
self.ca_subject = ca_subject
self.idstart = idstart
self.idmax = idmax
self.pkcs12_info = pkcs12_info
if pkcs12_info:
self.ca_is_configured = False
self.setup_pkinit = setup_pkinit
self.ca_file = ca_file
self.__setup_sub_dict()
def create_instance(self, realm_name, fqdn, domain_name,
dm_password, pkcs12_info=None,
idstart=1100, idmax=999999,
subject_base=None, ca_subject=None,
hbac_allow=True, ca_file=None, setup_pkinit=False):
self.init_info(
realm_name, fqdn, domain_name, dm_password,
subject_base, ca_subject,
idstart, idmax, pkcs12_info, ca_file=ca_file,
setup_pkinit=setup_pkinit)
self.__common_setup()
self.step("restarting directory server", self.__restart_instance)
self.step("adding sasl mappings to the directory", self.__configure_sasl_mappings)
self.step("adding default layout", self.__add_default_layout)
self.step("adding delegation layout", self.__add_delegation_layout)
self.step("creating container for managed entries", self.__managed_entries)
self.step("configuring user private groups", self.__user_private_groups)
self.step("configuring netgroups from hostgroups", self.__host_nis_groups)
self.step("creating default Sudo bind user", self.__add_sudo_binduser)
self.step("creating default Auto Member layout", self.__add_automember_config)
self.step("adding range check plugin", self.__add_range_check_plugin)
if hbac_allow:
self.step("creating default HBAC rule allow_all", self.add_hbac)
self.step("adding entries for topology management", self.__add_topology_entries)
self.__common_post_setup()
self.start_creation(runtime=30)
def enable_ssl(self):
self.steps = []
self.step("configuring TLS for DS instance", self.__enable_ssl)
if self.master_fqdn is None:
self.step("adding CA certificate entry", self.__upload_ca_cert)
else:
self.step("importing CA certificates from LDAP",
self.__import_ca_certs)
self.step("restarting directory server", self.__restart_instance)
self.start_creation()
def create_replica(self, realm_name, master_fqdn, fqdn,
domain_name, dm_password,
subject_base, ca_subject,
api, pkcs12_info=None, ca_file=None,
ca_is_configured=None,
setup_pkinit=False):
# idstart and idmax are configured so that the range is seen as
# depleted by the DNA plugin and the replica will go and get a
# new range from the master.
# This way all servers use the initially defined range by default.
idstart = 1101
idmax = 1100
self.init_info(
realm_name=realm_name,
fqdn=fqdn,
domain_name=domain_name,
dm_password=dm_password,
subject_base=subject_base,
ca_subject=ca_subject,
idstart=idstart,
idmax=idmax,
pkcs12_info=pkcs12_info,
ca_file=ca_file,
setup_pkinit=setup_pkinit,
)
self.master_fqdn = master_fqdn
if ca_is_configured is not None:
self.ca_is_configured = ca_is_configured
self.promote = True
self.api = api
self.__common_setup()
self.step("restarting directory server", self.__restart_instance)
self.step("creating DS keytab", self.request_service_keytab)
        # 389-ds allows ignoring time skew during replication. This is
        # disabled by default to avoid issues with non-contiguous CSN values,
        # which are derived from the time stamp of each change. However,
        # there are cases when we are interested only in the changes coming
        # from the other side and should therefore allow ignoring the time
        # skew.
#
# This helps with initial replication or force-sync because
# the receiving side has no valuable changes itself yet.
self.step("ignore time skew for initial replication",
self.replica_ignore_initial_time_skew)
self.step("setting up initial replication", self.__setup_replica)
self.step("prevent time skew after initial replication",
self.replica_revert_time_skew)
self.step("adding sasl mappings to the directory", self.__configure_sasl_mappings)
self.step("updating schema", self.__update_schema)
# See LDIFs for automember configuration during replica install
self.step("setting Auto Member configuration", self.__add_replica_automember_config)
self.step("enabling S4U2Proxy delegation", self.__setup_s4u2proxy)
self.__common_post_setup()
self.start_creation(runtime=30)
def _get_replication_manager(self):
# Always connect to self over ldapi
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
repl = replication.ReplicationManager(
self.realm, self.fqdn, self.dm_password, conn=conn
)
if self.dm_password is not None and not self.promote:
bind_dn = DN(('cn', 'Directory Manager'))
bind_pw = self.dm_password
else:
bind_dn = bind_pw = None
return repl, bind_dn, bind_pw
def __setup_replica(self):
"""
Setup initial replication between replica and remote master.
GSSAPI is always used as a replication bind method. Note, however,
that the bind method for the replication differs between domain levels:
* in domain level 0, Directory Manager credentials are used to bind
to remote master
* in domain level 1, GSSAPI using admin/privileged host credentials
is used (we do not have access to masters' DM password in this
stage)
"""
replication.enable_replication_version_checking(
self.realm,
self.dm_password)
repl, bind_dn, bind_pw = self._get_replication_manager()
repl.setup_promote_replication(
self.master_fqdn,
r_binddn=bind_dn,
r_bindpw=bind_pw,
cacert=self.ca_file
)
self.run_init_memberof = repl.needs_memberof_fixup()
def finalize_replica_config(self):
repl, bind_dn, bind_pw = self._get_replication_manager()
repl.finalize_replica_config(
self.master_fqdn,
r_binddn=bind_dn,
r_bindpw=bind_pw,
cacert=self.ca_file
)
def __configure_sasl_mappings(self):
        # We need to remove any existing SASL mappings in the directory, as
        # otherwise they may conflict.
try:
res = api.Backend.ldap2.get_entries(
DN(('cn', 'mapping'), ('cn', 'sasl'), ('cn', 'config')),
api.Backend.ldap2.SCOPE_ONELEVEL,
"(objectclass=nsSaslMapping)")
for r in res:
try:
api.Backend.ldap2.delete_entry(r)
except Exception as e:
logger.critical(
"Error during SASL mapping removal: %s", e)
raise
except Exception as e:
logger.critical("Error while enumerating SASL mappings %s", e)
raise
entry = api.Backend.ldap2.make_entry(
DN(
('cn', 'Full Principal'), ('cn', 'mapping'), ('cn', 'sasl'),
('cn', 'config')),
objectclass=["top", "nsSaslMapping"],
cn=["Full Principal"],
nsSaslMapRegexString=[r'\(.*\)@\(.*\)'],
nsSaslMapBaseDNTemplate=[self.suffix],
nsSaslMapFilterTemplate=['(krbPrincipalName=\\1@\\2)'],
nsSaslMapPriority=['10'],
)
api.Backend.ldap2.add_entry(entry)
entry = api.Backend.ldap2.make_entry(
DN(
('cn', 'Name Only'), ('cn', 'mapping'), ('cn', 'sasl'),
('cn', 'config')),
objectclass=["top", "nsSaslMapping"],
cn=["Name Only"],
nsSaslMapRegexString=['^[^:@]+$'],
nsSaslMapBaseDNTemplate=[self.suffix],
nsSaslMapFilterTemplate=['(krbPrincipalName=&@%s)' % self.realm],
nsSaslMapPriority=['10'],
)
api.Backend.ldap2.add_entry(entry)
def __update_schema(self):
# FIXME: https://fedorahosted.org/389/ticket/47490
self._ldap_mod("schema-update.ldif")
def __enable(self):
self.backup_state("enabled", self.is_enabled())
# At the end of the installation ipa-server-install will enable the
        # 'ipa' service which takes care of starting/stopping dirsrv
self.disable()
def __setup_sub_dict(self):
server_root = find_server_root()
self.sub_dict = ldapupdate.get_sub_dict(
realm=self.realm,
domain=self.domain,
suffix=self.suffix,
fqdn=self.fqdn,
idstart=self.idstart,
idmax=self.idmax,
)
self.sub_dict.update(
DOMAIN_LEVEL=self.domainlevel,
SERVERID=self.serverid,
PASSWORD=self.dm_password,
RANDOM_PASSWORD=ipautil.ipa_generate_password(),
USER=DS_USER,
GROUP=DS_GROUP,
SERVER_ROOT=server_root,
)
def __create_instance(self):
self.backup_state("serverid", self.serverid)
# The new installer is api driven. We can pass it a log function
# and it will use it. Because of this, we can pass verbose true,
# and allow our logger to control the display based on level.
sds = SetupDs(verbose=True, dryrun=False, log=logger)
# General environmental options.
general_options = General2Base(logger)
general_options.set('strict_host_checking', False)
# Check that our requested configuration is actually valid ...
general_options.verify()
general = general_options.collect()
# Slapd options, ie instance name.
slapd_options = Slapd2Base(logger)
slapd_options.set('instance_name', self.serverid)
slapd_options.set('root_password', self.dm_password)
slapd_options.set('self_sign_cert', False)
slapd_options.verify()
slapd = slapd_options.collect()
# Create userroot. Note that the new install does NOT
# create sample entries, so this is *empty*.
userroot = {
'cn': 'userRoot',
'nsslapd-suffix': self.suffix.ldap_text()
}
backends = [userroot]
sds.create_from_args(general, slapd, backends, None)
# Now create the new domain root object in the format that IPA expects.
# Get the instance and setup LDAPI with root autobind.
inst = DirSrv(verbose=True, external_log=logger)
inst.local_simple_allocate(
serverid=self.serverid,
ldapuri=ipaldap.get_ldap_uri(realm=self.realm, protocol='ldapi'),
)
inst.setup_ldapi()
inst.open()
def get_entry(dn, attrs):
return inst.getEntry(str(dn), attrlist=attrs)
self.sub_dict['REPLICATION_PLUGIN'] = (
installutils.get_replication_plugin_name(get_entry)
)
try:
ipadomain = IpaDomain(inst, dn=self.suffix.ldap_text())
ipadomain.create(properties={
'dc': self.realm.split('.')[0].lower(),
'info': 'IPA V2.0',
})
finally:
inst.close()
# Done!
logger.debug("completed creating DS instance")
def __tune_ldbm(self):
self._ldap_mod("ldbm-tuning.ldif")
def __update_dse_ldif(self):
"""
        This method updates dse.ldif right after instance creation. It
        allows the admin to modify the DS configuration before IPA is
        fully installed (for example: settings for replication on
        replicas).
        DS must be turned off.
"""
dse_filename = os.path.join(
paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % self.serverid,
'dse.ldif'
)
with tempfile.NamedTemporaryFile(
mode='w', delete=False) as new_dse_ldif:
temp_filename = new_dse_ldif.name
with open(dse_filename, "r") as input_file:
parser = installutils.ModifyLDIF(input_file, new_dse_ldif)
if self.config_ldif:
# parse modifications from ldif file supplied by the admin
with open(self.config_ldif, "r") as config_ldif:
parser.modifications_from_ldif(config_ldif)
parser.parse()
new_dse_ldif.flush()
shutil.copy2(temp_filename, dse_filename)
tasks.restore_context(dse_filename)
try:
os.remove(temp_filename)
except OSError as e:
logger.debug("Failed to clean temporary file: %s", e)
def __add_default_schemas(self):
for schema_fname in IPA_SCHEMA_FILES:
target_fname = schema_dirname(self.serverid) + schema_fname
shutil.copyfile(
os.path.join(paths.USR_SHARE_IPA_DIR, schema_fname),
target_fname)
os.chmod(target_fname, 0o440) # read access for dirsrv user/group
DS_USER.chown(target_fname)
try:
shutil.move(schema_dirname(self.serverid) + "05rfc2247.ldif",
schema_dirname(self.serverid) + "05rfc2247.ldif.old")
target_fname = schema_dirname(self.serverid) + "05rfc2247.ldif"
shutil.copyfile(
os.path.join(paths.USR_SHARE_IPA_DIR, "05rfc2247.ldif"),
target_fname)
os.chmod(target_fname, 0o440)
DS_USER.chown(target_fname)
except IOError:
# Does not apply with newer DS releases
pass
def start(self, instance_name="", capture_output=True, wait=True):
super(DsInstance, self).start(
instance_name, capture_output=capture_output, wait=wait
)
api.Backend.ldap2.connect()
def stop(self, instance_name="", capture_output=True):
if api.Backend.ldap2.isconnected():
api.Backend.ldap2.disconnect()
super(DsInstance, self).stop(
instance_name, capture_output=capture_output
)
def restart(self, instance_name="", capture_output=True, wait=True):
if api.Backend.ldap2.isconnected():
api.Backend.ldap2.disconnect()
try:
super(DsInstance, self).restart(
instance_name, capture_output=capture_output, wait=wait
)
if not is_ds_running(instance_name):
logger.critical("Failed to restart the directory server. "
"See the installation log for details.")
raise ScriptError()
except SystemExit as e:
raise e
except Exception as e:
# TODO: roll back here?
logger.critical("Failed to restart the directory server (%s). "
"See the installation log for details.", e)
api.Backend.ldap2.connect()
def __start_instance(self):
self.start(self.serverid)
def __stop_instance(self):
self.stop(self.serverid)
def __restart_instance(self):
self.restart(self.serverid)
def __enable_entryusn(self):
self._ldap_mod("entryusn.ldif")
def __add_memberof_module(self):
self._ldap_mod("memberof-conf.ldif")
def init_memberof(self):
if not self.run_init_memberof:
return
self._ldap_mod("memberof-task.ldif", self.sub_dict)
# Note, keep dn in sync with dn in install/share/memberof-task.ldif
dn = DN(('cn', 'IPA install %s' % self.sub_dict["TIME"]), ('cn', 'memberof task'),
('cn', 'tasks'), ('cn', 'config'))
logger.debug("Waiting for memberof task to complete.")
with ipaldap.LDAPClient.from_realm(self.realm) as conn:
conn.external_bind()
replication.wait_for_task(conn, dn)
def apply_updates(self):
schema_files = get_all_external_schema_files(paths.EXTERNAL_SCHEMA_DIR)
data_upgrade = upgradeinstance.IPAUpgrade(self.realm,
schema_files=schema_files)
try:
data_upgrade.create_instance()
except Exception as e:
# very fatal errors only will raise exception
raise RuntimeError("Update failed: %s" % e)
installutils.store_version()
def __add_referint_module(self):
self._ldap_mod("referint-conf.ldif")
def __set_unique_attrs(self):
self._ldap_mod("unique-attributes.ldif", self.sub_dict)
def __config_uidgid_gen(self):
self._ldap_mod("dna.ldif", self.sub_dict)
def __add_master_entry(self):
self._ldap_mod("master-entry.ldif", self.sub_dict)
def __add_topology_entries(self):
self._ldap_mod("topology-entries.ldif", self.sub_dict)
def __add_winsync_module(self):
self._ldap_mod("ipa-winsync-conf.ldif")
def __password_logging(self):
self._ldap_mod("pw-logging-conf.ldif")
def __config_version_module(self):
self._ldap_mod("version-conf.ldif", self.sub_dict)
def __config_uuid_module(self):
self._ldap_mod("uuid-conf.ldif")
self._ldap_mod("uuid.ldif", self.sub_dict)
def __config_modrdn_module(self):
self._ldap_mod("modrdn-conf.ldif")
self._ldap_mod("modrdn-krbprinc.ldif", self.sub_dict)
def __config_dns_module(self):
        # Configure the DNS plugin unconditionally as we would otherwise run
        # into trouble if another replica had just configured DNS with
        # ipa-dns-install
self._ldap_mod("ipa-dns-conf.ldif")
def __config_lockout_module(self):
self._ldap_mod("lockout-conf.ldif")
def config_graceperiod_module(self):
if not api.Backend.ldap2.isconnected():
api.Backend.ldap2.connect()
dn = DN('cn=IPA Graceperiod,cn=plugins,cn=config')
try:
api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
self._ldap_mod("graceperiod-conf.ldif")
def __config_topology_module(self):
self._ldap_mod("ipa-topology-conf.ldif", self.sub_dict)
def __repoint_managed_entries(self):
self._ldap_mod("repoint-managed-entries.ldif", self.sub_dict)
def configure_systemd_ipa_env(self):
template = os.path.join(
paths.USR_SHARE_IPA_DIR, "ds-ipa-env.conf.template"
)
sub_dict = dict(
KRB5_KTNAME=paths.DS_KEYTAB,
KRB5CCNAME=paths.TMP_KRB5CC % platformconstants.DS_USER.uid
)
conf = ipautil.template_file(template, sub_dict)
destfile = paths.SLAPD_INSTANCE_SYSTEMD_IPA_ENV_TEMPLATE % (
self.serverid
)
destdir = os.path.dirname(destfile)
if not os.path.isdir(destdir):
# create dirsrv-$SERVERID.service.d
os.mkdir(destdir, 0o755)
with open(destfile, 'w') as f:
os.fchmod(f.fileno(), 0o644)
f.write(conf)
tasks.restore_context(destfile)
# remove variables from old /etc/sysconfig/dirsrv file
if os.path.isfile(paths.SYSCONFIG_DIRSRV):
self.fstore.backup_file(paths.SYSCONFIG_DIRSRV)
ipautil.config_replace_variables(
paths.SYSCONFIG_DIRSRV,
removevars={'KRB5_KTNAME', 'KRB5CCNAME'}
)
# reload systemd to materialize new config file
tasks.systemd_daemon_reload()
def __managed_entries(self):
self._ldap_mod("managed-entries.ldif", self.sub_dict)
def __user_private_groups(self):
self._ldap_mod("user_private_groups.ldif", self.sub_dict)
def __host_nis_groups(self):
self._ldap_mod("host_nis_groups.ldif", self.sub_dict)
def __add_enrollment_module(self):
self._ldap_mod("enrollment-conf.ldif", self.sub_dict)
def __enable_ssl(self):
dirname = config_dirname(self.serverid)
dsdb = certs.CertDB(
self.realm,
nssdir=dirname,
subject_base=self.subject_base,
ca_subject=self.ca_subject,
)
if self.pkcs12_info:
if self.ca_is_configured:
trust_flags = IPA_CA_TRUST_FLAGS
else:
trust_flags = EXTERNAL_CA_TRUST_FLAGS
dsdb.create_from_pkcs12(self.pkcs12_info[0], self.pkcs12_info[1],
ca_file=self.ca_file,
trust_flags=trust_flags)
# rewrite the pin file with current password
dsdb.create_pin_file()
server_certs = dsdb.find_server_certs()
if len(server_certs) == 0:
raise RuntimeError("Could not find a suitable server cert in import in %s" % self.pkcs12_info[0])
# We only handle one server cert
self.nickname = server_certs[0][0]
self.cert = dsdb.get_cert_from_db(self.nickname)
if self.ca_is_configured:
dsdb.track_server_cert(
self.nickname, self.principal, dsdb.passwd_fname,
'restart_dirsrv %s' % self.serverid)
self.add_cert_to_service()
else:
dsdb.create_from_cacert()
# rewrite the pin file with current password
dsdb.create_pin_file()
if self.master_fqdn is None:
ca_args = [
paths.CERTMONGER_DOGTAG_SUBMIT,
'--ee-url', 'https://%s:8443/ca/ee/ca' % self.fqdn,
'--certfile', paths.RA_AGENT_PEM,
'--keyfile', paths.RA_AGENT_KEY,
'--cafile', paths.IPA_CA_CRT,
'--agent-submit'
]
helper = " ".join(ca_args)
prev_helper = certmonger.modify_ca_helper('IPA', helper)
else:
prev_helper = None
try:
cmd = 'restart_dirsrv %s' % self.serverid
certmonger.request_and_wait_for_cert(
certpath=dirname,
storage='NSSDB',
nickname=self.nickname,
principal=self.principal,
passwd_fname=dsdb.passwd_fname,
subject=str(DN(('CN', self.fqdn), self.subject_base)),
ca='IPA',
profile=dogtag.DEFAULT_PROFILE,
dns=[self.fqdn],
post_command=cmd,
resubmit_timeout=api.env.certmonger_wait_timeout
)
finally:
if prev_helper is not None:
certmonger.modify_ca_helper('IPA', prev_helper)
# restart_dirsrv in the request above restarts DS, reconnect ldap2
api.Backend.ldap2.disconnect()
api.Backend.ldap2.connect()
self.cert = dsdb.get_cert_from_db(self.nickname)
if prev_helper is not None:
self.add_cert_to_service()
self.cacert_name = dsdb.cacert_name
# use LDAPI?
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
encrypt_entry = conn.make_entry(
DN(('cn', 'encryption'), ('cn', 'config')),
nsSSLClientAuth=b'allowed',
nsSSL3Ciphers=b'default',
allowWeakCipher=b'off'
)
try:
conn.update_entry(encrypt_entry)
except errors.EmptyModlist:
logger.debug(
"cn=encryption,cn=config is already properly configured")
conf_entry = conn.make_entry(
DN(('cn', 'config')),
            # one cannot simply use '-' in a Python keyword argument name
**{'nsslapd-security': b'on'}
)
try:
conn.update_entry(conf_entry)
except errors.EmptyModlist:
logger.debug("nsslapd-security is already on")
entry = conn.make_entry(
DN(('cn', 'RSA'), ('cn', 'encryption'), ('cn', 'config')),
objectclass=["top", "nsEncryptionModule"],
cn=["RSA"],
nsSSLPersonalitySSL=[self.nickname],
nsSSLToken=["internal (software)"],
nsSSLActivation=["on"],
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
# 389-DS >= 1.4.0 has a default entry, update it.
conn.update_entry(entry)
conn.unbind()
# check for open secure port 636 from now on
self.open_ports.append(636)
def __upload_ca_cert(self):
"""
Upload the CA certificate from the NSS database to the LDAP directory.
"""
dirname = config_dirname(self.serverid)
dsdb = certs.CertDB(self.realm, nssdir=dirname,
subject_base=self.subject_base)
trust_flags = dict(reversed(dsdb.list_certs()))
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
nicknames = dsdb.find_root_cert(self.cacert_name)[:-1]
for nickname in nicknames:
cert = dsdb.get_cert_from_db(nickname)
certstore.put_ca_cert_nss(conn, self.suffix, cert, nickname,
trust_flags[nickname])
nickname = self.cacert_name
cert = dsdb.get_cert_from_db(nickname)
cacert_flags = trust_flags[nickname]
if self.setup_pkinit:
cacert_flags = TrustFlags(
cacert_flags.has_key,
cacert_flags.trusted,
cacert_flags.ca,
(cacert_flags.usages |
{x509.EKU_PKINIT_CLIENT_AUTH, x509.EKU_PKINIT_KDC}),
)
certstore.put_ca_cert_nss(conn, self.suffix, cert, nickname,
cacert_flags,
config_ipa=self.ca_is_configured,
config_compat=self.master_fqdn is None)
conn.unbind()
def __import_ca_certs(self):
dirname = config_dirname(self.serverid)
dsdb = certs.CertDB(self.realm, nssdir=dirname,
subject_base=self.subject_base)
with ipaldap.LDAPClient.from_realm(self.realm) as conn:
conn.external_bind()
self.export_ca_certs_nssdb(dsdb, self.ca_is_configured, conn)
def __add_default_layout(self):
self._ldap_mod("bootstrap-template.ldif", self.sub_dict)
def __add_delegation_layout(self):
self._ldap_mod("delegation.ldif", self.sub_dict)
def __add_replication_acis(self):
self._ldap_mod("replica-acis.ldif", self.sub_dict)
def __setup_s4u2proxy(self):
def __add_principal(last_cn, principal, self):
dn = DN(('cn', last_cn), ('cn', 's4u2proxy'),
('cn', 'etc'), self.suffix)
value = '{principal}/{fqdn}@{realm}'.format(fqdn=self.fqdn,
realm=self.realm,
principal=principal)
entry = api.Backend.ldap2.get_entry(dn, ['memberPrincipal'])
try:
entry['memberPrincipal'].append(value)
api.Backend.ldap2.update_entry(entry)
except errors.EmptyModlist:
pass
__add_principal('ipa-http-delegation', 'HTTP', self)
__add_principal('ipa-ldap-delegation-targets', 'ldap', self)
def __create_indices(self):
self._ldap_update(["20-indices.update"])
def __certmap_conf(self):
write_certmap_conf(self.realm, self.ca_subject)
sysupgrade.set_upgrade_state(
'certmap.conf',
'subject_base',
str(self.subject_base)
)
def __enable_sasl_mapping_fallback(self):
self._ldap_mod("sasl-mapping-fallback.ldif", self.sub_dict)
def add_hbac(self):
self._ldap_mod("default-hbac.ldif", self.sub_dict)
def change_admin_password(self, password):
logger.debug("Changing admin password")
dir_ipa = paths.VAR_LIB_IPA
with tempfile.NamedTemporaryFile("w", dir=dir_ipa) as dmpwdfile, \
tempfile.NamedTemporaryFile("w", dir=dir_ipa) as admpwdfile:
dmpwdfile.write(self.dm_password)
dmpwdfile.flush()
admpwdfile.write(password)
admpwdfile.flush()
args = [paths.LDAPPASSWD, "-H", "ldap://{}".format(self.fqdn),
"-ZZ", "-x", "-D", str(DN(('cn', 'Directory Manager'))),
"-y", dmpwdfile.name, "-T", admpwdfile.name,
str(DN(('uid', 'admin'), ('cn', 'users'), ('cn', 'accounts'), self.suffix))]
try:
env = {'LDAPTLS_CACERTDIR': os.path.dirname(paths.IPA_CA_CRT),
'LDAPTLS_CACERT': paths.IPA_CA_CRT}
ipautil.run(args, env=env)
logger.debug("ldappasswd done")
except ipautil.CalledProcessError as e:
print("Unable to set admin password", e)
logger.debug("Unable to set admin password %s", e)
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring directory server")
enabled = self.restore_state("enabled")
# Just eat this state if it exists
self.restore_state("running")
try:
self.fstore.restore_file(paths.LIMITS_CONF)
except ValueError as error:
logger.debug("%s: %s", paths.LIMITS_CONF, error)
try:
self.fstore.restore_file(paths.SYSCONFIG_DIRSRV)
except ValueError as error:
logger.debug("%s: %s", paths.SYSCONFIG_DIRSRV, error)
# disabled during IPA installation
if enabled:
logger.debug("Re-enabling instance of Directory Server")
self.enable()
serverid = self.restore_state("serverid")
if serverid is not None:
# What if this fails? Then what?
self.stop_tracking_certificates(serverid)
logger.debug("Removing DS instance %s", serverid)
try:
remove_ds_instance(serverid)
except ipautil.CalledProcessError:
logger.error("Failed to remove DS instance. You may "
"need to remove instance data manually")
destfile = paths.SLAPD_INSTANCE_SYSTEMD_IPA_ENV_TEMPLATE % (
serverid
)
ipautil.remove_file(destfile)
ipautil.remove_directory(os.path.dirname(destfile))
else:
logger.error("Failed to remove DS instance. No serverid present "
"in sysrestore file.")
ipautil.remove_keytab(paths.DS_KEYTAB)
ipautil.remove_ccache(run_as=DS_USER)
        if serverid is None:
            # no serverid was recorded in sysrestore; fall back to deriving
            # it from the realm so the cleanup below still works
            serverid = ipaldap.realm_to_serverid(self.realm)
        # Remove scripts dir
        scripts = paths.VAR_LIB_DIRSRV_INSTANCE_SCRIPTS_TEMPLATE % (
            serverid)
        ipautil.rmtree(scripts)
        # remove systemd unit file
        unitfile = paths.SLAPD_INSTANCE_SYSTEMD_IPA_ENV_TEMPLATE % (
            serverid
        )
        ipautil.remove_file(unitfile)
        try:
            os.rmdir(os.path.dirname(unitfile))
        except OSError:
            # not empty
            pass
# Just eat this state
self.restore_state("user_exists")
# Make sure some upgrade-related state is removed. This could cause
# re-installation problems.
self.restore_state('nsslapd-port')
self.restore_state('nsslapd-security')
self.restore_state('nsslapd-ldapiautobind')
# If any dirsrv instances remain after we've removed ours then
# (re)start them.
for ds_instance in get_ds_instances():
try:
services.knownservices.dirsrv.restart(ds_instance, wait=False)
except Exception as e:
logger.error(
'Unable to restart DS instance %s: %s', ds_instance, e)
def get_server_cert_nickname(self, serverid=None):
"""
Retrieve the nickname of the server cert used by dirsrv.
The method directly reads the dse.ldif to find the attribute
nsSSLPersonalitySSL of cn=RSA,cn=encryption,cn=config because
LDAP is not always accessible when we need to get the nickname
(for instance during uninstall).
"""
if serverid is None:
serverid = self.get_state("serverid")
if serverid is not None:
dirname = config_dirname(serverid)
config_file = os.path.join(dirname, "dse.ldif")
rsa_dn = "cn=RSA,cn=encryption,cn=config"
with open(config_file, "r") as in_file:
parser = upgradeinstance.GetEntryFromLDIF(
in_file,
entries_dn=[rsa_dn])
parser.parse()
try:
config_entry = parser.get_results()[rsa_dn]
nickname = config_entry["nsSSLPersonalitySSL"][0]
return nickname.decode('utf-8')
except (KeyError, IndexError):
logger.error("Unable to find server cert nickname in %s",
config_file)
logger.debug("Falling back to nickname Server-Cert")
return 'Server-Cert'
def stop_tracking_certificates(self, serverid=None):
if serverid is None:
serverid = self.get_state("serverid")
if serverid is not None:
nickname = self.get_server_cert_nickname(serverid)
# drop the trailing / off the config_dirname so the directory
# will match what is in certmonger
dirname = config_dirname(serverid)[:-1]
dsdb = certs.CertDB(self.realm, nssdir=dirname)
dsdb.untrack_server_cert(nickname)
def start_tracking_certificates(self, serverid):
nickname = self.get_server_cert_nickname(serverid)
dirname = config_dirname(serverid)[:-1]
dsdb = certs.CertDB(self.realm, nssdir=dirname)
if dsdb.is_ipa_issued_cert(api, nickname):
dsdb.track_server_cert(
nickname,
self.principal,
password_file=dsdb.passwd_fname,
command='restart_dirsrv %s' % serverid,
profile=dogtag.DEFAULT_PROFILE)
else:
logger.debug("Will not track DS server certificate %s as it is "
"not issued by IPA", nickname)
# we could probably move this function into the service.Service
# class - it's very generic - all we need is a way to get an
# instance of a particular Service
def add_ca_cert(self, cacert_fname, cacert_name=''):
"""Add a CA certificate to the directory server cert db. We
first have to shut down the directory server in case it has
opened the cert db read-only. Then we use the CertDB class
to add the CA cert. We have to provide a nickname, and we
do not use 'IPA CA' since that's the default, so
we use 'Imported CA' if none specified. Then we restart
the server."""
# first make sure we have a valid cacert_fname
try:
if not os.access(cacert_fname, os.R_OK):
logger.critical("The given CA cert file named [%s] could not "
"be read", cacert_fname)
return False
except OSError as e:
logger.critical("The given CA cert file named [%s] could not "
"be read: %s", cacert_fname, str(e))
return False
# ok - ca cert file can be read
# shutdown the server
self.stop()
dirname = config_dirname(
ipaldap.realm_to_serverid(self.realm))
certdb = certs.CertDB(
self.realm,
nssdir=dirname,
subject_base=self.subject_base,
ca_subject=self.ca_subject,
)
        if not cacert_name:
cacert_name = "Imported CA"
# we can't pass in the nickname, so we set the instance variable
certdb.cacert_name = cacert_name
status = True
try:
certdb.load_cacert(cacert_fname, EXTERNAL_CA_TRUST_FLAGS)
except ipautil.CalledProcessError as e:
logger.critical("Error importing CA cert file named [%s]: %s",
cacert_fname, str(e))
status = False
# restart the directory server
self.start()
return status
def __add_sudo_binduser(self):
self._ldap_mod("sudobind.ldif", self.sub_dict)
def __add_automember_config(self):
self._ldap_mod("automember.ldif", self.sub_dict)
def __add_replica_automember_config(self):
self._ldap_mod("replica-automember.ldif", self.sub_dict)
def __add_range_check_plugin(self):
self._ldap_mod("range-check-conf.ldif", self.sub_dict)
def _add_sidgen_plugin(self):
"""
Add sidgen directory server plugin configuration if it does not already exist.
"""
self.add_sidgen_plugin(self.sub_dict['SUFFIX'])
def add_sidgen_plugin(self, suffix):
"""
Add sidgen plugin configuration only if it does not already exist.
"""
dn = DN('cn=IPA SIDGEN,cn=plugins,cn=config')
try:
api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
self._ldap_mod('ipa-sidgen-conf.ldif', dict(SUFFIX=suffix))
else:
logger.debug("sidgen plugin is already configured")
def _add_extdom_plugin(self):
"""
Add directory server configuration for the extdom extended operation.
"""
self.add_extdom_plugin(self.sub_dict['SUFFIX'])
def add_extdom_plugin(self, suffix):
"""
Add extdom configuration if it does not already exist.
"""
dn = DN('cn=ipa_extdom_extop,cn=plugins,cn=config')
try:
api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
self._ldap_mod('ipa-extdom-extop-conf.ldif', dict(SUFFIX=suffix))
else:
logger.debug("extdom plugin is already configured")
def find_subject_base(self):
"""
Try to find the current value of certificate subject base.
1) Look in sysupgrade first
2) If no value is found there, look in DS (start DS if necessary)
3) If all fails, log loudly and return None
        Note that this method can only be executed AFTER the ipa server
        is configured, the api has been initialized elsewhere, and
        a ticket has already been acquired.
"""
logger.debug(
'Trying to find certificate subject base in sysupgrade')
subject_base = sysupgrade.get_upgrade_state(
'certmap.conf', 'subject_base')
if subject_base:
logger.debug(
'Found certificate subject base in sysupgrade: %s',
subject_base)
return subject_base
logger.debug(
'Unable to find certificate subject base in sysupgrade')
logger.debug(
'Trying to find certificate subject base in DS')
ds_is_running = is_ds_running()
if not ds_is_running:
try:
self.start()
ds_is_running = True
except ipautil.CalledProcessError as e:
logger.error('Cannot start DS to find certificate '
'subject base: %s', e)
if ds_is_running:
try:
ret = api.Command['config_show']()
subject_base = str(
ret['result']['ipacertificatesubjectbase'][0])
logger.debug(
'Found certificate subject base in DS: %s', subject_base)
except errors.PublicError as e:
logger.error('Cannot connect to DS to find certificate '
'subject base: %s', e)
if subject_base:
return subject_base
logger.debug('Unable to find certificate subject base in certmap.conf')
return None
def __set_domain_level(self):
# Create global domain level entry and set the domain level
if self.domainlevel is not None:
self._ldap_mod("domainlevel.ldif", self.sub_dict)
def write_certmap_conf(realm, ca_subject):
"""(Re)write certmap.conf with given CA subject DN."""
serverid = ipaldap.realm_to_serverid(realm)
ds_dirname = config_dirname(serverid)
certmap_filename = os.path.join(ds_dirname, "certmap.conf")
shutil.copyfile(
os.path.join(paths.USR_SHARE_IPA_DIR, "certmap.conf.template"),
certmap_filename)
installutils.update_file(
certmap_filename,
'$ISSUER_DN', # lgtm [py/regex/unmatchable-dollar]
str(ca_subject)
)
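

# Hypothetical usage of write_certmap_conf() (not in the original file):
# regenerate certmap.conf after the CA subject changes, then restart the DS
# instance so the new mapping takes effect.
def _refresh_certmap(realm, ca_subject):
    write_certmap_conf(realm, ca_subject)
    services.knownservices.dirsrv.restart(
        ipaldap.realm_to_serverid(realm), wait=True)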
| 52,804 | Python | .py | 1,169 | 33.88024 | 113 | 0.600739 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |

16,750 | ipa_restore.py | freeipa_freeipa/ipaserver/install/ipa_restore.py |
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, print_function
import logging
import optparse # pylint: disable=deprecated-module
import os
import shutil
import sys
import tempfile
import time
import ldif
import itertools
import six
from ipaclient.install.client import update_ipa_nssdb
from ipalib import api, errors
from ipalib.constants import FQDN
from ipapython import version, ipautil
from ipapython.ipautil import run, user_input
from ipapython import admintool, certdb
from ipapython.dn import DN
from ipaserver.install.replication import (wait_for_task, ReplicationManager,
get_cs_replication_manager)
from ipaserver.install import installutils, ldapupdate
from ipaserver.install import dsinstance, httpinstance, cainstance, krbinstance
from ipaserver.masters import get_masters
from ipapython import ipaldap
import ipapython.errors
from ipaplatform.constants import constants
from ipaplatform.tasks import tasks
from ipaplatform import services
from ipaplatform.paths import paths
from lib389.cli_ctl.dblib import run_dbscan
try:
from ipaserver.install import adtrustinstance
except ImportError:
adtrustinstance = None
# pylint: disable=import-error
if six.PY3:
# The SafeConfigParser class has been renamed to ConfigParser in Py3
from configparser import ConfigParser as SafeConfigParser
else:
from ConfigParser import SafeConfigParser
# pylint: enable=import-error
logger = logging.getLogger(__name__)
backends = []  # global to avoid running dbscan multiple times
def get_backends(db_dir):
"""Retrieve the set of backends directly from the current database"""
global backends
if backends:
return backends
output = run_dbscan(['-L', db_dir])
output = output.replace(db_dir + '/', '')
output = output.split('\n')
for line in output:
if '/' not in line:
continue
backends.append(line.split('/')[0].strip().lower())
backends = set(backends)
if 'changelog' in backends:
backends.remove('changelog')
return backends
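

# Illustrative only (helper name assumed): check that the 'userroot' backend
# exists for the local IPA instance before attempting a data-only restore.
# The instance name is derived the same way Restore.get_connection() does it.
def _has_userroot_backend():
    instance = ipaldap.realm_to_serverid(api.env.realm)
    db_dir = paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE % (instance, "")
    return 'userroot' in get_backends(db_dir)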
def recursive_chown(path, uid, gid):
'''
Change ownership of all files and directories in a path.
'''
for root, dirs, files in os.walk(path):
for dir in dirs:
os.chown(os.path.join(root, dir), uid, gid)
os.chmod(os.path.join(root, dir), 0o750)
for file in files:
os.chown(os.path.join(root, file), uid, gid)
os.chmod(os.path.join(root, file), 0o640)
def decrypt_file(tmpdir, filename):
source = filename
(dest, ext) = os.path.splitext(filename)
if ext != '.gpg':
raise admintool.ScriptError('Trying to decrypt a non-gpg file')
dest = os.path.basename(dest)
dest = os.path.join(tmpdir, dest)
args = [
paths.GPG2,
'--batch',
'--output', dest,
'--decrypt', source,
]
result = run(args, raiseonerr=False)
if result.returncode != 0:
raise admintool.ScriptError('gpg failed: %s' % result.error_log)
return dest
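

# Hedged example (file name argument is purely illustrative): decrypt a
# GPG-protected backup tarball into a scratch directory before extraction.
# A custom GnuPG home can be selected through the GNUPGHOME environment
# variable, as noted in Restore.validate_options() below.
def _decrypt_backup_example(encrypted_path):
    tmpdir = tempfile.mkdtemp("ipa")
    try:
        return decrypt_file(tmpdir, encrypted_path)
    except admintool.ScriptError as e:
        logger.error("decryption failed: %s", e)
        raise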
class RemoveRUVParser(ldif.LDIFParser):
def __init__(self, input_file, writer):
ldif.LDIFParser.__init__(self, input_file)
self.writer = writer
def handle(self, dn, entry):
objectclass = None
nsuniqueid = None
for name, value in entry.items():
name = name.lower()
if name == 'objectclass':
objectclass = [x.lower() for x in value]
elif name == 'nsuniqueid':
nsuniqueid = [x.lower() for x in value]
if (
objectclass and nsuniqueid and
b'nstombstone' in objectclass and
b'ffffffff-ffffffff-ffffffff-ffffffff' in nsuniqueid
):
logger.debug("Removing RUV entry %s", dn)
return
self.writer.unparse(dn, entry)
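

# Sketch of how the parser is driven (the same pattern ldif2db() uses below):
# stream an input LDIF through RemoveRUVParser so the RUV tombstone entry is
# dropped while every other entry is written out unchanged. File names are
# hypothetical.
def _strip_ruv(src_path, dst_path):
    with open(dst_path, 'w') as out_file:
        writer = ldif.LDIFWriter(out_file)
        with open(src_path, 'rb') as in_file:
            RemoveRUVParser(in_file, writer).parse()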
class Restore(admintool.AdminTool):
command_name = 'ipa-restore'
log_file_name = paths.IPARESTORE_LOG
usage = "%prog [options] backup"
description = "Restore IPA files and databases."
    # directories and files listed here will be removed from the filesystem
    # before files from the backup are copied
DIRS_TO_BE_REMOVED = [
paths.DNSSEC_TOKENS_DIR,
]
FILES_TO_BE_REMOVED = []
# files listed here cannot be removed and these files will be
# replaced with zero-length files
FILES_TO_BE_CLEARED = [
paths.HTTPD_NSS_CONF
]
def __init__(self, options, args):
super(Restore, self).__init__(options, args)
self._conn = None
@classmethod
def add_options(cls, parser):
super(Restore, cls).add_options(parser, debug_option=True)
parser.add_option(
"-p", "--password", dest="password",
help="Directory Manager password")
parser.add_option(
"--gpg-keyring", dest="gpg_keyring",
help=optparse.SUPPRESS_HELP)
parser.add_option(
"--data", dest="data_only", action="store_true",
default=False, help="Restore only the data")
parser.add_option(
"--online", dest="online", action="store_true",
default=False,
help="Perform the LDAP restores online, for data only.")
parser.add_option(
"--instance", dest="instance",
help="The 389-ds instance to restore (defaults to all found)")
parser.add_option(
"--backend", dest="backend",
help="The backend to restore within the instance or instances")
parser.add_option(
'--no-logs', dest="no_logs", action="store_true",
default=False, help="Do not restore log files from the backup")
parser.add_option(
'-U', '--unattended', dest="unattended",
action="store_true", default=False,
help="Unattended restoration never prompts the user")
def setup_logging(self, log_file_mode='a'):
super(Restore, self).setup_logging(log_file_mode='a')
def validate_options(self):
parser = self.option_parser
options = self.options
super(Restore, self).validate_options(needs_root=True)
if len(self.args) < 1:
parser.error("must provide the backup to restore")
elif len(self.args) > 1:
parser.error("must provide exactly one name for the backup")
dirname = self.args[0]
if not os.path.isabs(dirname):
dirname = os.path.join(paths.IPA_BACKUP_DIR, dirname)
if not os.path.isdir(dirname):
parser.error("must provide path to backup directory")
if options.gpg_keyring:
print(
"--gpg-keyring is no longer supported, use GNUPGHOME "
"environment variable to use a custom GnuPG2 directory.",
file=sys.stderr
)
def ask_for_options(self):
options = self.options
super(Restore, self).ask_for_options()
# no IPA config means we are reinstalling from nothing so
# there is no need for the DM password
if not os.path.exists(paths.IPA_DEFAULT_CONF):
return
# get the directory manager password
self.dirman_password = options.password
if not options.password:
if not options.unattended:
self.dirman_password = installutils.read_password(
"Directory Manager (existing master)",
confirm=False, validate=False)
if self.dirman_password is None:
raise admintool.ScriptError(
"Directory Manager password required")
def enable_server(self):
"""Make sure the current server is marked as enabled"""
if not api.Backend.ldap2.isconnected():
api.Backend.ldap2.connect()
try:
api.Command.server_state(api.env.host, state='enabled')
except errors.EmptyModlist:
pass
def run(self):
options = self.options
super(Restore, self).run()
self.backup_dir = self.args[0]
if not os.path.isabs(self.backup_dir):
self.backup_dir = os.path.join(paths.IPA_BACKUP_DIR, self.backup_dir)
logger.info("Preparing restore from %s on %s",
self.backup_dir, FQDN)
self.header = os.path.join(self.backup_dir, 'header')
try:
self.read_header()
except IOError as e:
raise admintool.ScriptError("Cannot read backup metadata: %s" % e)
if options.data_only:
restore_type = 'DATA'
else:
restore_type = self.backup_type
# These checks would normally be in the validate method but
# we need to know the type of backup we're dealing with.
if restore_type == 'FULL':
if options.online:
raise admintool.ScriptError(
"File restoration cannot be done online")
if options.instance or options.backend:
raise admintool.ScriptError(
"Restore must be in data-only mode when restoring a "
"specific instance or backend")
else:
installutils.check_server_configuration()
self.init_api()
if options.instance:
instance_dir = (paths.VAR_LIB_SLAPD_INSTANCE_DIR_TEMPLATE %
options.instance)
if not os.path.exists(instance_dir):
raise admintool.ScriptError(
"Instance %s does not exist" % options.instance)
self.instances = [options.instance]
if options.backend:
for instance in self.instances:
db_dir = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
(instance, ""))
backends = get_backends(db_dir)
if options.backend.lower() in backends:
break
else:
raise admintool.ScriptError(
"Backend %s does not exist" % options.backend)
self.backends = [options.backend]
missing_backends = []
for instance, backend in itertools.product(self.instances,
self.backends):
db_dir = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
(instance, ""))
backends = get_backends(db_dir)
if backend.lower() not in backends:
missing_backends.append(backend)
if missing_backends:
raise admintool.ScriptError(
"Cannot restore a data backup into an empty system. "
"Missing backend(s) %s" % ', '.join(missing_backends)
)
logger.info("Performing %s restore from %s backup",
restore_type, self.backup_type)
if self.backup_host != FQDN:
raise admintool.ScriptError(
"Host name %s does not match backup name %s" %
(FQDN, self.backup_host))
if self.backup_ipa_version != str(version.VERSION):
logger.warning(
"Restoring data from a different release of IPA.\n"
"Data is version %s.\n"
"Server is running %s.",
self.backup_ipa_version, str(version.VERSION))
if (not options.unattended and
not user_input("Continue to restore?", False)):
raise admintool.ScriptError("Aborted")
        # Check whether the optional dependencies for extra features in the
        # backup have been installed
if restore_type == 'FULL':
if 'ADTRUST' in self.backup_services:
if not adtrustinstance or not adtrustinstance.check_inst():
raise admintool.ScriptError(
"Backup includes AD trust feature, it requires '{}' "
"package. Please install the package and "
"run ipa-restore again.".format(
constants.IPA_ADTRUST_PACKAGE_NAME
)
)
if 'DNS' in self.backup_services:
if not os.path.isfile(paths.IPA_DNS_INSTALL):
raise admintool.ScriptError(
"Backup includes Integrated DNS feature, it requires "
"'{}' package. Please install the package and "
"run ipa-restore again.".format(
constants.IPA_DNS_PACKAGE_NAME
)
)
# Temporary directory for decrypting files before restoring
self.top_dir = tempfile.mkdtemp("ipa")
constants.DS_USER.chown(self.top_dir)
os.chmod(self.top_dir, 0o750)
self.dir = os.path.join(self.top_dir, "ipa")
os.mkdir(self.dir)
os.chmod(self.dir, 0o750)
constants.DS_USER.chown(self.dir)
logger.info("Temporary setting umask to 022")
old_umask = os.umask(0o022)
try:
dirsrv = services.knownservices.dirsrv
self.extract_backup()
if restore_type == 'FULL':
self.restore_default_conf()
self.init_api(confdir=self.dir + paths.ETC_IPA)
databases = []
for instance in self.instances:
for backend in self.backends:
database = (instance, backend)
ldiffile = os.path.join(self.dir, '%s-%s.ldif' % database)
if os.path.exists(ldiffile):
databases.append(database)
else:
logger.warning(
"LDIF file '%s-%s.ldif' not found in backup",
instance, backend)
if options.instance:
for instance, backend in databases:
if instance == options.instance:
break
else:
raise admintool.ScriptError(
"Instance %s not found in backup" % options.instance)
if options.backend:
for instance, backend in databases:
if backend == options.backend:
break
else:
raise admintool.ScriptError(
"Backend %s not found in backup" % options.backend)
# Big fat warning
if (not options.unattended and
not user_input("Restoring data will overwrite existing live data. Continue to restore?", False)):
raise admintool.ScriptError("Aborted")
logger.info(
"Each master will individually need to be re-initialized or")
logger.info(
"re-created from this one. The replication agreements on")
logger.info(
"masters running IPA 3.1 or earlier will need to be manually")
logger.info(
"re-enabled. See the man page for details.")
logger.info("Disabling all replication.")
self.disable_agreements()
if restore_type != 'FULL':
if not options.online:
logger.info('Stopping Directory Server')
dirsrv.stop(capture_output=False)
else:
logger.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
logger.info('Stopping IPA services')
result = run([paths.IPACTL, 'stop'], raiseonerr=False)
if result.returncode not in [0, 6]:
logger.warning('Stopping IPA failed: %s', result.error_log)
self.restore_selinux_booleans()
http = httpinstance.HTTPInstance()
# We do either a full file restore or we restore data.
if restore_type == 'FULL':
self.remove_old_files()
self.clear_old_files()
self.cert_restore_prepare()
self.file_restore(options.no_logs)
self.cert_restore()
if 'CA' in self.backup_services:
self.__create_dogtag_log_dirs()
# Always restore the data from ldif
# We need to restore both userRoot and ipaca.
for instance, backend in databases:
self.ldif2db(instance, backend, online=options.online)
if restore_type != 'FULL':
if not options.online:
logger.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
                # restore access control configuration
auth_backup_path = os.path.join(paths.VAR_LIB_IPA, 'auth_backup')
if os.path.exists(auth_backup_path):
tasks.restore_auth_configuration(auth_backup_path)
                # explicitly enable then disable the pki tomcatd service to
                # re-register its instance. FIXME, this is really weird.
services.knownservices.pki_tomcatd.enable()
services.knownservices.pki_tomcatd.disable()
logger.info('Restarting GSS-proxy')
gssproxy = services.service('gssproxy', api)
gssproxy.reload_or_restart()
logger.info('Starting IPA services')
run([paths.IPACTL, 'start'])
logger.info('Restarting SSSD')
sssd = services.service('sssd', api)
sssd.restart()
logger.info('Restarting oddjobd')
oddjobd = services.service('oddjobd', api)
if not oddjobd.is_enabled():
logger.info("Enabling oddjobd")
oddjobd.enable()
oddjobd.start()
http.remove_httpd_ccaches()
# update autobind configuration in case uid/gid have changed
ld = ldapupdate.LDAPUpdate(api=api)
autobind_update = os.path.join(
paths.UPDATES_DIR, "49-autobind-services.update"
)
ld.update([autobind_update])
# have the daemons pick up their restored configs
tasks.systemd_daemon_reload()
# Restart IPA a final time.
# Starting then restarting is necessary to make sure some
# daemons like httpd are restarted
# (https://pagure.io/freeipa/issue/8226).
logger.info('Restarting IPA services')
result = run([paths.IPACTL, 'restart'], raiseonerr=False)
if result.returncode != 0:
logger.error('Restarting IPA failed: %s', result.error_log)
self.enable_server()
finally:
shutil.rmtree(self.top_dir)
logger.info("Restoring umask to %s", old_umask)
os.umask(old_umask)
def get_connection(self):
'''
Create an ldapi connection and bind to it using autobind as root.
'''
instance_name = ipaldap.realm_to_serverid(api.env.realm)
if not services.knownservices.dirsrv.is_running(instance_name):
raise admintool.ScriptError(
"directory server instance is not running/configured"
)
if self._conn is not None:
return self._conn
self._conn = ipaldap.LDAPClient.from_realm(api.env.realm)
try:
self._conn.external_bind()
except Exception as e:
raise admintool.ScriptError('Unable to bind to LDAP server: %s'
% e)
return self._conn
def disable_agreements(self):
'''
Find all replication agreements on all masters and disable them.
Warn very loudly about any agreements/masters we cannot contact.
'''
try:
conn = self.get_connection()
except Exception as e:
logger.error('Unable to get connection, skipping disabling '
'agreements: %s', e)
return
masters = get_masters(conn)
for master in masters:
if master == api.env.host:
continue
try:
repl = ReplicationManager(api.env.realm, master,
self.dirman_password)
except Exception as e:
logger.critical("Unable to disable agreement on %s: %s",
master, e)
continue
master_dn = DN(('cn', master), api.env.container_masters,
api.env.basedn)
try:
services = repl.conn.get_entries(master_dn,
repl.conn.SCOPE_ONELEVEL)
except errors.NotFound:
continue
services_cns = [s.single_value['cn'] for s in services]
host_entries = repl.find_ipa_replication_agreements()
hosts = [rep.single_value.get('nsds5replicahost')
for rep in host_entries]
for host in hosts:
logger.info('Disabling replication agreement on %s to %s',
master, host)
repl.disable_agreement(host)
if 'CA' in services_cns:
try:
repl = get_cs_replication_manager(api.env.realm, master,
self.dirman_password)
except Exception as e:
logger.critical("Unable to disable agreement on %s: %s",
master, e)
continue
host_entries = repl.find_ipa_replication_agreements()
hosts = [rep.single_value.get('nsds5replicahost')
for rep in host_entries]
for host in hosts:
logger.info('Disabling CA replication agreement on %s to '
'%s', master, host)
repl.hostnames = [master, host]
repl.disable_agreement(host)
def ldif2db(self, instance, backend, online=True):
'''
        Restore an LDIF backup of the data in this instance.
        If executed online, create a task and wait for it to complete.
'''
logger.info('Restoring from %s in %s', backend, instance)
cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))
ldifdir = paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance
ldifname = '%s-%s.ldif' % (instance, backend)
ldiffile = os.path.join(ldifdir, ldifname)
srcldiffile = os.path.join(self.dir, ldifname)
if not os.path.exists(ldifdir):
os.mkdir(ldifdir)
os.chmod(ldifdir, 0o770)
constants.DS_USER.chown(ldifdir)
ipautil.backup_file(ldiffile)
with open(ldiffile, 'w') as out_file:
ldif_writer = ldif.LDIFWriter(out_file)
with open(srcldiffile, 'rb') as in_file:
ldif_parser = RemoveRUVParser(in_file, ldif_writer)
ldif_parser.parse()
# Make sure the modified ldiffile is owned by DS_USER
constants.DS_USER.chown(ldiffile)
if online:
conn = self.get_connection()
ent = conn.make_entry(
dn,
{
'objectClass': ['top', 'extensibleObject'],
'cn': [cn],
'nsFilename': [ldiffile],
'nsUseOneFile': ['true'],
}
)
ent['nsInstance'] = [backend]
try:
conn.add_entry(ent)
except Exception as e:
logger.error("Unable to bind to LDAP server: %s", e)
return
logger.info("Waiting for LDIF to finish")
wait_for_task(conn, dn)
else:
template_dir = paths.VAR_LOG_DIRSRV_INSTANCE_TEMPLATE % instance
try:
os.makedirs(template_dir)
except OSError:
pass
constants.DS_USER.chown(template_dir)
os.chmod(template_dir, 0o770)
# Restore SELinux context of template_dir
tasks.restore_context(template_dir)
args = [paths.DSCTL,
instance,
'ldif2db',
backend,
ldiffile]
result = run(args, raiseonerr=False)
if result.returncode != 0:
logger.critical("ldif2db failed: %s", result.error_log)
def bak2db(self, instance, backend, online=True):
'''
Restore a BAK backup of the data and changelog in this instance.
        For an offline restore the backend argument is not used; all
        backends are restored.
        If executed online, create a task and wait for it to complete.
        'instance' here is a loaded term. It can mean either a separate
389-ds install instance or a separate 389-ds backend. We only need
to treat ipaca specially.
'''
if backend is not None:
logger.info('Restoring %s in %s', backend, instance)
else:
logger.info('Restoring %s', instance)
cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'restore'), ('cn', 'tasks'), ('cn', 'config'))
if online:
conn = self.get_connection()
ent = conn.make_entry(
dn,
{
'objectClass': ['top', 'extensibleObject'],
'cn': [cn],
'nsArchiveDir': [os.path.join(self.dir, instance)],
'nsDatabaseType': ['ldbm database'],
}
)
if backend is not None:
ent['nsInstance'] = [backend]
try:
conn.add_entry(ent)
except Exception as e:
raise admintool.ScriptError('Unable to bind to LDAP server: %s'
% e)
logger.info("Waiting for restore to finish")
wait_for_task(conn, dn)
else:
args = [paths.DSCTL,
instance,
'bak2db',
os.path.join(self.dir, instance)]
result = run(args, raiseonerr=False)
if result.returncode != 0:
logger.critical("bak2db failed: %s", result.error_log)
def restore_default_conf(self):
'''
        Restore paths.IPA_DEFAULT_CONF to a temporary directory.
        The primary purpose of this method is to obtain the configuration
        needed for api finalization when restoring IPA after an uninstall.
'''
args = ['tar',
'--xattrs',
'--selinux',
'-xzf',
os.path.join(self.dir, 'files.tar'),
paths.IPA_DEFAULT_CONF[1:],
]
result = run(args, raiseonerr=False, cwd=self.dir)
if result.returncode != 0:
logger.critical('Restoring %s failed: %s',
paths.IPA_DEFAULT_CONF, result.error_log)
def remove_old_files(self):
"""
        Removes all directories, files and temporary files that should be
        removed before backup files are copied, to prevent errors.
"""
for d in self.DIRS_TO_BE_REMOVED:
try:
shutil.rmtree(d)
except OSError as e:
if e.errno != 2: # 2: dir does not exist
logger.warning("Could not remove directory: %s (%s)", d, e)
for f in self.FILES_TO_BE_REMOVED:
try:
os.remove(f)
except OSError as e:
if e.errno != 2: # 2: file does not exist
logger.warning("Could not remove file: %s (%s)", f, e)
def clear_old_files(self):
"""
        Replace existing files that cannot be removed with zero-length
        files before backup
"""
for f in self.FILES_TO_BE_CLEARED:
if os.access(f, os.W_OK):
open(f, 'w').close()
else:
logger.warning('Could not open file for writing: %s', f)
def file_restore(self, nologs=False):
'''
Restore all the files in the tarball.
This MUST be done offline because we directly backup the 389-ds
databases.
'''
logger.info("Restoring files")
args = ['tar',
'--xattrs',
'--selinux',
'-xzf',
os.path.join(self.dir, 'files.tar')
]
if nologs:
args.append('--exclude')
args.append('var/log')
result = run(args, cwd='/', raiseonerr=False)
if result.returncode != 0:
logger.critical('Restoring files failed: %s', result.error_log)
def read_header(self):
'''
        Read the backup file header that contains the metadata about
        this particular backup.
'''
config = SafeConfigParser()
config.read(self.header)
self.backup_type = config.get('ipa', 'type')
self.backup_time = config.get('ipa', 'time')
self.backup_host = config.get('ipa', 'host')
self.backup_ipa_version = config.get('ipa', 'ipa_version')
self.backup_version = config.get('ipa', 'version')
        # we can assume the returned object is a string and has a .split()
        # method
self.backup_services = config.get('ipa', 'services').split(',')
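
# Illustrative sketch, not part of the module above: read_header() parses a
# plain ini file. A hypothetical header with invented values, parsed the
# same way:
from configparser import ConfigParser as _ConfigParser
_example_header = """\
[ipa]
type = FULL
time = 2024-01-01T00:00:00
host = server.ipa.example
ipa_version = 4.11.0
version = 1
services = DS,HTTP,KDC
"""
_cfg = _ConfigParser()
_cfg.read_string(_example_header)
assert _cfg.get('ipa', 'services').split(',') == ['DS', 'HTTP', 'KDC']
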
def extract_backup(self):
'''
Extract the contents of the tarball backup into a temporary location,
decrypting if necessary.
'''
encrypt = False
filename = None
if self.backup_type == 'FULL':
filename = os.path.join(self.backup_dir, 'ipa-full.tar')
else:
filename = os.path.join(self.backup_dir, 'ipa-data.tar')
if not os.path.exists(filename):
if not os.path.exists(filename + '.gpg'):
raise admintool.ScriptError('Unable to find backup file in %s' % self.backup_dir)
else:
filename = filename + '.gpg'
encrypt = True
if encrypt:
logger.info('Decrypting %s', filename)
filename = decrypt_file(self.dir, filename)
args = ['tar',
'--xattrs',
'--selinux',
'-xzf',
filename,
'.'
]
run(args, cwd=self.dir)
constants.DS_USER.chown(self.top_dir)
recursive_chown(
self.dir, constants.DS_USER.uid, constants.DS_USER.pgid
)
if encrypt:
# We can remove the decoded tarball
os.unlink(filename)
def __create_dogtag_log_dirs(self):
"""
If we are doing a full restore and the dogtag log directories do
not exist then tomcat will fail to start.
The directory is different depending on whether we have a d9-based
or a d10-based installation.
"""
dirs = []
# dogtag 10
if (os.path.exists(paths.VAR_LIB_PKI_TOMCAT_DIR) and
not os.path.exists(paths.TOMCAT_TOPLEVEL_DIR)):
dirs += [paths.TOMCAT_TOPLEVEL_DIR,
paths.TOMCAT_CA_DIR,
paths.TOMCAT_CA_ARCHIVE_DIR,
paths.TOMCAT_SIGNEDAUDIT_DIR]
try:
pent = constants.PKI_USER.entity
except KeyError:
logger.debug("No %s user exists, skipping CA directory creation",
constants.PKI_USER)
return
logger.debug('Creating log directories for dogtag')
for dir in dirs:
try:
logger.debug('Creating %s', dir)
os.mkdir(dir)
os.chmod(dir, 0o770)
os.chown(dir, pent.pw_uid, pent.pw_gid)
tasks.restore_context(dir)
except Exception as e:
# This isn't so fatal as to side-track the restore
logger.error('Problem with %s: %s', dir, e)
def restore_selinux_booleans(self):
bools = dict(constants.SELINUX_BOOLEAN_HTTPD)
if 'ADTRUST' in self.backup_services:
if adtrustinstance:
bools.update(constants.SELINUX_BOOLEAN_ADTRUST)
else:
logger.error(
'The AD trust package was not found, '
'not setting SELinux booleans.')
try:
tasks.set_selinux_booleans(bools)
except ipapython.errors.SetseboolError as e:
logger.error('%s', e)
def cert_restore_prepare(self):
cainstance.CAInstance().stop_tracking_certificates()
httpinstance.HTTPInstance().stop_tracking_certificates()
try:
dsinstance.DsInstance().stop_tracking_certificates(
ipaldap.realm_to_serverid(api.env.realm))
except (OSError, IOError):
# When IPA is not installed, DS NSS DB does not exist
pass
krbinstance.KrbInstance().stop_tracking_certs()
for basename in certdb.NSS_FILES:
filename = os.path.join(paths.IPA_NSSDB_DIR, basename)
try:
ipautil.backup_file(filename)
except OSError as e:
logger.error("Failed to backup %s: %s", filename, e)
tasks.remove_ca_certs_from_systemwide_ca_store()
def cert_restore(self):
try:
update_ipa_nssdb()
except RuntimeError as e:
logger.error("%s", e)
tasks.reload_systemwide_ca_store()
services.knownservices.certmonger.restart()
def init_api(self, **overrides):
overrides.setdefault('confdir', paths.ETC_IPA)
api.bootstrap(in_server=True, context='restore', **overrides)
api.finalize()
self.instances = [ipaldap.realm_to_serverid(api.env.realm)]
self.backends = ['userRoot', 'ipaca']
# no IPA config means we are reinstalling from nothing so
# there is nothing to test the DM password against.
if os.path.exists(paths.IPA_DEFAULT_CONF):
instance_name = ipapython.ipaldap.realm_to_serverid(api.env.realm)
if not services.knownservices.dirsrv.is_running(instance_name):
raise admintool.ScriptError(
"directory server instance is not running"
)
try:
ReplicationManager(api.env.realm, api.env.host,
self.dirman_password)
except errors.ACIError:
logger.error("Incorrect Directory Manager password provided")
raise
| 36,050 | Python | .py | 825 | 30.807273 | 113 | 0.56342 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam)
16,751 | dogtag.py | freeipa_freeipa/ipaserver/install/dogtag.py |
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Dogtag-based service installer module
"""
from ipalib.install import service
from ipalib.install.service import prepare_only, replica_install_only
from ipapython.install.core import knob
from ipaserver.install.dogtaginstance import PKIIniLoader
class DogtagInstallInterface(service.ServiceInstallInterface):
"""
Interface common to all Dogtag-based service installers
"""
ca_file = knob(
str, None,
description="location of CA PKCS#12 file",
cli_metavar='FILE',
)
ca_file = prepare_only(ca_file)
ca_file = replica_install_only(ca_file)
pki_config_override = knob(
str, None,
cli_names='--pki-config-override',
description="Path to ini file with config overrides.",
)
@pki_config_override.validator
def pki_config_override(self, value):
if value is not None:
PKIIniLoader.verify_pki_config_override(value)
| 999 | Python | .py | 30 | 28.333333 | 69 | 0.717256 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam)
16,752 | krainstance.py | freeipa_freeipa/ipaserver/install/krainstance.py |
# Authors: Ade Lee <alee@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import logging
import os
import shutil
import tempfile
import base64
from ipalib import api
from ipalib import x509
from ipalib.constants import KRA_TRACKING_REQS
from ipaplatform.paths import paths
from ipapython import directivesetter
from ipapython import ipautil
from ipapython.dn import DN
from ipaserver.install import cainstance
from ipaserver.install import installutils
from ipaserver.install.dogtaginstance import DogtagInstance
from ipaserver.install.ca import (
lookup_random_serial_number_version,
lookup_hsm_configuration
)
logger = logging.getLogger(__name__)
# When IPA is installed with DNS support, this CNAME should hold all IPA
# replicas with KRA configured
IPA_KRA_RECORD = "ipa-kra"
ADMIN_GROUPS = [
'Enterprise CA Administrators',
'Enterprise KRA Administrators',
'Security Domain Administrators'
]
KRA_BASEDN = DN(('o', 'kra'), ('o', 'ipaca'))
KRA_AGENT_DN = DN(('uid', 'ipakra'), ('ou', 'people'), KRA_BASEDN)
class KRAInstance(DogtagInstance):
"""
We assume that the CA has already been installed, and we use the
same tomcat instance to host both the CA and KRA.
The mod_nss database will contain the RA agent cert that will be used
to do authenticated requests against dogtag. The RA agent cert will
be the same for both the CA and KRA.
"""
# Mapping of nicknames for tracking requests, and the profile to
# use for that certificate. 'configure_renewal()' reads this
# dict. The profile MUST be specified.
tracking_reqs = KRA_TRACKING_REQS
def __init__(self, realm):
super(KRAInstance, self).__init__(
realm=realm,
subsystem="KRA",
service_desc="KRA server",
config=paths.KRA_CS_CFG_PATH,
)
def uninstall(self):
DogtagInstance.uninstall(self)
ipautil.remove_file(paths.KRACERT_P12)
def configure_instance(self, realm_name, host_name, dm_password,
admin_password, pkcs12_info=None, master_host=None,
subject_base=None, ca_subject=None,
promote=False, pki_config_override=None,
token_password=None):
"""Create a KRA instance.
To create a clone, pass in pkcs12_info.
"""
self.fqdn = host_name
self.dm_password = dm_password
self.admin_groups = ADMIN_GROUPS
self.admin_password = admin_password
self.pkcs12_info = pkcs12_info
if self.pkcs12_info is not None or promote:
self.clone = True
self.master_host = master_host
self.pki_config_override = pki_config_override
# The remaining token values are available via sysrestore
self.token_password = token_password
self.subject_base = \
subject_base or installutils.default_subject_base(realm_name)
# eagerly convert to DN to ensure validity
self.ca_subject = DN(ca_subject)
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(realm_name)
# Confirm that a KRA does not already exist
if self.is_installed():
raise RuntimeError(
"KRA already installed.")
# Confirm that a Dogtag 10 CA instance already exists
ca = cainstance.CAInstance(self.realm)
if not ca.is_installed():
raise RuntimeError(
"KRA configuration failed. "
"A Dogtag CA must be installed first")
if promote:
self.step("creating ACIs for admin", self.add_ipaca_aci)
self.step("creating installation admin user", self.setup_admin)
self.step("configuring KRA instance", self.__spawn_instance)
if not self.clone:
self.step("create KRA agent",
self.__create_kra_agent)
if promote:
self.step("destroying installation admin user",
self.teardown_admin)
self.step("enabling ephemeral requests", self.enable_ephemeral)
self.step("restarting KRA", self.restart_instance)
self.step("configure certmonger for renewals",
self.configure_certmonger_renewal_helpers)
self.step("configure certificate renewals", self.configure_renewal)
if not self.clone:
self.step("add vault container", self.__add_vault_container)
self.step("apply LDAP updates", self.__apply_updates)
self.step("enabling KRA instance", self.__enable_instance)
try:
self.start_creation(runtime=120)
finally:
self.clean_pkispawn_files()
def __spawn_instance(self):
"""
Create and configure a new KRA instance using pkispawn.
Creates a configuration file with IPA-specific
parameters and passes it to the base class to call pkispawn
"""
self.tmp_agent_db = tempfile.mkdtemp(
prefix="tmp-", dir=paths.VAR_LIB_IPA)
tmp_agent_pwd = ipautil.ipa_generate_password()
# Create a temporary file for the admin PKCS #12 file
(admin_p12_fd, admin_p12_file) = tempfile.mkstemp()
os.close(admin_p12_fd)
cfg = dict(
pki_issuing_ca_uri="https://{}".format(
ipautil.format_netloc(self.fqdn, 443)),
# Client security database
pki_client_database_dir=self.tmp_agent_db,
pki_client_database_password=tmp_agent_pwd,
pki_client_database_purge=True,
pki_client_pkcs12_password=self.admin_password,
pki_import_admin_cert=False,
pki_client_admin_cert_p12=admin_p12_file,
)
if lookup_random_serial_number_version(api) > 0:
cfg['pki_key_id_generator'] = 'random'
cfg['pki_request_id_generator'] = 'random'
else:
cfg['pki_key_id_generator'] = 'legacy'
cfg['pki_request_id_generator'] = 'legacy'
if not (os.path.isdir(paths.PKI_TOMCAT_ALIAS_DIR) and
os.path.isfile(paths.PKI_TOMCAT_PASSWORD_CONF)):
# generate pin which we know can be used for FIPS NSS database
pki_pin = ipautil.ipa_generate_password()
cfg['pki_server_database_password'] = pki_pin
else:
pki_pin = None
ca = cainstance.CAInstance(self.realm)
if ca.hsm_enabled:
cfg['pki_hsm_enable'] = True
cfg['pki_token_name'] = ca.token_name
cfg['pki_token_password'] = self.token_password
cfg['pki_sslserver_token'] = 'internal'
# Require OAEP for nfast devices as they do not support
# PKCS1v15.
(_unused, token_library_path) = lookup_hsm_configuration(api)
if 'nfast' in token_library_path:
cfg['pki_use_oaep_rsa_keywrap'] = True
p12_tmpfile_name = None
if self.clone:
krafile = self.pkcs12_info[0]
if krafile:
_p12_tmpfile_handle, p12_tmpfile_name = tempfile.mkstemp(
dir=paths.TMP
)
shutil.copy(krafile, p12_tmpfile_name)
self.service_user.chown(p12_tmpfile_name)
self._configure_clone(
cfg,
security_domain_hostname=self.fqdn,
clone_pkcs12_path=p12_tmpfile_name,
)
cfg.update(
pki_clone_setup_replication=False,
)
else:
# the admin cert file is needed for the first instance of KRA
cert = self.get_admin_cert()
# First make sure that the directory exists
parentdir = os.path.dirname(paths.ADMIN_CERT_PATH)
if not os.path.exists(parentdir):
os.makedirs(parentdir)
with open(paths.ADMIN_CERT_PATH, "wb") as admin_path:
admin_path.write(
base64.b64encode(cert.public_bytes(x509.Encoding.DER))
)
# Generate configuration file
config = self._create_spawn_config(cfg)
with tempfile.NamedTemporaryFile('w', delete=False) as f:
config.write(f)
self.service_user.chown(f.fileno())
cfg_file = f.name
nolog_list = [
self.dm_password, self.admin_password, pki_pin, tmp_agent_pwd
]
try:
DogtagInstance.spawn_instance(
self, cfg_file,
nolog_list=nolog_list
)
finally:
if p12_tmpfile_name:
os.remove(p12_tmpfile_name)
os.remove(cfg_file)
os.remove(admin_p12_file)
if config.getboolean(
self.subsystem, 'pki_backup_keys', fallback=True
):
shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)
logger.debug("completed creating KRA instance")
def __create_kra_agent(self):
"""
Create KRA agent, assign a certificate, and add the user to
the appropriate groups for accessing KRA services.
"""
conn = api.Backend.ldap2
# get RA agent certificate
cert = x509.load_certificate_from_file(paths.RA_AGENT_PEM)
# create ipakra user with RA agent certificate
entry = conn.make_entry(
KRA_AGENT_DN,
objectClass=['top', 'person', 'organizationalPerson',
'inetOrgPerson', 'cmsuser'],
uid=["ipakra"],
sn=["IPA KRA User"],
cn=["IPA KRA User"],
usertype=["undefined"],
userCertificate=[cert],
description=['2;%s;%s;%s' % (
cert.serial_number,
self.ca_subject,
DN(('CN', 'IPA RA'), self.subject_base))])
conn.add_entry(entry)
# add ipakra user to Data Recovery Manager Agents group
group_dn = DN(
('cn', 'Data Recovery Manager Agents'), ('ou', 'groups'),
KRA_BASEDN)
conn.add_entry_to_group(KRA_AGENT_DN, group_dn, 'uniqueMember')
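
# Illustrative sketch, not part of the module above: Dogtag maps a client
# certificate to a cmsuser through the 'description' attribute, encoded as
# '2;<serial>;<issuer DN>;<subject DN>' as built in __create_kra_agent().
# The same encoding with invented example values:
_desc = '2;%s;%s;%s' % (
    42,                                        # certificate serial number
    'CN=Certificate Authority,O=IPA.EXAMPLE',  # issuer DN
    'CN=IPA RA,O=IPA.EXAMPLE',                 # subject DN
)
assert _desc.startswith('2;42;')
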
def __add_vault_container(self):
self._ldap_mod(
'vault.ldif', {'SUFFIX': self.suffix}, raise_on_err=True)
def __apply_updates(self):
self._ldap_update(['40-vault.update'])
def enable_ephemeral(self):
"""
Enable ephemeral KRA requests to reduce the number of LDAP
write operations.
"""
with installutils.stopped_service('pki-tomcatd', 'pki-tomcat'):
directivesetter.set_directive(
self.config,
'kra.ephemeralRequests',
'true', quotes=False, separator='=')
# A restart is required
def enable_oaep_wrap_algo(self):
"""
Enable KRA OAEP key wrap algorithm
"""
with installutils.stopped_service('pki-tomcatd', 'pki-tomcat'):
directivesetter.set_directive(
self.config,
'keyWrap.useOAEP',
'true', quotes=False, separator='=')
# A restart is required
def update_cert_config(self, nickname, cert):
"""
When renewing a KRA subsystem certificate the configuration file
needs to get the new certificate as well.
nickname is one of the known nicknames.
cert is a DER-encoded certificate.
"""
# The cert directive to update per nickname
directives = {
'auditSigningCert cert-pki-kra': 'kra.audit_signing.cert',
'storageCert cert-pki-kra': 'kra.storage.cert',
'transportCert cert-pki-kra': 'kra.transport.cert',
'subsystemCert cert-pki-kra': 'kra.subsystem.cert',
'Server-Cert cert-pki-ca': 'kra.sslserver.cert'}
if nickname in directives:
super(KRAInstance, self).update_cert_cs_cfg(
directives[nickname], cert)
def __enable_instance(self):
self.ldap_configure('KRA', self.fqdn, None, self.suffix)
| 12,835 | Python | .py | 301 | 32.601329 | 78 | 0.615557 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam)
16,753 | dogtaginstance.py | freeipa_freeipa/ipaserver/install/dogtaginstance.py |
# Authors: Ade Lee <alee@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import base64
import logging
import time
import typing
import ldap
import os
import shutil
import traceback
import dbus
import re
import lxml.etree
from configparser import DEFAULTSECT, ConfigParser, RawConfigParser
import six
import pki
from pki.client import PKIConnection
import pki.system
import pki.util
from ipalib import api, errors, x509
from ipalib.install import certmonger
from ipalib.constants import CA_DBUS_TIMEOUT, IPA_CA_RECORD, RENEWAL_CA_NAME
from ipaplatform import services
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipapython import directivesetter
from ipapython import ipaldap
from ipapython import ipautil
from ipapython.dn import DN
from ipaserver.install import service
from ipaserver.install import sysupgrade
from ipaserver.install import replication
from ipaserver.install.installutils import stopped_service
logger = logging.getLogger(__name__)
INTERNAL_TOKEN = "internal"
OU_GROUPS_DN = DN(('ou', 'groups'), ('o', 'ipaca'))
def _person_dn(uid):
return DN(('uid', uid), ('ou', 'people'), ('o', 'ipaca'))
def _group_dn(group):
return DN(('cn', group), OU_GROUPS_DN)
def get_security_domain():
"""
Get the security domain from the REST interface on the local Dogtag CA
This function will succeed if the local dogtag CA is up.
"""
connection = PKIConnection(
protocol='https',
hostname=api.env.ca_host,
port='8443',
cert_paths=paths.IPA_CA_CRT
)
domain_client = pki.system.SecurityDomainClient(connection)
info = domain_client.get_domain_info()
return info
def is_installing_replica(sys_type):
"""
We expect only one of each type of Dogtag subsystem in an IPA deployment.
That means that if a subsystem of the specified type has already been
deployed - and therefore appears in the security domain - then we must be
installing a replica.
"""
info = get_security_domain()
try:
sys_list = info.subsystems[sys_type]
return len(sys_list.hosts) > 0
except KeyError:
return False
class DogtagInstance(service.Service):
"""
This is the base class for a Dogtag 10+ instance, which uses a
shared tomcat instance and DS to host the relevant subsystems.
It contains functions that will be common to installations of the
CA, KRA, and eventually TKS and TPS.
"""
# Mapping of nicknames for tracking requests, and the profile to
# use for that certificate. 'configure_renewal()' reads this
# dict. The profile MUST be specified.
tracking_reqs = dict()
# HSM state is shared between CA and KRA
hsm_sstore = 'pki_hsm'
# override token for specific nicknames
token_names = dict()
def get_token_name(self, nickname):
"""Look up token name for nickname."""
return self.token_names.get(nickname, self.token_name)
groups_aci = (
b'(targetfilter="(objectClass=groupOfUniqueNames)")'
b'(targetattr="cn || description || objectclass || uniquemember")'
b'(version 3.0; acl "Allow users from o=ipaca to read groups"; '
b'allow (read, search, compare) '
b'userdn="ldap:///uid=*,ou=people,o=ipaca";)'
)
def __init__(self, realm, subsystem, service_desc, host_name=None,
nss_db=paths.PKI_TOMCAT_ALIAS_DIR, service_prefix=None,
config=None):
"""Initializer"""
super(DogtagInstance, self).__init__(
'pki-tomcatd',
service_desc=service_desc,
realm_name=realm,
service_user=constants.PKI_USER,
service_prefix=service_prefix
)
self.admin_password = None
self.fqdn = host_name
self.pkcs12_info = None
self.clone = False
self.basedn = None
self.admin_user = "admin"
self.admin_dn = _person_dn(self.admin_user)
self.admin_groups = None
self.tmp_agent_db = None
self.subsystem = subsystem
# replication parameters
self.master_host = None
self.master_replication_port = 389
self.nss_db = nss_db
self.config = config # Path to CS.cfg
# filled out by configure_instance
self.pki_config_override = None
self.ca_subject = None
self.subject_base = None
self.ajp_secret = None
def is_installed(self):
"""
Determine if subsystem instance has been installed.
Returns True/False
"""
try:
result = ipautil.run(
['pki-server', 'subsystem-show', self.subsystem.lower()],
capture_output=True)
# parse the command output
return 'Enabled: ' in result.output
except ipautil.CalledProcessError:
return False
def spawn_instance(self, cfg_file, nolog_list=()):
"""
Create and configure a new Dogtag instance using pkispawn.
Passes in a configuration file with IPA-specific
parameters.
"""
subsystem = self.subsystem
spawn_env = os.environ.copy()
timeout = str(api.env.startup_timeout)
spawn_env["PKISPAWN_STARTUP_TIMEOUT_SECONDS"] = timeout
args = [paths.PKISPAWN,
"-s", subsystem,
"-f", cfg_file,
"--debug"]
# specify --log-file <path> on PKI 11.0.0 or later
pki_version = pki.util.Version(pki.specification_version())
if pki_version >= pki.util.Version("11.0.0"):
timestamp = time.strftime(
"%Y%m%d%H%M%S",
time.localtime(time.time()))
log_file = os.path.join(
paths.VAR_LOG_PKI_DIR,
"pki-%s-spawn.%s.log" % (self.subsystem.lower(), timestamp))
args.extend(["--log-file", log_file])
with open(cfg_file) as f:
logger.debug(
'Contents of pkispawn configuration file (%s):\n%s',
cfg_file, ipautil.nolog_replace(f.read(), nolog_list))
try:
ipautil.run(args, nolog=nolog_list, env=spawn_env)
except ipautil.CalledProcessError as e:
self.handle_setup_error(e)
def clean_pkispawn_files(self):
if self.tmp_agent_db is not None:
logger.debug("Removing %s", self.tmp_agent_db)
shutil.rmtree(self.tmp_agent_db, ignore_errors=True)
client_dir = os.path.join(
'/root/.dogtag/pki-tomcat/', self.subsystem.lower())
logger.debug("Removing %s", client_dir)
shutil.rmtree(client_dir, ignore_errors=True)
def restart_instance(self):
self.restart('pki-tomcat')
def start_instance(self):
self.start('pki-tomcat')
def stop_instance(self):
try:
self.stop('pki-tomcat')
except Exception:
logger.debug("%s", traceback.format_exc())
logger.critical(
"Failed to stop the Dogtag instance."
"See the installation log for details.")
def enable_client_auth_to_db(self):
"""
Enable client auth connection to the internal db.
"""
sub_system_nickname = "subsystemCert cert-pki-ca"
if self.token_name != INTERNAL_TOKEN:
# TODO: Dogtag 10.6.9 does not like "internal" prefix.
sub_system_nickname = '{}:{}'.format(
self.token_name, sub_system_nickname
)
with stopped_service('pki-tomcatd', 'pki-tomcat'):
directivesetter.set_directive(
self.config,
'authz.instance.DirAclAuthz.ldap.ldapauth.authtype',
'SslClientAuth', quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'authz.instance.DirAclAuthz.ldap.ldapauth.clientCertNickname',
sub_system_nickname, quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'authz.instance.DirAclAuthz.ldap.ldapconn.port', '636',
quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'authz.instance.DirAclAuthz.ldap.ldapconn.secureConn',
'true', quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'internaldb.ldapauth.authtype',
'SslClientAuth', quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'internaldb.ldapauth.clientCertNickname',
sub_system_nickname, quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'internaldb.ldapconn.port', '636', quotes=False, separator='=')
directivesetter.set_directive(
self.config,
'internaldb.ldapconn.secureConn', 'true', quotes=False,
separator='=')
# Remove internaldb password as is not needed anymore
directivesetter.set_directive(paths.PKI_TOMCAT_PASSWORD_CONF,
'internaldb', None, separator='=')
def uninstall(self):
if self.is_installed():
self.print_msg("Unconfiguring %s" % self.subsystem)
args = []
pki_version = pki.util.Version(pki.specification_version())
if self.subsystem == "ACME":
if pki_version < pki.util.Version("11.0.0"):
return
elif (
pki.util.Version("11.0.0") <= pki_version
< pki.util.Version("11.6.0")
):
args = ['pki-server', 'acme-remove']
else:
# fall through for PKI >= 11.6.0
pass
if not args:
args = [paths.PKIDESTROY,
"-i", "pki-tomcat", "--force",
"-s", self.subsystem]
# specify --log-file <path> on PKI 11.0.0 or later
if pki_version >= pki.util.Version("11.0.0"):
timestamp = time.strftime(
"%Y%m%d%H%M%S",
time.localtime(time.time()))
log_file = os.path.join(
paths.VAR_LOG_PKI_DIR,
"pki-%s-destroy.%s.log" %
(self.subsystem.lower(), timestamp))
args.extend(["--log-file", log_file])
if pki_version >= pki.util.Version("11.6.0"):
args.extend(["--remove-conf", "--remove-logs"])
try:
ipautil.run(args)
except ipautil.CalledProcessError as e:
logger.critical("failed to uninstall %s instance %s",
self.subsystem, e)
def _is_newer_tomcat_version(self, default=None):
try:
result = ipautil.run([paths.BIN_TOMCAT, "version"],
capture_output=True)
sn = re.search(
r'Server number:\s+([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)',
result.output)
if sn is None:
logger.info("tomcat version cannot be parsed, "
"default to pre-%s", default)
return False
v = tasks.parse_ipa_version(sn.group(1))
if v >= tasks.parse_ipa_version(default):
return True
except ipautil.CalledProcessError as e:
logger.info(
"failed to discover tomcat version, "
"default to pre-%s, error: %s",
default, str(e))
return False
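
# Illustrative sketch, not part of the module above: the version probe
# scrapes 'Server number: a.b.c.d' from `tomcat version` output. The
# parsing step in isolation, against invented output:
import re as _re
_output = "Server version: Apache Tomcat/9.0.87\nServer number:  9.0.87.0\n"
_m = _re.search(r'Server number:\s+([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', _output)
assert _m is not None and _m.group(1) == '9.0.87.0'
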
def secure_ajp_connector(self):
""" Update AJP connector to use a password protection """
server_xml = lxml.etree.parse(paths.PKI_TOMCAT_SERVER_XML)
doc = server_xml.getroot()
# no AJP connector means no need to update anything
connectors = doc.xpath('//Connector[@protocol="AJP/1.3"]')
if len(connectors) == 0:
return False
# Whether or not we should rewrite the tomcat server.xml file with
# our changes.
rewrite = False
# Detect tomcat version and choose the right option name
# pre-9.0.31.0 uses 'requiredSecret'
# 9.0.31.0 or later uses 'secret'
secretattr = 'requiredSecret'
oldattr = 'requiredSecret'
if self._is_newer_tomcat_version('9.0.31.0'):
secretattr = 'secret'
# AJP protocol is at version 1.3. With IPv4/IPv6 split, there might
# be multiple AJP adapters; update them all.
#
# First, iterate through all adapters and see if any of them have a
# secret value set.
for connector in connectors:
if not self._is_newer_tomcat_version('9.0.31.0'):
# For a time pki unconditionally added "secret" to the
# connector. Remove it prior to probing for the current
# secret.
if 'secret' in connector.attrib:
del connector.attrib['secret']
rewrite = True
if secretattr in connector.attrib or oldattr in connector.attrib:
# secret is already in place
#
# Perhaps, we need to synchronize it with Apache configuration
# or other AJP connector entries. Save it so we know we've
# found at least one. Because in our next loop we update the
# config value if incorrect, it is safe to overwrite
                # self.ajp_secret -- in the worst case, we'll create an
# entirely new value if this element happened to have an
# empty secret value. Plus, IPA is in charge of managing the
                # value for the httpd side of the AJP connection as well,
# which needs to happen after this call.
#
# The first secret found wins.
self.ajp_secret = connector.attrib.get(secretattr) or \
connector.attrib.get(oldattr)
break
# If no secret value was detected, create a single unique value.
if not self.ajp_secret:
# Generate password, don't use special chars to not break XML.
self.ajp_secret = ipautil.ipa_generate_password(special=None)
# Finally, iterate through them all again, upgrading adapter attribute
# and setting the secret value if missing or incorrect.
for connector in connectors:
if oldattr != secretattr and oldattr in connector.attrib:
# Sufficiently new Dogtag versions (10.9.0-a2) handle the
# upgrade for us; we need only to ensure that we're not both
# attempting to upgrade server.xml at the same time.
# Hopefully this is guaranteed for us.
connector.attrib[secretattr] = self.ajp_secret
del connector.attrib[oldattr]
rewrite = True
if (secretattr not in connector.attrib
or connector.attrib[secretattr] != self.ajp_secret):
# We hit this either when:
#
# 1. pkispawn was run on an older Dogtag version, or
# 2. there were multiple AJP adapters with mismatched
# secrets.
#
# Newer Dogtag versions will generate a random password
# during pkispawn. In the former scenario, it is always
# safe to change the AJP secret value. In the latter
# scenario we should always ensure the AJP connector is
                # the one we use with httpd, as we don't officially
# support multiple AJP adapters for non-IPA uses.
#
# In new Dogtag versions, Dogtag deploys separate IPv4 and
# IPv6 localhost adapters, which we should ensure have the
# same AJP secret for httpd's use.
connector.attrib[secretattr] = self.ajp_secret
rewrite = True
if rewrite:
with open(paths.PKI_TOMCAT_SERVER_XML, "wb") as fd:
server_xml.write(fd, pretty_print=True, encoding="utf-8")
os.fchmod(fd.fileno(), 0o660)
self.service_user.chown(fd.fileno())
return rewrite
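
# Illustrative sketch, not part of the module above: the rename-and-sync
# core of secure_ajp_connector() in isolation, assuming a tomcat new enough
# for the 'secret' attribute name and invented server.xml content:
import lxml.etree as _etree
_doc = _etree.fromstring(
    b'<Server>'
    b'<Connector protocol="AJP/1.3" address="127.0.0.1" requiredSecret="old"/>'
    b'<Connector protocol="AJP/1.3" address="::1"/>'
    b'</Server>')
_connectors = _doc.xpath('//Connector[@protocol="AJP/1.3"]')
# the first secret found wins, exactly as in the method above
_secret = next(
    (c.attrib[a] for c in _connectors
     for a in ('secret', 'requiredSecret') if c.attrib.get(a)),
    'freshly-generated-password')
for _c in _connectors:
    _c.attrib.pop('requiredSecret', None)  # upgrade the pre-9.0.31.0 name
    _c.attrib['secret'] = _secret          # keep every adapter in sync
assert all(_c.attrib['secret'] == 'old' for _c in _connectors)
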
def http_proxy(self):
""" Update the http proxy file """
template_filename = (
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa-pki-proxy.conf.template"))
sub_dict = dict(
DOGTAG_PORT=8009,
CLONE='' if self.clone else '#',
FQDN=self.fqdn,
DOGTAG_AJP_SECRET='',
)
if self.ajp_secret:
sub_dict['DOGTAG_AJP_SECRET'] = "secret={}".format(self.ajp_secret)
template = ipautil.template_file(template_filename, sub_dict)
with open(paths.HTTPD_IPA_PKI_PROXY_CONF, "w") as fd:
fd.write(template)
os.fchmod(fd.fileno(), 0o640)
@staticmethod
def configure_certmonger_renewal_helpers():
"""
Create a new CA type for certmonger that will retrieve updated
certificates from the dogtag master server.
"""
cmonger = services.knownservices.certmonger
cmonger.enable()
if not services.knownservices.dbus.is_running():
# some platforms protect dbus with RefuseManualStart=True
services.knownservices.dbus.start()
cmonger.start()
bus = dbus.SystemBus()
obj = bus.get_object('org.fedorahosted.certmonger',
'/org/fedorahosted/certmonger')
iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
for suffix, args in [
('', ''),
('-reuse', ' --reuse-existing'),
('-selfsigned', ' --force-self-signed'),
]:
name = RENEWAL_CA_NAME + suffix
path = iface.find_ca_by_nickname(name)
if not path:
command = paths.DOGTAG_IPA_CA_RENEW_AGENT_SUBMIT + args
iface.add_known_ca(
name,
command,
dbus.Array([], dbus.Signature('s')),
# Give dogtag extra time to generate cert
timeout=CA_DBUS_TIMEOUT)
def __get_pin(self, token_name=INTERNAL_TOKEN):
try:
return certmonger.get_pin(token_name)
except IOError as e:
logger.debug(
'Unable to determine PIN for the Dogtag instance: %s', e)
raise RuntimeError(e)
def configure_renewal(self):
""" Configure certmonger to renew system certs """
if self.hsm_enabled:
nss_user = constants.PKI_USER
else:
nss_user = None
for nickname, profile in self.tracking_reqs.items():
token_name = self.get_token_name(nickname)
pin = self.__get_pin(token_name)
try:
certmonger.start_tracking(
certpath=self.nss_db,
ca=RENEWAL_CA_NAME,
nickname=nickname,
token_name=token_name,
pin=pin,
pre_command='stop_pkicad',
post_command='renew_ca_cert "%s"' % nickname,
profile=profile,
nss_user=nss_user,
)
except RuntimeError as e:
logger.error(
"certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self):
"""
Stop tracking our certificates. Called on uninstall. Also called
during upgrade to fix discrepancies.
"""
logger.debug(
"Configuring certmonger to stop tracking system certificates "
"for %s", self.subsystem)
cmonger = services.knownservices.certmonger
if not services.knownservices.dbus.is_running():
# some platforms protect dbus with RefuseManualStart=True
services.knownservices.dbus.start()
cmonger.start()
for nickname in self.tracking_reqs:
try:
certmonger.stop_tracking(
self.nss_db, nickname=nickname)
except RuntimeError as e:
logger.error(
"certmonger failed to stop tracking certificate: %s", e)
def update_cert_cs_cfg(self, directive, cert):
"""
When renewing a Dogtag subsystem certificate the configuration file
needs to get the new certificate as well.
``directive`` is the directive to update in CS.cfg
cert is IPACertificate.
cs_cfg is the path to the CS.cfg file
"""
with stopped_service('pki-tomcatd', 'pki-tomcat'):
directivesetter.set_directive(
self.config,
directive,
# the cert must be only the base64 string without headers
(base64.b64encode(cert.public_bytes(x509.Encoding.DER))
.decode('ascii')),
quotes=False,
separator='=')
def get_admin_cert(self):
"""
Get the certificate for the admin user by checking the ldap entry
for the user. There should be only one certificate per user.
"""
logger.debug('Trying to find the certificate for the admin user')
conn = None
try:
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
entry_attrs = conn.get_entry(self.admin_dn, ['usercertificate'])
admin_cert = entry_attrs.get('usercertificate')[0]
# TODO(edewata) Add check to warn if there is more than one cert.
finally:
if conn is not None:
conn.unbind()
return admin_cert
def handle_setup_error(self, e):
logger.critical("Failed to configure %s instance",
self.subsystem)
logger.critical("See the installation logs and the following "
"files/directories for more information:")
logger.critical(" %s", paths.TOMCAT_TOPLEVEL_DIR)
raise RuntimeError(
"%s configuration failed." % self.subsystem
) from None
def add_ipaca_aci(self):
"""Add ACI to allow ipaca users to read their own group information
Dogtag users aren't allowed to enumerate their own groups. The
        setup_admin() method needs the permission to wait until all group
        information has been replicated.
"""
dn = OU_GROUPS_DN
mod = [(ldap.MOD_ADD, 'aci', [self.groups_aci])]
try:
api.Backend.ldap2.modify_s(dn, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
logger.debug("%s already has ACI to read group information", dn)
else:
logger.debug("Added ACI to read groups to %s", dn)
@staticmethod
def ensure_group(group: str, desc: str) -> None:
"""Create the group if it does not exist."""
dn = _group_dn(group)
entry = api.Backend.ldap2.make_entry(
dn,
objectclass=["top", "groupOfUniqueNames"],
cn=[group],
description=[desc],
)
try:
api.Backend.ldap2.add_entry(entry)
except errors.DuplicateEntry:
pass
@staticmethod
def create_user(
uid: str,
cn: str,
sn: str,
user_type: str,
groups: typing.Collection[str],
force: bool,
) -> typing.Optional[str]:
"""
Create the user entry with a random password, and add the user to
the given groups.
If such a user entry already exists, ``force`` determines whether the
existing entry is replaced, or if the operation fails.
        **Does not wait for replication**. This should be done by the
        caller, if necessary.
Return the password if entry was created, otherwise ``None``.
"""
user_types = {'adminType', 'agentType'}
if user_type not in user_types:
raise ValueError(f"user_type must be in {user_types}")
# if entry already exists, delete (force=True) or fail
dn = _person_dn(uid)
try:
api.Backend.ldap2.get_entry(dn, ['uid'])
except errors.NotFound:
pass
else:
if force:
api.Backend.ldap2.delete_entry(dn)
else:
return None
# add user
password = ipautil.ipa_generate_password()
entry = api.Backend.ldap2.make_entry(
dn,
objectclass=[
"top", "person", "organizationalPerson",
"inetOrgPerson", "cmsuser",
],
uid=[uid],
cn=[cn],
sn=[sn],
usertype=[user_type],
userPassword=[password],
userstate=['1'],
)
api.Backend.ldap2.add_entry(entry)
# add to groups
for group in groups:
mod = [(ldap.MOD_ADD, 'uniqueMember', [dn])]
try:
api.Backend.ldap2.modify_s(_group_dn(group), mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass # already there, somehow
return password
@staticmethod
def delete_user(uid: str) -> bool:
"""
Delete the user, removing group memberships along the way.
Return True if user was deleted or False if user entry
did not exist.
"""
dn = _person_dn(uid)
if not api.Backend.ldap2.isconnected():
api.Backend.ldap2.connect()
# remove group memberships
try:
entries = api.Backend.ldap2.get_entries(
OU_GROUPS_DN, filter=f'(uniqueMember={dn})')
except errors.EmptyResult:
entries = []
except errors.NotFound:
# basedn not found; Dogtag is probably not installed.
# Let's ignore this and keep going.
entries = []
for entry in entries:
# remove the uniquemember value
entry['uniquemember'] = [
v for v in entry['uniquemember']
if DN(v) != dn
]
api.Backend.ldap2.update_entry(entry)
# delete user entry
try:
api.Backend.ldap2.delete_entry(dn)
except errors.NotFound:
return False
else:
return True
def setup_admin(self):
self.admin_user = "admin-%s" % self.fqdn
self.admin_password = ipautil.ipa_generate_password()
self.admin_dn = _person_dn(self.admin_user)
result = self.create_user(
uid=self.admin_user,
cn=self.admin_user,
sn=self.admin_user,
user_type='adminType',
groups=self.admin_groups,
force=True,
)
if result is None:
return None # something went wrong
else:
self.admin_password = result
# Now wait until the other server gets replicated this data
master_conn = ipaldap.LDAPClient.from_hostname_secure(
self.master_host
)
logger.debug(
"Waiting %s seconds for %s to appear on %s",
api.env.replication_wait_timeout, self.admin_dn, master_conn
)
deadline = time.time() + api.env.replication_wait_timeout
while time.time() < deadline:
time.sleep(1)
try:
master_conn.simple_bind(self.admin_dn, self.admin_password)
except errors.ACIError:
# user not replicated yet
pass
else:
logger.debug("Successfully logged in as %s", self.admin_dn)
break
else:
logger.error(
"Unable to log in as %s on %s", self.admin_dn, master_conn
)
logger.info(
"[hint] tune with replication_wait_timeout"
)
raise errors.NotFound(
reason="{} did not replicate to {}".format(
self.admin_dn, master_conn
)
)
# wait for group membership
for group_dn in (_group_dn(group) for group in self.admin_groups):
replication.wait_for_entry(
master_conn,
group_dn,
timeout=api.env.replication_wait_timeout,
attr='uniqueMember',
attrvalue=self.admin_dn
)
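
# Illustrative sketch, not part of the module above: the replication wait in
# setup_admin() relies on Python's while/else, where the else arm runs only
# when the deadline passes without a break. The pattern on its own:
import time as _t
def _wait_until(predicate, timeout, interval=0.01):
    deadline = _t.time() + timeout
    while _t.time() < deadline:
        if predicate():
            break
        _t.sleep(interval)
    else:
        raise TimeoutError('condition not met before the deadline')
_wait_until(lambda: True, timeout=1)  # succeeds on the first poll
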
def teardown_admin(self):
self.delete_user(self.admin_user)
def backup_config(self):
"""
Create a backup copy of CS.cfg
"""
config = self.config
bak = config + '.ipabkp'
if services.knownservices['pki_tomcatd'].is_running('pki-tomcat'):
raise RuntimeError(
"Dogtag must be stopped when creating backup of %s" % config)
shutil.copy(config, bak)
# shutil.copy() doesn't copy owner
s = os.stat(config)
os.chown(bak, s.st_uid, s.st_gid)
def reindex_task(self, force=False):
"""Reindex ipaca entries
pkispawn sometimes does not run its indextasks. This leads to slow
unindexed filters on attributes such as description, which is used
to log in with a certificate. Explicitly reindex attribute that
should have been reindexed by CA's indextasks.ldif.
See https://pagure.io/dogtagpki/issue/3083
"""
state_name = 'reindex_task'
if not force and sysupgrade.get_upgrade_state('dogtag', state_name):
return
cn = "indextask_ipaca_{}".format(int(time.time()))
dn = DN(
('cn', cn), ('cn', 'index'), ('cn', 'tasks'), ('cn', 'config')
)
entry = api.Backend.ldap2.make_entry(
dn,
objectClass=['top', 'extensibleObject'],
cn=[cn],
nsInstance=['ipaca'], # Dogtag PKI database
nsIndexAttribute=[
# from pki/base/ca/shared/conf/indextasks.ldif
'archivedBy', 'certstatus', 'clientId', 'dataType',
'dateOfCreate', 'description', 'duration', 'extension',
'issuedby', 'issuername', 'metaInfo', 'notafter',
'notbefore', 'ownername', 'publicKeyData', 'requestid',
'requestowner', 'requestsourceid', 'requeststate',
'requesttype', 'revInfo', 'revokedOn', 'revokedby',
'serialno', 'status', 'subjectname',
],
ttl=[10],
)
logger.debug('Creating ipaca reindex task %s', dn)
api.Backend.ldap2.add_entry(entry)
logger.debug('Waiting for task...')
exitcode = replication.wait_for_task(api.Backend.ldap2, dn)
logger.debug(
'Task %s has finished with exit code %i',
dn, exitcode
)
sysupgrade.set_upgrade_state('dogtag', state_name, True)
def set_hsm_state(self, config):
section_name = self.subsystem.upper()
assert section_name == 'CA'
if config.getboolean(section_name, 'pki_hsm_enable', fallback=False):
enable = True
token_name = config.get(section_name, 'pki_token_name')
else:
enable = False
token_name = INTERNAL_TOKEN
self.sstore.backup_state(self.hsm_sstore, "enabled", enable)
self.sstore.backup_state(self.hsm_sstore, "token_name", token_name)
def restore_hsm_state(self):
return (
self.sstore.restore_state(self.hsm_sstore, "enabled"),
self.sstore.restore_state(self.hsm_sstore, "token_name"),
)
@property
def hsm_enabled(self):
"""Is HSM support enabled?"""
return self.sstore.get_state(self.hsm_sstore, "enabled")
@property
def token_name(self):
"""HSM token name"""
return self.sstore.get_state(self.hsm_sstore, "token_name")
def _configure_clone(self, subsystem_config, security_domain_hostname,
clone_pkcs12_path):
subsystem_config.update(
# Security domain registration
pki_security_domain_hostname=security_domain_hostname,
pki_security_domain_https_port=443,
pki_security_domain_user=self.admin_user,
pki_security_domain_password=self.admin_password,
# Clone
pki_clone=True,
pki_clone_replication_security="TLS",
pki_clone_replication_master_port=self.master_replication_port,
pki_clone_replication_clone_port=389,
pki_clone_replicate_schema=False,
pki_clone_uri="https://%s" % ipautil.format_netloc(
self.master_host, 443),
)
if clone_pkcs12_path:
subsystem_config.update(
pki_clone_pkcs12_path=clone_pkcs12_path,
pki_clone_pkcs12_password=self.dm_password,
)
def _create_spawn_config(self, subsystem_config):
loader = PKIIniLoader(
subsystem=self.subsystem,
fqdn=self.fqdn,
domain=api.env.domain,
subject_base=self.subject_base,
ca_subject=self.ca_subject,
admin_user=self.admin_user,
admin_password=self.admin_password,
dm_password=self.dm_password,
pki_config_override=self.pki_config_override
)
return loader.create_spawn_config(subsystem_config)
class PKIIniLoader:
# supported subsystems
subsystems = ('CA', 'KRA')
# default, hard-coded, and immutable settings
ipaca_default = os.path.join(
paths.USR_SHARE_IPA_DIR, 'ipaca_default.ini'
)
# customizable settings
ipaca_customize = os.path.join(
paths.USR_SHARE_IPA_DIR, 'ipaca_customize.ini'
)
# keys that may be stored in a HSM token
token_stanzas = (
'pki_audit_signing_token',
'pki_subsystem_token',
'pki_ca_signing_token',
'pki_ocsp_signing_token',
'pki_storage_token',
'pki_transport_token',
)
# Set of immutable keys, initialized on demand
_immutable_keys = None
# Set of immutable config keys that are defined in dynamic code instead
# of ipaca_default config file.
_immutable_code_keys = frozenset({
# dogtaginstance
'pki_admin_password',
'pki_ds_password',
'pki_dns_domainname',
'pki_hostname',
'pki_subsystem',
'pki_subsystem_type',
# clone settings
'pki_security_domain_hostname',
'pki_security_domain_https_port',
'pki_security_domain_user',
'pki_security_domain_password',
'pki_clone',
'pki_clone_pkcs12_path',
'pki_clone_pkcs12_password',
'pki_clone_replication_security',
'pki_clone_replication_master_port',
'pki_clone_replication_clone_port',
'pki_clone_replicate_schema',
'pki_clone_uri',
# cainstance
'pki_ds_secure_connection',
'pki_server_database_password',
'pki_ds_create_new_db',
'pki_clone_setup_replication',
'pki_clone_reindex_data',
'pki_external',
'pki_ca_signing_csr_path',
'pki_ca_signing_cert_path',
'pki_cert_chain_path',
'pki_external_step_two',
# krainstance
'pki_issuing_ca_uri',
'pki_client_database_dir',
'pki_client_database_password',
'pki_client_database_purge',
'pki_client_pkcs12_password',
'pki_import_admin_cert',
'pki_client_admin_cert_p12',
})
def __init__(self, subsystem, fqdn, domain,
subject_base, ca_subject, admin_user, admin_password,
dm_password, pki_config_override=None):
self.pki_config_override = pki_config_override
self.defaults = dict(
# pretty much static
ipa_ca_pem_file=paths.IPA_CA_CRT,
pki_configuration_path=paths.PKI_CONFIGURATION,
# variable
ipa_ca_subject=ca_subject,
ipa_subject_base=subject_base,
ipa_fqdn=fqdn,
ipa_ocsp_uri="http://{}.{}/ca/ocsp".format(
IPA_CA_RECORD, ipautil.format_netloc(domain)),
ipa_admin_cert_p12=paths.DOGTAG_ADMIN_P12,
ipa_admin_user=admin_user,
pki_admin_password=admin_password,
pki_ds_password=dm_password,
# Dogtag's pkiparser defines these config vars by default:
pki_dns_domainname=domain,
pki_hostname=fqdn,
pki_subsystem=subsystem.upper(),
pki_subsystem_type=subsystem.lower(),
home_dir=os.path.expanduser("~"),
# for softhsm2 testing
softhsm2_so=paths.LIBSOFTHSM2_SO,
# Configure a more secure AJP password by default
ipa_ajp_secret=ipautil.ipa_generate_password(special=None),
# in FIPS mode use RSA-OAEP wrapping padding algo as default
fips_use_oaep_rsa_keywrap=tasks.is_fips_enabled()
)
@classmethod
def get_immutable_keys(cls):
"""Get set of immutable keys
Immutable keys are calculated from 'ipaca_default' config file
and known keys that are defined in code.
"""
if cls._immutable_keys is None:
immutable = set()
immutable.update(cls._immutable_code_keys)
cfg = RawConfigParser()
with open(cls.ipaca_default) as f:
cfg.read_file(f)
for section in cls.subsystems:
for k, _v in cfg.items(section, raw=True):
if k.startswith('pki_'):
immutable.add(k)
cls._immutable_keys = frozenset(immutable)
return cls._immutable_keys
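
# Illustrative sketch, not part of the module above: get_immutable_keys()
# unions the hard-coded key set with every pki_* option found in the default
# ini. The ini scan in isolation, over invented in-memory content:
from configparser import RawConfigParser as _RawConfigParser
_raw = _RawConfigParser()
_raw.read_string("[CA]\npki_example_option = 1\nother_option = 2\n")
_immutable = {k for k, _v in _raw.items('CA') if k.startswith('pki_')}
assert _immutable == {'pki_example_option'}
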
@classmethod
def verify_pki_config_override(cls, filename):
"""Verify pki config override file
* filename must be an absolute path to an existing file
* file must be a valid ini file
* ini file must not override immutable settings
TODO: The checker does not verify config interpolation values, yet.
The validator does not have access to all settings.
:param filename: path to pki.ini
"""
if not os.path.isfile(filename):
raise ValueError(
"Config file '{}' does not exist.".format(filename)
)
if not os.path.isabs(filename):
raise ValueError(
"Config file '{}' is not an absolute path.".format(filename)
)
try:
cfg = RawConfigParser()
with open(filename) as f:
cfg.read_file(f)
except Exception as e:
raise ValueError(
"Invalid config '{}': {}".format(filename, e)
)
immutable_keys = cls.get_immutable_keys()
invalid_keys = set()
sections = [cfg.default_section]
sections.extend(cls.subsystems)
for section in sections:
if not cfg.has_section(section):
continue
for k, _v in cfg.items(section, raw=True):
if k in immutable_keys:
invalid_keys.add(k)
if invalid_keys:
raise ValueError(
"'{}' overrides immutable options: {}".format(
filename, ', '.join(sorted(invalid_keys))
)
)
def _mangle_values(self, dct):
"""Stringify and quote % as %% to avoid interpolation errors
* booleans are converted to 'True', 'False'
* DN and numbers are converted to string
* None is turned into empty string ''
"""
result = {}
for k, v in dct.items():
if isinstance(v, (DN, bool, six.integer_types)):
v = six.text_type(v)
elif v is None:
v = ''
result[k] = v.replace('%', '%%')
return result
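
# Illustrative sketch, not part of the module above: _mangle_values()
# stringifies values and doubles '%' so that ConfigParser interpolation
# cannot misfire. A standalone equivalent over plain built-in types
# (the real method also handles DN objects):
def _mangle(dct):
    out = {}
    for k, v in dct.items():
        if isinstance(v, (bool, int)):
            v = str(v)
        elif v is None:
            v = ''
        out[k] = v.replace('%', '%%')
    return out
assert _mangle({'a': True, 'b': None, 'c': 'p%ss'}) == \
    {'a': 'True', 'b': '', 'c': 'p%%ss'}
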
def _get_default_config(self):
"""Load default config
:return: config parser, immutable keys
"""
defaults = self._mangle_values(self.defaults)
# create a config template with interpolation support
# read base config
cfgtpl = ConfigParser(defaults=defaults)
cfgtpl.optionxform = str
with open(self.ipaca_default) as f:
cfgtpl.read_file(f)
# overwrite defaults with our defaults
for key, value in defaults.items():
cfgtpl.set(DEFAULTSECT, key, value)
# all keys in default conf + known keys defined in code are
# considered immutable.
immutable_keys = set()
immutable_keys.update(self._immutable_code_keys)
for section_name in self.subsystems:
for k, _v in cfgtpl.items(section_name, raw=True):
immutable_keys.add(k)
return cfgtpl, immutable_keys
def _verify_immutable(self, config, immutable_settings, filename):
section_name = self.defaults['pki_subsystem']
errs = []
for key, isvalue in immutable_settings.items():
cfgvalue = config.get(section_name, key)
if isvalue != cfgvalue:
errs.append(f"{key}: '{cfgvalue}' != '{isvalue}'")
if errs:
raise ValueError(
'{} overrides immutable options:\n{}'.format(
filename, '\n'.join(errs)
)
)
def create_spawn_config(self, subsystem_config):
"""Create config instance
"""
section_name = self.defaults['pki_subsystem']
cfgtpl, immutable_keys = self._get_default_config()
# overwrite CA/KRA config with subsystem settings
subsystem_config = self._mangle_values(subsystem_config)
for key, value in subsystem_config.items():
cfgtpl.set(section_name, key, value)
# get a mapping of settings that cannot be modified by users
immutable_settings = {
k: v for k, v in cfgtpl.items(section_name)
if k in immutable_keys
}
# add ipaca_customize overlay,
# These are settings that can be modified by a user, too. We use
# ipaca_customize.ini to set sensible defaults.
with open(self.ipaca_customize) as f:
cfgtpl.read_file(f)
# load external overlay from command line
if self.pki_config_override is not None:
with open(self.pki_config_override) as f:
cfgtpl.read_file(f)
# verify again
self._verify_immutable(
cfgtpl, immutable_settings, self.pki_config_override
)
# key backup is not compatible with HSM support
if cfgtpl.getboolean(section_name, 'pki_hsm_enable', fallback=False):
cfgtpl.set(section_name, 'pki_backup_keys', 'False')
cfgtpl.set(section_name, 'pki_backup_password', '')
pki_token_name = cfgtpl.get(section_name, 'pki_token_name')
for stanza in self.token_stanzas:
if cfgtpl.has_option(section_name, stanza):
cfgtpl.set(section_name, stanza, pki_token_name)
# Next up, get rid of interpolation variables, DEFAULT,
# irrelevant sections and unused variables. Only the subsystem
# section is copied into a new raw config parser. A raw config
        # parser is necessary, because ConfigParser.write() writes passwords
        # containing '%' in a way that is not accepted by Dogtag.
config = RawConfigParser()
config.optionxform = str
config.add_section(section_name)
for key, value in sorted(cfgtpl.items(section=section_name)):
if key.startswith('pki_'):
config.set(section_name, key, value)
return config
def test():
import sys
sharedir = os.path.abspath(os.path.join(
        os.path.dirname(__file__),
os.pardir,
os.pardir,
'install',
'share',
))
class TestPKIIniLoader(PKIIniLoader):
ipaca_default = os.path.join(sharedir, 'ipaca_default.ini')
ipaca_customize = os.path.join(sharedir, 'ipaca_customize.ini')
override = os.path.join(sharedir, 'ipaca_softhsm2.ini')
base_settings = dict(
fqdn='replica.ipa.example',
domain='ipa.example',
subject_base='o=IPA,o=EXAMPLE',
ca_subject='cn=CA,o=IPA,o=EXAMPLE',
admin_user='admin',
admin_password='Secret1',
dm_password='Secret2',
pki_config_override=override,
)
for subsystem in TestPKIIniLoader.subsystems:
print('-' * 78)
loader = TestPKIIniLoader(subsystem=subsystem, **base_settings)
loader.verify_pki_config_override(loader.ipaca_customize)
loader.verify_pki_config_override(override)
config = loader.create_spawn_config({})
config.write(sys.stdout, False)
if __name__ == '__main__':
test()
| 46,232 | Python | .py | 1,112 | 30.516187 | 79 | 0.584924 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam)
16,754 | ipa_server_install.py | freeipa_freeipa/ipaserver/install/ipa_server_install.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
from ipapython.install import cli
from ipapython.install.core import extend_knob
from ipaplatform.paths import paths
from ipaserver.install.server import ServerMasterInstall
class CompatServerMasterInstall(ServerMasterInstall):
all_ip_addresses = False
nisdomain = None
no_nisdomain = False
no_sudo = False
request_cert = False
dm_password = extend_knob(
ServerMasterInstall.dm_password,
cli_names=['--ds-password', '-p'],
)
admin_password = ServerMasterInstall.admin_password
admin_password = extend_knob(
admin_password,
cli_names=list(admin_password.cli_names) + ['-a'],
)
ip_addresses = extend_knob(
ServerMasterInstall.ip_addresses,
description="Master Server IP Address. This option can be used "
"multiple times",
)
ServerInstall = cli.install_tool(
CompatServerMasterInstall,
command_name='ipa-server-install',
log_file_name=paths.IPASERVER_INSTALL_LOG,
console_format='%(message)s',
debug_option=True,
verbose=True,
uninstall_log_file_name=paths.IPASERVER_UNINSTALL_LOG,
)
def run():
ServerInstall.run_cli()
| 1,286 | Python | .py | 39 | 27.897436 | 72 | 0.718447 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam)
16,755 | odsexporterinstance.py | freeipa_freeipa/ipaserver/install/odsexporterinstance.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
import os
import ldap
from ipaserver.install import service
from ipaserver.install import installutils
from ipapython.dn import DN
from ipapython import directivesetter
from ipapython import ipautil
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipaplatform import services
from ipalib import errors, api
logger = logging.getLogger(__name__)
class ODSExporterInstance(service.Service):
def __init__(self, fstore=None):
super(ODSExporterInstance, self).__init__(
"ipa-ods-exporter",
service_desc="IPA OpenDNSSEC exporter daemon",
fstore=fstore,
keytab=paths.IPA_ODS_EXPORTER_KEYTAB,
service_prefix=u'ipa-ods-exporter'
)
self.enable_if_exists = False
suffix = ipautil.dn_attribute_property('_suffix')
def create_instance(self, fqdn, realm_name):
self.backup_state("enabled", self.is_enabled())
self.backup_state("running", self.is_running())
self.fqdn = fqdn
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(self.realm)
try:
self.stop()
except Exception:
pass
self.step("setting up DNS Key Exporter", self.__setup_key_exporter)
self.step("setting up kerberos principal", self.__setup_principal)
self.step("disabling default signer daemon", self.__disable_signerd)
self.step("starting DNS Key Exporter", self.__start)
self.step("configuring DNS Key Exporter to start on boot", self.__enable)
self.start_creation()
def __enable(self):
try:
self.ldap_configure('DNSKeyExporter', self.fqdn, None,
self.suffix)
except errors.DuplicateEntry:
logger.error("DNSKeyExporter service already exists")
def __setup_key_exporter(self):
directivesetter.set_directive(paths.SYSCONFIG_IPA_ODS_EXPORTER,
'SOFTHSM2_CONF',
paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
def __setup_principal(self):
assert constants.ODS_GROUP.gid is not None
for f in [paths.IPA_ODS_EXPORTER_CCACHE, self.keytab]:
try:
os.remove(f)
except OSError:
pass
installutils.kadmin_addprinc(self.principal)
# Store the keytab on disk
installutils.create_keytab(paths.IPA_ODS_EXPORTER_KEYTAB,
self.principal)
p = self.move_service(self.principal)
if p is None:
# the service has already been moved, perhaps we're doing a DNS reinstall
dns_exporter_principal_dn = DN(
('krbprincipalname', self.principal),
('cn', 'services'), ('cn', 'accounts'), self.suffix)
else:
dns_exporter_principal_dn = p
# Make sure access is strictly reserved to the ods user
os.chmod(self.keytab, 0o440)
os.chown(self.keytab, 0, constants.ODS_GROUP.gid)
dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'),
('cn', 'pbac'), self.suffix)
mod = [(ldap.MOD_ADD, 'member', dns_exporter_principal_dn)]
try:
api.Backend.ldap2.modify_s(dns_group, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
logger.critical("Could not modify principal's %s entry: %s",
dns_exporter_principal_dn, str(e))
raise
# limit-free connection
mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),
(ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]
try:
api.Backend.ldap2.modify_s(dns_exporter_principal_dn, mod)
except Exception as e:
logger.critical("Could not set principal's %s LDAP limits: %s",
dns_exporter_principal_dn, str(e))
raise
def __disable_signerd(self):
signerd_service = services.knownservices.ods_signerd
if self.get_state("singerd_running") is None:
self.backup_state("singerd_running", signerd_service.is_running())
if self.get_state("singerd_enabled") is None:
self.backup_state("singerd_enabled", signerd_service.is_enabled())
# disable default opendnssec signer daemon
signerd_service.stop()
signerd_service.mask()
def __start(self):
self.start()
def remove_service(self):
try:
api.Command.service_del(self.principal)
except errors.NotFound:
pass
def uninstall(self):
if not self.is_configured():
return
self.print_msg("Unconfiguring %s" % self.service_name)
# just eat states
self.restore_state("running")
self.restore_state("enabled")
# stop and disable service (IPA service, we do not need it anymore)
self.disable()
self.stop()
# restore state of dnssec default signer daemon
        signerd_enabled = self.restore_state("signerd_enabled")
        signerd_running = self.restore_state("signerd_running")
signerd_service = services.knownservices.ods_signerd
signerd_service.unmask()
# service was stopped and disabled by setup
if signerd_enabled:
signerd_service.enable()
if signerd_running:
signerd_service.start()
ipautil.remove_keytab(self.keytab)
ipautil.remove_ccache(ccache_path=paths.IPA_ODS_EXPORTER_CCACHE)
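# A minimal usage sketch, not part of the original module: this is roughly
# how an installer could drive ODSExporterInstance, assuming api is already
# initialized and connected. The fqdn and realm values are hypothetical.
def _example_create_ods_exporter():
    exporter = ODSExporterInstance(fstore=None)
    # create_instance() queues the setup steps defined above and runs them
    # via start_creation()
    exporter.create_instance('replica1.example.test', 'EXAMPLE.TEST')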
| 5,908
|
Python
|
.py
| 137
| 32.635036
| 85
| 0.616042
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,756
|
kra.py
|
freeipa_freeipa/ipaserver/install/kra.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
KRA installer module
"""
from __future__ import absolute_import
import logging
import os
from ipalib import api
from ipalib.kinit import kinit_keytab
from ipaplatform import services
from ipaplatform.paths import paths
from ipapython import ipautil
from ipapython.admintool import ScriptError
from ipapython.install.core import group
from ipaserver.install import ca, cainstance
from ipaserver.install import krainstance
from ipaserver.install import dsinstance
from ipaserver.install import installutils
from ipaserver.install import service as _service
from . import dogtag
logger = logging.getLogger(__name__)
def install_check(api, replica_config, options):
if replica_config is not None and not replica_config.setup_kra:
return
kra = krainstance.KRAInstance(api.env.realm)
if kra.is_installed():
raise RuntimeError("KRA is already installed.")
if not options.setup_ca:
if cainstance.is_ca_installed_locally():
if api.env.dogtag_version >= 10:
# correct dogtag version of CA installed
pass
else:
raise RuntimeError(
"Dogtag must be version 10.2 or above to install KRA")
else:
raise RuntimeError(
"Dogtag CA is not installed. Please install the CA first")
if replica_config is not None:
if not api.Command.kra_is_enabled()['result']:
raise RuntimeError(
"KRA is not installed on the master system. Please use "
"'ipa-kra-install' command to install the first instance.")
if api.env.ca_host is not None and api.env.ca_host != api.env.host:
raise RuntimeError(
"KRA can not be installed when 'ca_host' is overriden in "
"IPA configuration file.")
# There are three scenarios for installing a KRA
# 1. At install time of the initial server
# 2. Using ipa-kra-install
# 3. At install time of a replica
#
# These tests are done in reverse order. If we are doing a
# replica install we can check the remote CA.
#
    # If we are running ipa-kra-install then there must be a CA;
    # use that.
#
# If initial install we either have the token options or we don't.
cai = cainstance.CAInstance()
if replica_config is not None:
(token_name, token_library_path) = ca.lookup_hsm_configuration(api)
elif cai.is_configured() and cai.hsm_enabled:
(token_name, token_library_path) = ca.lookup_hsm_configuration(api)
elif 'token_name' in options.__dict__:
token_name = options.token_name
token_library_path = options.token_library_path
else:
token_name = None
if replica_config is not None:
if (
token_name
and options.token_password_file
and options.token_password
):
raise ScriptError(
"token-password and token-password-file are mutually exclusive"
)
if options.token_password_file:
with open(options.token_password_file, "r") as fd:
options.token_password = fd.readline().strip()
if (
token_name
and not options.token_password_file
and not options.token_password
):
if options.unattended:
raise ScriptError("HSM token password required")
token_password = installutils.read_password(
f"HSM token '{token_name}'", confirm=False
)
if token_password is None:
raise ScriptError("HSM token password required")
else:
options.token_password = token_password
if token_name:
ca.hsm_validator(token_name, token_library_path, options.token_password)
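# A minimal standalone sketch, not part of the original module, of the HSM
# token password precedence implemented above: explicit password and
# password file are mutually exclusive, a password file is read first, and
# interactive installs fall back to a prompt. The function name is
# hypothetical.
def _resolve_token_password_example(token_password, token_password_file,
                                    unattended):
    if token_password and token_password_file:
        raise ScriptError(
            "token-password and token-password-file are mutually exclusive")
    if token_password_file:
        with open(token_password_file, "r") as fd:
            return fd.readline().strip()
    if token_password:
        return token_password
    if unattended:
        raise ScriptError("HSM token password required")
    return installutils.read_password("HSM token", confirm=False)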
def install(api, replica_config, options, custodia):
if replica_config is None:
if not options.setup_kra:
return
realm_name = api.env.realm
dm_password = options.dm_password
host_name = api.env.host
subject_base = dsinstance.DsInstance().find_subject_base()
pkcs12_info = None
master_host = None
promote = False
else:
if not replica_config.setup_kra:
return
cai = cainstance.CAInstance()
if not cai.hsm_enabled:
krafile = os.path.join(replica_config.dir, 'kracert.p12')
with ipautil.private_ccache():
ccache = os.environ['KRB5CCNAME']
kinit_keytab(
'host/{env.host}@{env.realm}'.format(env=api.env),
paths.KRB5_KEYTAB,
ccache)
custodia.get_kra_keys(
krafile,
replica_config.dirman_password)
else:
krafile = None
realm_name = replica_config.realm_name
dm_password = replica_config.dirman_password
host_name = replica_config.host_name
subject_base = replica_config.subject_base
pkcs12_info = (krafile,)
master_host = replica_config.kra_host_name
promote = True
ca_subject = ca.lookup_ca_subject(api, subject_base)
kra = krainstance.KRAInstance(realm_name)
kra.configure_instance(
realm_name, host_name, dm_password, dm_password,
subject_base=subject_base,
ca_subject=ca_subject,
pkcs12_info=pkcs12_info,
master_host=master_host,
promote=promote,
pki_config_override=options.pki_config_override,
token_password=options.token_password
)
_service.print_msg("Restarting the directory server")
ds = dsinstance.DsInstance()
ds.restart()
kra.enable_client_auth_to_db()
# Restart apache for new proxy config file
services.knownservices.httpd.restart(capture_output=True)
# Restarted named to restore bind-dyndb-ldap operation, see
# https://pagure.io/freeipa/issue/5813
named = services.knownservices.named # alias for current named
if named.is_running():
named.restart(capture_output=True)
def uninstall_check(options):
"""IPA needs to be running so pkidestroy can unregister KRA"""
kra = krainstance.KRAInstance(api.env.realm)
if not kra.is_installed():
return
result = ipautil.run([paths.IPACTL, 'status'],
raiseonerr=False)
if result.returncode not in [0, 4]:
try:
logger.info(
"Starting services to unregister KRA from security domain")
ipautil.run([paths.IPACTL, 'start'])
except Exception:
logger.info("Re-starting IPA failed, continuing uninstall")
def uninstall():
kra = krainstance.KRAInstance(api.env.realm)
kra.stop_tracking_certificates()
if kra.is_installed():
kra.uninstall()
@group
class KRAInstallInterface(dogtag.DogtagInstallInterface):
"""
Interface of the KRA installer
Knobs defined here will be available in:
* ipa-server-install
* ipa-replica-prepare
* ipa-replica-install
* ipa-kra-install
"""
description = "KRA"
| 7,161
|
Python
|
.py
| 187
| 30.187166
| 80
| 0.651773
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,757
|
__init__.py
|
freeipa_freeipa/ipaserver/install/__init__.py
|
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
# see inline
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| 803
|
Python
|
.py
| 19
| 41.263158
| 71
| 0.769133
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,758
|
replication.py
|
freeipa_freeipa/ipaserver/install/replication.py
|
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import logging
import itertools
import re
import six
import time
import datetime
import sys
import os
import json
from random import randint
import ldap
from ipalib import api, errors
from ipalib.cli import textui
from ipalib.text import _
from ipapython import ipautil, ipaldap
from ipapython.admintool import ScriptError
from ipapython.dn import DN
from ipapython.ipaldap import ldap_initialize
from ipaplatform.paths import paths
from ipaserver.install import installutils
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# the default container used by AD for user entries
WIN_USER_CONTAINER = DN(('cn', 'Users'))
# the default container used by IPA for user entries
IPA_USER_CONTAINER = DN(('cn', 'users'), ('cn', 'accounts'))
PORT = 636
DEFAULT_PORT = 389
TIMEOUT = 120
REPL_MAN_DN = DN(('cn', 'replication manager'), ('cn', 'config'))
DNA_DN = DN(('cn', 'Posix IDs'), ('cn', 'Distributed Numeric Assignment Plugin'), ('cn', 'plugins'), ('cn', 'config'))
REPL_MANAGERS_CN = DN(('cn', 'replication managers'))
IPA_REPLICA = 1
WINSYNC = 2
# List of attributes that need to be excluded from replication initialization.
TOTAL_EXCLUDES = ('entryusn',
'krblastsuccessfulauth',
'krblastfailedauth',
'krbloginfailedcount',
'passwordgraceusertime',)
# List of attributes that need to be excluded from normal replication.
EXCLUDES = ('memberof', 'idnssoaserial') + TOTAL_EXCLUDES
# List of attributes that are not updated on empty replication
STRIP_ATTRS = ('modifiersName',
'modifyTimestamp',
'internalModifiersName',
'internalModifyTimestamp')
# settings for cn=replica,cn=$DB,cn=mapping tree,cn=config
# during replica installation
REPLICA_CREATION_SETTINGS = {
"nsds5ReplicaReleaseTimeout": ["20"],
"nsds5ReplicaBackoffMax": ["3"],
"nsDS5ReplicaBindDnGroupCheckInterval": ["2"]
}
# after replica installation
REPLICA_FINAL_SETTINGS = {
"nsds5ReplicaReleaseTimeout": ["60"],
"nsds5ReplicaBackoffMax": ["300"], # default
"nsDS5ReplicaBindDnGroupCheckInterval": ["60"]
}
def replica_conn_check(master_host, host_name, realm, check_ca,
dogtag_master_ds_port, admin_password=None,
principal="admin", ca_cert_file=None):
"""
Check the ports used by the replica both locally and remotely to be sure
that replication will work.
Does not return a value, will raise ScriptError on failure.
"""
print("Run connection check to master")
args = [paths.IPA_REPLICA_CONNCHECK, "--master", master_host,
"--auto-master-check", "--realm", realm,
"--hostname", host_name]
    nolog = tuple()
if principal is not None:
args.extend(["--principal", principal])
if admin_password:
args.extend(["--password", admin_password])
        nolog = (admin_password,)
if check_ca and dogtag_master_ds_port == 7389:
args.append('--check-ca')
if ca_cert_file:
args.extend(["--ca-cert-file", ca_cert_file])
result = ipautil.run(
args, raiseonerr=False, capture_output=False, nolog=nolog)
if result.returncode != 0:
raise ScriptError(
"Connection check failed!"
"\nSee /var/log/ipareplica-conncheck.log for more information."
"\nIf the check results are not valid it can be skipped with --skip-conncheck parameter.")
else:
print("Connection check OK")
def enable_replication_version_checking(realm, dirman_passwd):
"""
Check the replication version checking plugin. If it is not
enabled then enable it and restart 389-ds. If it is enabled
    then do nothing.
"""
conn = ipaldap.LDAPClient.from_realm(realm)
if dirman_passwd:
conn.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
bind_password=dirman_passwd)
else:
conn.gssapi_bind()
entry = conn.get_entry(DN(('cn', 'IPA Version Replication'),
('cn', 'plugins'),
('cn', 'config')))
if entry.single_value.get('nsslapd-pluginenabled') == 'off':
conn.modify_s(entry.dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'on')])
conn.unbind()
serverid = "-".join(realm.split("."))
installutils.restart_dirsrv(serverid)
else:
conn.unbind()
def wait_for_task(conn, dn):
"""Check task status
Task is complete when the nsTaskExitCode attr is set.
:return: the task's return code
"""
assert isinstance(dn, DN)
attrlist = [
'nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', 'nsTaskCurrentItem',
'nsTaskTotalItems']
while True:
entry = conn.get_entry(dn, attrlist)
if entry.single_value.get('nsTaskExitCode'):
exit_code = int(entry.single_value['nsTaskExitCode'])
break
time.sleep(1)
return exit_code
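# A minimal usage sketch, not part of the original module: 389-ds tasks are
# LDAP entries under cn=tasks,cn=config and wait_for_task() polls until
# nsTaskExitCode appears. The memberOf fixup attributes below follow the
# 389-ds task convention; the task name is hypothetical.
def _example_run_memberof_fixup(conn):
    task_dn = DN(('cn', 'example fixup'), ('cn', 'memberof task'),
                 ('cn', 'tasks'), ('cn', 'config'))
    entry = conn.make_entry(
        task_dn,
        objectclass=['top', 'extensibleObject'],
        cn=['example fixup'],
        basedn=[api.env.basedn],
        filter=['(objectclass=*)'],
    )
    conn.add_entry(entry)
    return wait_for_task(conn, task_dn)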
def wait_for_entry(connection, dn, timeout, attr=None, attrvalue='*',
quiet=True):
"""Wait for entry and/or attr to show up
"""
log = logger.debug if quiet else logger.info
attrlist = []
if attr is not None:
filterstr = ipaldap.LDAPClient.make_filter_from_attr(attr, attrvalue)
attrlist.append(attr)
else:
filterstr = "(objectclass=*)"
log("Waiting up to %s seconds for replication (%s) %s %s",
timeout, connection, dn, filterstr)
entry = []
deadline = time.time() + timeout
for i in itertools.count(start=1):
try:
entry = connection.get_entries(
dn, ldap.SCOPE_BASE, filterstr, attrlist)
except errors.NotFound:
pass # no entry yet
except Exception as e: # badness
logger.error("Error reading entry %s: %s", dn, e)
raise
if entry:
log("Entry found %r", entry)
return
elif time.time() > deadline:
raise errors.NotFound(
reason="wait_for_entry timeout on {} for {}".format(
connection, dn
)
)
else:
if i % 10 == 0:
logger.debug("Still waiting for replication of %s", dn)
time.sleep(1)
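# A minimal usage sketch, not part of the original module: block until the
# member attribute of the replication managers group has replicated to the
# server behind conn.
def _example_wait_for_member(conn):
    wait_for_entry(
        conn,
        DN(REPL_MANAGERS_CN, api.env.container_sysaccounts, api.env.basedn),
        timeout=60,
        attr='member',
        quiet=False,
    )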
def get_ds_version(conn):
"""Returns the DS version
Retrieves the DS version from the vendorVersion attribute stored in LDAP.
:param conn: LDAP connection established and authenticated to the server
for which we need the version
:return: a tuple containing the DS version
"""
# Find which 389-ds is installed
rootdse = conn.get_entry(DN(''), ['vendorVersion'])
version = rootdse.single_value.get('vendorVersion')
mo = re.search(r'(\d+)\.(\d+)\.(\d+)[\.\d]*', version)
vendor_version = tuple(int(v) for v in mo.groups())
return vendor_version
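# A short worked example, not part of the original module, of the parsing
# done by get_ds_version(); the vendorVersion string is illustrative.
def _example_parse_vendor_version():
    version = '389-Directory/1.4.3.28 B2021.336.1331'
    mo = re.search(r'(\d+)\.(\d+)\.(\d+)[\.\d]*', version)
    assert tuple(int(v) for v in mo.groups()) == (1, 4, 3)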
class ReplicationManager:
"""Manage replication agreements
between DS servers, and sync agreements with Windows servers
"""
def __init__(self, realm, hostname, dirman_passwd=None, port=PORT,
starttls=False, conn=None):
self.hostname = hostname
self.port = port
self.dirman_passwd = dirman_passwd
self.realm = realm
self.starttls = starttls
self.suffix = ipautil.realm_to_suffix(realm)
self.need_memberof_fixup = False
self.db_suffix = self.suffix
self.agreement_name_format = "meTo%s"
# The caller is allowed to pass in an existing LDAPClient connection.
# Open a new one if not provided
if conn is None:
# If we are passed a password we'll use it as the DM password
# otherwise we'll do a GSSAPI bind.
protocol = 'ldap' if starttls else None
ldap_uri = ipaldap.get_ldap_uri(
hostname, port, protocol=protocol, cacert=paths.IPA_CA_CRT)
self.conn = ipaldap.LDAPClient(ldap_uri, cacert=paths.IPA_CA_CRT,
start_tls=starttls)
if dirman_passwd:
self.conn.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
bind_password=dirman_passwd)
else:
self.conn.gssapi_bind()
else:
self.conn = conn
self.repl_man_passwd = dirman_passwd
# these are likely constant, but you could change them
# at runtime if you really want
self.repl_man_dn = REPL_MAN_DN
self.repl_man_cn = "replication manager"
self.repl_man_group_dn = DN(
REPL_MANAGERS_CN, api.env.container_sysaccounts, api.env.basedn)
def _get_replica_id(self, conn, master_conn):
"""
Returns the replica ID which is unique for each backend.
conn is the connection we are trying to get the replica ID for.
master_conn is the master we are going to replicate with.
"""
# First see if there is already one set
dn = self.replica_dn()
assert isinstance(dn, DN)
try:
replica = conn.get_entry(dn)
except errors.NotFound:
pass
else:
if replica.single_value.get('nsDS5ReplicaId'):
return int(replica.single_value['nsDS5ReplicaId'])
# Ok, either the entry doesn't exist or the attribute isn't set
# so get it from the other master
return self._get_and_update_id_from_master(master_conn)
def _get_and_update_id_from_master(self, master_conn, attempts=5):
"""
Fetch replica ID from remote master and update nsDS5ReplicaId attribute
on 'cn=replication,cn=etc,$SUFFIX' entry. Do it as MOD_DELETE+MOD_ADD
operations and retry when conflict occurs, e.g. due to simultaneous
update from another replica.
:param master_conn: LDAP connection to master
:param attempts: number of attempts to update nsDS5ReplicaId
:return: value of nsDS5ReplicaId before incrementation
"""
dn = DN(('cn','replication'),('cn','etc'), self.suffix)
for a in range(1, attempts + 1):
try:
logger.debug('Fetching nsDS5ReplicaId from master '
'[attempt %d/%d]', a, attempts)
replica = master_conn.get_entry(dn)
id_values = replica.get('nsDS5ReplicaId')
if not id_values:
logger.debug("Unable to retrieve nsDS5ReplicaId from "
"remote server")
raise RuntimeError("Unable to retrieve nsDS5ReplicaId from remote server")
# nsDS5ReplicaId is single-valued now, but historically it could
# contain multiple values, of which we need the highest.
# see bug: https://fedorahosted.org/freeipa/ticket/3394
retval = max(int(v) for v in id_values)
# Now update the value on the master
mod_list = [(ldap.MOD_DELETE, 'nsDS5ReplicaId', str(retval)),
(ldap.MOD_ADD, 'nsDS5ReplicaId', str(retval + 1))]
master_conn.modify_s(dn, mod_list)
logger.debug('Successfully updated nsDS5ReplicaId.')
return retval
except errors.NotFound:
logger.debug("Unable to retrieve nsDS5ReplicaId from remote "
"server")
raise
# these errors signal a conflict in updating replica ID.
# We then wait for a random time interval and try again
except (ldap.NO_SUCH_ATTRIBUTE, ldap.OBJECT_CLASS_VIOLATION) as e:
sleep_interval = randint(1, 5)
logger.debug("Update failed (%s). Conflicting operation?", e)
time.sleep(sleep_interval)
# in case of other error we bail out
except ldap.LDAPError as e:
logger.debug("Problem updating nsDS5ReplicaID %s", e)
raise
raise RuntimeError("Failed to update nsDS5ReplicaId in %d attempts"
% attempts)
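    # A short worked example of the retry above, with illustrative values:
    # suppose two replicas both read nsDS5ReplicaId=5. The first to apply
    #   [(MOD_DELETE, 'nsDS5ReplicaId', '5'),
    #    (MOD_ADD, 'nsDS5ReplicaId', '6')]
    # wins and takes id 5; the loser's MOD_DELETE of '5' fails with
    # NO_SUCH_ATTRIBUTE, which is caught above, and the read/modify cycle
    # is retried after a random 1-5 second back-off.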
def get_agreement_filter(self, agreement_types=None, host=None):
"""
Get an LDAP replication agreement filter with a possibility to filter
the agreements by their type and a host
"""
if agreement_types is None:
agreement_types = (IPA_REPLICA, WINSYNC)
elif not isinstance(agreement_types, (list, tuple)):
agreement_types = (agreement_types,)
agreement_types_filters = []
if IPA_REPLICA in agreement_types:
agreement_types_filters.append('(&(objectclass=nsds5ReplicationAgreement)(nsDS5ReplicaRoot=%s))'
% self.db_suffix)
if WINSYNC in agreement_types:
agreement_types_filters.append('(objectclass=nsDSWindowsReplicationAgreement)')
if len(agreement_types_filters) > 1:
agreement_filter = '(|%s)' % ''.join(agreement_types_filters)
else:
agreement_filter = ''.join(agreement_types_filters)
if host is not None:
agreement_filter = '(&%s(nsDS5ReplicaHost=%s))' % (agreement_filter, host)
return agreement_filter
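    # Worked examples of the filters produced above, with an illustrative
    # suffix dc=example,dc=test:
    #   get_agreement_filter(IPA_REPLICA) ->
    #     (&(objectclass=nsds5ReplicationAgreement)(nsDS5ReplicaRoot=dc=example,dc=test))
    #   get_agreement_filter(host='replica1.example.test') -> the (|...) of
    #     both agreement types wrapped in
    #     (&...(nsDS5ReplicaHost=replica1.example.test))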
def find_replication_agreements(self):
"""
The replication agreements are stored in
cn="$SUFFIX",cn=mapping tree,cn=config
FIXME: Rather than failing with a read error if a user tries
to read this it simply returns zero entries. We need to use
GER to determine if we are allowed to read this to return a proper
response. For now just return "No entries" even if the user may
not be allowed to see them.
"""
filt = self.get_agreement_filter()
try:
ents = self.conn.get_entries(
DN(('cn', 'mapping tree'), ('cn', 'config')),
ldap.SCOPE_SUBTREE, filt)
except errors.NotFound:
ents = []
return ents
def find_ipa_replication_agreements(self):
"""
The replication agreements are stored in
cn="$SUFFIX",cn=mapping tree,cn=config
        Return the list of hosts we have replication agreements with.
"""
filt = self.get_agreement_filter(IPA_REPLICA)
try:
ents = self.conn.get_entries(
DN(('cn', 'mapping tree'), ('cn', 'config')),
ldap.SCOPE_SUBTREE, filt)
except errors.NotFound:
ents = []
return ents
def get_replication_agreement(self, hostname):
"""
The replication agreements are stored in
cn="$SUFFIX",cn=mapping tree,cn=config
Get the replication agreement for a specific host.
Returns None if not found.
"""
filt = self.get_agreement_filter(host=hostname)
try:
entries = self.conn.get_entries(
DN(('cn', 'mapping tree'), ('cn', 'config')),
ldap.SCOPE_SUBTREE, filt)
except errors.NotFound:
return None
if len(entries) == 0:
return None
else:
return entries[0] # There can be only one
def add_replication_manager(self, conn, dn, pw):
"""
Create a pseudo user to use for replication.
"""
assert isinstance(dn, DN)
rdn_attr = dn[0].attr
rdn_val = dn[0].value
ent = conn.make_entry(
dn,
{
'objectclass': ["top", "person"],
rdn_attr: [rdn_val],
'userpassword': [pw],
'sn': ["replication manager pseudo user"],
}
)
try:
conn.add_entry(ent)
except errors.DuplicateEntry:
conn.modify_s(dn, [(ldap.MOD_REPLACE, "userpassword", pw)])
def delete_replication_manager(self, conn, dn=REPL_MAN_DN):
assert isinstance(dn, DN)
try:
conn.delete_entry(dn)
except errors.NotFound:
pass
def get_replica_type(self, master=True):
if master:
return "3"
else:
return "2"
def replica_dn(self):
return DN(('cn', 'replica'), ('cn', self.db_suffix),
('cn', 'mapping tree'), ('cn', 'config'))
def get_be_name(self, conn):
# Get the backend name for this suffix
suffix_entry = conn.get_entry(
DN(('cn', self.db_suffix),
('cn', 'mapping tree'),
('cn', 'config')),
['nsslapd-backend'])
return suffix_entry.single_value.get('nsslapd-backend')
def _set_replica_binddngroup(self, r_conn, entry):
"""
Set nsds5replicabinddngroup attribute on remote master's replica entry.
Older masters (ipa < 3.3) may not support setting this attribute. In
this case log the error and fall back to setting replica's binddn
directly.
"""
binddn_groups = {
DN(p) for p in entry.get('nsds5replicabinddngroup', [])}
mod = []
if self.repl_man_group_dn not in binddn_groups:
mod.append((ldap.MOD_ADD, 'nsds5replicabinddngroup',
self.repl_man_group_dn))
if mod:
try:
r_conn.modify_s(entry.dn, mod)
except ldap.UNWILLING_TO_PERFORM:
logger.debug(
"nsds5replicabinddngroup attribute not supported on "
"remote master.")
except (ldap.ALREADY_EXISTS, ldap.CONSTRAINT_VIOLATION):
logger.debug("No update to %s necessary", entry.dn)
def replica_config(self, conn, replica_id, replica_binddn):
assert isinstance(replica_binddn, DN)
dn = self.replica_dn()
assert isinstance(dn, DN)
logger.debug("Add or update replica config %s", dn)
try:
entry = conn.get_entry(dn)
except errors.NotFound:
# no entry, create new one
entry = conn.make_entry(
dn,
objectclass=["top", "nsds5replica", "extensibleobject"],
cn=["replica"],
nsds5replicaroot=[str(self.db_suffix)],
nsds5replicaid=[str(replica_id)],
nsds5replicatype=[self.get_replica_type()],
nsds5flags=["1"],
nsds5replicabinddn=[replica_binddn],
nsds5replicabinddngroup=[self.repl_man_group_dn],
nsds5replicalegacyconsumer=["off"],
**REPLICA_CREATION_SETTINGS
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
logger.debug("Lost race against another replica, updating")
# fetch entry that have been added by another replica
entry = conn.get_entry(dn)
else:
logger.debug("Added replica config %s", dn)
# added entry successfully
return entry
# either existing entry or lost race
binddns = entry.setdefault('nsDS5ReplicaBindDN', [])
if replica_binddn not in {DN(m) for m in binddns}:
# Add the new replication manager
binddns.append(replica_binddn)
# If the remote server has 389-ds < 1.3, it does not
# support the attributes we are trying to set.
# Find which 389-ds is installed
vendor_version = get_ds_version(conn)
if vendor_version >= (1, 3, 0):
for key, value in REPLICA_CREATION_SETTINGS.items():
entry[key] = value
else:
logger.debug("replication attributes not supported "
"on remote master, skipping update.")
try:
conn.update_entry(entry)
except errors.EmptyModlist:
logger.debug("No update to %s necessary", entry.dn)
else:
logger.debug("Update replica config %s", entry.dn)
self._set_replica_binddngroup(conn, entry)
return entry
    def setup_changelog(self, conn):
        """Check if we have the new per-backend changelog, and set
        the trimming max-age setting. If the new changelog entry
        is not found then we are still using the old global changelog.
        """
        try:
cl_dn = DN(
('cn', 'changelog'),
('cn', self.get_be_name(conn)),
('cn', 'ldbm database'),
('cn', 'plugins'),
('cn', 'config'))
cl_entry = conn.get_entry(cl_dn)
        except errors.NotFound:
            # Did not find a per-backend changelog, so add the global
            # changelog entry. First get the database directory to build
            # the changelog directory location from.
ent = conn.get_entry(
DN(
('cn', 'config'),
('cn', 'ldbm database'),
('cn', 'plugins'),
('cn', 'config')),
['nsslapd-directory'])
dbdir = os.path.dirname(ent.single_value.get('nsslapd-directory'))
entry = conn.make_entry(
DN(
('cn', 'changelog5'),
('cn', 'config')),
{
'objectclass': ["top", "extensibleobject"],
'cn': ["changelog5"],
'nsslapd-changelogdir': [os.path.join(dbdir, "cldb")],
'nsslapd-changelogmaxage': ['30d'],
}
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
return
else:
# Set the changelog trimming
cl_entry['nsslapd-changelogmaxage'] = '30d'
try:
conn.update_entry(cl_entry)
except errors.EmptyModlist:
# not a problem since the trimming is already set
pass
def _finalize_replica_settings(self, conn):
"""Change replica settings to final values
During replica installation, some settings are configured for faster
replication.
"""
dn = self.replica_dn()
entry = conn.get_entry(dn)
for key, value in REPLICA_FINAL_SETTINGS.items():
entry[key] = value
try:
conn.update_entry(entry)
except errors.EmptyModlist:
pass
def finalize_replica_config(self, r_hostname, r_binddn=None,
r_bindpw=None, cacert=paths.IPA_CA_CRT):
"""Apply final cn=replica settings
        replica_config() sets several attributes for fast cache invalidation
        and fast reconnects to optimize replica installation. For
        production, longer timeouts and less aggressive cache invalidation
        are sufficient. finalize_replica_config() sets the values on the new
        replica and the master.
When installing multiple replicas in parallel, one replica may
finalize the values while another is still installing.
See https://pagure.io/freeipa/issue/7617
"""
self._finalize_replica_settings(self.conn)
r_conn = ipaldap.LDAPClient.from_hostname_secure(
r_hostname, cacert=cacert
)
if r_bindpw:
r_conn.simple_bind(r_binddn, r_bindpw)
else:
r_conn.gssapi_bind()
# If the remote server has 389-ds < 1.3, it does not
# support the attributes we are trying to set.
# Find which 389-ds is installed
vendor_version = get_ds_version(r_conn)
if vendor_version >= (1, 3, 0):
# 389-ds understands the replication attributes,
# we can safely modify them
self._finalize_replica_settings(r_conn)
else:
logger.debug("replication attributes not supported "
"on remote master, skipping update.")
r_conn.close()
def setup_chaining_backend(self, conn):
chaindn = DN(('cn', 'chaining database'), ('cn', 'plugins'), ('cn', 'config'))
benamebase = "chaindb"
urls = [conn.ldap_uri]
cn = ""
benum = 1
done = False
while not done:
try:
cn = benamebase + str(benum) # e.g. localdb1
dn = DN(('cn', cn), chaindn)
entry = conn.make_entry(
dn,
{
'objectclass': [
'top', 'extensibleObject', 'nsBackendInstance'],
'cn': [cn],
'nsslapd-suffix': [str(self.db_suffix)],
'nsfarmserverurl': urls,
'nsmultiplexorbinddn': [self.repl_man_dn],
'nsmultiplexorcredentials': [self.repl_man_passwd],
}
)
self.conn.add_entry(entry)
done = True
except errors.DuplicateEntry:
benum += 1
except errors.ExecutionError as e:
print("Could not add backend entry " + dn, e)
raise
return cn
def setup_chaining_farm(self, conn):
try:
conn.modify_s(self.db_suffix, [(ldap.MOD_ADD, 'aci',
[ "(targetattr = \"*\")(version 3.0; acl \"Proxied authorization for database links\"; allow (proxy) userdn = \"ldap:///%s\";)" % self.repl_man_dn ])])
except ldap.TYPE_OR_VALUE_EXISTS:
logger.debug("proxy aci already exists in suffix %s on %s",
self.db_suffix, conn.ldap_uri)
def get_mapping_tree_entry(self):
try:
entries = self.conn.get_entries(
DN(('cn', 'mapping tree'), ('cn', 'config')),
ldap.SCOPE_ONELEVEL,
"(cn=\"%s\")" % (self.db_suffix))
# TODO: Check we got only one entry
return entries[0]
except errors.NotFound:
logger.debug(
"failed to find mapping tree entry for %s", self.db_suffix)
raise
def enable_chain_on_update(self, bename):
mtent = self.get_mapping_tree_entry()
dn = mtent.dn
def get_entry(dn, attrs):
return self.conn.get_entry(DN(dn), attrs)
replication_plugin_name = (
installutils.get_replication_plugin_name(get_entry)
)
plgent = self.conn.get_entry(
DN(('cn', replication_plugin_name), ('cn', 'plugins'),
('cn', 'config')),
['nsslapd-pluginPath'])
path = plgent.single_value.get('nsslapd-pluginPath')
mod = [(ldap.MOD_REPLACE, 'nsslapd-state', 'backend'),
(ldap.MOD_ADD, 'nsslapd-backend', bename),
(ldap.MOD_ADD, 'nsslapd-distribution-plugin', path),
(ldap.MOD_ADD, 'nsslapd-distribution-funct', 'repl_chain_on_update')]
try:
self.conn.modify_s(dn, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
logger.debug("chainOnUpdate already enabled for %s",
self.db_suffix)
def setup_chain_on_update(self, other_conn):
chainbe = self.setup_chaining_backend(other_conn)
self.enable_chain_on_update(chainbe)
def add_passsync_user(self, conn, password):
pass_dn = DN(('uid', 'passsync'), api.env.container_sysaccounts,
self.suffix)
print("The user for the Windows PassSync service is %s" % pass_dn)
try:
conn.get_entry(pass_dn)
print("Windows PassSync system account exists, not resetting password")
except errors.NotFound:
# The user doesn't exist, add it
print("Adding Windows PassSync system account")
entry = conn.make_entry(
pass_dn,
objectclass=["account", "simplesecurityobject", "inetUser"],
uid=["passsync"],
userPassword=[password],
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
pass
# Add the user to the list of users allowed to bypass password policy
extop_dn = DN(('cn', 'ipa_pwd_extop'), ('cn', 'plugins'), ('cn', 'config'))
entry = conn.get_entry(extop_dn)
pass_mgrs = entry.get('passSyncManagersDNs', [])
pass_mgrs.append(pass_dn)
mod = [(ldap.MOD_REPLACE, 'passSyncManagersDNs', pass_mgrs)]
try:
conn.modify_s(extop_dn, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
logger.debug("Plugin '%s' already '%s' in passSyncManagersDNs",
extop_dn, pass_dn)
        # And finally add it as a member of the PassSync privilege to allow
        # displaying user NT attributes and resetting passwords
        passsync_privilege_dn = DN(('cn', 'PassSync Service'),
                                   api.env.container_privilege,
                                   api.env.basedn)
        # fetch the privilege entry itself; 'entry' still refers to the
        # ipa_pwd_extop plugin entry at this point
        privilege_entry = conn.get_entry(passsync_privilege_dn)
        members = privilege_entry.get('member', [])
        members.append(pass_dn)
        mod = [(ldap.MOD_REPLACE, 'member', members)]
try:
conn.modify_s(passsync_privilege_dn, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
logger.debug("PassSync service '%s' already have '%s' as member",
passsync_privilege_dn, pass_dn)
def setup_winsync_agmt(self, entry, win_subtree=None):
if win_subtree is None:
win_subtree = DN(WIN_USER_CONTAINER, self.ad_suffix)
ds_subtree = DN(IPA_USER_CONTAINER, self.suffix)
windomain = ipautil.suffix_to_realm(self.suffix)
entry["objectclass"] = ["nsDSWindowsReplicationAgreement"]
entry["nsds7WindowsReplicaSubtree"] = [win_subtree]
entry["nsds7DirectoryReplicaSubtree"] = [ds_subtree]
# for now, just sync users and ignore groups
entry["nsds7NewWinUserSyncEnabled"] = ['true']
entry["nsds7NewWinGroupSyncEnabled"] = ['false']
entry["nsds7WindowsDomain"] = [windomain]
def agreement_dn(self, hostname, master=None):
"""
IPA agreement use the same dn on both sides, dogtag does not.
master is not used for IPA agreements but for dogtag it will
tell which side we want.
"""
cn = self.agreement_name_format % (hostname)
dn = DN(('cn', cn), self.replica_dn())
return (cn, dn)
def setup_agreement(self, a_conn, b_hostname, port=389,
repl_man_dn=None, repl_man_passwd=None,
iswinsync=False, win_subtree=None, isgssapi=False,
master=None):
"""
master is used to determine which side of the agreement we are
creating. This is only needed for dogtag replication agreements
which use a different name on each side. If master is None then
isn't a dogtag replication agreement.
"""
if repl_man_dn is not None:
assert isinstance(repl_man_dn, DN)
cn, dn = self.agreement_dn(b_hostname, master=master)
try:
a_conn.get_entry(dn)
return
except errors.NotFound:
pass
entry = a_conn.make_entry(
dn,
objectclass=["nsds5replicationagreement"],
cn=[cn],
nsds5replicahost=[b_hostname],
nsds5replicaport=[str(port)],
nsds5replicatimeout=[str(TIMEOUT)],
nsds5replicaroot=[str(self.db_suffix)],
description=["me to %s" % b_hostname],
)
if master is None:
entry['nsDS5ReplicatedAttributeList'] = [
'(objectclass=*) $ EXCLUDE %s' % " ".join(EXCLUDES)]
if isgssapi:
entry['nsds5replicatransportinfo'] = ['LDAP']
entry['nsds5replicabindmethod'] = ['SASL/GSSAPI']
else:
entry['nsds5replicabinddn'] = [repl_man_dn]
entry['nsds5replicacredentials'] = [repl_man_passwd]
entry['nsds5replicatransportinfo'] = ['TLS']
entry['nsds5replicabindmethod'] = ['simple']
if iswinsync:
self.setup_winsync_agmt(entry, win_subtree)
else:
entry['nsds5ReplicaStripAttrs'] = [" ".join(STRIP_ATTRS)]
a_conn.add_entry(entry)
try:
mod = [(ldap.MOD_ADD, 'nsDS5ReplicatedAttributeListTotal',
'(objectclass=*) $ EXCLUDE %s' % " ".join(TOTAL_EXCLUDES))]
a_conn.modify_s(dn, mod)
except ldap.LDAPError:
            # Apparently there are problems setting the total list.
            # Probably the master is an old 389-ds server; tell the caller
            # that we will have to set the memberof fixup task.
self.need_memberof_fixup = True
wait_for_entry(
a_conn, entry.dn, timeout=api.env.replication_wait_timeout
)
def needs_memberof_fixup(self):
return self.need_memberof_fixup
def get_replica_principal_dns(self, a, b, retries):
"""
Get the DNs of the ldap principals we are going to convert
to using GSSAPI replication.
Arguments a and b are LDAP connections. retries is the number
of attempts that should be made to find the entries. It could
be that replication is slow.
If successful this returns a tuple (dn_a, dn_b).
If either of the DNs doesn't exist after the retries are
exhausted an exception is raised.
"""
filter_a = '(krbprincipalname=ldap/%s@%s)' % (a.host, self.realm)
filter_b = '(krbprincipalname=ldap/%s@%s)' % (b.host, self.realm)
a_entry = None
b_entry = None
error_message = ''
        while retries > 0:
logger.info('Getting ldap service principals for conversion: '
'%s and %s', filter_a, filter_b)
try:
a_entry = b.get_entries(self.suffix, ldap.SCOPE_SUBTREE,
filter=filter_a)
except errors.NotFound:
pass
try:
b_entry = a.get_entries(self.suffix, ldap.SCOPE_SUBTREE,
filter=filter_b)
except errors.NotFound:
pass
if a_entry and b_entry:
logger.debug('Found both principals.')
break
# One or both is missing, force sync again
if not a_entry:
logger.debug('Unable to find entry for %s on %s',
filter_a, str(b))
self.force_sync(a, b.host)
_cn, dn = self.agreement_dn(b.host)
_haserror, error_message = self.wait_for_repl_update(a, dn, 60)
if not b_entry:
logger.debug('Unable to find entry for %s on %s',
filter_b, str(a))
self.force_sync(b, a.host)
_cn, dn = self.agreement_dn(a.host)
_haserror, error_message = self.wait_for_repl_update(b, dn, 60)
retries -= 1
if not a_entry or not b_entry:
error = 'One of the ldap service principals is missing. ' \
'Replication agreement cannot be converted.'
if error_message:
error += '\nReplication error message: %s' % error_message
raise RuntimeError(error)
return (a_entry[0].dn, b_entry[0].dn)
def _add_replica_bind_dn(self, conn, bind_dn):
rep_dn = self.replica_dn()
assert isinstance(rep_dn, DN)
try:
mod = [(ldap.MOD_ADD, "nsds5replicabinddn", bind_dn)]
conn.modify_s(rep_dn, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
def _add_dn_to_replication_managers(self, conn, bind_dn):
try:
mod = [(ldap.MOD_ADD, "member", bind_dn)]
conn.modify_s(self.repl_man_group_dn, mod)
except (ldap.TYPE_OR_VALUE_EXISTS, ldap.NO_SUCH_OBJECT):
pass
def setup_krb_princs_as_replica_binddns(self, a, b):
"""
Search the appropriate principal names so we can get
the correct DNs to store in the replication agreements.
Then modify the replica object to allow these DNs to act
as replication agents.
"""
(a_dn, b_dn) = self.get_replica_principal_dns(a, b, retries=100)
assert isinstance(a_dn, DN)
assert isinstance(b_dn, DN)
for conn, bind_dn in ((a, b_dn), (b, a_dn)):
# Add kerberos principal DNs as valid bindDNs for replication
self._add_replica_bind_dn(conn, bind_dn)
# Add kerberos principal DNs as valid bindDNs to bindDN group
self._add_dn_to_replication_managers(conn, bind_dn)
def gssapi_update_agreements(self, a, b):
self.setup_krb_princs_as_replica_binddns(a, b)
#change replication agreements to connect to other host using GSSAPI
mod = [(ldap.MOD_REPLACE, "nsds5replicatransportinfo", "LDAP"),
(ldap.MOD_REPLACE, "nsds5replicabindmethod", "SASL/GSSAPI"),
(ldap.MOD_DELETE, "nsds5replicabinddn", None),
(ldap.MOD_DELETE, "nsds5replicacredentials", None)]
_cn, a_ag_dn = self.agreement_dn(b.host)
a.modify_s(a_ag_dn, mod)
_cn, b_ag_dn = self.agreement_dn(a.host)
b.modify_s(b_ag_dn, mod)
# Finally remove the temporary replication manager user
try:
a.delete_entry(self.repl_man_dn)
except errors.NotFound:
pass
try:
b.delete_entry(self.repl_man_dn)
except errors.NotFound:
pass
def delete_agreement(self, hostname, dn=None):
"""
Delete a replication agreement.
@hostname: the hostname of the agreement to remove
@dn: optional dn of the agreement to remove
For IPA agreements we can easily calculate the DN of the agreement
to remove. Dogtag agreements are another matter, its agreement
names depend entirely on where it is created. In this case it is
better to pass the DN in directly.
"""
if dn is None:
_cn, dn = self.agreement_dn(hostname)
return self.conn.delete_entry(dn)
def delete_referral(self, hostname):
dn = DN(('cn', self.db_suffix),
('cn', 'mapping tree'), ('cn', 'config'))
# TODO: should we detect proto/port somehow ?
mod = [(ldap.MOD_DELETE, 'nsslapd-referral',
'ldap://%s/%s' % (ipautil.format_netloc(hostname, 389),
self.db_suffix))]
try:
self.conn.modify_s(dn, mod)
except Exception as e:
logger.debug("Failed to remove referral value: %s", str(e))
def check_repl_init(self, conn, agmtdn, start):
done = False
hasError = 0
attrlist = ['cn', 'nsds5BeginReplicaRefresh',
'nsds5replicaUpdateInProgress',
'nsds5ReplicaLastInitStatus',
'nsds5ReplicaLastInitStatusJSON',
'nsds5ReplicaLastInitStart',
'nsds5ReplicaLastInitEnd']
entry = conn.get_entry(agmtdn, attrlist)
if not entry:
print("Error reading status from agreement", agmtdn)
hasError = 1
else:
refresh = entry.single_value.get('nsds5BeginReplicaRefresh')
inprogress = entry.single_value.get('nsds5replicaUpdateInProgress')
status = entry.single_value.get('nsds5ReplicaLastInitStatus')
json_status = \
entry.single_value.get('nsds5ReplicaLastInitStatusJSON')
if not refresh: # done - check status
if json_status:
# Just reset status with the JSON 'message'
status_obj = json.loads(json_status)
status = status_obj['message']
if not status:
print("No status yet")
elif status.find("replica busy") > -1:
print("[%s] reports: Replica Busy! Status: [%s]"
% (conn.ldap_uri, status))
done = True
hasError = 2
elif status.find("Total update succeeded") > -1:
print("\nUpdate succeeded")
done = True
            elif inprogress:
                # the refresh attribute is cleared but updates are still
                # being applied
                print("\nUpdate in progress yet not in progress")
else:
print("\n[%s] reports: Update failed! Status: [%s]"
% (conn.ldap_uri, status))
hasError = 1
done = True
else:
now = datetime.datetime.now()
d = now - start
sys.stdout.write('\r')
sys.stdout.write("Update in progress, %d seconds elapsed" % int(d.total_seconds()))
sys.stdout.flush()
return done, hasError
def check_repl_update(self, conn, agmtdn):
done = False
hasError = 0
error_message = ''
attrlist = ['cn',
'nsds5replicaUpdateInProgress',
'nsds5ReplicaLastUpdateStatus',
'nsds5ReplicaLastUpdateStatusjson',
'nsds5ReplicaLastUpdateStart',
'nsds5ReplicaLastUpdateEnd']
entry = conn.get_entry(agmtdn, attrlist)
if not entry:
print("Error reading status from agreement", agmtdn)
hasError = 1
else:
inprogress = entry.single_value.get('nsds5replicaUpdateInProgress')
status = entry.single_value.get('nsds5ReplicaLastUpdateStatus')
json_status = \
entry.single_value.get('nsds5ReplicaLastUpdateStatusjson')
try:
# nsds5ReplicaLastUpdateStart is either a GMT time
# ending with Z or 0 (see 389-ds ticket 47836)
# Remove the Z and convert to int
start = entry.single_value['nsds5ReplicaLastUpdateStart']
if start.endswith('Z'):
start = start[:-1]
start = int(start)
except (ValueError, TypeError, KeyError):
start = 0
try:
# nsds5ReplicaLastUpdateEnd is either a GMT time
# ending with Z or 0 (see 389-ds ticket 47836)
# Remove the Z and convert to int
end = entry.single_value['nsds5ReplicaLastUpdateEnd']
if end.endswith('Z'):
end = end[:-1]
end = int(end)
except (ValueError, TypeError, KeyError):
end = 0
# incremental update is done if inprogress is false and end >= start
done = inprogress is not None and not inprogress and start <= end
logger.info("Replication Update in progress: %s: status: %s: "
"start: %d: end: %d",
inprogress, status, start, end)
if json_status:
# In 389-ds-base 1.4.1.4 we have the status message available
# to us in a json object
status_obj = json.loads(json_status)
if status_obj['state'] != 'green':
hasError = 1
error_message = status_obj['message']
done = True
elif status: # always check for errors
# status will usually be a number followed by a string
# number != 0 means error
# Since 389-ds-base 1.3.5 it is 'Error (%d) %s'
# so we need to remove a prefix string and parentheses
if status.startswith('Error '):
rc, msg = status[6:].split(' ', 1)
rc = rc.strip('()')
else:
rc, msg = status.split(' ', 1)
if rc != '0':
hasError = 1
error_message = msg
done = True
return done, hasError, error_message
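    # Worked examples of the status parsing above, with illustrative strings:
    #   'Error (0) Replica acquired successfully: Incremental update succeeded'
    #     -> rc '0', no error
    #   'Error (49) Problem connecting to replica - LDAP error: Invalid credentials'
    #     -> rc '49', hasError=1, error_message set to the remainder
    # With 389-ds-base >= 1.4.1.4 the JSON status is used instead, and any
    # state other than 'green' counts as an error.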
def wait_for_repl_init(self, conn, agmtdn):
done = False
haserror = 0
start = datetime.datetime.now()
while not done and not haserror:
time.sleep(1) # give it a few seconds to get going
done, haserror = self.check_repl_init(conn, agmtdn, start)
print("")
return haserror
def wait_for_repl_update(self, conn, agmtdn, maxtries=600):
done = False
haserror = 0
error_message = ''
while not done and not haserror and maxtries > 0:
time.sleep(1) # give it a few seconds to get going
done, haserror, error_message = self.check_repl_update(conn, agmtdn)
maxtries -= 1
if maxtries == 0: # too many tries
print("Error: timeout: could not determine agreement status: please check your directory server logs for possible errors")
haserror = 1
return haserror, error_message
def start_replication(self, conn, hostname=None, master=None):
print("Starting replication, please wait until this has completed.")
if hostname is None:
hostname = self.hostname
_cn, dn = self.agreement_dn(hostname, master)
mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')]
conn.modify_s(dn, mod)
return self.wait_for_repl_init(conn, dn)
def basic_replication_setup(self, conn, replica_id, repldn, replpw):
assert isinstance(repldn, DN)
if replpw is not None:
self.add_replication_manager(conn, repldn, replpw)
self.replica_config(conn, replica_id, repldn)
self.setup_changelog(conn)
def setup_replication(self, r_hostname, r_port=389, r_sslport=636,
r_binddn=None, r_bindpw=None,
is_cs_replica=False, local_port=None):
assert isinstance(r_binddn, DN)
if local_port is None:
local_port = r_port
# note - there appears to be a bug in python-ldap - it does not
# allow connections using two different CA certs
r_conn = ipaldap.LDAPClient.from_hostname_secure(r_hostname)
if r_bindpw:
r_conn.simple_bind(r_binddn, r_bindpw)
else:
r_conn.gssapi_bind()
#Setup the first half
l_id = self._get_replica_id(self.conn, r_conn)
self.basic_replication_setup(self.conn, l_id,
self.repl_man_dn, self.repl_man_passwd)
# Now setup the other half
r_id = self._get_replica_id(r_conn, r_conn)
self.basic_replication_setup(r_conn, r_id,
self.repl_man_dn, self.repl_man_passwd)
if is_cs_replica:
self.setup_agreement(r_conn, self.hostname, port=local_port,
repl_man_dn=self.repl_man_dn,
repl_man_passwd=self.repl_man_passwd,
master=False)
self.setup_agreement(self.conn, r_hostname, port=r_port,
repl_man_dn=self.repl_man_dn,
repl_man_passwd=self.repl_man_passwd,
master=True)
else:
self.setup_agreement(r_conn, self.hostname, port=local_port,
repl_man_dn=self.repl_man_dn,
repl_man_passwd=self.repl_man_passwd)
self.setup_agreement(self.conn, r_hostname, port=r_port,
repl_man_dn=self.repl_man_dn,
repl_man_passwd=self.repl_man_passwd)
#Finally start replication
ret = self.start_replication(r_conn, master=False)
if ret != 0:
raise RuntimeError("Failed to start replication")
def unhashed_password_log(self, conn, value):
if not conn:
raise RuntimeError("unhashed_password_log no connection")
if not value:
return
# Check the validity of the value
if value.lower() not in ['off', 'on', 'nolog']:
return
# Change the value if needed
entry = conn.get_entry(DN(('cn', 'config')),
['nsslapd-unhashed-pw-switch'])
toggle = entry.single_value.get("nsslapd-unhashed-pw-switch")
if not toggle or not (toggle.lower() == value.lower()):
entry["nsslapd-unhashed-pw-switch"] = value.lower()
conn.update_entry(entry)
# if unhashed password are being logged, display a warning
if value.lower() == 'on':
try:
entry = conn.get_entry(
DN(('cn', 'changelog5'),
('cn', 'config')),
['nsslapd-changelogdir'])
cldb = entry.single_value.get("nsslapd-changelogdir")
logger.warning("This configuration (\"--winsync\") may imply "
"that the log file contains clear text "
"passwords.\n"
"Please ensure that these files can be accessed"
" only by trusted accounts.\n"
"Log files are under %s", cldb)
except errors.NotFound:
logger.warning("This configuration (\"--winsync\") may imply "
"that the log file contains clear text "
"passwords.\n"
"Please ensure that these files can be accessed"
" only by trusted accounts.")
def setup_winsync_replication(self,
ad_dc_name, ad_binddn, ad_pwd,
passsync_pw, ad_subtree,
cacert=paths.IPA_CA_CRT):
self.ad_suffix = ""
try:
# Validate AD connection
ad_conn = ldap_initialize(
'ldap://%s' % ipautil.format_netloc(ad_dc_name),
cacertfile=cacert
)
ad_conn.start_tls_s()
ad_conn.simple_bind_s(str(ad_binddn), ad_pwd)
res = ad_conn.search_s("", ldap.SCOPE_BASE, '(objectClass=*)',
['defaultNamingContext'])
for dn,entry in res:
if dn == "":
ad_suffix = entry['defaultNamingContext'][0]
self.ad_suffix = ad_suffix.decode('utf-8')
logger.info("AD Suffix is: %s", self.ad_suffix)
if self.ad_suffix == "":
raise RuntimeError("Failed to lookup AD's Ldap suffix")
ad_conn.unbind_s()
del ad_conn
except Exception as e:
logger.info("Failed to connect to AD server %s", ad_dc_name)
logger.info("The error was: %s", e)
raise RuntimeError("Failed to setup winsync replication")
        # Set up only one half; there is no other side to get a replica ID
        # from, so we generate one locally.
replica_id = self._get_replica_id(self.conn, self.conn)
self.basic_replication_setup(self.conn, replica_id,
self.repl_man_dn, self.repl_man_passwd)
        # now add a passsync user allowed to access the AD server
self.add_passsync_user(self.conn, passsync_pw)
self.setup_agreement(self.conn, ad_dc_name,
repl_man_dn=ad_binddn, repl_man_passwd=ad_pwd,
iswinsync=True, win_subtree=ad_subtree)
logger.info("Added new sync agreement, waiting for it to become "
"ready . . .")
_cn, dn = self.agreement_dn(ad_dc_name)
self.wait_for_repl_update(self.conn, dn, 300)
logger.info("Agreement is ready, starting replication . . .")
# Add winsync replica to the public DIT
dn = DN(('cn',ad_dc_name),('cn','replicas'),('cn','ipa'),('cn','etc'), self.suffix)
entry = self.conn.make_entry(
dn,
objectclass=["nsContainer", "ipaConfigObject"],
cn=[ad_dc_name],
ipaConfigString=["winsync:%s" % self.hostname],
)
try:
self.conn.add_entry(entry)
except Exception:
logger.info("Failed to create public entry for winsync replica")
# For winsync, unhashed passwords needs to be in replication changelog
# Time to alarm about a security risk
self.unhashed_password_log(self.conn, 'on')
#Finally start replication
ret = self.start_replication(self.conn, ad_dc_name)
if ret != 0:
raise RuntimeError("Failed to start replication")
def convert_to_gssapi_replication(self, r_hostname, r_binddn, r_bindpw):
r_conn = ipaldap.LDAPClient.from_hostname_secure(r_hostname)
if r_bindpw:
r_conn.simple_bind(r_binddn, r_bindpw)
else:
r_conn.gssapi_bind()
# First off make sure servers are in sync so that both KDCs
# have all principals and their passwords and can release
# the right tickets. We do this by force pushing all our changes
self.force_sync(self.conn, r_hostname)
_cn, dn = self.agreement_dn(r_hostname)
self.wait_for_repl_update(self.conn, dn, 300)
# now in the opposite direction
self.force_sync(r_conn, self.hostname)
_cn, dn = self.agreement_dn(self.hostname)
self.wait_for_repl_update(r_conn, dn, 300)
# now that directories are in sync,
# change the agreements to use GSSAPI
self.gssapi_update_agreements(self.conn, r_conn)
def setup_gssapi_replication(self, r_hostname, r_binddn=None, r_bindpw=None):
"""
Directly sets up GSSAPI replication.
Only usable to connect 2 existing replicas (needs existing kerberos
principals)
"""
r_conn = ipaldap.LDAPClient.from_hostname_secure(r_hostname)
if r_bindpw:
r_conn.simple_bind(r_binddn, r_bindpw)
else:
r_conn.gssapi_bind()
# Allow krb principals to act as replicas
self.setup_krb_princs_as_replica_binddns(self.conn, r_conn)
        # Create mutual replication agreements using SASL/GSSAPI
self.setup_agreement(self.conn, r_hostname, isgssapi=True)
self.setup_agreement(r_conn, self.hostname, isgssapi=True)
def initialize_replication(self, dn, conn):
mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start'),
(ldap.MOD_REPLACE, 'nsds5ReplicaEnabled', 'on')]
try:
conn.modify_s(dn, mod)
except ldap.ALREADY_EXISTS:
return
def force_sync(self, conn, hostname):
newschedule = '2358-2359 0'
filter = self.get_agreement_filter(host=hostname)
try:
entries = conn.get_entries(
DN(('cn', 'config')), ldap.SCOPE_SUBTREE, filter)
except errors.NotFound:
logger.error("Unable to find replication agreement for %s",
hostname)
raise RuntimeError("Unable to proceed")
if len(entries) > 1:
logger.error("Found multiple agreements for %s", hostname)
logger.error("Using the first one only (%s)", entries[0].dn)
dn = entries[0].dn
schedule = entries[0].single_value.get('nsds5replicaupdateschedule')
        # In the remote chance of a match we pick a different value. We force
        # a sync to happen right now by setting the schedule to something and
        # quickly removing it.
if schedule is not None:
if newschedule == schedule:
newschedule = '2358-2359 1'
logger.info("Setting agreement %s schedule to %s to force synch",
dn, newschedule)
mod = [(ldap.MOD_REPLACE, 'nsDS5ReplicaUpdateSchedule', [ newschedule ])]
conn.modify_s(dn, mod)
time.sleep(1)
logger.info("Deleting schedule %s from agreement %s",
newschedule, dn)
mod = [(ldap.MOD_DELETE, 'nsDS5ReplicaUpdateSchedule', None)]
conn.modify_s(dn, mod)
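    # A short worked example of the trick above: if the agreement has no
    # nsds5replicaupdateschedule, setting it to '2358-2359 0' (one minute
    # before midnight on Sunday) and deleting it a second later makes the
    # server re-evaluate the schedule and push pending changes immediately.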
def get_agreement_type(self, hostname):
entry = self.get_replication_agreement(hostname)
if not entry:
raise errors.NotFound(
reason="Replication agreement for %s not found" % hostname)
objectclass = entry.get("objectclass")
for o in objectclass:
if o.lower() == "nsdswindowsreplicationagreement":
return WINSYNC
return IPA_REPLICA
def replica_cleanup(self, replica, realm, force=False):
"""
This function removes information about the replica in parts
of the shared tree that expose it, so clients stop trying to
use this replica.
"""
err = None
if replica == self.hostname:
raise RuntimeError("Can't cleanup self")
# delete master kerberos key and all its svc principals
try:
entries = self.conn.get_entries(
self.suffix, ldap.SCOPE_SUBTREE,
filter='(krbprincipalname=*/%s@%s)' % (replica, realm))
if entries:
entries.sort(key=lambda x: len(x.dn), reverse=True)
for entry in entries:
self.conn.delete_entry(entry)
except errors.NotFound:
pass
except Exception as e:
if not force:
raise e
else:
err = e
# remove replica memberPrincipal from s4u2proxy configuration
dn1 = DN(('cn', 'ipa-http-delegation'), api.env.container_s4u2proxy, self.suffix)
member_principal1 = "HTTP/%(fqdn)s@%(realm)s" % dict(fqdn=replica, realm=realm)
dn2 = DN(('cn', 'ipa-ldap-delegation-targets'), api.env.container_s4u2proxy, self.suffix)
member_principal2 = "ldap/%(fqdn)s@%(realm)s" % dict(fqdn=replica, realm=realm)
dn3 = DN(('cn', 'ipa-cifs-delegation-targets'), api.env.container_s4u2proxy, self.suffix)
member_principal3 = "cifs/%(fqdn)s@%(realm)s" % dict(fqdn=replica, realm=realm)
for (dn, member_principal) in ((dn1, member_principal1),
(dn2, member_principal2),
(dn3, member_principal3)):
try:
mod = [(ldap.MOD_DELETE, 'memberPrincipal', member_principal)]
self.conn.modify_s(dn, mod)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
logger.debug("Replica (%s) memberPrincipal (%s) not found in "
"%s",
replica, member_principal, dn)
except Exception as e:
if not force:
raise e
elif not err:
err = e
# delete master entry with all active services
try:
dn = DN(('cn', replica), api.env.container_masters, self.suffix)
entries = self.conn.get_entries(dn, ldap.SCOPE_SUBTREE)
if entries:
entries.sort(key=lambda x: len(x.dn), reverse=True)
for entry in entries:
self.conn.delete_entry(entry)
except errors.NotFound:
pass
except Exception as e:
if not force:
raise e
elif not err:
err = e
try:
basedn = DN(('cn', 'etc'), self.suffix)
filter = '(dnaHostname=%s)' % replica
entries = self.conn.get_entries(
basedn, ldap.SCOPE_SUBTREE, filter=filter)
if len(entries) != 0:
for entry in entries:
self.conn.delete_entry(entry)
except errors.NotFound:
pass
except Exception as e:
if not force:
raise e
elif not err:
err = e
# delete DNS server configuration, if any
try:
api.Command.dnsserver_del(unicode(replica))
except errors.NotFound:
pass
except Exception as e:
if not force:
raise e
elif not err:
err = e
try:
dn = DN(('cn', 'default'), ('ou', 'profile'), self.suffix)
ret = self.conn.get_entry(dn)
srvlist = ret.single_value.get('defaultServerList', '')
srvlist = srvlist.split()
if replica in srvlist:
srvlist.remove(replica)
attr = ' '.join(srvlist)
ret['defaultServerList'] = attr
self.conn.update_entry(ret)
except errors.NotFound:
pass
except errors.MidairCollision:
pass
except errors.EmptyModlist:
pass
except Exception as e:
if force and err:
raise err
else:
raise e
if err:
raise err
def set_readonly(self, readonly, critical=False):
"""
Set the database readonly status.
@readonly: boolean for read-only status
@critical: boolean to raise an exception on failure, default False.
"""
dn = DN(('cn', 'userRoot'), ('cn', 'ldbm database'),
('cn', 'plugins'), ('cn', 'config'))
mod = [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on' if readonly else 'off')]
try:
self.conn.modify_s(dn, mod)
except ldap.INSUFFICIENT_ACCESS as e:
# We can't modify the read-only status on the remote server.
# This usually isn't a show-stopper.
if critical:
raise e
logger.debug("No permission to modify replica read-only status, "
"continuing anyway")
def cleanallruv(self, replicaId):
"""
Create a CLEANALLRUV task and monitor it until it has
completed.
"""
logger.debug("Creating CLEANALLRUV task for replica id %d", replicaId)
dn = DN(('cn', 'clean %d' % replicaId), ('cn', 'cleanallruv'),('cn', 'tasks'), ('cn', 'config'))
e = self.conn.make_entry(
dn,
{
'objectclass': ['top', 'extensibleObject'],
'cn': ['clean %d' % replicaId],
'replica-base-dn': [self.db_suffix],
'replica-id': [replicaId],
'replica-force-cleaning': ['yes'],
}
)
try:
self.conn.add_entry(e)
except errors.DuplicateEntry:
print("CLEANALLRUV task for replica id %d already exists." % replicaId)
else:
print("Background task created to clean replication data. This may take a while.")
print("This may be safely interrupted with Ctrl+C")
wait_for_task(self.conn, dn)
def abortcleanallruv(self, replicaId, force=False):
"""
Create a task to abort a CLEANALLRUV operation.
"""
logger.debug("Creating task to abort a CLEANALLRUV operation for "
"replica id %d", replicaId)
dn = DN(('cn', 'abort %d' % replicaId), ('cn', 'abort cleanallruv'),('cn', 'tasks'), ('cn', 'config'))
e = self.conn.make_entry(
dn,
{
'replica-base-dn': [api.env.basedn],
'replica-id': [replicaId],
'objectclass': ['top', 'extensibleObject'],
'cn': ['abort %d' % replicaId],
'replica-certify-all': ['no'] if force else ['yes'],
}
)
try:
self.conn.add_entry(e)
except errors.DuplicateEntry:
print("An abort CLEANALLRUV task for replica id %d already exists." % replicaId)
else:
print("Background task created. This may take a while.")
print("This may be safely interrupted with Ctrl+C")
wait_for_task(self.conn, dn)
def get_DNA_range(self, hostname):
"""
Return the DNA range on this server as a tuple, (next, max), or
(None, None) if no range has been assigned yet.
Raises an exception on errors reading an entry.
"""
entry = self.conn.get_entry(DNA_DN)
nextvalue = int(entry.single_value.get("dnaNextValue", 0))
maxvalue = int(entry.single_value.get("dnaMaxValue", 0))
sharedcfgdn = entry.single_value.get("dnaSharedCfgDN")
if sharedcfgdn is not None:
sharedcfgdn = DN(sharedcfgdn)
shared_entry = self.conn.get_entry(sharedcfgdn)
remaining = int(shared_entry.single_value.get("dnaRemainingValues", 0))
else:
remaining = 0
if nextvalue == 0 and maxvalue == 0:
return (None, None)
# Check the magic values for an unconfigured DNA entry
if maxvalue == 1100 and nextvalue == 1101 and remaining == 0:
return (None, None)
else:
return (nextvalue, maxvalue)
def get_DNA_next_range(self, hostname):
"""
Return the DNA "on-deck" range on this server as a tuple, (next, max),
or
(None, None) if no range has been assigned yet.
Raises an exception on errors reading an entry.
"""
entry = self.conn.get_entry(DNA_DN)
range = entry.single_value.get("dnaNextRange")
if range is None:
return (None, None)
try:
(next, max) = range.split('-')
except ValueError:
# Should not happen, malformed entry, return nothing.
return (None, None)
return (int(next), int(max))
def save_DNA_next_range(self, next_start, next_max):
"""
Save a DNA range into the on-deck value.
This adds a dnaNextRange value to the DNA configuration. This
        attribute takes the form of start-max.
Returns True on success.
Returns False if the range is already defined.
Raises an exception on failure.
"""
entry = self.conn.get_entry(DNA_DN)
range = entry.single_value.get("dnaNextRange")
if range is not None and next_start != 0 and next_max != 0:
return False
if next_start == 0 and next_max == 0:
entry["dnaNextRange"] = None
else:
entry["dnaNextRange"] = "%s-%s" % (next_start, next_max)
self.conn.update_entry(entry)
return True
def save_DNA_range(self, next_start, next_max):
"""
Save a DNA range.
This is potentially very dangerous.
Returns True on success. Raises an exception on failure.
"""
entry = self.conn.get_entry(DNA_DN)
entry["dnaNextValue"] = next_start
entry["dnaMaxValue"] = next_max
self.conn.update_entry(entry)
return True
def disable_agreement(self, hostname):
"""
Disable the replication agreement to hostname.
"""
entry = self.get_replication_agreement(hostname)
if not entry:
raise errors.NotFound(reason=_(
"Replication agreement for %(hostname)s not found") % {
'hostname': hostname
})
entry['nsds5ReplicaEnabled'] = 'off'
try:
self.conn.update_entry(entry)
except errors.EmptyModlist:
pass
def enable_agreement(self, hostname):
"""
Enable the replication agreement to hostname.
Note: for replication to work it needs to be enabled both ways.
"""
entry = self.get_replication_agreement(hostname)
if not entry:
raise errors.NotFound(reason=_(
"Replication agreement for %(hostname)s not found") % {
'hostname': hostname
})
entry['nsds5ReplicaEnabled'] = 'on'
try:
self.conn.update_entry(entry)
except errors.EmptyModlist:
pass
def _add_replication_managers(self, conn):
entry = conn.make_entry(
self.repl_man_group_dn,
objectclass=['top', 'groupofnames'],
cn=['replication managers']
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
pass
def ensure_replication_managers(self, conn, r_hostname):
"""
Ensure that the 'cn=replication managers,cn=sysaccounts' group exists
and contains the principals for master and remote replica
On IPA 3.x masters lacking support for nsds5ReplicaBinddnGroup
attribute, add replica bind DN directly into the replica entry.
"""
my_dn = DN(
('krbprincipalname', u'ldap/%s@%s' % (self.hostname, self.realm)),
api.env.container_service,
api.env.basedn
)
remote_dn = DN(
('krbprincipalname', u'ldap/%s@%s' % (r_hostname, self.realm)),
api.env.container_service,
api.env.basedn
)
try:
conn.get_entry(self.repl_man_group_dn)
except errors.NotFound:
self._add_replica_bind_dn(conn, my_dn)
self._add_replication_managers(conn)
self._add_dn_to_replication_managers(conn, my_dn)
self._add_dn_to_replication_managers(conn, remote_dn)
def add_temp_sasl_mapping(self, conn, r_hostname):
"""
Create a special user to let SASL Mapping find a valid user
on first replication.
"""
name = 'ldap/%s@%s' % (r_hostname, self.realm)
replica_binddn = DN(('cn', name), ('cn', 'config'))
entry = conn.make_entry(
replica_binddn,
objectclass=["top", "person"],
cn=[name],
sn=["replication manager pseudo user"]
)
conn.add_entry(entry)
entry = conn.get_entry(self.replica_dn())
entry['nsDS5ReplicaBindDN'].append(replica_binddn)
try:
conn.update_entry(entry)
except errors.EmptyModlist:
pass
entry = conn.make_entry(
DN(('cn', 'Peer Master'), ('cn', 'mapping'), ('cn', 'sasl'),
('cn', 'config')),
objectclass=["top", "nsSaslMapping"],
cn=["Peer Master"],
nsSaslMapRegexString=['^[^:@]+$'],
nsSaslMapBaseDNTemplate=[DN(('cn', 'config'))],
nsSaslMapFilterTemplate=['(cn=&@%s)' % self.realm],
nsSaslMapPriority=['1'],
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
pass
def remove_temp_replication_user(self, conn, r_hostname):
"""
Remove the special SASL Mapping user created in a previous step.
"""
name = 'ldap/%s@%s' % (r_hostname, self.realm)
replica_binddn = DN(('cn', name), ('cn', 'config'))
conn.delete_entry(replica_binddn)
entry = conn.get_entry(self.replica_dn())
while replica_binddn in entry['nsDS5ReplicaBindDN']:
entry['nsDS5ReplicaBindDN'].remove(replica_binddn)
conn.update_entry(entry)
def setup_promote_replication(self, r_hostname, r_binddn=None,
r_bindpw=None, cacert=paths.IPA_CA_CRT):
r_conn = ipaldap.LDAPClient.from_hostname_secure(
r_hostname, cacert=cacert)
if r_bindpw:
r_conn.simple_bind(r_binddn, r_bindpw)
else:
r_conn.gssapi_bind()
# Setup the first half
l_id = self._get_replica_id(self.conn, r_conn)
self.basic_replication_setup(self.conn, l_id, self.repl_man_dn, None)
self.add_temp_sasl_mapping(self.conn, r_hostname)
# Now setup the other half
r_id = self._get_replica_id(r_conn, r_conn)
self.basic_replication_setup(r_conn, r_id, self.repl_man_dn, None)
self.ensure_replication_managers(r_conn, r_hostname)
self.setup_agreement(r_conn, self.hostname, isgssapi=True)
self.setup_agreement(self.conn, r_hostname, isgssapi=True)
# Finally start replication
ret = self.start_replication(r_conn, master=False)
if ret != 0:
raise RuntimeError("Failed to start replication")
self.remove_temp_replication_user(self.conn, r_hostname)
class CSReplicationManager(ReplicationManager):
"""ReplicationManager specific to CA agreements
Note that in most cases we don't know if we're connecting to an old-style
separate PKI DS, or to a host with a merged DB.
Use the get_cs_replication_manager function to determine this and return
an appropriate CSReplicationManager.
"""
def __init__(self, realm, hostname, dirman_passwd, port):
super(CSReplicationManager, self).__init__(
realm, hostname, dirman_passwd, port, starttls=True)
self.db_suffix = DN(('o', 'ipaca'))
self.hostnames = [] # set before calling or agreement_dn() will fail
def agreement_dn(self, hostname, master=None):
"""
Construct a dogtag replication agreement name. This needs to be much
        more aggressive than the IPA replication agreements because the name
is different on each side.
hostname is the local hostname, not the remote one, for both sides
NOTE: The agreement number is hardcoded in dogtag as well
TODO: configurable instance name
"""
dn = None
cn = None
if self.conn.port == 7389:
instance_name = 'pki-ca'
else:
instance_name = 'pki-tomcat'
# if master is not None we know what dn to return:
if master is not None:
if master is True:
name = "master"
else:
name = "clone"
cn="%sAgreement1-%s-%s" % (name, hostname, instance_name)
dn = DN(('cn', cn), self.replica_dn())
return (cn, dn)
for host in self.hostnames:
for master in ["master", "clone"]:
try:
cn="%sAgreement1-%s-%s" % (master, host, instance_name)
dn = DN(('cn', cn), self.replica_dn())
self.conn.get_entry(dn)
return (cn, dn)
except errors.NotFound:
dn = None
cn = None
raise errors.NotFound(reason='No agreement found for %s' % hostname)
def delete_referral(self, hostname, port):
dn = DN(('cn', self.db_suffix),
('cn', 'mapping tree'), ('cn', 'config'))
entry = self.conn.get_entry(dn)
try:
# TODO: should we detect proto somehow ?
entry['nsslapd-referral'].remove(
'ldap://%s/%s' %
(ipautil.format_netloc(hostname, port), self.db_suffix))
self.conn.update_entry(entry)
except Exception as e:
logger.debug("Failed to remove referral value: %s", e)
def has_ipaca(self):
try:
self.conn.get_entry(self.db_suffix)
except errors.NotFound:
return False
else:
return True
def get_cs_replication_manager(realm, host, dirman_passwd):
"""Get a CSReplicationManager for a remote host
Detects if the host has a merged database, connects to appropriate port.
"""
# Try merged database port first. If it has the ipaca tree, return
# corresponding replication manager
# If we can't connect to it at all, we're not dealing with an IPA master
# anyway; let the exception propagate up
# Fall back to the old PKI-only DS port. Check that it has the ipaca tree
# (IPA with merged DB theoretically leaves port 7389 free for anyone).
# If it doesn't, raise exception.
ports = [389, 7389]
for port in ports:
logger.debug('Looking for PKI DS on %s:%s', host, port)
replication_manager = CSReplicationManager(
realm, host, dirman_passwd, port)
if replication_manager.has_ipaca():
logger.debug('PKI DS found on %s:%s', host, port)
return replication_manager
else:
logger.debug('PKI tree not found on %s:%s', host, port)
raise errors.NotFound(reason='Cannot reach PKI DS at %s on ports %s' % (host, ports))
class CAReplicationManager(ReplicationManager):
"""ReplicationManager specific to CA agreements for domain level 1 and
above servers.
"""
def __init__(self, realm, hostname):
# Always connect to self over ldapi
conn = ipaldap.LDAPClient.from_realm(realm)
conn.external_bind()
super(CAReplicationManager, self).__init__(
realm, hostname, None, port=DEFAULT_PORT, conn=conn)
self.db_suffix = DN(('o', 'ipaca'))
self.agreement_name_format = "caTo%s"
def setup_cs_replication(self, r_hostname):
"""
Assumes a promote replica with working GSSAPI for replication
and unified DS instance.
"""
r_conn = ipaldap.LDAPClient.from_hostname_secure(r_hostname)
r_conn.gssapi_bind()
# Setup the first half
l_id = self._get_replica_id(self.conn, r_conn)
self.basic_replication_setup(self.conn, l_id, self.repl_man_dn, None)
# Now setup the other half
r_id = self._get_replica_id(r_conn, r_conn)
self.basic_replication_setup(r_conn, r_id, self.repl_man_dn, None)
self.setup_agreement(r_conn, self.hostname, isgssapi=True)
self.setup_agreement(self.conn, r_hostname, isgssapi=True)
# Finally start replication
ret = self.start_replication(r_conn, master=False)
if ret != 0:
raise RuntimeError("Failed to start replication")
def run_server_del_as_cli(api_instance, hostname, **options):
"""
run server_del API command and print the result to stdout/stderr using
textui backend.
    :param api_instance: API instance
    :param hostname: server FQDN
    :param options: options for server_del command
"""
server_del_cmd = api_instance.Command.server_del
if 'version' not in options:
options['version'] = api_instance.env.api_version
result = server_del_cmd(hostname, **options)
textui_backend = textui(api_instance)
server_del_cmd.output_for_cli(textui_backend, result, hostname, **options)
| 79,875 | Python | .py | 1,797 | 32.531998 | 187 | 0.573934 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,759 | ipa_cert_fix.py | freeipa_freeipa/ipaserver/install/ipa_cert_fix.py |
#
# Copyright (C) 2019 FreeIPA Contributors see COPYING for license
#
# ipa-cert-fix performs the following steps:
#
# 1. Confirm running as root (AdminTool.validate_options does this)
#
# 2. Confirm that DS is up.
#
# 3. Determine which of following certs (if any) need renewing
# - IPA RA
# - Apache HTTPS
# - 389 LDAPS
# - Kerberos KDC (PKINIT)
#
# 4. Execute `pki-server cert-fix` with relevant options,
# including `--extra-cert SERIAL` for each cert from #3.
#
# 5. Print details of renewed certificates.
#
# 6. Install renewed certs from #3 in relevant places
#
# 7. ipactl restart
from __future__ import print_function, absolute_import
import base64
from cryptography import x509 as crypto_x509
from cryptography.hazmat.backends import default_backend
import datetime
from enum import Enum
import logging
import shutil
from ipalib import api
from ipalib import x509
from ipalib.facts import is_ipa_configured
from ipalib.install import certmonger
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool
from ipapython.certdb import NSSDatabase, EMPTY_TRUST_FLAGS
from ipapython.dn import DN
from ipapython.ipaldap import realm_to_serverid
from ipaserver.install import ca, cainstance, dsinstance
from ipaserver.install.certs import is_ipa_issued_cert
from ipapython import directivesetter
from ipapython import ipautil
msg = """
WARNING
ipa-cert-fix is intended for recovery when expired certificates
prevent the normal operation of IPA. It should ONLY be used
in such scenarios, and backup of the system, especially certificates
and keys, is STRONGLY RECOMMENDED.
"""
renewal_note = """
Note: Monitor the certmonger-initiated renewal of
certificates after ipa-cert-fix and wait for its completion before
any other administrative task.
"""
RENEWED_CERT_PATH_TEMPLATE = "/etc/pki/pki-tomcat/certs/{}-renewed.crt"
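# 'pki-server cert-fix' writes each certificate renewed via --extra-cert to
# this path, keyed by the *old* certificate's serial number, e.g.
# /etc/pki/pki-tomcat/certs/12345-renewed.crt (serial illustrative).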
logger = logging.getLogger(__name__)
cert_nicknames = {
'sslserver': 'Server-Cert cert-pki-ca',
'subsystem': 'subsystemCert cert-pki-ca',
'ca_ocsp_signing': 'ocspSigningCert cert-pki-ca',
'ca_audit_signing': 'auditSigningCert cert-pki-ca',
'kra_transport': 'transportCert cert-pki-kra',
'kra_storage': 'storageCert cert-pki-kra',
'kra_audit_signing': 'auditSigningCert cert-pki-kra',
}
class IPACertType(Enum):
IPARA = "IPA RA"
HTTPS = "Apache HTTPS"
LDAPS = "LDAP"
KDC = "KDC"
class IPACertFix(AdminTool):
command_name = "ipa-cert-fix"
usage = "%prog"
description = "Renew expired certificates."
def validate_options(self):
super(IPACertFix, self).validate_options(needs_root=True)
def run(self):
if not is_ipa_configured():
print("IPA is not configured.")
return 2
if not cainstance.is_ca_installed_locally():
print("CA is not installed on this server.")
return 1
try:
ipautil.run(['pki-server', 'cert-fix', '--help'], raiseonerr=True)
except ipautil.CalledProcessError:
print(
"The 'pki-server cert-fix' command is not available; "
"cannot proceed."
)
return 1
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
if not dsinstance.is_ds_running(realm_to_serverid(api.env.realm)):
print(
"The LDAP server is not running; cannot proceed."
)
return 1
api.Backend.ldap2.connect() # ensure DS is up
subject_base = dsinstance.DsInstance().find_subject_base()
if not subject_base:
raise RuntimeError("Cannot determine certificate subject base.")
ca_subject_dn = ca.lookup_ca_subject(api, subject_base)
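        # Use a two-week lookahead so certificates about to expire are
        # renewed together with those that have already expired.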
now = (
datetime.datetime.now(tz=datetime.timezone.utc)
+ datetime.timedelta(weeks=2))
certs, extra_certs, non_renewed = expired_certs(now)
if not certs and not extra_certs:
print("Nothing to do.")
return 0
print(msg)
print_intentions(certs, extra_certs, non_renewed)
response = ipautil.user_input('Enter "yes" to proceed')
if response.lower() != 'yes':
print("Not proceeding.")
return 0
print("Proceeding.")
try:
fix_certreq_directives(certs)
run_cert_fix(certs, extra_certs)
except ipautil.CalledProcessError:
if any(
x[0] is IPACertType.LDAPS
for x in extra_certs + non_renewed
):
# The DS cert was expired. This will cause 'pki-server
# cert-fix' to fail at the final restart, and return nonzero.
# So this exception *might* be OK to ignore.
#
# If 'pki-server cert-fix' has written new certificates
# corresponding to all the extra_certs, then ignore the
# CalledProcessError and proceed to installing the IPA-specific
# certs. Otherwise re-raise.
if check_renewed_ipa_certs(extra_certs):
pass
else:
raise
else:
raise # otherwise re-raise
replicate_dogtag_certs(subject_base, ca_subject_dn, certs)
install_ipa_certs(subject_base, ca_subject_dn, extra_certs)
if any(x[0] != 'sslserver' for x in certs) \
or any(x[0] is IPACertType.IPARA for x in extra_certs):
# we renewed a "shared" certificate, therefore we must
# become the renewal master
print("Becoming renewal master.")
cainstance.CAInstance().set_renewal_master()
print("Restarting IPA")
ipautil.run(['ipactl', 'restart'], raiseonerr=True)
print(renewal_note)
return 0
def expired_certs(now):
expired_ipa, non_renew_ipa = expired_ipa_certs(now)
return expired_dogtag_certs(now), expired_ipa, non_renew_ipa
def expired_dogtag_certs(now):
"""
Determine which Dogtag certs are expired, or close to expiry.
Return a list of (cert_id, cert) pairs.
"""
certs = []
db = NSSDatabase(nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
for certid, nickname in cert_nicknames.items():
try:
cert = db.get_cert(nickname)
except RuntimeError:
pass # unfortunately certdb doesn't give us a better exception
else:
if cert.not_valid_after_utc <= now:
certs.append((certid, cert))
return certs
def expired_ipa_certs(now):
"""
Determine which IPA certs are expired, or close to expiry.
Return a list of (IPACertType, cert) pairs.
"""
certs = []
non_renewed = []
# IPA RA
cert = x509.load_certificate_from_file(paths.RA_AGENT_PEM)
if cert.not_valid_after_utc <= now:
certs.append((IPACertType.IPARA, cert))
# Apache HTTPD
cert = x509.load_certificate_from_file(paths.HTTPD_CERT_FILE)
if cert.not_valid_after_utc <= now:
if not is_ipa_issued_cert(api, cert):
non_renewed.append((IPACertType.HTTPS, cert))
else:
certs.append((IPACertType.HTTPS, cert))
# LDAPS
serverid = realm_to_serverid(api.env.realm)
ds = dsinstance.DsInstance(realm_name=api.env.realm)
ds_dbdir = dsinstance.config_dirname(serverid)
ds_nickname = ds.get_server_cert_nickname(serverid)
db = NSSDatabase(nssdir=ds_dbdir)
cert = db.get_cert(ds_nickname)
if cert.not_valid_after_utc <= now:
if not is_ipa_issued_cert(api, cert):
non_renewed.append((IPACertType.LDAPS, cert))
else:
certs.append((IPACertType.LDAPS, cert))
# KDC
cert = x509.load_certificate_from_file(paths.KDC_CERT)
if cert.not_valid_after_utc <= now:
if not is_ipa_issued_cert(api, cert):
            non_renewed.append((IPACertType.KDC, cert))
else:
certs.append((IPACertType.KDC, cert))
return certs, non_renewed
def print_intentions(dogtag_certs, ipa_certs, non_renewed):
print("The following certificates will be renewed:")
print()
for certid, cert in dogtag_certs:
print_cert_info("Dogtag", certid, cert)
for certtype, cert in ipa_certs:
print_cert_info("IPA", certtype.value, cert)
if non_renewed:
print(
"The following certificates will NOT be renewed because "
"they were not issued by the IPA CA:"
)
print()
for certtype, cert in non_renewed:
print_cert_info("IPA", certtype.value, cert)
def print_cert_info(context, desc, cert):
print("{} {} certificate:".format(context, desc))
print(" Subject: {}".format(DN(cert.subject)))
print(" Serial: {}".format(cert.serial_number))
print(" Expires: {}".format(cert.not_valid_after_utc))
print()
def get_csr_from_certmonger(nickname):
"""
Get the csr for the provided nickname by asking certmonger.
Returns the csr in ASCII format without the header/footer in a single line
or None if not found.
"""
criteria = {
'cert-database': paths.PKI_TOMCAT_ALIAS_DIR,
'cert-nickname': nickname,
}
id = certmonger.get_request_id(criteria)
if id:
csr = certmonger.get_request_value(id, "csr")
if csr:
try:
# Make sure the value can be parsed as valid CSR
csr_obj = crypto_x509.load_pem_x509_csr(
csr.encode('ascii'), default_backend())
val = base64.b64encode(csr_obj.public_bytes(x509.Encoding.DER))
return val.decode('ascii')
except Exception as e:
# Fallthrough and return None
logger.debug("Unable to get CSR from certmonger: %s", e)
return None
def fix_certreq_directives(certs):
"""
For all the certs to be fixed, ensure that the corresponding CSR is found
in PKI config file, or try to get the CSR from certmonger.
"""
directives = {
'auditSigningCert cert-pki-ca': ('ca.audit_signing.certreq',
paths.CA_CS_CFG_PATH),
'ocspSigningCert cert-pki-ca': ('ca.ocsp_signing.certreq',
paths.CA_CS_CFG_PATH),
'subsystemCert cert-pki-ca': ('ca.subsystem.certreq',
paths.CA_CS_CFG_PATH),
'Server-Cert cert-pki-ca': ('ca.sslserver.certreq',
paths.CA_CS_CFG_PATH),
'auditSigningCert cert-pki-kra': ('kra.audit_signing.certreq',
paths.KRA_CS_CFG_PATH),
'storageCert cert-pki-kra': ('kra.storage.certreq',
paths.KRA_CS_CFG_PATH),
'transportCert cert-pki-kra': ('kra.transport.certreq',
paths.KRA_CS_CFG_PATH),
}
# pki-server cert-fix needs to find the CSR in the subsystem config file
# otherwise it will fail
# For each cert to be fixed, check that the CSR is present or
# get it from certmonger
for (certid, _cert) in certs:
# Check if the directive is set in the config file
nickname = cert_nicknames[certid]
(directive, cfg_path) = directives[nickname]
if directivesetter.get_directive(cfg_path, directive, '=') is None:
# The CSR is missing, try to get it from certmonger
csr = get_csr_from_certmonger(nickname)
if csr:
# Update the directive
directivesetter.set_directive(cfg_path, directive, csr,
quotes=False, separator='=')
def run_cert_fix(certs, extra_certs):
ldapi_path = (
paths.SLAPD_INSTANCE_SOCKET_TEMPLATE
% '-'.join(api.env.realm.split('.'))
)
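    # Sketch of the resulting command line (socket path and serial
    # illustrative):
    #   pki-server cert-fix --ldapi-socket /var/run/slapd-EXAMPLE-COM.socket \
    #       --agent-uid ipara --cert sslserver --extra-cert 12345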
cmd = [
'pki-server',
'cert-fix',
'--ldapi-socket', ldapi_path,
'--agent-uid', 'ipara',
]
for certid, _cert in certs:
cmd.extend(['--cert', certid])
for _certtype, cert in extra_certs:
cmd.extend(['--extra-cert', str(cert.serial_number)])
ipautil.run(cmd, raiseonerr=True)
def replicate_dogtag_certs(subject_base, ca_subject_dn, certs):
for certid, _oldcert in certs:
cert_path = "/etc/pki/pki-tomcat/certs/{}.crt".format(certid)
cert = x509.load_certificate_from_file(cert_path)
print_cert_info("Renewed Dogtag", certid, cert)
replicate_cert(subject_base, ca_subject_dn, cert)
def check_renewed_ipa_certs(certs):
"""
Check whether all expected IPA-specific certs (extra_certs) were renewed
successfully.
For now this subroutine just checks that the files that we expect
``pki-server cert-fix`` to have written do exist and contain an X.509
certificate.
Return ``True`` if everything seems to be as expected, otherwise ``False``.
"""
for _certtype, oldcert in certs:
cert_path = RENEWED_CERT_PATH_TEMPLATE.format(oldcert.serial_number)
try:
x509.load_certificate_from_file(cert_path)
except (IOError, ValueError):
return False
return True
def install_ipa_certs(subject_base, ca_subject_dn, certs):
"""Print details and install renewed IPA certificates."""
for certtype, oldcert in certs:
cert_path = RENEWED_CERT_PATH_TEMPLATE.format(oldcert.serial_number)
cert = x509.load_certificate_from_file(cert_path)
print_cert_info("Renewed IPA", certtype.value, cert)
if certtype is IPACertType.IPARA:
shutil.copyfile(cert_path, paths.RA_AGENT_PEM)
cainstance.update_people_entry(cert)
replicate_cert(subject_base, ca_subject_dn, cert)
elif certtype is IPACertType.HTTPS:
shutil.copyfile(cert_path, paths.HTTPD_CERT_FILE)
elif certtype is IPACertType.LDAPS:
serverid = realm_to_serverid(api.env.realm)
ds = dsinstance.DsInstance(realm_name=api.env.realm)
ds_dbdir = dsinstance.config_dirname(serverid)
db = NSSDatabase(nssdir=ds_dbdir)
ds_nickname = ds.get_server_cert_nickname(serverid)
db.delete_cert(ds_nickname)
db.import_pem_cert(ds_nickname, EMPTY_TRUST_FLAGS, cert_path)
elif certtype is IPACertType.KDC:
shutil.copyfile(cert_path, paths.KDC_CERT)
def replicate_cert(subject_base, ca_subject_dn, cert):
nickname = cainstance.get_ca_renewal_nickname(
subject_base, ca_subject_dn, DN(cert.subject))
if nickname:
cainstance.update_ca_renewal_entry(api.Backend.ldap2, nickname, cert)
| 14,787 | Python | .py | 356 | 33.002809 | 79 | 0.635889 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,760 | adtrust.py | freeipa_freeipa/ipaserver/install/adtrust.py |
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
"""
AD trust installer module
"""
from __future__ import print_function, absolute_import
import logging
import os
import six
from ipalib.constants import MIN_DOMAIN_LEVEL
from ipalib import create_api, rpc
from ipalib import errors
from ipalib.install.service import ServiceAdminInstallInterface
from ipalib.install.service import replica_install_only
from ipaplatform.paths import paths
from ipapython.admintool import ScriptError
from ipapython import ipaldap, ipautil
from ipapython.dn import DN
from ipapython.install.core import group, knob
from ipaserver.install import installutils
from ipaserver.install import adtrustinstance
from ipaserver.install import service
from ipaserver.install.plugins.adtrust import update_host_cifs_keytabs
from ipaserver.install.bindinstance import dns_zone_exists
from ipaserver.dns_data_management import IPASystemRecords
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
netbios_name = None
reset_netbios_name = False
DEFAULT_PRIMARY_RID_BASE = 1000
DEFAULT_SECONDARY_RID_BASE = 100000000
def netbios_name_error(name):
logger.error("\nIllegal NetBIOS name [%s].\n", name)
logger.error(
"Up to 15 characters and only uppercase ASCII letters, digits "
"and dashes are allowed. Empty string is not allowed.")
def read_netbios_name(netbios_default):
netbios_name = ""
print("Enter the NetBIOS name for the IPA domain.")
print("Only up to 15 uppercase ASCII letters, digits "
"and dashes are allowed.")
print("Example: EXAMPLE.")
print("")
print("")
if not netbios_default:
netbios_default = "EXAMPLE"
while True:
netbios_name = ipautil.user_input(
"NetBIOS domain name", netbios_default, allow_empty=False)
print("")
if adtrustinstance.check_netbios_name(netbios_name):
break
netbios_name_error(netbios_name)
return netbios_name
def retrieve_netbios_name(api):
flat_name_attr = 'ipantflatname'
try:
entry = api.Backend.ldap2.get_entry(
DN(('cn', api.env.domain), api.env.container_cifsdomains,
ipautil.realm_to_suffix(api.env.realm)),
[flat_name_attr])
except errors.NotFound:
# trust not configured
logger.debug("No previous trust configuration found")
return None
else:
return entry.get(flat_name_attr)[0]
def set_and_check_netbios_name(netbios_name, unattended, api):
"""
Depending on whether a trust is already configured or not, the passed
NetBIOS domain name must be handled differently.
If trust is not configured, the given NetBIOS is used. The NetBIOS is
generated if none was given on the command line.
If trust is already configured, the given NetBIOS name is used to reset
the stored NetBIOS name in case it differs from the current one.
"""
cur_netbios_name = None
gen_netbios_name = None
reset_netbios_name = False
if api.Backend.ldap2.isconnected():
cur_netbios_name = retrieve_netbios_name(api)
else:
logger.debug(
"LDAP is not connected, can not retrieve NetBIOS name")
if cur_netbios_name and not netbios_name:
# keep the current NetBIOS name
netbios_name = cur_netbios_name
reset_netbios_name = False
elif cur_netbios_name and cur_netbios_name != netbios_name:
# change the NetBIOS name
print("Current NetBIOS domain name is %s, new name is %s.\n"
% (cur_netbios_name, netbios_name))
print("Please note that changing the NetBIOS name might "
"break existing trust relationships.")
if unattended:
reset_netbios_name = True
print("NetBIOS domain name will be changed to %s.\n"
% netbios_name)
else:
print("Say 'yes' if the NetBIOS shall be changed and "
"'no' if the old one shall be kept.")
reset_netbios_name = ipautil.user_input(
'Do you want to reset the NetBIOS domain name?',
default=False, allow_empty=False)
if not reset_netbios_name:
netbios_name = cur_netbios_name
elif cur_netbios_name and cur_netbios_name == netbios_name:
# keep the current NetBIOS name
reset_netbios_name = False
elif not cur_netbios_name:
if not netbios_name:
gen_netbios_name = adtrustinstance.make_netbios_name(
api.env.domain)
if gen_netbios_name is not None:
# Fix existing trust configuration
print("Trust is configured but no NetBIOS domain name found, "
"setting it now.")
reset_netbios_name = True
else:
# initial trust configuration
reset_netbios_name = False
else:
# all possible cases should be covered above
raise Exception('Unexpected state while checking NetBIOS domain name')
if unattended and netbios_name is None and gen_netbios_name:
netbios_name = gen_netbios_name
if not adtrustinstance.check_netbios_name(netbios_name):
if unattended:
netbios_name_error(netbios_name)
raise ScriptError("Aborting installation.")
else:
if netbios_name:
netbios_name_error(netbios_name)
netbios_name = None
if not unattended and not netbios_name:
netbios_name = read_netbios_name(gen_netbios_name)
return (netbios_name, reset_netbios_name)
def enable_compat_tree():
print("Do you want to enable support for trusted domains in Schema "
"Compatibility plugin?")
print("This will allow clients older than SSSD 1.9 and non-Linux "
"clients to work with trusted users.")
print("")
enable_compat = ipautil.user_input(
"Enable trusted domains support in slapi-nis?",
default=False,
allow_empty=False)
print("")
return enable_compat
def check_for_installed_deps():
# Check if samba packages are installed
if not adtrustinstance.check_inst():
raise ScriptError("Aborting installation.")
def retrieve_entries_without_sid(api):
"""
Retrieve a list of entries without assigned SIDs.
:returns: a list of entries or an empty list if an error occurs
"""
# The filter corresponds to ipa_sidgen_task.c LDAP search filter
filter = '(&(objectclass=ipaobject)(!(objectclass=mepmanagedentry))' \
'(|(objectclass=posixaccount)(objectclass=posixgroup)' \
'(objectclass=ipaidobject))(!(ipantsecurityidentifier=*)))'
base_dn = api.env.basedn
try:
logger.debug(
"Searching for objects with missing SID with "
"filter=%s, base_dn=%s", filter, base_dn)
entries, _truncated = api.Backend.ldap2.find_entries(
filter=filter, base_dn=base_dn, attrs_list=[''])
return entries
except errors.NotFound:
# All objects have SIDs assigned
pass
except (errors.DatabaseError, errors.NetworkError) as e:
logger.error(
"Could not retrieve a list of objects that need a SID "
"identifier assigned: %s", e)
return []
def retrieve_and_ask_about_sids(api, options):
entries = []
if api.Backend.ldap2.isconnected():
entries = retrieve_entries_without_sid(api)
else:
logger.debug(
"LDAP backend not connected, can not retrieve entries "
"with missing SID")
object_count = len(entries)
if object_count > 0:
print("")
print("WARNING: %d existing users or groups do not have "
"a SID identifier assigned." % len(entries))
print("Installer can run a task to have ipa-sidgen "
"Directory Server plugin generate")
print("the SID identifier for all these users. Please note, "
"in case of a high")
print("number of users and groups, the operation might "
"lead to high replication")
print("traffic and performance degradation. Refer to "
"ipa-adtrust-install(1) man page")
print("for details.")
print("")
if options.unattended:
print("Unattended mode was selected, installer will "
"NOT run ipa-sidgen task!")
else:
if ipautil.user_input(
"Do you want to run the ipa-sidgen task?",
default=False,
allow_empty=False):
options.add_sids = True
def retrieve_potential_adtrust_agents(api):
"""
Retrieve a sorted list of potential AD trust agents
:param api: initialized API instance
:returns: sorted list of FQDNs of masters which are not AD trust agents
"""
try:
# Search only masters which have support for domain levels
# because only these masters will have SSSD recent enough
# to support AD trust agents
dl_enabled_masters = api.Command.server_find(
ipamindomainlevel=MIN_DOMAIN_LEVEL, all=True)['result']
except (errors.DatabaseError, errors.NetworkError) as e:
logger.error(
"Could not retrieve a list of existing IPA masters: %s", e)
return None
try:
# search for existing AD trust agents
adtrust_agents = api.Command.server_find(
servrole=u'AD trust agent', all=True)['result']
except (errors.DatabaseError, errors.NetworkError) as e:
logger.error("Could not retrieve a list of adtrust agents: %s", e)
return None
dl_enabled_master_cns = {m['cn'][0] for m in dl_enabled_masters}
adtrust_agents_cns = {m['cn'][0] for m in adtrust_agents}
potential_agents_cns = dl_enabled_master_cns - adtrust_agents_cns
# remove the local host from the potential agents since it will be set up
# by adtrustinstance configuration code
potential_agents_cns -= {api.env.host}
return sorted(potential_agents_cns)
def add_hosts_to_adtrust_agents(api, host_list):
"""
Add the CIFS and host principals to the 'adtrust agents'
    group. As 389-ds only operates with GroupOfNames, we have to
    use the principal's proper DN as defined in self.cifs_agent.
:param api: API instance
:param host_list: list of potential AD trust agent FQDNs
"""
agents_dn = DN(
('cn', 'adtrust agents'), api.env.container_sysaccounts,
api.env.basedn)
service.add_principals_to_group(
api.Backend.ldap2,
agents_dn,
"member",
[api.Object.host.get_dn(x) for x in host_list])
def add_new_adtrust_agents(api, options):
"""
Find out IPA masters which are not part of the cn=adtrust agents
and propose them to be added to the list
:param api: API instance
:param options: parsed CLI options
"""
potential_agents_cns = retrieve_potential_adtrust_agents(api)
if potential_agents_cns:
print("")
print("WARNING: %d IPA masters are not yet able to serve "
"information about users from trusted forests."
% len(potential_agents_cns))
print("Installer can add them to the list of IPA masters "
"allowed to access information about trusts.")
print("If you choose to do so, you also need to restart "
"LDAP service on those masters.")
print("Refer to ipa-adtrust-install(1) man page for details.")
print("")
if options.unattended:
print("Unattended mode was selected, installer will NOT "
"add other IPA masters to the list of allowed to")
print("access information about trusted forests!")
return
new_agents = []
for name in sorted(potential_agents_cns):
if ipautil.user_input(
"IPA master [%s]?" % (name),
default=False,
allow_empty=False):
new_agents.append(name)
if new_agents:
add_hosts_to_adtrust_agents(api, new_agents)
            # The method trust_enable_agent was added in API version 2.236
# Specifically request this version in the remote call
kwargs = {u'version': u'2.236',
u'enable_compat': options.enable_compat}
failed_agents = []
for agent in new_agents:
# Try to run the ipa-trust-enable-agent script on the agent
# If the agent is too old and does not support this,
# print a msg
logger.info("Execute trust_enable_agent on remote server %s",
agent)
client = None
try:
xmlrpc_uri = 'https://{}/ipa/xml'.format(
ipautil.format_netloc(agent))
remote_api = create_api(mode=None)
remote_api.bootstrap(context='installer',
confdir=paths.ETC_IPA,
xmlrpc_uri=xmlrpc_uri,
fallback=False)
client = rpc.jsonclient(remote_api)
client.finalize()
client.connect()
result = client.forward(
u'trust_enable_agent',
ipautil.fsdecode(agent),
**kwargs)
except errors.CommandError as e:
logger.debug(
"Remote server %s does not support agent enablement "
"over RPC: %s", agent, e)
failed_agents.append(agent)
except (errors.PublicError, ConnectionRefusedError) as e:
logger.debug(
"Remote call to trust_enable_agent failed on server %s: "
"%s", agent, e)
failed_agents.append(agent)
else:
for message in result.get('messages'):
logger.debug('%s', message['message'])
if not int(result['result']):
logger.debug(
"ipa-trust-enable-agent returned non-zero exit code "
" on server %s", agent)
failed_agents.append(agent)
finally:
if client and client.isconnected():
client.disconnect()
# if enablement failed on some agents, print a WARNING:
if failed_agents:
if options.enable_compat:
print("""
WARNING: you MUST manually enable the Schema compatibility Plugin and """)
print("""
WARNING: you MUST restart (both "ipactl restart" and "systemctl restart sssd")
the following IPA masters in order to activate them to serve information about
users from trusted forests:
""")
for x in failed_agents:
print(x)
def install_check(standalone, options, api):
global netbios_name
global reset_netbios_name
if options.setup_adtrust and not standalone:
check_for_installed_deps()
realm_not_matching_domain = (api.env.domain.upper() != api.env.realm)
if realm_not_matching_domain:
print("WARNING: Realm name does not match the domain name.\n"
"You will not be able to establish trusts with Active "
"Directory unless\nthe realm name of the IPA server matches its "
"domain name.\n\n")
if not options.unattended:
if not ipautil.user_input("Do you wish to continue?",
default=False,
allow_empty=False):
raise ScriptError("Aborting installation.")
# Check if /etc/samba/smb.conf already exists. In case it was not generated
# by IPA, print a warning that we will break existing configuration.
if options.setup_adtrust:
if adtrustinstance.ipa_smb_conf_exists():
if not options.unattended:
print("IPA generated smb.conf detected.")
if not ipautil.user_input("Overwrite smb.conf?",
default=False,
allow_empty=False):
raise ScriptError("Aborting installation.")
elif os.path.exists(paths.SMB_CONF):
print("WARNING: The smb.conf already exists. Running "
"ipa-adtrust-install will break your existing samba "
"configuration.\n\n")
if not options.unattended:
if not ipautil.user_input("Do you wish to continue?",
default=False,
allow_empty=False):
raise ScriptError("Aborting installation.")
if not options.unattended and not options.enable_compat:
options.enable_compat = enable_compat_tree()
netbios_name, reset_netbios_name = set_and_check_netbios_name(
options.netbios_name, options.unattended, api)
if not options.add_sids:
retrieve_and_ask_about_sids(api, options)
def install(standalone, options, fstore, api):
if not options.unattended and standalone:
print("")
print("The following operations may take some minutes to complete.")
print("Please wait until the prompt is returned.")
print("")
smb = adtrustinstance.ADTRUSTInstance(fstore, options.setup_adtrust)
smb.realm = api.env.realm
smb.autobind = ipaldap.AUTOBIND_ENABLED
smb.setup(api.env.host, api.env.realm,
netbios_name, reset_netbios_name,
options.rid_base, options.secondary_rid_base,
options.add_sids,
enable_compat=options.enable_compat)
smb.find_local_id_range()
smb.create_instance()
# Update Samba keytab with host keys
ad_update = update_host_cifs_keytabs(api)
if ad_update:
result = ad_update()
# this particular update does not require restarting DS but
# the plugin might require that in future
if result[0]:
logger.debug('Restarting directory server to apply updates')
installutils.restart_dirsrv()
if options.add_agents:
# Find out IPA masters which are not part of the cn=adtrust agents
# and propose them to be added to the list
add_new_adtrust_agents(api, options)
def generate_dns_service_records_help(api):
"""
Return list of instructions to create DNS service records for Windows
    in case DNS is not enabled and the DNS zone is not managed by IPA.
In case IPA manages the DNS zone, nothing is returned.
"""
zone = api.env.domain
err_msg = []
ret = api.Command['dns_is_enabled']()
if not ret['result']:
err_msg.append("DNS management was not enabled at install time.")
else:
if not dns_zone_exists(zone):
err_msg.append(
"DNS zone %s cannot be managed as it is not defined in "
"IPA" % zone)
if err_msg:
err_msg.append("Add the following service records to your DNS "
"server for DNS zone %s: " % zone)
system_records = IPASystemRecords(api, all_servers=True)
adtrust_records = system_records.get_base_records(
[api.env.host], ["AD trust controller"],
include_master_role=False, include_kerberos_realm=False)
for r_name, node in adtrust_records.items():
for rec in IPASystemRecords.records_list_from_node(r_name, node):
err_msg.append(rec)
return err_msg
return None
@group
class SIDInstallInterface(ServiceAdminInstallInterface):
"""
Interface for the SID generation Installer
Knobs defined here will be available in:
* ipa-server-install
* ipa-replica-install
* ipa-adtrust-install
"""
description = "SID generation"
add_sids = knob(
None,
description="Add SIDs for existing users and groups as the final step"
)
add_sids = replica_install_only(add_sids)
netbios_name = knob(
str,
None,
description="NetBIOS name of the IPA domain"
)
rid_base = knob(
int,
DEFAULT_PRIMARY_RID_BASE,
description="Start value for mapping UIDs and GIDs to RIDs"
)
secondary_rid_base = knob(
int,
DEFAULT_SECONDARY_RID_BASE,
description="Start value of the secondary range for mapping "
"UIDs and GIDs to RIDs"
)
@group
class ADTrustInstallInterface(SIDInstallInterface):
"""
Interface for the AD trust installer
Knobs defined here will be available in:
* ipa-server-install
* ipa-replica-install
* ipa-adtrust-install
"""
description = "AD trust"
# the following knobs are provided on top of those specified for
# admin credentials
add_agents = knob(
None,
description="Add IPA masters to a list of hosts allowed to "
"serve information about users from trusted forests"
)
add_agents = replica_install_only(add_agents)
enable_compat = knob(
None,
description="Enable support for trusted domains for old clients"
)
no_msdcs = knob(
None,
description="Deprecated: has no effect",
deprecated=True
)
| 21,475 | Python | .py | 507 | 32.781065 | 79 | 0.62988 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,761 | ipa_ldap_updater.py | freeipa_freeipa/ipaserver/install/ipa_ldap_updater.py |
# Authors: Rob Crittenden <rcritten@redhat.com>
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Documentation can be found at http://freeipa.org/page/LdapUpdate
# TODO
# save undo files?
from __future__ import print_function, absolute_import
import logging
import os
import six
from ipalib import api
from ipapython import admintool
from ipaplatform.paths import paths
from ipaserver.install import installutils, schemaupdate
from ipaserver.install.ldapupdate import LDAPUpdate, UPDATES_DIR, BadSyntax
from ipaserver.install.upgradeinstance import IPAUpgrade
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
class LDAPUpdater(admintool.AdminTool):
command_name = 'ipa-ldap-updater'
usage = "%prog [options] input_file(s)\n"
@classmethod
def add_options(cls, parser):
super(LDAPUpdater, cls).add_options(parser, debug_option=True)
parser.add_option(
"-u", '--upgrade', action="store_true", dest="upgrade",
default=False,
help="upgrade an installed server in offline mode")
parser.add_option(
"-S", '--schema-file', action="append", dest="schema_files",
help="custom schema ldif file to use")
@classmethod
def get_command_class(cls, options, args):
if options.upgrade:
return LDAPUpdater_Upgrade
else:
return LDAPUpdater_NonUpgrade
def validate_options(self):
options = self.options
super(LDAPUpdater, self).validate_options(needs_root=True)
self.files = self.args
if not (self.files or options.schema_files):
logger.info("To execute overall IPA upgrade please use "
"'ipa-server-upgrade' command")
raise admintool.ScriptError("No update files or schema file were "
"specified")
for filename in self.files:
if not os.path.exists(filename):
raise admintool.ScriptError("%s: file not found" % filename)
try:
installutils.check_server_configuration()
except RuntimeError as e:
raise admintool.ScriptError(e)
def setup_logging(self):
super(LDAPUpdater, self).setup_logging(log_file_mode='a')
def run(self):
super(LDAPUpdater, self).run()
api.bootstrap(in_server=True, context='updates', confdir=paths.ETC_IPA)
api.finalize()
def handle_error(self, exception):
return installutils.handle_error(exception, self.log_file_name)
class LDAPUpdater_Upgrade(LDAPUpdater):
log_file_name = paths.IPAUPGRADE_LOG
def run(self):
super(LDAPUpdater_Upgrade, self).run()
api.Backend.ldap2.connect()
options = self.options
realm = api.env.realm
upgrade = IPAUpgrade(realm, self.files,
schema_files=options.schema_files)
try:
upgrade.create_instance()
except BadSyntax:
raise admintool.ScriptError(
'Bad syntax detected in upgrade file(s).', 1)
except RuntimeError:
raise admintool.ScriptError('IPA upgrade failed.', 1)
else:
if upgrade.modified:
logger.info('Update complete')
else:
logger.info('Update complete, no data were modified')
api.Backend.ldap2.disconnect()
class LDAPUpdater_NonUpgrade(LDAPUpdater):
log_file_name = paths.IPAUPGRADE_LOG
def run(self):
super(LDAPUpdater_NonUpgrade, self).run()
api.Backend.ldap2.connect()
options = self.options
modified = False
if options.schema_files:
modified = schemaupdate.update_schema(
options.schema_files,
ldapi=True) or modified
ld = LDAPUpdate()
if not self.files:
self.files = ld.get_all_files(UPDATES_DIR)
modified = ld.update(self.files) or modified
if modified:
logger.info('Update complete')
else:
logger.info('Update complete, no data were modified')
api.Backend.ldap2.disconnect()
| 4,901 | Python | .py | 120 | 32.825 | 79 | 0.662732 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,762 | otpdinstance.py | freeipa_freeipa/ipaserver/install/otpdinstance.py |
# Authors: Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ipaserver.install import service
class OtpdInstance(service.SimpleServiceInstance):
def __init__(self):
service.SimpleServiceInstance.__init__(self, "ipa-otpd")
| 948 | Python | .py | 22 | 41.409091 | 71 | 0.771398 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,763 | ipa_otptoken_import.py | freeipa_freeipa/ipaserver/install/ipa_otptoken_import.py |
# Authors: Nathaniel McCallum <npmccallum@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import abc
import base64
import datetime
import logging
import os
import uuid
from lxml import etree
import dateutil.parser
import dateutil.tz
import gssapi
import six
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.primitives.kdf import pbkdf2
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from ipaplatform.paths import paths
from ipapython import admintool
from ipalib import api, errors
from ipalib.constants import VAULT_WRAPPING_SUPPORTED_ALGOS, VAULT_WRAPPING_3DES
from ipaserver.plugins.ldap2 import AUTOBIND_DISABLED
logger = logging.getLogger(__name__)
class ValidationError(Exception):
pass
def fetchAll(element, xpath, conv=lambda x: x):
return [conv(e) for e in element.xpath(xpath, namespaces={
"pkcs5": "http://www.rsasecurity.com/rsalabs/pkcs/schemas/pkcs-5v2-0#",
"pskc": "urn:ietf:params:xml:ns:keyprov:pskc",
"xenc11": "http://www.w3.org/2009/xmlenc11#",
"xenc": "http://www.w3.org/2001/04/xmlenc#",
"ds": "http://www.w3.org/2000/09/xmldsig#",
})]
def fetch(element, xpath, conv=lambda x: x, default=None):
result = fetchAll(element, xpath, conv)
return result[0] if result else default
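# Illustrative usage: fetch(key, "./pskc:Issuer/text()", str) returns the
# first matching issuer string, or None when the element is absent.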
def convertDate(value):
"Converts an ISO 8601 string into a UTC datetime object."
dt = dateutil.parser.parse(value)
if dt.tzinfo is None:
dt = datetime.datetime(*dt.timetuple()[0:6],
tzinfo=dateutil.tz.tzlocal())
return dt.astimezone(dateutil.tz.tzutc())
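# Illustrative behaviour (input value assumed):
#   convertDate("2024-01-02T03:04:05Z")
#   -> datetime.datetime(2024, 1, 2, 3, 4, 5, tzinfo=tzutc())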
def convertTokenType(value):
"Converts token algorithm URI to token type string."
return {
"urn:ietf:params:xml:ns:keyprov:pskc:hotp": u"hotp",
"urn:ietf:params:xml:ns:keyprov:pskc#hotp": u"hotp",
"urn:ietf:params:xml:ns:keyprov:pskc:totp": u"totp",
"urn:ietf:params:xml:ns:keyprov:pskc#totp": u"totp",
}.get(value.lower(), None)
def convertHashName(value):
"Converts hash names to their canonical names."
default_hash = u"sha1"
known_prefixes = ("", "hmac-",)
known_hashes = {
"sha1": u"sha1",
"sha224": u"sha224",
"sha256": u"sha256",
"sha384": u"sha384",
"sha512": u"sha512",
"sha-1": u"sha1",
"sha-224": u"sha224",
"sha-256": u"sha256",
"sha-384": u"sha384",
"sha-512": u"sha512",
}
if value is None:
return default_hash
v = value.lower()
for prefix in known_prefixes:
if prefix:
w = v[len(prefix):]
else:
w = v
result = known_hashes.get(w)
if result is not None:
break
else:
result = default_hash
return result
def convertHMACType(value):
"Converts HMAC URI to hashlib object."
return {
"http://www.w3.org/2000/09/xmldsig#hmac-sha1": hashes.SHA1,
"http://www.w3.org/2001/04/xmldsig-more#hmac-sha224": hashes.SHA224,
"http://www.w3.org/2001/04/xmldsig-more#hmac-sha256": hashes.SHA256,
"http://www.w3.org/2001/04/xmldsig-more#hmac-sha384": hashes.SHA384,
"http://www.w3.org/2001/04/xmldsig-more#hmac-sha512": hashes.SHA512,
}.get(value.lower(), hashes.SHA1)
def convertAlgorithm(value):
"Converts encryption URI to (mech, ivlen)."
supported_algs = {
"http://www.w3.org/2001/04/xmlenc#aes128-cbc": (
algorithms.AES, modes.CBC, 128),
"http://www.w3.org/2001/04/xmlenc#aes192-cbc": (
algorithms.AES, modes.CBC, 192),
"http://www.w3.org/2001/04/xmlenc#aes256-cbc": (
algorithms.AES, modes.CBC, 256),
"http://www.w3.org/2001/04/xmldsig-more#camellia128": (
algorithms.Camellia, modes.CBC, 128),
"http://www.w3.org/2001/04/xmldsig-more#camellia192": (
algorithms.Camellia, modes.CBC, 192),
"http://www.w3.org/2001/04/xmldsig-more#camellia256": (
algorithms.Camellia, modes.CBC, 256),
# TODO: add support for these formats.
# "http://www.w3.org/2001/04/xmlenc#kw-aes128": "kw-aes128",
# "http://www.w3.org/2001/04/xmlenc#kw-aes192": "kw-aes192",
# "http://www.w3.org/2001/04/xmlenc#kw-aes256": "kw-aes256",
# "http://www.w3.org/2001/04/xmlenc#kw-tripledes": "kw-tripledes",
# "http://www.w3.org/2001/04/xmldsig-more#kw-camellia128": "kw-camellia128",
# "http://www.w3.org/2001/04/xmldsig-more#kw-camellia192": "kw-camellia192",
# "http://www.w3.org/2001/04/xmldsig-more#kw-camellia256": "kw-camellia256",
}
# We don't deal with VAULT here but if VAULT_WRAPPING_3DES is not present
# in the list of the vault wrapping algorithms, we cannot use 3DES anywhere
if VAULT_WRAPPING_3DES in VAULT_WRAPPING_SUPPORTED_ALGOS:
supported_algs["http://www.w3.org/2001/04/xmlenc#tripledes-cbc"] = (
algorithms.TripleDES, modes.CBC, 64)
return supported_algs.get(value.lower(), (None, None, None))
def convertEncrypted(value, decryptor=None, pconv=base64.b64decode, econv=lambda x: x):
"Converts a value element, decrypting if necessary. See RFC 6030."
v = fetch(value, "./pskc:PlainValue/text()", pconv)
if v is not None:
return v
mac = fetch(value, "./pskc:ValueMAC/text()", base64.b64decode)
ev = fetch(value, "./pskc:EncryptedValue")
if ev is not None and decryptor is not None:
return econv(decryptor(ev, mac))
return None
class XMLKeyDerivation(metaclass=abc.ABCMeta):
"Interface for XML Encryption 1.1 key derivation."
@abc.abstractmethod
def __init__(self, enckey):
"Sets up key derivation parameters from the parent XML entity."
@abc.abstractmethod
def derive(self, masterkey):
"Derives a key from the master key."
class PBKDF2KeyDerivation(XMLKeyDerivation):
def __init__(self, enckey):
params = fetch(enckey, "./xenc11:DerivedKey/xenc11:KeyDerivationMethod/pkcs5:PBKDF2-params")
if params is None:
raise ValueError("XML file is missing PBKDF2 parameters!")
salt = fetch(params, "./Salt/Specified/text()", base64.b64decode)
itrs = fetch(params, "./IterationCount/text()", int)
klen = fetch(params, "./KeyLength/text()", int)
hmod = fetch(params, "./PRF/@Algorithm", convertHMACType, hashes.SHA1)
if salt is None:
raise ValueError("XML file is missing PBKDF2 salt!")
if itrs is None:
raise ValueError("XML file is missing PBKDF2 iteration count!")
if klen is None:
raise ValueError("XML file is missing PBKDF2 key length!")
self.kdf = pbkdf2.PBKDF2HMAC(
algorithm=hmod(),
length=klen,
salt=salt,
iterations=itrs,
backend=default_backend()
)
def derive(self, masterkey):
return self.kdf.derive(masterkey)
def convertKeyDerivation(value):
"Converts key derivation URI to a BaseKeyDerivation class."
return {
"http://www.rsasecurity.com/rsalabs/pkcs/schemas/pkcs-5v2-0#pbkdf2": PBKDF2KeyDerivation,
}.get(value.lower(), None)
class XMLDecryptor:
"""This decrypts values from XML as specified in:
* http://www.w3.org/TR/xmlenc-core/
* RFC 6931"""
def __init__(self, key, hmac=None):
self.__key = key
self.__hmac = hmac
def __call__(self, element, mac=None):
algo, mode, klen = fetch(
element, "./xenc:EncryptionMethod/@Algorithm", convertAlgorithm)
data = fetch(
element,
"./xenc:CipherData/xenc:CipherValue/text()",
base64.b64decode
)
# Make sure the key is the right length.
if len(self.__key) * 8 != klen:
raise ValidationError("Invalid key length!")
# If a MAC is present, perform validation.
if mac:
tmp = self.__hmac.copy()
tmp.update(data)
try:
tmp.verify(mac)
except InvalidSignature as e:
raise ValidationError("MAC validation failed!", e)
iv = data[:algo.block_size // 8]
data = data[len(iv):]
algorithm = algo(self.__key)
cipher = Cipher(algorithm, mode(iv), default_backend())
decryptor = cipher.decryptor()
padded = decryptor.update(data)
padded += decryptor.finalize()
unpadder = PKCS7(algorithm.block_size).unpadder()
out = unpadder.update(padded)
out += unpadder.finalize()
return out
class PSKCKeyPackage:
_XML = {
'pskc:DeviceInfo': {
'pskc:IssueNo/text()': ('issueno', str),
'pskc:ExpiryDate/text()': ('notafter.hw', convertDate),
'pskc:Manufacturer/text()': ('vendor', str),
'pskc:Model/text()': ('model', str),
'pskc:SerialNo/text()': ('serial', str),
'pskc:StartDate/text()': ('notbefore.hw', convertDate),
'pskc:UserId/text()': ('owner', str),
},
'pskc:Key': {
'@Algorithm': ('type', convertTokenType),
'@Id': ('id', str),
'pskc:FriendlyName/text()': ('description', str),
'pskc:Issuer/text()': ('issuer', str),
'pskc:KeyReference/text()': ('keyref', str),
'pskc:AlgorithmParameters': {
'pskc:Suite/text()': ('algorithm', convertHashName),
'pskc:ResponseFormat/@CheckDigit': ('checkdigit', str),
'pskc:ResponseFormat/@Encoding': ('encoding', str),
'pskc:ResponseFormat/@Length': ('digits', int),
},
'pskc:Data': {
'pskc:Counter':
('counter', lambda v, d: convertEncrypted(v, d, int, int)),
'pskc:Secret': ('key', convertEncrypted),
'pskc:Time':
('time', lambda v, d: convertEncrypted(v, d, int, int)),
'pskc:TimeDrift':
('offset', lambda v, d: convertEncrypted(v, d, int, int)),
'pskc:TimeInterval':
('interval', lambda v, d: convertEncrypted(v, d, int, int))
},
'pskc:Policy': {
'pskc:ExpiryDate/text()': ('notafter.sw', convertDate),
'pskc:KeyUsage/text()': ('keyusage', str),
'pskc:NumberOfTransactions': ('maxtransact', lambda v: v),
'pskc:PINPolicy': ('pinpolicy', lambda v: v),
'pskc:StartDate/text()': ('notbefore.sw', convertDate),
},
},
}
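    # Each _MAP row is (parsed key, LDAP attribute, transform); the transform
    # also receives the options gathered so far, e.g. 'issueno' appends to an
    # already-mapped 'ipatokenserial'.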
_MAP = (
('type', 'type', lambda v, o: v.strip()),
('description', 'description', lambda v, o: v.strip()),
('vendor', 'ipatokenvendor', lambda v, o: v.strip()),
('model', 'ipatokenmodel', lambda v, o: v.strip()),
('serial', 'ipatokenserial', lambda v, o: v.strip()),
('issueno', 'ipatokenserial', lambda v, o: o.get('ipatokenserial', '') + '-' + v.strip()),
(
'key',
'ipatokenotpkey',
lambda v, o: base64.b32encode(v).decode('ascii')
),
('digits', 'ipatokenotpdigits', lambda v, o: v),
('algorithm', 'ipatokenotpalgorithm', lambda v, o: v),
('counter', 'ipatokenhotpcounter', lambda v, o: v),
('interval', 'ipatokentotptimestep', lambda v, o: v),
('offset', 'ipatokentotpclockoffset', lambda v, o: o.get('ipatokentotptimestep', 30) * v),
)
def __init__(self, element, decryptor):
self.__element = element
self.__decryptor = decryptor
self.__id = None
self.__options = None
@property
def id(self):
if self.__id is None:
self.__process()
return self.__id
@property
def options(self):
if self.__options is None:
self.__process()
return self.__options
def remove(self):
self.__element.getparent().remove(self.__element)
def __process(self):
# Parse and validate.
data = self.__parse(self.__decryptor, self.__element, ".", self._XML)
self.__validate(data)
# Copy values into output.
options = {}
for (dk, ok, f) in self._MAP:
if dk in data:
options[ok] = f(data[dk], options)
# Copy validity dates.
self.__dates(options, data, 'notbefore', max)
self.__dates(options, data, 'notafter', min)
# Save attributes.
self.__options = options
self.__id = data.get('id', uuid.uuid4())
def __parse(self, decryptor, element, prefix, table):
"Recursively parses the xml from a table."
data = {}
for k, v in table.items():
path = prefix + "/" + k
if isinstance(v, dict):
data.update(self.__parse(decryptor, element, path, v))
continue
result = fetch(element, path)
if result is not None:
lambda_code_attr = "__code__" if six.PY3 else "func_code"
if getattr(
getattr(v[1], lambda_code_attr, None),
"co_argcount", 0) > 1:
data[v[0]] = v[1](result, decryptor)
else:
data[v[0]] = v[1](result)
return data
def __validate(self, data):
"Validates the parsed data."
if 'type' not in data or data['type'] not in ('totp', 'hotp'):
raise ValidationError("Unsupported token type!")
if 'key' not in data:
if 'keyref' in data:
raise ValidationError("Referenced keys are not supported!")
raise ValidationError("Key not found in token!")
if data.get('checkdigit', 'FALSE').upper() != 'FALSE':
raise ValidationError("CheckDigit not supported!")
if data.get('maxtransact', None) is not None:
raise ValidationError('NumberOfTransactions policy not supported!')
if data.get('pinpolicy', None) is not None:
raise ValidationError('PINPolicy policy not supported!')
if data.get('time', 0) != 0:
raise ValidationError('Specified time is not supported!')
encoding = data.get('encoding', 'DECIMAL').upper()
if encoding != 'DECIMAL':
raise ValidationError('Unsupported encoding: %s!' % encoding)
usage = data.get('keyusage', 'OTP')
if usage != 'OTP':
raise ValidationError('Unsupported key usage: %s' % usage)
def __dates(self, out, data, key, reducer):
dates = (data.get(key + '.sw', None), data.get(key + '.hw', None))
dates = [x for x in dates if x is not None]
if dates:
out['ipatoken' + key] = str(
reducer(dates).strftime("%Y%m%d%H%M%SZ"))
class PSKCDocument:
@property
def keyname(self):
return self.__keyname
def __init__(self, filename):
self.__keyname = None
self.__decryptor = None
self.__doc = etree.parse(filename)
self.__mkey = fetch(self.__doc, "./pskc:MACMethod/pskc:MACKey")
self.__algo = fetch(self.__doc, "./pskc:MACMethod/@Algorithm", convertHMACType)
self.__keypackages = fetchAll(self.__doc, "./pskc:KeyPackage")
if not self.__keypackages:
raise ValueError("PSKC file is invalid!")
self.__enckey = fetch(self.__doc, "./pskc:EncryptionKey")
if self.__enckey is not None:
# Check for x509 key.
x509key = fetch(self.__enckey, "./ds:X509Data")
if x509key is not None:
raise NotImplementedError("X.509 keys are not currently supported!")
# Get the keyname.
self.__keyname = fetch(self.__enckey, "./ds:KeyName/text()")
if self.__keyname is None:
self.__keyname = fetch(self.__enckey,
"./xenc11:DerivedKey/xenc11:MasterKeyName/text()")
def setKey(self, key):
# Derive the enckey if required.
kd = fetch(self.__enckey,
"./xenc11:DerivedKey/xenc11:KeyDerivationMethod/@Algorithm",
convertKeyDerivation)
if kd is not None:
key = kd(self.__enckey).derive(key)
# Load the decryptor.
self.__decryptor = XMLDecryptor(key)
if self.__mkey is not None and self.__algo is not None:
tmp = hmac.HMAC(
self.__decryptor(self.__mkey),
self.__algo(),
backend=default_backend()
)
self.__decryptor = XMLDecryptor(key, tmp)
def getKeyPackages(self):
for kp in self.__keypackages:
yield PSKCKeyPackage(kp, self.__decryptor)
def save(self, dest):
self.__doc.write(dest)
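# A minimal driver sketch for PSKCDocument, assuming hypothetical file names
# (OTPTokenImport below is the real consumer):
#
#   doc = PSKCDocument("tokens.xml")
#   if doc.keyname is not None:          # file is encrypted with a named key
#       with open("transport.key", "rb") as f:
#           doc.setKey(f.read())
#   for pkg in doc.getKeyPackages():
#       print(pkg.id, pkg.options)       # attributes suitable for otptoken_add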
class OTPTokenImport(admintool.AdminTool):
command_name = 'ipa-otptoken-import'
description = "Import OTP tokens."
usage = "%prog [options] <PSKC file> <output file>"
@classmethod
def add_options(cls, parser):
super(OTPTokenImport, cls).add_options(parser)
parser.add_option("-k", "--keyfile", dest="keyfile",
help="File containing the key used to decrypt token secrets")
def validate_options(self):
super(OTPTokenImport, self).validate_options()
# Parse the file.
if len(self.args) < 1:
raise admintool.ScriptError("Import file required!")
self.doc = PSKCDocument(self.args[0])
# Get the output file.
if len(self.args) < 2:
raise admintool.ScriptError("Output file required!")
self.output = self.args[1]
if os.path.exists(self.output):
raise admintool.ScriptError("Output file already exists!")
# Verify a key is provided if one is needed.
if self.doc.keyname is not None:
if self.safe_options.keyfile is None:
raise admintool.ScriptError("Encryption key required: %s!" % self.doc.keyname)
# Load the keyfile.
keyfile = self.safe_options.keyfile
with open(keyfile, "rb") as f:
self.doc.setKey(f.read())
def run(self):
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
try:
api.Backend.ldap2.connect(ccache=os.environ.get('KRB5CCNAME'),
autobind=AUTOBIND_DISABLED)
except (gssapi.exceptions.GSSError, errors.ACIError):
raise admintool.ScriptError("Unable to connect to LDAP! Did you kinit?")
try:
# Parse tokens
for keypkg in self.doc.getKeyPackages():
try:
api.Command.otptoken_add(keypkg.id, no_qrcode=True, **keypkg.options)
except Exception as e:
logger.warning("Error adding token: %s", e)
else:
logger.info("Added token: %s", keypkg.id)
keypkg.remove()
finally:
api.Backend.ldap2.disconnect()
# Write out the XML file without the tokens that succeeded.
self.doc.save(self.output)
| 20,343
|
Python
|
.py
| 458
| 34.877729
| 111
| 0.595619
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,764
|
ipa_migrate_constants.py
|
freeipa_freeipa/ipaserver/install/ipa_migrate_constants.py
|
# ipa-migrate constants
#
# Lists of all the plugins and settings
#
# Copyright (C) 2023 FreeIPA Contributors see COPYING for license
# Generic constants
BIND_DN = "cn=directory manager"
LOG_FILE_NAME = "/var/log/ipa-migrate.log"
LDIF_FILE_NAME = "/var/log/ipa-migrate.ldif"
CONFLICT_FILE_NAME = "/var/log/ipa-migrate-conflict.ldif"
# Operational attributes to strip from the remote server
STRIP_OP_ATTRS = [
'modifiersname',
'modifytimestamp',
'creatorsname',
'createtimestamp',
'nsuniqueid',
'dsentrydn',
'entryuuid',
'entrydn',
'entryid',
'entryusn',
'numsubordinates',
'parentid',
'tombstonenumsubordinates'
]
# Operational attributes that we would want to remove from the local entry if
# they don't exist in the remote entry
POLICY_OP_ATTRS = [
'nsaccountlock',
'passwordexpiratontime',
'passwordgraceusertime',
'pwdpolicysubentry',
'passwordexpwarned',
'passwordretrycount',
'retrycountresettime',
'accountunlocktime',
'passwordhistory',
'passwordallowchangetime',
'pwdreset'
]
# Attributes to strip from users/groups
STRIP_ATTRS = [
'krbextradata',
'krblastfailedauth',
'krblastpwdchange',
'krbloginfailedcount',
'krbticketflags',
'krbmkey',
'ipasshpubkey', # We keep this for users (handled in clean_entry())
'mepmanagedentry', # It will be rebuilt on new server
'memberof',
# from ds-migrate....
'krbprincipalkey', 'memberofindirect', 'memberindirect', # User
'memberofindirect', 'memberindirect', # Groups
]
# Attributes to ignore during entry comparison, but these attributes will be
# applied when creating a new entry
IGNORE_ATTRS = [
'description',
'ipasshpubkey',
'ipantsecurityidentifier', # Need this in production mode
'ipantflatname',
'ipamigrationenabled',
'ipauniqueid',
'serverhostname',
'krbpasswordexpiration',
'krblastadminunlock',
]
# For production mode, bring everything over
PROD_ATTRS = [
'ipantsecurityidentifier',
'ipanthash',
'ipantlogonscript',
'ipantprofilepath',
'ipanthomedirectory',
'ipanthomedirectorydrive'
]
AD_USER_ATTRS = [ # ipaNTUserAttrs objectclass
'ipantsecurityidentifier', # required
'ipanthash',
'ipantlogonscript',
'ipantprofilepath',
'ipanthomedirectory',
'ipanthomedirectorydrive'
]
AD_DOMAIN_ATTRS = [ # ipaNTDomainAttrs objectclass
'ipantsecurityidentifier', # required
'ipantflatName', # required
'ipantdomainguid', # required
'ipantfallbackprimarygroup',
]
AD_TRUST_ATTRS = [ # ipaNTTrustedDomain objectclass
'ipanttrusttype',
'ipanttrustattributes',
'ipanttrustdirection',
'ipanttrustpartner',
'ipantflatname',
'ipanttrustauthoutgoing',
'ipanttrustauthincoming',
'ipanttrusteddomainsid',
'ipanttrustforesttrustInfo',
'ipanttrustposixoffset',
'ipantsupportedencryptiontypes',
'ipantsidblacklistincoming',
'ipantsidblacklistoutgoing',
'ipantadditionalsuffixes',
]
DNA_REGEN_VAL = "-1"
DNA_REGEN_ATTRS = [
'uidnumber',
'gidnumber',
'ipasubuidnumber',
'ipasubgidnumber',
]
STRIP_OC = [
'meporiginentry',
]
#
# The DS_CONFIG mapping breaks each config entry (or type of entry) into its
# own category. Each category, or type, has a DN list "dn" and the attributes
# we are interested in. These attributes are broken into single-valued
# "attrs" and multi-valued "multivalued" lists. If an attribute is
# single-valued then its value is replaced; if it is multi-valued then the
# value is appended.
#
# The "label" and "count" attributes are used for the Summary Report
#
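# A sketch of how a consumer of this table might walk it (a hypothetical
# helper, not the actual ipa-migrate implementation):
#
#   for name, cfg in DS_CONFIG.items():
#       for dn in cfg['dn']:
#           for attr in cfg['attrs']:         # single-valued -> replace
#               ...
#           for attr in cfg['multivalued']:   # multi-valued -> append
#               ...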
DS_CONFIG = {
'config': {
'dn': ['cn=config'],
'attrs': [
# Should this be a tuple with possible conditions?
# Higher value wins?
'nsslapd-idletimeout',
'nsslapd-ioblocktimeout',
'nsslapd-sizelimit',
'nsslapd-timelimit',
'nsslapd-ndn-cache-max-size',
'nsslapd-maxsasliosize',
'nsslapd-maxthreadsperconn',
'nsslapd-listen-backlog-size',
'nsslapd-ignore-time-skew',
'nsslapd-disk-monitoring',
'nsslapd-anonlimitsdn',
'nsslapd-auditlog-display-attrs',
'nsslapd-allowed-sasl-mechanisms',
'nsslapd-enable-upgrade-hash',
'nsslapd-localssf',
'nsslapd-minssf',
'nsslapd-minssf-exclude-rootdse',
'nsslapd-max-filter-nest-level',
'nsslapd-ssl-check-hostname',
'nsslapd-validate-cert',
'nsslapd-unhashed-pw-switch',
            'nsslapd-maxbersize',
# access log rotation
'nsslapd-accesslog-logexpirationtime',
'nsslapd-accesslog-logexpirationtimeunit',
'nsslapd-accesslog-logmaxdiskspace',
'nsslapd-accesslog-logminfreediskspace',
'nsslapd-accesslog-logrotationsync-enabled',
'nsslapd-accesslog-logrotationsynchour',
'nsslapd-accesslog-logrotationsyncmin',
'nsslapd-accesslog-logrotationtime',
'nsslapd-accesslog-logrotationtimeunit',
'nsslapd-accesslog-maxlogsize',
'nsslapd-accesslog-maxlogsperdir',
# audit log rotation
'nsslapd-auditlog-logexpirationtime',
'nsslapd-auditlog-logexpirationtimeunit',
'nsslapd-auditlog-logmaxdiskspace',
'nsslapd-auditlog-logminfreediskspace',
'nsslapd-auditlog-logrotationsync-enabled',
'nsslapd-auditlog-logrotationsynchour',
'nsslapd-auditlog-logrotationsyncmin',
'nsslapd-auditlog-logrotationtime',
'nsslapd-auditlog-logrotationtimeunit',
'nsslapd-auditlog-maxlogsize',
'nsslapd-auditlog-maxlogsperdir',
# audit fail log rotation
'nsslapd-auditfaillog-logexpirationtime',
'nsslapd-auditfaillog-logexpirationtimeunit',
'nsslapd-auditfaillog-logmaxdiskspace',
'nsslapd-auditfaillog-logminfreediskspace',
'nsslapd-auditfaillog-logrotationsync-enabled',
'nsslapd-auditfaillog-logrotationsynchour',
'nsslapd-auditfaillog-logrotationsyncmin',
'nsslapd-auditfaillog-logrotationtime',
'nsslapd-auditfaillog-logrotationtimeunit',
'nsslapd-auditfaillog-maxlogsize',
            'nsslapd-auditfaillog-maxlogsperdir',
# error log rotation
'nsslapd-errorlog-logexpirationtime',
'nsslapd-errorlog-logexpirationtimeunit',
'nsslapd-errorlog-logmaxdiskspace',
'nsslapd-errorlog-logminfreediskspace',
'nsslapd-errorlog-logrotationsync-enabled',
'nsslapd-errorlog-logrotationsynchour',
'nsslapd-errorlog-logrotationsyncmin',
'nsslapd-errorlog-logrotationtime',
'nsslapd-errorlog-logrotationtimeunit',
'nsslapd-errorlog-maxlogsize',
'nsslapd-errorlog-maxlogsperdir',
# security log rotation
'nsslapd-securitylog-logexpirationtime',
'nsslapd-securitylog-logexpirationtimeunit',
'nsslapd-securitylog-logmaxdiskspace',
'nsslapd-securitylog-logminfreediskspace',
'nsslapd-securitylog-logrotationsync-enabled',
'nsslapd-securitylog-logrotationsynchour',
'nsslapd-securitylog-logrotationsyncmin',
'nsslapd-securitylog-logrotationtime',
'nsslapd-securitylog-logrotationtimeunit',
'nsslapd-securitylog-maxlogsize',
'nsslapd-securitylog-maxlogsperdir',
],
'multivalued': [],
'label': 'cn=config',
'mode': 'all',
'count': 0,
},
'ldbm_config': {
'dn': ['cn=config,cn=ldbm database,cn=plugins,cn=config'],
'attrs': [
'nsslapd-lookthroughlimit',
'nsslapd-idlistscanlimit', # pick larger value?
'nsslapd-import-cachesize',
'nsslapd-search-bypass-filter-test',
'nsslapd-search-use-vlv-index',
'nsslapd-exclude-from-export',
'nsslapd-pagedlookthroughlimit',
'nsslapd-pagedidlistscanlimit',
'nsslapd-rangelookthroughlimit',
'nsslapd-backend-opt-level',
],
'multivalued': [],
'label': 'LDBM Config',
'mode': 'all',
'count': 0,
},
'ldbm_bdb': {
'dn': ['cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config'],
'attrs': [
# 'nsslapd-cache-autosize',
# 'nsslapd-cache-autosize-split',
# 'nsslapd-dbcachesize',
'nsslapd-db-compactdb-interval',
'nsslapd-db-compactdb-time',
'nsslapd-db-locks',
'nsslapd-import-cache-autosize',
'nsslapd-import-cachesize',
'nsslapd-db-deadlock-policy',
'nsslapd-db-locks-monitoring-enabled',
'nsslapd-db-locks-monitoring-threshold',
'nsslapd-db-locks-monitoring-pause',
],
'multivalued': [],
'label': 'BDB Config',
'mode': 'all',
'count': 0,
},
'ldbm_mdb': { # Future TODO
'dn': ['cn=mdb,cn=config,cn=ldbm database,cn=plugins,cn=config'],
'attrs': [],
'multivalued': [],
'label': 'MDB Config',
'mode': 'all',
'count': 0,
},
'backends': { # cn=userroot,cn=ldbm database,cn=plugins,cn=config
'dn': [
'cn=changelog,cn=ldbm database,cn=plugins,cn=config',
'cn=userRoot,cn=ldbm database,cn=plugins,cn=config',
],
'attrs': [
# 'nsslapd-cachesize', # autotuned
# 'nsslapd-cachememsize', # autotuned
# 'nsslapd-dncachememsize',
],
'multivalued': [],
'label': 'Userroot',
'mode': 'all',
'count': 0,
},
'referint': {
'dn': ['cn=referential integrity postoperation,cn=plugins,cn=config'],
'attrs': [
'nsslapd-plugincontainerscope', 'nsslapd-pluginentryscope',
'nsslapd-pluginexcludeentryscope', 'referint-update-delay'
],
'multivalued': [
'referint-membership-attr',
],
'label': 'Referint Plugin',
'mode': 'all',
'count': 0,
},
'memberof': {
'dn': ['cn=MemberOf Plugin,cn=plugins,cn=config'],
'attrs': [],
'multivalued': [
'memberofgroupattr', 'memberofentryscope',
'memberofentryscopeexcludesubtree',
],
'label': 'MemberOf Plugin',
'mode': 'all',
'count': 0,
},
'ipa_winsync': {
'dn': ['cn=ipa-winsync,cn=plugins,cn=config'],
'attrs': [
'ipawinsyncacctdisable', 'ipawinsyncdefaultgroupattr',
'ipawinsyncdefaultgroupfilter', 'ipawinsyncforcesync',
'ipawinsynchomedirattr', 'ipawinsyncloginshellattr',
'ipawinsyncnewentryfilter', 'ipawinsyncnewuserocattr',
'ipawinsyncrealmattr', 'ipawinsyncrealmfilter',
'ipawinsyncuserflatten',
],
'multivalued': [
'ipaWinSyncUserAttr',
],
'label': 'Winsync Plugin',
'mode': 'all',
'count': 0,
},
'topo_config': {
'dn': ['cn=IPA Topology Configuration,cn=plugins,cn=config'],
'attrs': [
'nsslapd-topo-plugin-shared-binddngroup',
            'nsslapd-topo-plugin-shared-config-base',
            'nsslapd-topo-plugin-startup-delay',
],
'multivalued': [
'nsslapd-topo-plugin-shared-replica-root'
],
'label': 'Topology Configuration',
'mode': 'all',
'count': 0,
},
'ipa_dns': { # TODO - do admins ever turn this plugin off?
'dn': ['cn=IPA DNS,cn=plugins,cn=config'],
'attrs': [
'nsslapd-pluginEnabled',
],
'multivalued': [],
'label': 'DNS Plugin',
'mode': 'all',
'count': 0,
},
'retro': {
'dn': ['cn=Retro Changelog Plugin,cn=plugins,cn=config'],
'attrs': [
'nsslapd-changelogmaxage',
],
'multivalued': [
'nsslapd-include-suffix',
'nsslapd-exclude-suffix',
'nsslapd-exclude-attrs',
'nsslapd-attribute',
],
'label': 'Retro Changelog Plugin',
'mode': 'all',
'count': 0,
},
'grace': { # TODO - do admins ever turn this plugin off?
'dn': ['cn=IPA Graceperiod,cn=plugins,cn=config'],
'attrs': [
'nsslapd-pluginEnabled',
],
'multivalued': [],
'label': 'Grace Period Plugin',
'mode': 'all',
'count': 0,
},
'lockout': { # TODO - do admins ever turn this plugin off?
'dn': ['cn=IPA Lockout,cn=plugins,cn=config'],
'attrs': [
'nsslapd-pluginEnabled',
],
'multivalued': [],
'label': 'Lockout Plugin',
'mode': 'all',
'count': 0,
},
'enroll': { # TODO - might not be needed?
'dn': ['cn=ipa_enrollment_extop,cn=plugins,cn=config'],
'attrs': [
'nsslapd-realmtree',
],
'multivalued': [],
'label': 'Enrollment Plugin',
'mode': 'all',
'count': 0,
},
'extdom': { # TODO - might not be needed?
'dn': ['cn=ipa_extdom_extop,cn=plugins,cn=config'],
'attrs': [
'nsslapd-basedn',
],
'multivalued': [],
'label': 'Extdom Extop Plugin',
'mode': 'all',
'count': 0,
},
'pw_extop': { # TODO - might not be needed?
'dn': ['cn=ipa_pwd_extop,cn=plugins,cn=config'],
'attrs': [
'nsslapd-realmtree',
],
'multivalued': [],
'label': 'Password Extop Plugin',
'mode': 'all',
'count': 0,
},
'dna': {
'dn': [
'cn=Posix IDs,cn=Distributed Numeric Assignment Plugin,'
'cn=plugins,cn=config',
'cn=Subordinate IDs,cn=Distributed Numeric Assignment '
'Plugin,cn=plugins,cn=config'
],
'attrs': [
'dnafilter', 'dnamaxValue', 'dnanextvalue',
'dnasharedcfgdn', 'dnathreshold', 'dnatype',
# 'dnaexcludeScope' # became stricter in newer versions, but
            # migration reverts the scope to be more open
],
'multivalued': [],
'label': 'DNA Plugin',
'mode': 'production',
'count': 0,
},
'schema_compat': {
'dn': [
'cn=Schema Compatibility,cn=plugins,cn=config',
'cn=users,cn=Schema Compatibility,cn=plugins,cn=config',
'cn=groups,cn=Schema Compatibility,cn=plugins,cn=config',
'cn=ng,cn=Schema Compatibility,cn=plugins,cn=config',
'cn=sudoers,cn=Schema Compatibility,cn=plugins,cn=config',
'cn=computers,cn=Schema Compatibility,cn=plugins,cn=config',
],
'attrs': [
'schema-compat-container-group', 'schema-compat-search-base',
'schema-compat-container-rdn', 'nsslapd-pluginenabled',
],
'multivalued': [
'schema-compat-entry-attribute', 'schema-compat-ignore-subtree',
'schema-compat-restrict-subtree',
],
'label': 'Schema Compat Plugin',
'mode': 'all',
'count': 0,
},
'sasl_map': {
'dn': [
'cn=Full Principal,cn=mapping,cn=sasl,cn=config',
'cn=ID Overridden Principal,cn=mapping,cn=sasl,cn=config',
'cn=Name Only,cn=mapping,cn=sasl,cn=config',
],
'attrs': [
'nssaslmapbasedntemplate', 'nssaslmappriority',
'nssaslmapregexstring', 'nssaslmapfiltertemplate',
],
'multivalued': [],
'label': 'SASL Map',
'mode': 'all',
'count': 0,
},
'uuid': {
'dn': [
'cn=IPA Unique IDs,cn=IPA UUID,cn=plugins,cn=config',
'cn=IPK11 Unique IDs,cn=IPA UUID,cn=plugins,cn=config',
],
'attrs': [
'ipauuidattr', 'ipauuidenforce', 'ipauuidexcludesubtree',
'ipauuidfilter', 'ipauuidmagicregen', 'ipauuidscope'
],
'multivalued': [],
'label': 'UUID Plugin',
'mode': 'all',
'count': 0,
},
'uniqueness': {
'dn': [
'cn=uid uniqueness,cn=plugins,cn=config',
'cn=attribute uniqueness,cn=plugins,cn=config',
'cn=krbPrincipalName uniqueness,cn=plugins,cn=config',
'cn=krbCanonicalName uniqueness,cn=plugins,cn=config',
'cn=ipaUniqueID uniqueness,cn=plugins,cn=config',
'cn=certificate store subject uniqueness,cn=plugins,cn=config',
'cn=certificate store issuer/serial uniqueness,cn=plugins,'
'cn=config',
'cn=caacl name uniqueness,cn=plugins,cn=config',
'cn=netgroup uniqueness,cn=plugins,cn=config',
'cn=sudorule name uniqueness,cn=plugins,cn=config',
'cn=ipaSubordinateIdEntry ipaOwner uniqueness,cn=plugins,'
'cn=config',
'cn=mail uniqueness,cn=plugins,cn=config',
],
'attrs': [
'uniqueness-across-all-subtrees',
],
'multivalued': [
'uniqueness-subtrees', 'uniqueness-exclude-subtrees',
'uniqueness-attribute-name'
],
'label': 'Attr Uniqueness Plugin',
'mode': 'all',
'count': 0,
},
}
#
# This mapping is similar to above but it handles container entries
# This could be built into the above mapping using the "comma" approach
#
DS_INDEXES = {
'index': {
'dn': ',cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config',
'attrs': [
'nssystemindex',
],
'multivalued': [
'nsindextype',
'nsmatchingrule',
],
'label': 'Database Indexes',
'mode': 'all',
'count': 0
},
'encrypted': {
'dn': ',cn=encrypted attributes,cn=userroot,cn=ldbm database,'
'cn=plugins,cn=config',
'attrs': [
'nsencryptionalgorithm',
],
'multivalued': [],
'label': 'Encrypted Attributes',
'mode': 'all',
'count': 0
},
}
#
# This mapping breaks each IPA entry (or type of entry) into its own category
# Each category, or type, has an objectclass list "oc" and its DIT location
# "subtree". If the "subtree" starts with a comma "," then it is a container
# of entries, otherwise it's a single entry. These two are used together to
# identify the entry.
# The "label" and "count" attributes are used for the Summary Report
#
# Some entries use ipaUniqueId as the RDN attribute, this makes comparing
# entries between the remote and local servers problematic. So we need special
# identifying information to find the local entry. In this case we use the
# "alt_id" key which is a dict of an attribute 'attr' and partial base DN
# 'base' - which is expected to end in a comma.
#
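# For example (illustrative): with
#
#   'alt_id': {'attr': 'cn', 'base': 'cn=sudorules,cn=sudo,', 'isDN': False}
#
# a remote sudo rule is matched locally by searching for its "cn" value under
# cn=sudorules,cn=sudo,$SUFFIX instead of comparing ipaUniqueId RDNs, which
# differ between servers.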
DB_OBJECTS = {
# Plugins
'automember_def': {
'oc': ['automemberdefinition'],
'subtree': ',cn=automember,cn=etc,$SUFFIX',
'label': 'Automember Definitions',
'mode': 'all',
'count': 0,
},
'automember_rules': {
'oc': ['automemberregexrule'],
'subtree': ',cn=automember,cn=etc,$SUFFIX',
'label': 'Automember Rules',
'mode': 'all',
'count': 0,
},
'dna_ranges': {
'oc': ['ipadomainidrange', 'ipaidrange', 'ipatrustedaddomainrange'],
'subtree': ',cn=ranges,cn=etc,$SUFFIX',
'label': 'DNA Ranges',
'prod_only': False,
'mode': 'production',
'count': 0,
},
'dna_posix_ids': {
'oc': ['dnasharedconfig'],
'subtree': 'cn=posix-ids,cn=dna,cn=ipa,cn=etc,$SUFFIX',
'label': 'DNA Posix IDs',
'prod_only': False,
'mode': 'production',
'count': 0,
},
'dna_sub_ids': {
'oc': ['dnasharedconfig'],
'subtree': 'cn=subordinate-ids,cn=dna,cn=ipa,cn=etc,$SUFFIX',
'label': 'DNA Sub IDs',
'prod_only': False,
'mode': 'production',
'count': 0,
},
'mep_templates': {
'oc': ['meptemplateentry'],
'subtree': ',cn=templates,cn=managed entries,cn=etc,$SUFFIX',
'label': 'MEP Templates',
'mode': 'all',
'count': 0,
},
'mep_defs': {
'oc': ['extensibleobject'],
'subtree': ',cn=definitions,cn=managed entries,cn=etc,$SUFFIX',
        'label': 'MEP Definitions',
'mode': 'all',
'count': 0,
},
# Etc...
'anon_limits': {
'oc': [],
'subtree': 'cn=anonymous-limits,cn=etc,$SUFFIX',
'label': 'Anonymous Limits',
'mode': 'all',
'count': 0,
},
'ca': { # Unknown if this is needed TODO
'oc': [],
'subtree': 'cn=ca,$SUFFIX',
'label': 'CA',
'mode': 'all',
'count': 0,
},
'ipa_config': {
'oc': ['ipaconfigobject', 'ipaguiconfig'],
'subtree': 'cn=ipaconfig,cn=etc,$SUFFIX',
'special_attrs': [
# needs special handling, but ipa-server-upgrade rewrites this
# attribute anyway!
('ipausersearchfields', 'list'),
],
'label': 'IPA Config',
'mode': 'all',
'count': 0,
},
'sysaccounts': {
'oc': [],
'subtree': ',cn=sysaccounts,cn=etc,$SUFFIX',
'label': 'Sysaccounts',
'mode': 'all',
'count': 0,
},
'topology': {
'oc': ['iparepltopoconf'],
'subtree': ',cn=topology,cn=ipa,cn=etc,$SUFFIX',
'label': 'Topology',
'mode': 'all',
'count': 0,
},
'certmap': {
'oc': ['ipacertmapconfigobject'],
'subtree': 'cn=certmap,$SUFFIX',
'label': 'Certmap',
'mode': 'all',
'count': 0,
},
'certmap_rules': {
'oc': [],
'subtree': ',cn=certmaprules,cn=certmap,$SUFFIX',
'label': 'Certmap Rules',
'mode': 'all',
'count': 0,
},
's4u2proxy': {
'oc': ['ipakrb5delegationacl', 'groupofprincipals'],
'subtree': ',cn=s4u2proxy,cn=etc,$SUFFIX',
'label': 's4u2proxy',
'mode': 'all',
'count': 0,
},
'passkey_config': {
'oc': ['ipapasskeyconfigobject'],
'subtree': 'cn=passkeyconfig,cn=etc,$SUFFIX',
'label': 'PassKey Config',
'mode': 'all',
'count': 0,
},
'desktop_profiles': {
'oc': ['ipadeskprofileconfig'],
'subtree': 'cn=desktop-profile,$SUFFIX',
        'label': 'Desktop Profiles',
'mode': 'all',
'count': 0,
},
# Accounts
'computers': {
'oc': ['ipahost'],
'subtree': ',cn=computers,cn=accounts,$SUFFIX',
'label': 'Hosts',
'mode': 'all',
'count': 0,
},
'admin': {
'oc': ['person'],
'subtree': 'uid=admin,cn=users,cn=accounts,$SUFFIX',
'label': 'Admin',
'mode': 'all',
'count': 0,
},
'users': {
'oc': ['person'],
'subtree': ',cn=users,cn=accounts,$SUFFIX',
'label': 'Users',
'strip_attrs': [
'krbprincipalname',
'krbextradata',
'krbprincipalkey',
'krblastpwdchange',
'krbpasswordexpiration',
'krblastadminunlock',
'krblastfailedauth',
'krbloginfailedcount',
],
'mode': 'all',
'count': 0,
},
'groups': {
'oc': ['groupofnames', 'posixgroup'],
'subtree': ',cn=groups,cn=accounts,$SUFFIX',
'label': 'Groups',
'mode': 'all',
'count': 0,
},
'roles': {
'oc': ['groupofnames'],
'subtree': ',cn=roles,cn=accounts,$SUFFIX',
'label': 'Roles',
'mode': 'all',
'count': 0,
},
'host_groups': {
'oc': ['ipahostgroup'],
'subtree': ',cn=hostgroups,cn=accounts,$SUFFIX',
'label': 'Host Groups',
'mode': 'all',
'count': 0,
},
'services': { # Contains COS entries - should COS be ignored TODO
'oc': ['ipaservice'],
'subtree': ',cn=services,cn=accounts,$SUFFIX',
'label': 'Services',
'mode': 'all',
'count': 0,
},
'views': { # unknown what these entries look like TODO
'oc': [],
'subtree': ',cn=views,cn=accounts,$SUFFIX',
'label': 'Views',
'mode': 'all',
'count': 0,
},
'ipservices': { # unknown what these entries look like TODO
'oc': [],
'subtree': ',cn=ipservices,cn=accounts,$SUFFIX',
'label': 'IP Services',
'mode': 'all',
'count': 0,
},
'subids': {
'oc': [],
'subtree': ',cn=subids,cn=accounts,$SUFFIX',
'label': 'Sub IDs',
'mode': 'production',
'alt_id': {
'attr': 'ipaOwner',
'isDN': True,
'base': 'cn=subids,cn=accounts,',
},
'count': 0,
},
# automount
'automounts': {
'oc': [],
'subtree': ',cn=automount,$SUFFIX',
'label': 'Automounts',
'mode': 'all',
'count': 0,
},
'automount_maps': {
'oc': ['automountmap'],
'subtree': ',cn=automount,$SUFFIX',
'label': 'Automount Maps',
'mode': 'all',
'count': 0,
},
# OTP
'otp': {
'oc': [],
'subtree': ',cn=otp,$SUFFIX',
'label': 'OTP',
'mode': 'all',
'count': 0,
},
'otp_config': {
'oc': ['ipatokenotpconfig'],
'subtree': 'cn=otp,cn=etc,$SUFFIX',
'label': 'OTP Config',
'mode': 'all',
'count': 0,
},
# Realms
'realms': {
'oc': ['domainrelatedobject'],
'subtree': ',cn=realm domains,cn=ipa,cn=etc,$SUFFIX',
'label': 'Realm',
'mode': 'all',
'count': 0,
},
# Trusts - not sure if this is useful TODO
    # cn=ad,cn=trusts,$SUFFIX
'trusts': {
'oc': [],
'subtree': ',cn=trusts,$SUFFIX',
'label': 'Trusts',
'mode': 'all',
'count': 0,
},
# AD
'ad': {
'oc': ['ipantdomainattrs'],
'subtree': ',cn=ad,cn=etc,$SUFFIX',
'label': 'AD',
'mode': 'all',
'count': 0,
},
# Provisioning (staged and deleted users) also COS entries
'provisioning': {
'oc': [],
'subtree': ',cn=accounts,cn=provisioning,$SUFFIX',
'label': 'Provisioning',
'mode': 'all',
'count': 0,
},
# PBAC
'pbac_priv': {
'oc': ['groupofnames'],
'subtree': ',cn=privileges,cn=pbac,$SUFFIX',
        'label': 'Privileges',
'mode': 'all',
'count': 0,
},
'pbac_perms': {
'oc': ['ipapermission'],
'subtree': ',cn=permissions,cn=pbac,$SUFFIX',
'label': 'Permissions',
'mode': 'all',
'count': 0,
},
# HBAC
'hbac_services': {
'oc': ['ipahbacservice'],
'subtree': ',cn=hbacservices,cn=hbac,$SUFFIX',
'label': 'HBAC Services',
'mode': 'all',
'count': 0,
},
'hbac_service_groups': {
'oc': ['ipahbacservicegroup'],
'subtree': ',cn=hbacservicegroups,cn=hbac,$SUFFIX',
'label': 'HBAC Service Groups',
'mode': 'all',
'count': 0,
},
'hbac_rules': {
'oc': ['ipahbacrule'],
'subtree': ',cn=hbac,$SUFFIX',
'label': 'HBAC Rules',
'alt_id': {
'attr': 'cn',
'base': 'cn=hbac,',
'isDN': False,
},
'mode': 'all',
'count': 0,
},
# Selinux
'selinux_usermap': { # Not sure if this is needed, entry is empty TODO
'oc': [],
'subtree': ',cn=usermap,cn=selinux,$SUFFIX',
'alt_id': {
'attr': 'cn',
'base': 'cn=usermap,cn=selinux,',
'isDN': False,
},
'label': 'Selinux Usermaps',
'mode': 'all',
'count': 0,
},
# Sudo
'sudo_rules': {
'oc': ['ipasudorule'],
'subtree': ',cn=sudorules,cn=sudo,$SUFFIX',
'label': 'Sudo Rules',
'alt_id': {
'attr': 'cn',
'base': 'cn=sudorules,cn=sudo,',
'isDN': False,
},
'special_attrs': [
            # schema defines sudoOrder as multivalued, but we need to treat
            # it as single valued
('sudoorder', 'single'),
],
'mode': 'all',
'count': 0,
},
'sudo_cmds': {
'oc': ['ipasudocmd'],
'subtree': ',cn=sudocmds,cn=sudo,$SUFFIX',
'alt_id': {
'attr': 'sudoCmd',
'base': 'cn=sudocmds,cn=sudo,',
'isDN': False,
},
'label': 'Sudo Commands',
'mode': 'all',
'count': 0,
},
'sudo_cmd_groups': {
'oc': ['ipasudocmdgrp'],
'subtree': ',cn=sudocmdgroups,cn=sudo,$SUFFIX',
'label': 'Sudo Command Groups',
'mode': 'all',
'count': 0,
},
# DNS
'dns_container': {
'oc': ['ipadnscontainer', 'nscontainer'],
'subtree': 'cn=dns,$SUFFIX',
'label': 'DNS Container',
'mode': 'all',
'count': 0,
},
'dns_server_container': {
'oc': ['nscontainer'],
'subtree': 'cn=servers,cn=dns,$SUFFIX',
'label': 'DNS Server Container',
'mode': 'all',
'count': 0,
},
'dns_records': {
'oc': ['idnsrecord', 'idnszone', 'idnsforwardzone'],
'subtree': ',cn=dns,$SUFFIX',
'label': 'DNS Records',
'mode': 'all',
'count': 0,
},
'dns_servers': {
'oc': ['idnsServerConfigObject'],
'subtree': ',cn=servers,cn=dns,$SUFFIX',
'label': 'DNS Servers',
'mode': 'all',
'count': 0,
},
# Kerberos
'krb_realm': {
'oc': ['krbrealmcontainer'],
'subtree': ',cn=kerberos,$SUFFIX',
'label': 'Kerberos Realm',
'mode': 'all',
'count': 0,
},
'kerberos_policy': { # principal
'oc': ['krbticketpolicyaux'],
'subtree': ',cn=kerberos,$SUFFIX',
'label': 'Kerberos Policy',
'mode': 'all',
'count': 0,
},
'krb_pwpolicy': {
'oc': ['ipapwdpolicy'],
'subtree': 'cn=global_policy,cn=$REALM,cn=kerberos,$SUFFIX',
'label': 'Kerberos Password Policy',
'mode': 'all',
'count': 0,
},
'krb_default_pwppolicy': {
'oc': ['krbpwdpolicy'],
'subtree': 'cn=default kerberos service password policy,'
'cn=$REALM,cn=kerberos,$SUFFIX',
'label': 'Kerberos Default Password Policy',
'mode': 'all',
'count': 0,
},
# Other
'domain_config': {
'oc': ['ipadomainlevelconfig'],
'subtree': 'cn=domain level,cn=ipa,cn=etc,$SUFFIX',
'label': 'Domain Configuration',
'mode': 'all',
'count': 0,
},
'net_groups': {
'oc': ['ipanisnetgroup'],
'not_oc': ['mepmanagedentry'],
'subtree': ',cn=ng,cn=alt,$SUFFIX',
'alt_id': {
'attr': 'cn',
'base': 'cn=ng,cn=alt,',
'isDN': False,
},
'label': 'Network Groups',
'mode': 'all',
'count': 0,
},
# Certificates
# cn=IPA.LOCAL IPA CA,cn=certificates,cn=ipa,cn=etc,dc=ipademo,dc=local
'certificate': {
'oc': ['ipacertificate', 'pkica'],
'subtree': ',cn=certificates,cn=ipa,cn=etc,$SUFFIX',
'label': 'CA Certificates',
'mode': 'all',
'count': 0,
},
'caacls': {
'oc': ['ipacaacl'],
'subtree': ',cn=caacls,cn=ca,$SUFFIX',
'alt_id': {
'attr': 'cn',
'base': 'cn=caacls,cn=ca,',
'isDN': False,
},
'label': 'CA Certificate ACLs',
'mode': 'all',
'count': 0,
},
}
DB_EXCLUDE_TREES = [
'cn=sec,cn=dns,$SUFFIX',
'cn=custodia,cn=ipa,cn=etc,$SUFFIX',
]
#
# COS can probably be skipped TODO
#
COS = {
# COS
'cos_templates': 'cn=cosTemplates,cn=accounts,$SUFFIX',
'cos_pwpolicy': 'cn=Password Policy,cn=accounts,$SUFFIX',
# COS Computers
'cos_computer:templates': 'cn=cosTemplates,cn=computers,cn=accounts,',
'cos_computer_pwpolicy':
'cn=Default Password Policy,cn=cosTemplates,cn=computers,cn=accounts,',
'cos_computer_pwpolicy_entry':
'cn=Default Password Policy,cn=computers,cn=accounts,',
# COS Services
'cos_service_templates': 'cn=cosTemplates,cn=services,cn=accounts,$SUFFIX',
'cos_service_pwpolicy':
'cn=Default Password Policy,cn=cosTemplates,cn=services,cn=accounts,',
'cos_service_pwpolicy_entry':
'cn=Default Password Policy,cn=services,cn=accounts,',
# COS Kerberos
'cos_kerb_realm_templates': 'cn=cosTemplates,cn=$REALM,cn=kerberos,',
'cos_kerb_realm_pwpolicy':
'cn=Default Password Policy,cn=cosTemplates,cn=$REALM,cn=kerberos,',
'cos_kerb_realm_pwpolicy_entry':
'cn=Default Password Policy,cn=$REALM,cn=kerberos,',
# COS Sysaccounts
'cos_sysacct_templates': 'cn=cosTemplates,cn=sysaccounts,cn=etc,',
'cos_sysacct_pwpolicy':
'cn=Default Password Policy,cn=cosTemplates,cn=sysaccounts,cn=etc,',
'cos_sysacct_pwpolicy_entry':
'cn=Default Password Policy,cn=sysaccounts,cn=etc,',
}
| 33,763
|
Python
|
.py
| 1,064
| 23.485902
| 79
| 0.551246
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,765
|
ldapupdate.py
|
freeipa_freeipa/ipaserver/install/ldapupdate.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Documentation can be found at http://freeipa.org/page/LdapUpdate
# TODO
# save undo files?
from __future__ import absolute_import
import base64
import logging
import sys
import uuid
import time
import os
import fnmatch
import warnings
from pysss_murmur import murmurhash3
import six
from ipapython import ipautil, ipaldap
from ipalib import errors
from ipalib import api, create_api
from ipalib import constants
from ipaplatform.constants import constants as platformconstants
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipapython.dn import DN
from ipaserver.install import installutils, replication
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
UPDATES_DIR = paths.UPDATES_DIR
UPDATE_SEARCH_TIME_LIMIT = 30 # seconds
def get_sub_dict(realm, domain, suffix, fqdn, idstart=None, idmax=None):
"""LDAP template substitution dict for installer and updater
"""
if idstart is None:
idrange_size = None
subid_base_rid = None
else:
idrange_size = idmax - idstart + 1
subid_base_rid = constants.SUBID_RANGE_START - idrange_size
# uid / gid for autobind
# user is only defined when ipa-server-dns and bind are installed
try:
named_uid = platformconstants.NAMED_USER.uid
named_gid = platformconstants.NAMED_GROUP.gid
except ValueError:
named_uid = None
named_gid = None
return dict(
REALM=realm,
DOMAIN=domain,
SUFFIX=suffix,
ESCAPED_SUFFIX=str(suffix),
FQDN=fqdn,
HOST=fqdn,
LIBARCH=paths.LIBARCH,
TIME=int(time.time()),
FIPS="#" if tasks.is_fips_enabled() else "",
# idstart, idmax, and idrange_size may be None
IDSTART=idstart,
IDMAX=idmax,
IDRANGE_SIZE=idrange_size,
SUBID_COUNT=constants.SUBID_COUNT,
SUBID_RANGE_START=constants.SUBID_RANGE_START,
SUBID_RANGE_SIZE=constants.SUBID_RANGE_SIZE,
SUBID_RANGE_MAX=constants.SUBID_RANGE_MAX,
SUBID_DNA_THRESHOLD=constants.SUBID_DNA_THRESHOLD,
SUBID_BASE_RID=subid_base_rid,
DOMAIN_HASH=murmurhash3(domain, len(domain), 0xdeadbeef),
MAX_DOMAIN_LEVEL=constants.MAX_DOMAIN_LEVEL,
MIN_DOMAIN_LEVEL=constants.MIN_DOMAIN_LEVEL,
STRIP_ATTRS=" ".join(replication.STRIP_ATTRS),
EXCLUDES=(
'(objectclass=*) $ EXCLUDE ' + ' '.join(replication.EXCLUDES)
),
TOTAL_EXCLUDES=(
'(objectclass=*) $ EXCLUDE '
+ ' '.join(replication.TOTAL_EXCLUDES)
),
DEFAULT_SHELL=platformconstants.DEFAULT_SHELL,
DEFAULT_ADMIN_SHELL=platformconstants.DEFAULT_ADMIN_SHELL,
SELINUX_USERMAP_DEFAULT=platformconstants.SELINUX_USERMAP_DEFAULT,
SELINUX_USERMAP_ORDER=platformconstants.SELINUX_USERMAP_ORDER,
NAMED_UID=named_uid,
NAMED_GID=named_gid,
)
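# A short example of how the substitution dict is consumed by the updater
# (realm and suffix values chosen for illustration): a template line such as
#
#   dn: cn=$REALM,cn=kerberos,$SUFFIX
#
# is rendered for realm EXAMPLE.COM and suffix dc=example,dc=com as
#
#   dn: cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com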
def connect(ldapi=False, realm=None, fqdn=None):
"""Create a connection for updates"""
if ldapi:
conn = ipaldap.LDAPClient.from_realm(realm, decode_attrs=False)
else:
conn = ipaldap.LDAPClient.from_hostname_secure(
fqdn, decode_attrs=False
)
try:
if os.getegid() == 0:
try:
# autobind
conn.external_bind()
except errors.NotFound:
# Fall back
conn.gssapi_bind()
else:
conn.gssapi_bind()
except (errors.DatabaseError, errors.NetworkError) as e:
raise RuntimeError("Unable to connect to LDAP server: %s" % e)
except errors.ACIError as e:
raise RuntimeError(
"The password provided is incorrect for LDAP server %s: %s" %
(fqdn, e))
return conn
class BadSyntax(installutils.ScriptError):
def __init__(self, value):
self.value = value
super(BadSyntax, self).__init__(
msg="LDAPUpdate: syntax error: \n %s" % value, rval=1)
def __str__(self):
return repr(self.value)
def safe_output(attr, values):
"""
Sanitizes values we do not want logged, like passwords.
This should be called in all debug statements that output values.
This list does not necessarily need to be exhaustive given the limited
scope of types of values that the updater manages.
This only supports lists, tuples and strings. If you pass a dict you may
get a string back.
"""
    sensitive_attributes = [
        'krbmkey', 'userpassword', 'passwordhistory', 'krbprincipalkey',
        'sambalmpassword', 'sambantpassword', 'ipanthash',
    ]
if attr.lower() in sensitive_attributes:
if type(values) in (tuple, list):
# try to still look a little like what is in LDAP
return ['XXXXXXX'] * len(values)
else:
return 'XXXXXXXX'
if values is None:
return None
is_list = type(values) in (tuple, list)
if is_list and None in values:
return values
if not is_list:
values = [values]
try:
values = [v.decode('ascii') for v in values]
except UnicodeDecodeError:
try:
values = [base64.b64encode(v).decode('ascii') for v in values]
except TypeError:
pass
if not is_list:
values = values[0]
return values
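# Illustrative behaviour of safe_output() (values made up):
#
#   safe_output('userPassword', [b'secret'])   ->  ['XXXXXXX']
#   safe_output('cn', [b'admin'])              ->  ['admin']
#   safe_output('cn', [b'\x00\xff'])           ->  ['AP8='] (base64 fallback)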
_sentinel = object()
def run_ldapi_reload_task(conn):
"""Create and wait for reload ldapi mappings task
:param conn: ldap2 connection
:return: exitcode
"""
task_cn = "reload_{}".format(int(time.time()))
task_dn = DN(
('cn', task_cn), ('cn', 'reload ldapi mappings'),
('cn', 'tasks'), ('cn', 'config')
)
entry = conn.make_entry(
task_dn,
objectClass=['top', 'extensibleObject'],
cn=[task_cn],
ttl=[10],
)
logger.debug('Creating reload task %s', task_dn)
conn.add_entry(entry)
# task usually finishes in a few ms, avoid 1 sec delay in wait_for_task
time.sleep(0.1)
exitcode = replication.wait_for_task(api.Backend.ldap2, task_dn)
logger.debug(
'Task %s has finished with exit code %i',
task_dn, exitcode
)
return exitcode
class LDAPUpdate:
action_keywords = {
"default", "add", "remove", "only", "onlyifexist", "deleteentry",
"replace", "addifnew", "addifexist"
}
index_suffix = DN(
('cn', 'index'), ('cn', 'userRoot'), ('cn', 'ldbm database'),
('cn', 'plugins'), ('cn', 'config')
)
ldapi_autobind_suffix = DN(('cn', 'auto_bind'), ('cn', 'config'))
def __init__(self, dm_password=_sentinel, sub_dict=None,
online=_sentinel, ldapi=_sentinel, api=api):
'''
:parameters:
dm_password
deprecated and no longer used
sub_dict
substitution dictionary
online
deprecated and no longer used
ldapi
deprecated and no longer used
api
bootstrapped API object (for configuration)
Data Structure Example:
-----------------------
dn_by_rdn_count = {
            3: 'cn=config,dc=example,dc=com',
4: 'cn=bob,ou=people,dc=example,dc=com',
}
all_updates = [
{
'dn': 'cn=config,dc=example,dc=com',
'default': [
dict(attr='attr1', value='default1'),
],
'updates': [
dict(action='action', attr='attr1', value='value1'),
dict(action='replace', attr='attr2', value=['old', 'new']),
]
},
{
'dn': 'cn=bob,ou=people,dc=example,dc=com',
'default': [
dict(attr='attr3', value='default3'),
],
'updates': [
dict(action='action', attr='attr3', value='value3'),
dict(action='action', attr='attr4', value='value4'),
                ]
}
]
        Please notice that the replace action requires two values in the
        list. The default and update lists are "dispositions".
Plugins:
        Plugins have to be specified in the update file to be executed,
        using the 'plugin' directive
Example:
plugin: update_uniqueness_plugins_to_new_syntax
Each plugin returns two values:
1. restart: dirsrv will be restarted AFTER this update is
applied.
2. updates: A list of updates to be applied.
The value of an update is a dictionary with the following possible
values:
- dn: DN, equal to the dn attribute
- updates: list of updates against the dn
- default: list of the default entry to be added if it doesn't
exist
- deleteentry: list of dn's to be deleted (typically single dn)
For example, this update file:
dn: cn=global_policy,cn=$REALM,cn=kerberos,$SUFFIX
replace:krbPwdLockoutDuration:10::600
replace: krbPwdMaxFailure:3::6
Generates this list which contain the update dictionary:
[
{
'dn': 'cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
'updates': [
dict(action='replace', attr='krbPwdLockoutDuration',
value=['10','600']),
dict(action='replace', attr='krbPwdMaxFailure',
value=['3','6']),
]
}
]
Here is another example showing how a default entry is configured:
dn: cn=Managed Entries,cn=etc,$SUFFIX
default: objectClass: nsContainer
default: objectClass: top
default: cn: Managed Entries
This generates:
[
{
'dn': 'cn=Managed Entries,cn=etc,dc=example,dc=com',
'default': [
dict(attr='objectClass', value='nsContainer'),
dict(attr='objectClass', value='top'),
dict(attr='cn', value='Managed Entries'),
]
}
]
Note that the variable substitution in both examples has been completed.
Either may make changes directly in LDAP or can return updates in
update format.
'''
if any(arg is not _sentinel for arg in (dm_password, online, ldapi)):
warnings.warn(
"dm_password, online, and ldapi arguments are deprecated",
DeprecationWarning,
stacklevel=2
)
self.sub_dict = sub_dict if sub_dict is not None else {}
self.conn = None
self.modified = False
self.ldapuri = ipaldap.realm_to_ldapi_uri(api.env.realm)
self.api = create_api(mode=None)
self.api.bootstrap(
in_server=True,
context='updates',
confdir=paths.ETC_IPA,
ldap_uri=self.ldapuri
)
self.api.finalize()
self.create_connection()
# get ipa-local domain idrange settings
domain_range = f"{self.api.env.realm}_id_range"
try:
result = self.api.Command.idrange_show(domain_range)["result"]
except errors.NotFound:
idstart = None
idmax = None
else:
idstart = int(result['ipabaseid'][0])
idrange_size = int(result['ipaidrangesize'][0])
idmax = idstart + idrange_size - 1
default_sub = get_sub_dict(
realm=api.env.realm,
domain=api.env.domain,
suffix=api.env.basedn,
fqdn=api.env.host,
idstart=idstart,
idmax=idmax,
)
replication_plugin = (
installutils.get_replication_plugin_name(self.conn.get_entry)
)
default_sub["REPLICATION_PLUGIN"] = replication_plugin
for k, v in default_sub.items():
self.sub_dict.setdefault(k, v)
def _template_str(self, s):
try:
return ipautil.template_str(s, self.sub_dict)
except KeyError as e:
raise BadSyntax("Unknown template keyword %s" % e)
def read_file(self, filename):
if filename == '-':
fd = sys.stdin
else:
fd = open(filename)
text = fd.readlines()
        if fd != sys.stdin:
            fd.close()
return text
def parse_update_file(self, data_source_name, source_data, all_updates):
"""Parse the update file into a dictonary of lists and apply the update
for each DN in the file."""
update = {}
logical_line = ""
dn = None
lcount = 0
def emit_item(logical_line):
'''
            Given a logical line containing an item to process, perform the
            following:
            * Strip leading & trailing whitespace
            * Substitute any variables
            * Strip again and skip empty/commented lines after substitution
            * Get the action, attribute, and value
            * Each update has one list per disposition; append to the
              specified disposition list
'''
logical_line = logical_line.strip()
if logical_line == '':
return
            # Perform variable substitution on the constructed line
logical_line = self._template_str(logical_line)
# skip line if substitution has added a comment. FIPS mode
# disables some lines that way.
logical_line = logical_line.strip()
if not logical_line or logical_line.startswith('#'):
return
items = logical_line.split(':', 2)
if len(items) == 0:
raise BadSyntax("Bad formatting on line %s:%d: %s" % (data_source_name, lcount, logical_line))
action = items[0].strip().lower()
if action not in self.action_keywords:
raise BadSyntax("Unknown update action '%s', data source=%s" % (action, data_source_name))
if action == 'deleteentry':
new_value = None
disposition = "deleteentry"
else:
if len(items) != 3:
raise BadSyntax("Bad formatting on line %s:%d: %s" % (data_source_name, lcount, logical_line))
attr = items[1].strip()
                # do not strip here, we need to detect '::' due to base64
                # encoded values; stripping may result in a false detection
value = items[2]
# detect base64 encoding
                # values which start with ':' are base64 encoded
# decode it as a binary value
if value.startswith(':'):
value = value[1:]
binary = True
else:
binary = False
value = value.strip()
if action == 'replace':
                    # str.split() never raises ValueError, so validate the
                    # result explicitly to catch a missing '::' separator
                    value = value.split('::', 1)
                    if len(value) != 2:
                        raise BadSyntax(
                            "Bad syntax in replace on line %s:%d: %s, needs "
                            "to be in the format old::new in %s" % (
                                data_source_name, lcount, logical_line,
                                value)
                        )
else:
value = [value]
if binary:
for i, v in enumerate(value):
try:
value[i] = base64.b64decode(v)
except (TypeError, ValueError) as e:
raise BadSyntax(
"Base64 encoded value %s on line %s:%d: %s is "
"incorrect (%s)" % (v, data_source_name,
lcount, logical_line, e)
)
else:
for i, v in enumerate(value):
if isinstance(v, unicode):
value[i] = v.encode('utf-8')
if action != 'replace':
value = value[0]
if action == "default":
new_value = {'attr': attr, 'value': value}
disposition = "default"
else:
new_value = {'action': action, "attr": attr,
'value': value}
disposition = "updates"
disposition_list = update.setdefault(disposition, [])
disposition_list.append(new_value)
def emit_update(update):
'''
When processing a dn is completed emit the update by appending it
into list of all updates
'''
dn = update.get('dn')
assert isinstance(dn, DN)
all_updates.append(update)
def emit_plugin_update(update):
'''
When processing a plugin is complete emit the plugin update by
appending it into list of all updates
'''
all_updates.append(update)
# Iterate over source input lines
for source_line in source_data:
lcount += 1
# strip trailing whitespace and newline
source_line = source_line.rstrip()
# skip comments and empty lines
if source_line.startswith('#') or source_line == '':
continue
state = None
emit_previous_dn = False
# parse special keywords
if source_line.lower().startswith('dn:'):
state = 'dn'
emit_previous_dn = True
elif source_line.lower().startswith('plugin:'):
state = 'plugin'
emit_previous_dn = True
if emit_previous_dn and dn is not None:
# Emit previous dn
emit_item(logical_line)
logical_line = ''
emit_update(update)
update = {}
dn = None
if state == 'dn':
# Starting new dn
dn = source_line[3:].strip()
dn = DN(self._template_str(dn))
update['dn'] = dn
elif state == 'plugin':
# plugin specification is online only
plugin_name = source_line[7:].strip()
if not plugin_name:
raise BadSyntax("plugin name is not defined")
update['plugin'] = plugin_name
emit_plugin_update(update)
update = {}
else:
# Process items belonging to dn
if dn is None:
raise BadSyntax("dn is not defined in the update, data source=%s" % (data_source_name))
# If continuation line, append to existing logical line & continue,
# otherwise flush the previous item.
if source_line.startswith(' '):
logical_line += source_line[1:]
continue
emit_item(logical_line)
logical_line = source_line
if dn is not None:
emit_item(logical_line)
logical_line = ''
emit_update(update)
update = {}
return all_updates
def create_index_task(self, *attributes):
"""Create a task to update an index for attributes"""
cn_uuid = uuid.uuid1()
# cn_uuid.time is in nanoseconds, but other users of LDAPUpdate expect
# seconds in 'TIME' so scale the value down
self.sub_dict['TIME'] = int(cn_uuid.time/1e9)
cn = "indextask_%s_%s" % (cn_uuid.time, cn_uuid.clock_seq)
dn = DN(('cn', cn), ('cn', 'index'), ('cn', 'tasks'), ('cn', 'config'))
e = self.conn.make_entry(
dn,
objectClass=['top', 'extensibleObject'],
cn=[cn],
nsInstance=['userRoot'],
nsIndexAttribute=list(attributes),
)
logger.debug(
"Creating task %s to index attributes: %s",
dn, ', '.join(attributes)
)
self.conn.add_entry(e)
return dn
def monitor_index_task(self, dn):
"""Give a task DN monitor it and wait until it has completed (or failed)
"""
assert isinstance(dn, DN)
# Pause for a moment to give the task time to be created
time.sleep(1)
attrlist = ['nstaskstatus', 'nstaskexitcode']
entry = None
while True:
try:
entry = self.conn.get_entry(dn, attrlist)
except errors.NotFound:
logger.error("Task not found: %s", dn)
return
except errors.DatabaseError as e:
logger.error("Task lookup failure %s", e)
return
status = entry.single_value.get('nstaskstatus')
if status is None:
# task doesn't have a status yet
time.sleep(1)
continue
if "finished" in status.lower():
logger.debug("Indexing finished")
break
logger.debug("Indexing in progress")
time.sleep(1)
return
def _create_default_entry(self, dn, default):
"""Create the default entry from the values provided.
The return type is ipaldap.LDAPEntry
"""
assert isinstance(dn, DN)
entry = self.conn.make_entry(dn)
if not default:
# This means that the entire entry needs to be created with add
return entry
for item in default:
# We already do syntax-parsing so this is safe
attr = item['attr']
value = item['value']
e = entry.raw.get(attr)
if e:
# multi-valued attribute
e = list(e)
e.append(value)
else:
e = [value]
entry.raw[attr] = e
entry.reset_modlist()
return entry
def _get_entry(self, dn):
"""Retrieve an object from LDAP.
The return type is ipaldap.LDAPEntry
"""
assert isinstance(dn, DN)
searchfilter="objectclass=*"
sattrs = ["*", "aci", "attributeTypes", "objectClasses"]
scope = self.conn.SCOPE_BASE
return self.conn.get_entries(dn, scope, searchfilter, sattrs)
def _apply_update_disposition(self, updates, entry):
"""
updates is a list of changes to apply
entry is the thing to apply them to
Returns the modified entry
"""
if not updates:
return entry
only = {}
for update in updates:
# We already do syntax-parsing so this is safe
action = update['action']
attr = update['attr']
update_value = update['value']
# do not mix comparison of bytes and unicode, everything in this
# function should be compared as bytes
if isinstance(update_value, (list, tuple)):
update_value = [
v.encode('utf-8') if isinstance(v, unicode) else v
for v in update_value
]
elif isinstance(update_value, unicode):
update_value = update_value.encode('utf-8')
entry_values = entry.raw.get(attr, [])
if action == 'remove':
logger.debug("remove: '%s' from %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
try:
entry_values.remove(update_value)
except ValueError:
logger.debug(
"remove: '%s' not in %s",
safe_output(attr, update_value), attr)
else:
entry.raw[attr] = entry_values
logger.debug('remove: updated value %s', safe_output(
attr, entry_values))
elif action == 'add':
logger.debug("add: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Remove it, ignoring errors so we can blindly add it later
try:
entry_values.remove(update_value)
except ValueError:
pass
entry_values.append(update_value)
logger.debug('add: updated value %s',
safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'addifnew':
logger.debug("addifnew: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only add the attribute if it doesn't exist. Only works
# with single-value attributes. Entry must exist.
if entry.get('objectclass') and len(entry_values) == 0:
entry_values.append(update_value)
logger.debug('addifnew: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'addifexist':
logger.debug("addifexist: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
                # Only add the attribute if the entry exists. We
                # determine this based on whether it has an objectclass
if entry.get('objectclass'):
entry_values.append(update_value)
logger.debug('addifexist: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'only':
logger.debug("only: set %s to '%s', current value %s",
attr,
safe_output(attr, update_value),
safe_output(attr, entry_values))
if only.get(attr):
entry_values.append(update_value)
else:
entry_values = [update_value]
only[attr] = True
entry.raw[attr] = entry_values
logger.debug('only: updated value %s',
safe_output(attr, entry_values))
elif action == 'onlyifexist':
logger.debug("onlyifexist: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
                # Only set the attribute if the entry exists. We
                # determine this based on whether it has an objectclass
if entry.get('objectclass'):
if only.get(attr):
entry_values.append(update_value)
else:
entry_values = [update_value]
only[attr] = True
logger.debug('onlyifexist: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'deleteentry':
                # skip this update type, it is handled by _delete_record()
return None
elif action == 'replace':
                # replace values were stored as a list
old, new = update_value
try:
entry_values.remove(old)
except ValueError:
logger.debug('replace: %s not found, skipping',
safe_output(attr, old))
else:
entry_values.append(new)
logger.debug('replace: updated value %s',
safe_output(attr, entry_values))
entry.raw[attr] = entry_values
return entry
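    # Illustrative walk-through of the 'replace' disposition handled above
    # (values made up): the update line
    #
    #   replace: krbPwdMaxFailure: 3::6
    #
    # arrives here as action='replace', value=[b'3', b'6']; b'3' is removed
    # from the current values and b'6' is appended, while an entry holding
    # any other value is left untouched.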
def print_entity(self, e, message=None):
"""The entity object currently lacks a str() method"""
logger.debug("---------------------------------------------")
if message:
logger.debug("%s", message)
logger.debug("dn: %s", e.dn)
for a, value in e.raw.items():
logger.debug('%s:', a)
            for v in value:
                logger.debug("\t%s", safe_output(a, v))
def _update_record(self, update):
found = False
new_entry = self._create_default_entry(update.get('dn'),
update.get('default'))
try:
e = self._get_entry(new_entry.dn)
if len(e) > 1:
# we should only ever get back one entry
raise BadSyntax("More than 1 entry returned on a dn search!? %s" % new_entry.dn)
entry = e[0]
found = True
logger.debug("Updating existing entry: %s", entry.dn)
except errors.NotFound:
# Doesn't exist, start with the default entry
entry = new_entry
logger.debug("New entry: %s", entry.dn)
except errors.DatabaseError:
# Doesn't exist, start with the default entry
entry = new_entry
logger.debug("New entry, using default value: %s", entry.dn)
self.print_entity(entry, "Initial value")
# Bring this entry up to date
entry = self._apply_update_disposition(update.get('updates'), entry)
if entry is None:
# It might be None if it is just deleting an entry
return None, False
self.print_entity(entry, "Final value after applying updates")
added = False
updated = False
if not found:
try:
if len(entry):
# addifexist may result in an entry with only a
# dn defined. In that case there is nothing to do.
# It means the entry doesn't exist, so skip it.
try:
self.conn.add_entry(entry)
except errors.NotFound:
# parent entry of the added entry does not exist
# this may not be an error (e.g. entries in NIS container)
logger.error("Parent DN of %s may not exist, cannot "
"create the entry", entry.dn)
return entry, False
added = True
self.modified = True
except Exception as e:
logger.error("Add failure %s", e)
else:
# Update LDAP
try:
changes = entry.generate_modlist()
if len(changes) >= 1:
updated = True
safe_changes = []
                for (mod_type, attr, values) in changes:
                    safe_changes.append(
                        (mod_type, attr, safe_output(attr, values)))
logger.debug("%s", safe_changes)
logger.debug("Updated %d", updated)
if updated:
self.conn.update_entry(entry)
logger.debug("Done")
except errors.EmptyModlist:
logger.debug("Entry already up-to-date")
updated = False
except errors.DatabaseError as e:
logger.error("Update failed: %s", e)
updated = False
except errors.DuplicateEntry as e:
logger.debug("Update already exists, skip it: %s", e)
updated = False
except errors.ACIError as e:
logger.error("Update failed: %s", e)
updated = False
if updated:
self.modified = True
return entry, added or updated
def _delete_record(self, updates):
"""
Delete record
"""
dn = updates['dn']
try:
logger.debug("Deleting entry %s", dn)
self.conn.delete_entry(dn)
self.modified = True
except errors.NotFound as e:
logger.debug("%s did not exist:%s", dn, e)
self.modified = True
except errors.DatabaseError as e:
logger.error("Delete failed: %s", e)
def get_all_files(self, root, recursive=False):
"""Get all update files"""
f = []
for path, _subdirs, files in os.walk(root):
for name in files:
if fnmatch.fnmatch(name, "*.update"):
f.append(os.path.join(path, name))
if not recursive:
break
f.sort()
return f
def _run_update_plugin(self, plugin_name):
logger.debug("Executing upgrade plugin: %s", plugin_name)
restart_ds, updates = self.api.Updater[plugin_name]()
if updates:
self._run_updates(updates)
# a restart may be required even if the plugin returned no updates,
# because the plugin may change LDAP data directly
if restart_ds:
self.close_connection()
self.restart_ds()
self.create_connection()
def create_connection(self):
if self.conn is None:
self.api.Backend.ldap2.connect(
time_limit=UPDATE_SEARCH_TIME_LIMIT,
size_limit=0)
self.conn = self.api.Backend.ldap2
def _run_updates(self, all_updates):
index_attributes = set()
update_ldapi_mappings = False
for update in all_updates:
if 'deleteentry' in update:
self._delete_record(update)
elif 'plugin' in update:
self._run_update_plugin(update['plugin'])
else:
entry, modified = self._update_record(update)
if modified:
if entry.dn.endswith(self.index_suffix):
index_attributes.add(entry.single_value['cn'])
if (
entry.dn.endswith(self.ldapi_autobind_suffix)
and "nsLDAPIFixedAuthMap" in entry.get(
"objectClass", ()
)
):
update_ldapi_mappings = True
if index_attributes:
# The LDAPUpdate framework now keeps record of all changed/added
# indices and batches all changed attribute in a single index
# task. This makes updates much faster when multiple indices are
# added or modified.
task_dn = self.create_index_task(*sorted(index_attributes))
self.monitor_index_task(task_dn)
if update_ldapi_mappings:
# update mappings when any autobind entry is added or modified
run_ldapi_reload_task(self.conn)
def update(self, files, ordered=True):
"""Execute the update. files is a list of the update files to use.
:param ordered: if True, update files are executed in alphabetical order
:return: True if anything was changed, otherwise False
"""
self.modified = False
try:
upgrade_files = files
if ordered:
upgrade_files = sorted(files)
for f in upgrade_files:
start = time.time()
try:
logger.debug("Parsing update file '%s'", f)
data = self.read_file(f)
except Exception as e:
logger.error("error reading update file '%s'", f)
raise RuntimeError(e)
all_updates = []
self.parse_update_file(f, data, all_updates)
self._run_updates(all_updates)
dur = time.time() - start
logger.debug(
"LDAP update duration: %s %.03f sec", f, dur,
extra={'timing': ('ldapupdate', f, None, dur)}
)
finally:
self.close_connection()
return self.modified
def close_connection(self):
"""Close ldap connection"""
if self.conn:
self.api.Backend.ldap2.disconnect()
self.conn = None
def restart_ds(self):
logger.debug('Restarting directory server to apply updates')
installutils.restart_dirsrv()
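# Usage sketch (illustrative, not part of the original module): with a
# bootstrapped and finalized `api`, a typical driver applies every
# *.update file found under the updates directory:
#
#   ld = LDAPUpdate(api=api)
#   files = ld.get_all_files(paths.UPDATES_DIR)
#   modified = ld.update(files)  # True if any entry was changed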
| 37,807
|
Python
|
.py
| 910
| 28.39011
| 143
| 0.534574
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,766
|
service.py
|
freeipa_freeipa/ipaserver/install/service.py
|
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import logging
import sys
import os
import time
import traceback
import tempfile
import warnings
import six
from ipalib.install import certstore, sysrestore
from ipapython import ipautil
from ipapython.dn import DN
from ipapython import kerberos
from ipalib import api, errors, x509
from ipalib.constants import FQDN
from ipaplatform import services
from ipaplatform.constants import User
from ipaplatform.paths import paths
from ipaserver.masters import (
CONFIGURED_SERVICE, ENABLED_SERVICE, HIDDEN_SERVICE, SERVICE_LIST
)
from ipaserver.servroles import HIDDEN
from ipaserver.install.ldapupdate import LDAPUpdate, run_ldapi_reload_task
logger = logging.getLogger(__name__)
if six.PY3:
unicode = str
def print_msg(message, output_fd=sys.stdout):
logger.debug("%s", message)
output_fd.write(message)
output_fd.write("\n")
output_fd.flush()
def format_seconds(seconds):
"""Format a number of seconds as an English minutes+seconds message"""
parts = []
minutes, seconds = divmod(seconds, 60)
if minutes:
parts.append('%d minute' % minutes)
if minutes != 1:
parts[-1] += 's'
if seconds or not minutes:
parts.append('%d second' % seconds)
if seconds != 1:
parts[-1] += 's'
return ' '.join(parts)
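# Illustrative doctest-style values for format_seconds(); these follow
# directly from the implementation above:
#
#   >>> format_seconds(0)
#   '0 seconds'
#   >>> format_seconds(60)
#   '1 minute'
#   >>> format_seconds(62)
#   '1 minute 2 seconds'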
def add_principals_to_group(admin_conn, group, member_attr, principals):
"""Add principals to a GroupOfNames LDAP group
admin_conn -- LDAP connection with admin rights
group -- DN of the group
member_attr -- attribute to represent members
principals -- list of DNs to add as members
"""
try:
current = admin_conn.get_entry(group)
members = current.get(member_attr, [])
if len(members) == 0:
current[member_attr] = []
for amember in principals:
if amember not in members:
current[member_attr].extend([amember])
admin_conn.update_entry(current)
except errors.NotFound:
entry = admin_conn.make_entry(
group,
objectclass=["top", "GroupOfNames"],
cn=[group['cn']],
member=principals,
)
admin_conn.add_entry(entry)
except errors.EmptyModlist:
# If there are no changes just pass
pass
def case_insensitive_attr_has_value(attr, value):
"""
Helper function to find value in an attribute having case-insensitive
matching rules
:param attr: attribute values
:param value: value to find
:returns: True if the case-insensitive match succeeds, false otherwise
"""
if any(value.lower() == val.lower()
for val in attr):
return True
return False
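# Illustrative doctest-style values for the case-insensitive match:
#
#   >>> case_insensitive_attr_has_value(['enabledService'], 'ENABLEDSERVICE')
#   True
#   >>> case_insensitive_attr_has_value(['hiddenService'], 'enabledService')
#   False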
def set_service_entry_config(name, fqdn, config_values,
ldap_suffix='',
post_add_config=()):
"""
Sets the 'ipaConfigString' values on the entry. If the entry is not present
already, create a new one with desired 'ipaConfigString'
:param name: service entry name
:param config_values: configuration values to store
:param fqdn: master fqdn
:param ldap_suffix: LDAP backend suffix
:param post_add_config: additional configuration to add when adding a
non-existent entry
"""
assert isinstance(ldap_suffix, DN)
entry_name = DN(
('cn', name), ('cn', fqdn), api.env.container_masters, ldap_suffix)
# enable disabled service
try:
entry = api.Backend.ldap2.get_entry(
entry_name, ['ipaConfigString'])
except errors.NotFound:
pass
else:
existing_values = entry.get('ipaConfigString', [])
for value in config_values:
if case_insensitive_attr_has_value(existing_values, value):
logger.debug(
"service %s: config string %s already set", name, value)
entry.setdefault('ipaConfigString', []).append(value)
try:
api.Backend.ldap2.update_entry(entry)
except errors.EmptyModlist:
logger.debug(
"service %s has already enabled config values %s", name,
config_values)
return
except BaseException:
logger.debug("failed to set service %s config values", name)
raise
logger.debug("service %s has all config values set", name)
return
entry = api.Backend.ldap2.make_entry(
entry_name,
objectclass=["nsContainer", "ipaConfigObject"],
cn=[name],
ipaconfigstring=config_values + list(post_add_config),
)
try:
api.Backend.ldap2.add_entry(entry)
except (errors.DuplicateEntry) as e:
logger.debug("failed to add service entry %s", name)
raise e
def enable_services(fqdn):
"""Change all services to enabled state
Server.ldap_configure() only marks a service as configured. Services
are enabled at the very end of installation.
Note: DNS records must be updated with dns_update_system_records, too.
:param fqdn: hostname of server
"""
_set_services_state(fqdn, ENABLED_SERVICE)
def hide_services(fqdn):
"""Change all services to hidden state
Note: DNS records must be updated with dns_update_system_records, too.
:param fqdn: hostname of server
"""
_set_services_state(fqdn, HIDDEN_SERVICE)
def sync_services_state(fqdn):
"""Synchronize services state from IPA master role state
Hide all services if the IPA master role state is in hidden state.
Otherwise enable all services.
:param fqdn: hostname of server
"""
result = api.Command.server_role_find(
server_server=fqdn,
role_servrole=u'IPA master',
status=HIDDEN
)
if result['count']:
# one hidden server role
hide_services(fqdn)
else:
# IPA master is either enabled or configured, enable all
enable_services(fqdn)
def _set_services_state(fqdn, dest_state):
"""Change all services of a host
:param fqdn: hostname of server
:param dest_state: destination state
"""
ldap2 = api.Backend.ldap2
search_base = DN(('cn', fqdn), api.env.container_masters, api.env.basedn)
source_states = {
CONFIGURED_SERVICE.lower(),
ENABLED_SERVICE.lower(),
HIDDEN_SERVICE.lower()
}
source_states.remove(dest_state.lower())
search_filter = ldap2.combine_filters(
[
ldap2.make_filter({'objectClass': 'ipaConfigObject'}),
ldap2.make_filter(
{'ipaConfigString': list(source_states)},
rules=ldap2.MATCH_ANY
),
],
rules=ldap2.MATCH_ALL
)
try:
entries = ldap2.get_entries(
search_base,
filter=search_filter,
scope=api.Backend.ldap2.SCOPE_ONELEVEL,
attrs_list=['cn', 'ipaConfigString']
)
except errors.EmptyResult:
logger.debug("No services with a state from %s, ignoring",
list(source_states))
return
for entry in entries:
name = entry['cn']
cfgstrings = entry.setdefault('ipaConfigString', [])
for value in list(cfgstrings):
if value.lower() in source_states:
cfgstrings.remove(value)
if not case_insensitive_attr_has_value(cfgstrings, dest_state):
cfgstrings.append(dest_state)
try:
ldap2.update_entry(entry)
except errors.EmptyModlist:
logger.debug("Nothing to do for service %s", name)
except Exception:
logger.exception("failed to set service %s config values", name)
raise
else:
logger.debug(
"Set service %s for %s to %s", name, fqdn, dest_state
)
class Service:
def __init__(self, service_name, service_desc=None, sstore=None,
fstore=None, api=api, realm_name=None,
service_user=None, service_prefix=None,
keytab=None):
self.service_name = service_name
self.service_desc = service_desc
self.service = services.service(service_name, api)
self.steps = []
self.output_fd = sys.stdout
self.fqdn = FQDN
if sstore:
self.sstore = sstore
else:
self.sstore = sysrestore.StateFile(paths.SYSRESTORE)
if fstore:
self.fstore = fstore
else:
self.fstore = sysrestore.FileStore(paths.SYSRESTORE)
self.realm = realm_name
self.suffix = DN()
self.service_prefix = service_prefix
self.keytab = keytab
self.cert = None
self.api = api
if service_user is not None:
service_user = User(service_user)
self.service_user = service_user
self.keytab_user = service_user
self.dm_password = None # silence pylint
self.promote = False
@property
def principal(self):
if any(attr is None for attr in (self.realm, self.fqdn,
self.service_prefix)):
return None
return unicode(
kerberos.Principal(
(self.service_prefix, self.fqdn), realm=self.realm))
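# Illustration (hypothetical values): with service_prefix 'HTTP',
# fqdn 'ipa.example.test' and realm 'EXAMPLE.TEST' the property
# renders the usual Kerberos form:
#
#   'HTTP/ipa.example.test@EXAMPLE.TEST'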
def get_principal_dn(self, principal=None):
if principal is None:
principal = self.principal
return DN(
('krbprincipalname', principal),
self.api.env.container_service,
self.suffix
)
def _ldap_update(self, filenames, *, basedir=paths.UPDATES_DIR):
"""Apply update ldif files
Note: Additional substitution must be added to LDAPUpdate() to ensure
that ipa-ldap-updater is able to handle all update files as well.
:param filenames: list of file names
:param basedir: base directory for files (default: UPDATES_DIR)
:return: modified state
"""
assert isinstance(filenames, (list, tuple))
if basedir is not None:
filenames = [os.path.join(basedir, fname) for fname in filenames]
ld = LDAPUpdate(api=self.api)
# assume that caller supplies files in correct order
return ld.update(filenames, ordered=False)
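# Sketch of a typical call (file name illustrative); relative names
# are resolved against paths.UPDATES_DIR by default:
#
#   self._ldap_update(['90-example.update'])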
def _ldap_mod(self, ldif, sub_dict=None, raise_on_err=True,
ldap_uri=None, dm_password=None):
pw_name = None
fd = None
if not os.path.isabs(ldif):
path = os.path.join(paths.USR_SHARE_IPA_DIR, ldif)
else:
path = ldif
nologlist = []
if sub_dict is not None:
txt = ipautil.template_file(path, sub_dict)
fd = ipautil.write_tmp_file(txt)
path = fd.name
# do not log passwords
if 'PASSWORD' in sub_dict:
nologlist.append(sub_dict['PASSWORD'])
if 'RANDOM_PASSWORD' in sub_dict:
nologlist.append(sub_dict['RANDOM_PASSWORD'])
args = [paths.LDAPMODIFY, "-v", "-f", path]
# As we always connect to the local host,
# use URI of admin connection
if not ldap_uri:
ldap_uri = api.Backend.ldap2.ldap_uri
args += ["-H", ldap_uri]
if dm_password:
with tempfile.NamedTemporaryFile(
mode='w', delete=False) as pw_file:
pw_file.write(dm_password)
pw_name = pw_file.name
auth_parms = ["-x", "-D", "cn=Directory Manager", "-y", pw_name]
# Use GSSAPI auth when not using DM password and not running as root
elif os.getegid() != 0:
auth_parms = ["-Y", "GSSAPI"]
# Default to EXTERNAL auth mechanism
else:
auth_parms = ["-Y", "EXTERNAL"]
args += auth_parms
try:
try:
ipautil.run(args, nolog=nologlist)
except ipautil.CalledProcessError as e:
logger.critical("Failed to load %s: %s", ldif, str(e))
if raise_on_err:
raise
finally:
if pw_name:
os.remove(pw_name)
def move_service(self, principal):
"""
Used to move a principal entry created by kadmin.local from
cn=kerberos to cn=services
"""
dn = DN(('krbprincipalname', principal), ('cn', self.realm), ('cn', 'kerberos'), self.suffix)
try:
entry = api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
# There is no service in the wrong location, nothing to do.
# This can happen when installing a replica
return None
entry.pop('krbpwdpolicyreference', None) # don't copy virtual attr
newdn = self.get_principal_dn(principal)
hostdn = DN(('fqdn', self.fqdn), ('cn', 'computers'), ('cn', 'accounts'), self.suffix)
api.Backend.ldap2.delete_entry(entry)
entry.dn = newdn
classes = entry.get("objectclass")
classes = classes + ["ipaobject", "ipaservice", "pkiuser"]
entry["objectclass"] = list(set(classes))
entry["ipauniqueid"] = ['autogenerate']
entry["managedby"] = [hostdn]
api.Backend.ldap2.add_entry(entry)
return newdn
def add_simple_service(self, principal):
"""
Add a very basic IPA service.
The principal needs to be fully-formed: service/host@REALM
"""
dn = self.get_principal_dn(principal)
hostdn = DN(('fqdn', self.fqdn), ('cn', 'computers'), ('cn', 'accounts'), self.suffix)
entry = api.Backend.ldap2.make_entry(
dn,
objectclass=[
"krbprincipal", "krbprincipalaux", "krbticketpolicyaux",
"ipaobject", "ipaservice", "pkiuser"],
krbprincipalname=[principal],
ipauniqueid=['autogenerate'],
managedby=[hostdn],
)
api.Backend.ldap2.add_entry(entry)
return dn
def add_autobind_entry(self, user, group, principal):
"""Add or update LDAPI autobind entry to map uid/gid to principal
:param user: ipaplatform User object
:param group: ipaplatform Group object
:param principal: service principal to bind as
:return: dn of new autobind entry
"""
authdn = self.get_principal_dn(principal)
dn = DN(
("cn", self.service_name), ("cn", "auto_bind"), ("cn", "config")
)
settings = {
"uidNumber": [user.uid],
"gidNumber": [group.gid],
"nsslapd-authenticateAsDN": [authdn]
}
ldap2 = self.api.Backend.ldap2
try:
entry = ldap2.get_entry(dn)
except errors.NotFound:
entry = ldap2.make_entry(
dn,
objectclass=["top", "nsLDAPIFixedAuthMap"],
cn=[self.service_name],
**settings,
)
ldap2.add_entry(entry)
logger.debug("Created autobind entry %s", dn)
else:
entry.update(settings)
try:
ldap2.update_entry(entry)
except errors.EmptyModlist:
logger.debug("Autobind entry %s already configured", dn)
else:
logger.debug("Updated autobind entry %s", dn)
# refresh LDAPI mappings
run_ldapi_reload_task(self.api.Backend.ldap2)
return dn
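# Illustration (hypothetical caller): map a daemon's uid/gid to its
# service principal over LDAPI, e.g. for a BIND-related service:
#
#   self.add_autobind_entry(constants.NAMED_USER, constants.NAMED_GROUP,
#                           self.principal)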
def add_cert_to_service(self):
"""
Add a certificate to a service
This server cert should be in DER format.
"""
if self.cert is None:
raise ValueError("{} has no cert".format(self.service_name))
dn = self.get_principal_dn()
entry = api.Backend.ldap2.get_entry(dn)
entry.setdefault('userCertificate', []).append(self.cert)
try:
api.Backend.ldap2.update_entry(entry)
except Exception as e:
logger.critical("Could not add certificate to service %s entry: "
"%s", self.principal, str(e))
def export_ca_certs_file(self, cafile, ca_is_configured, conn=None):
"""
Export the CA certificates stored in LDAP into a file
:param cafile: the file to write the CA certificates to
:param ca_is_configured: whether IPA is CA-less or not
:param conn: an optional LDAP connection to use
"""
if conn is None:
conn = api.Backend.ldap2
ca_certs = None
try:
ca_certs = certstore.get_ca_certs(
conn, self.suffix, self.realm, ca_is_configured)
except errors.NotFound:
pass
else:
with open(cafile, 'wb') as fd:
for cert, _unused1, _unused2, _unused3 in ca_certs:
fd.write(cert.public_bytes(x509.Encoding.PEM))
def export_ca_certs_nssdb(self, db, ca_is_configured, conn=None):
"""
Export the CA certificates stored in LDAP into an NSS database
:param db: the target NSS database
:param ca_is_configured: whether IPA is CA-less or not
:param conn: an optional LDAP connection to use
"""
if conn is None:
conn = api.Backend.ldap2
try:
ca_certs = certstore.get_ca_certs_nss(
conn, self.suffix, self.realm, ca_is_configured)
except errors.NotFound:
pass
else:
for cert, nickname, trust_flags in ca_certs:
db.add_cert(cert, nickname, trust_flags)
def is_configured(self):
return self.sstore.has_state(self.service_name)
def set_output(self, fd):
self.output_fd = fd
def stop(self, instance_name="", capture_output=True):
self.service.stop(instance_name, capture_output=capture_output)
def start(self, instance_name="", capture_output=True, wait=True):
self.service.start(instance_name, capture_output=capture_output, wait=wait)
def restart(self, instance_name="", capture_output=True, wait=True):
self.service.restart(instance_name, capture_output=capture_output, wait=wait)
def is_running(self, instance_name="", wait=True):
return self.service.is_running(instance_name, wait)
def install(self):
self.service.install()
def remove(self):
self.service.remove()
def enable(self):
self.service.enable()
def disable(self):
self.service.disable()
def is_enabled(self):
return self.service.is_enabled()
def mask(self):
return self.service.mask()
def unmask(self):
return self.service.unmask()
def is_masked(self):
return self.service.is_masked()
def backup_state(self, key, value):
self.sstore.backup_state(self.service_name, key, value)
def restore_state(self, key):
return self.sstore.restore_state(self.service_name, key)
def get_state(self, key):
return self.sstore.get_state(self.service_name, key)
def delete_state(self, key):
self.sstore.delete_state(self.service_name, key)
def print_msg(self, message):
print_msg(message, self.output_fd)
def step(self, message, method, run_after_failure=False):
self.steps.append((message, method, run_after_failure))
def start_creation(self, start_message=None, end_message=None,
show_service_name=True, runtime=None):
"""
Starts creation of the service.
Use start_message and end_message for explicit messages
at the beginning / end of the process. Otherwise they are generated
using the service description (or service name, if the description has
not been provided).
Use show_service_name to include service name in generated descriptions.
"""
creation_start = time.time()
if start_message is None:
# no other info than mandatory service_name provided, use that
if self.service_desc is None:
start_message = "Configuring %s" % self.service_name
# description should be more accurate than service name
else:
start_message = "Configuring %s" % self.service_desc
if show_service_name:
start_message = "%s (%s)" % (start_message, self.service_name)
if end_message is None:
if self.service_desc is None:
if show_service_name:
end_message = "Done configuring %s." % self.service_name
else:
end_message = "Done."
else:
if show_service_name:
end_message = "Done configuring %s (%s)." % (
self.service_desc, self.service_name)
else:
end_message = "Done configuring %s." % self.service_desc
if runtime is not None and runtime > 0:
self.print_msg('%s. Estimated time: %s' % (start_message,
format_seconds(runtime)))
else:
self.print_msg(start_message)
def run_step(message, method):
self.print_msg(message)
start = time.time()
method()
dur = time.time() - start
name = method.__name__
logger.debug(
"step duration: %s %s %.02f sec",
self.service_name, name, dur,
extra={'timing': ('step', self.service_name, name, dur)},
)
step = 0
steps_iter = iter(self.steps)
try:
for message, method, run_after_failure in steps_iter:
full_msg = " [%d/%d]: %s" % (step+1, len(self.steps), message)
run_step(full_msg, method)
step += 1
except BaseException as e:
if not (isinstance(e, SystemExit) and
e.code == 0): # pylint: disable=no-member
# show the traceback, so it's not lost if cleanup method fails
logger.debug("%s", traceback.format_exc())
self.print_msg(' [error] %s: %s' % (type(e).__name__, e))
# run through remaining methods marked run_after_failure
for message, method, run_after_failure in steps_iter:
if run_after_failure:
run_step(" [cleanup]: %s" % message, method)
raise
self.print_msg(end_message)
dur = time.time() - creation_start
logger.debug(
"service duration: %s %.02f sec",
self.service_name, dur,
extra={'timing': ('service', self.service_name, None, dur)},
)
self.steps = []
def ldap_enable(self, name, fqdn, dm_password=None, ldap_suffix='',
config=()):
"""Legacy function, all services should use ldap_configure()
"""
warnings.warn(
"ldap_enable is deprecated, use ldap_configure instead.",
DeprecationWarning,
stacklevel=2
)
self._ldap_enable(ENABLED_SERVICE, name, fqdn, ldap_suffix, config)
def ldap_configure(self, name, fqdn, dm_password=None, ldap_suffix='',
config=()):
"""Create or modify service entry in cn=masters,cn=ipa,cn=etc
Contrary to ldap_enable(), the method only sets
ipaConfigString=configuredService. ipaConfigString=enabledService
is set at the very end of the installation process, to ensure that
other machines see this master/replica after it is fully installed.
To switch all configured services to enabled, use::
ipaserver.install.service.enable_services(api.env.host)
api.Command.dns_update_system_records()
"""
self._ldap_enable(
CONFIGURED_SERVICE, name, fqdn, ldap_suffix, config
)
def _ldap_enable(self, value, name, fqdn, ldap_suffix, config):
extra_config_opts = [
u'startOrder {}'.format(SERVICE_LIST[name].startorder),
]
extra_config_opts.extend(config)
self.unmask()
self.disable()
set_service_entry_config(
name,
fqdn,
[value],
ldap_suffix=ldap_suffix,
post_add_config=extra_config_opts)
def ldap_disable(self, name, fqdn, ldap_suffix):
assert isinstance(ldap_suffix, DN)
entry_dn = DN(('cn', name), ('cn', fqdn), api.env.container_masters,
ldap_suffix)
search_kw = {'ipaConfigString': ENABLED_SERVICE}
filter = api.Backend.ldap2.make_filter(search_kw)
try:
entries, _truncated = api.Backend.ldap2.find_entries(
filter=filter,
attrs_list=['ipaConfigString'],
base_dn=entry_dn,
scope=api.Backend.ldap2.SCOPE_BASE)
except errors.NotFound:
logger.debug("service %s startup entry already disabled", name)
return
assert len(entries) == 1 # only one entry is expected
entry = entries[0]
# case insensitive
for value in entry.get('ipaConfigString', []):
if value.lower() == ENABLED_SERVICE.lower():
entry['ipaConfigString'].remove(value)
break
try:
api.Backend.ldap2.update_entry(entry)
except errors.EmptyModlist:
pass
except BaseException:
logger.debug("failed to disable service %s startup entry", name)
raise
logger.debug("service %s startup entry disabled", name)
def ldap_remove_service_container(self, name, fqdn, ldap_suffix):
entry_dn = DN(('cn', name), ('cn', fqdn),
self.api.env.container_masters, ldap_suffix)
try:
api.Backend.ldap2.delete_entry(entry_dn)
except errors.NotFound:
logger.debug("service %s container already removed", name)
else:
logger.debug("service %s container sucessfully removed", name)
def _add_service_principal(self):
try:
self.api.Command.service_add(self.principal, force=True)
except errors.DuplicateEntry:
pass
def clean_previous_keytab(self, keytab=None):
if keytab is None:
keytab = self.keytab
self.fstore.backup_file(keytab)
try:
os.unlink(keytab)
except OSError:
pass
def set_keytab_owner(self, keytab=None, owner=None):
if keytab is None:
keytab = self.keytab
if owner is None:
owner = self.keytab_user
owner.chown(keytab)
def run_getkeytab(self, ldap_uri, keytab, principal, retrieve=False):
"""
Retrieve a service keytab using ipa-getkeytab. This assumes that the
service principal has already been created in LDAP. By default GSSAPI
authentication is used unless:
* an LDAPI socket is used and the effective process UID is 0; then
autobind via the EXTERNAL SASL mechanism is used
* self.dm_password is not None; then DM credentials are used to
fetch the keytab
"""
args = [paths.IPA_GETKEYTAB,
'-k', keytab,
'-p', principal,
'-H', ldap_uri]
nolog = tuple()
if ldap_uri.startswith("ldapi://") and os.geteuid() == 0:
args.extend(["-Y", "EXTERNAL"])
elif self.dm_password is not None and not self.promote:
args.extend(
['-D', 'cn=Directory Manager',
'-w', self.dm_password])
nolog += (self.dm_password,)
if retrieve:
args.extend(['-r'])
ipautil.run(args, nolog=nolog)
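# Illustration (hypothetical values): over LDAPI as root this method
# effectively runs
#
#   ipa-getkeytab -k <keytab> -p HTTP/ipa.example.test@EXAMPLE.TEST \
#       -H ldapi://... -Y EXTERNAL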
def request_service_keytab(self):
if any(attr is None for attr in (self.principal, self.keytab)):
raise NotImplementedError(
"service must have defined principal "
"name and keytab")
self._add_service_principal()
self.clean_previous_keytab()
self.run_getkeytab(self.api.env.ldap_uri, self.keytab, self.principal)
self.set_keytab_owner()
def replica_ignore_initial_time_skew(self):
"""
Set nsslapd-ignore-time-skew = on if not already set
and store the initial value in order to restore it later.
The on value allows replica initialization even if there
are excessive time skews.
"""
dn = DN(('cn', 'config'))
entry_attrs = api.Backend.ldap2.get_entry(dn)
self.original_time_skew = entry_attrs['nsslapd-ignore-time-skew'][0]
if self.original_time_skew != 'on':
entry_attrs['nsslapd-ignore-time-skew'] = 'on'
api.Backend.ldap2.update_entry(entry_attrs)
def replica_revert_time_skew(self):
"""
Revert nsslapd-ignore-time-skew to its previous value.
"""
dn = DN(('cn', 'config'))
entry_attrs = api.Backend.ldap2.get_entry(dn)
if self.original_time_skew != 'on':
entry_attrs['nsslapd-ignore-time-skew'] = self.original_time_skew
api.Backend.ldap2.update_entry(entry_attrs)
class SimpleServiceInstance(Service):
def create_instance(self, gensvc_name=None, fqdn=None, ldap_suffix=None,
realm=None):
self.gensvc_name = gensvc_name
self.fqdn = fqdn
self.suffix = ldap_suffix
self.realm = realm
self.step("starting %s " % self.service_name, self.__start)
self.step("configuring %s to start on boot" % self.service_name, self.__enable)
self.start_creation("Configuring %s" % self.service_name)
suffix = ipautil.dn_attribute_property('_ldap_suffix')
def __start(self):
self.backup_state("running", self.is_running())
self.restart()
def __enable(self):
self.backup_state("enabled", self.is_enabled())
if self.gensvc_name is None:
self.enable()
else:
self.ldap_configure(self.gensvc_name, self.fqdn, None, self.suffix)
def is_installed(self):
return self.service.is_installed()
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring %s" % self.service_name)
running = self.restore_state("running")
enabled = self.restore_state("enabled")
if self.is_installed():
self.stop()
self.disable()
if running:
self.start()
if enabled:
self.enable()
| 31,605
|
Python
|
.py
| 776
| 30.679124
| 101
| 0.60032
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,767
|
dnskeysyncinstance.py
|
freeipa_freeipa/ipaserver/install/dnskeysyncinstance.py
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import errno
import logging
import os
import re
import shutil
import stat
import ldap
from ipaserver import p11helper as _ipap11helper
from ipapython.dnsutil import DNSName
from ipaserver.install import service
from ipaserver.install import installutils
from ipapython.dn import DN
from ipapython import directivesetter
from ipapython import ipautil
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipalib import errors, api
from ipalib.constants import SOFTHSM_DNSSEC_TOKEN_LABEL
from ipaserver.install.bindinstance import dns_container_exists
logger = logging.getLogger(__name__)
replica_keylabel_template = u"dnssec-replica:%s"
def dnssec_container_exists(suffix):
"""
Test whether the dns container exists.
"""
assert isinstance(suffix, DN)
return api.Backend.ldap2.entry_exists(
DN(('cn', 'sec'), ('cn', 'dns'), suffix))
def remove_replica_public_keys(hostname):
keysyncd = DNSKeySyncInstance()
keysyncd.remove_replica_public_keys(hostname)
class DNSKeySyncInstance(service.Service):
def __init__(self, fstore=None, logger=logger):
super(DNSKeySyncInstance, self).__init__(
"ipa-dnskeysyncd",
service_desc="DNS key synchronization service",
fstore=fstore,
service_prefix=u'ipa-dnskeysyncd',
keytab=paths.IPA_DNSKEYSYNCD_KEYTAB
)
self.extra_config = [u'dnssecVersion 1', ] # DNSSEC enabled
suffix = ipautil.dn_attribute_property('_suffix')
def set_dyndb_ldap_workdir_permissions(self):
"""
Setting up correct permissions to allow write/read access for daemons
"""
directories = [
paths.BIND_LDAP_DNS_IPA_WORKDIR,
paths.BIND_LDAP_DNS_ZONE_WORKDIR,
]
for directory in directories:
try:
os.mkdir(directory, 0o770)
except FileExistsError:
pass
else:
os.chmod(directory, 0o770)
# DNSSEC daemons require access to the directory
constants.NAMED_USER.chown(directory, gid=constants.NAMED_GROUP.gid)
def remove_replica_public_keys(self, replica_fqdn):
ldap = api.Backend.ldap2
dn_base = DN(('cn', 'keys'), ('cn', 'sec'), ('cn', 'dns'), api.env.basedn)
keylabel = replica_keylabel_template % DNSName(replica_fqdn).\
make_absolute().canonicalize().ToASCII()
# get old keys from LDAP
search_kw = {
'objectclass': u"ipaPublicKeyObject",
'ipk11Label': keylabel,
'ipk11Wrap': True,
}
filter = ldap.make_filter(search_kw, rules=ldap.MATCH_ALL)
entries, _truncated = ldap.find_entries(filter=filter, base_dn=dn_base)
for entry in entries:
ldap.delete_entry(entry)
def start_dnskeysyncd(self):
print("Restarting ipa-dnskeysyncd")
self.__start()
def create_instance(self, fqdn, realm_name):
self.fqdn = fqdn
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(self.realm)
try:
self.stop()
except Exception:
pass
# checking status step must be first
self.step("checking status", self.__check_dnssec_status)
self.step("setting up bind-dyndb-ldap working directory",
self.set_dyndb_ldap_workdir_permissions)
self.step("setting up kerberos principal", self.__setup_principal)
self.step("setting up SoftHSM", self.__setup_softhsm)
self.step("adding DNSSEC containers", self.__setup_dnssec_containers)
self.step("creating replica keys", self.__setup_replica_keys)
self.step("configuring ipa-dnskeysyncd to start on boot", self.__enable)
# we need restart named after setting up this service
self.start_creation()
def __check_dnssec_status(self):
if not dns_container_exists(self.suffix):
raise RuntimeError("DNS container does not exist")
# ready to be installed, storing a state is required to run uninstall
self.backup_state("configured", True)
def __setup_dnssec_containers(self):
"""
Setup LDAP containers for DNSSEC
"""
if dnssec_container_exists(self.suffix):
logger.info("DNSSEC container exists (step skipped)")
return
self._ldap_mod("dnssec.ldif", {'SUFFIX': self.suffix, })
def _are_named_options_configured(self, options):
"""Check whether the sysconfig of named is patched
Additional command line options for named are passed
via OPTIONS env variable. Since custom options can be
supplied by a vendor, at least, the base parsing of such
is required.
Current named command line options:
NS_MAIN_ARGS "46A:c:C:d:D:E:fFgi:lL:M:m:n:N:p:P:sS:t:T:U:u:vVx:X:"
If there are several same options the last passed wins.
"""
if options:
pattern = r"[ ]*-[a-zA-Z46]*E[ ]*(.*?)(?: |$)"
engines = re.findall(pattern, options)
if engines and engines[-1] == constants.NAMED_OPENSSL_ENGINE:
return True
return False
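# Illustration of the -E engine parsing above (hypothetical, assuming
# constants.NAMED_OPENSSL_ENGINE == 'pkcs11'):
#
#   keysyncd._are_named_options_configured('-E pkcs11')         # True
#   keysyncd._are_named_options_configured('-E foo -E pkcs11')  # True, last -E wins
#   keysyncd._are_named_options_configured('-4 -n 2')           # False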
def setup_named_openssl_conf(self):
if constants.NAMED_OPENSSL_ENGINE is not None:
logger.debug("Setup OpenSSL config for BIND")
# setup OpenSSL config for BIND,
# this one is needed because FreeIPA installation
# disables p11-kit-proxy PKCS11 module
conf_file_dict = {
'OPENSSL_ENGINE': constants.NAMED_OPENSSL_ENGINE,
'SOFTHSM_MODULE': paths.LIBSOFTHSM2_SO,
'CRYPTO_POLICY_FILE': paths.CRYPTO_POLICY_OPENSSLCNF_FILE,
}
if paths.CRYPTO_POLICY_OPENSSLCNF_FILE is None:
opensslcnf_tmpl = "bind.openssl.cnf.template"
else:
opensslcnf_tmpl = "bind.openssl.cryptopolicy.cnf.template"
named_openssl_txt = ipautil.template_file(
os.path.join(paths.USR_SHARE_IPA_DIR, opensslcnf_tmpl),
conf_file_dict
)
with open(paths.DNSSEC_OPENSSL_CONF, 'w') as f:
os.fchmod(f.fileno(), 0o640)
os.fchown(f.fileno(), 0, gid=constants.NAMED_GROUP.gid)
f.write(named_openssl_txt)
def setup_named_sysconfig(self):
logger.debug("Setup BIND sysconfig")
sysconfig = paths.SYSCONFIG_NAMED
self.fstore.backup_file(sysconfig)
directivesetter.set_directive(
sysconfig,
'SOFTHSM2_CONF', paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
if constants.NAMED_OPENSSL_ENGINE is not None:
directivesetter.set_directive(
sysconfig,
'OPENSSL_CONF', paths.DNSSEC_OPENSSL_CONF,
quotes=False, separator='=')
options = directivesetter.get_directive(
paths.SYSCONFIG_NAMED,
constants.NAMED_OPTIONS_VAR,
separator="="
) or ''
if not self._are_named_options_configured(options):
engine_cmd = "-E {}".format(constants.NAMED_OPENSSL_ENGINE)
new_options = ' '.join([options, engine_cmd])
directivesetter.set_directive(
sysconfig,
constants.NAMED_OPTIONS_VAR, new_options,
quotes=True, separator='=')
def setup_ipa_dnskeysyncd_sysconfig(self):
logger.debug("Setup ipa-dnskeysyncd sysconfig")
sysconfig = paths.SYSCONFIG_IPA_DNSKEYSYNCD
directivesetter.set_directive(
sysconfig,
'SOFTHSM2_CONF', paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
if constants.NAMED_OPENSSL_ENGINE is not None:
directivesetter.set_directive(
sysconfig,
'OPENSSL_CONF', paths.DNSSEC_OPENSSL_CONF,
quotes=False, separator='=')
def __setup_softhsm(self):
token_dir_exists = os.path.exists(paths.DNSSEC_TOKENS_DIR)
# create dnssec directory
if not os.path.exists(paths.IPA_DNSSEC_DIR):
logger.debug("Creating %s directory", paths.IPA_DNSSEC_DIR)
os.mkdir(paths.IPA_DNSSEC_DIR)
os.chmod(paths.IPA_DNSSEC_DIR, 0o770)
# chown ods:named
constants.ODS_USER.chown(paths.IPA_DNSSEC_DIR,
gid=constants.NAMED_GROUP.gid)
# setup softhsm2 config file
softhsm_conf_txt = ("# SoftHSM v2 configuration file \n"
"# File generated by IPA instalation\n"
"directories.tokendir = %(tokens_dir)s\n"
"objectstore.backend = file") % {
'tokens_dir': paths.DNSSEC_TOKENS_DIR
}
logger.debug("Creating new softhsm config file")
with open(paths.DNSSEC_SOFTHSM2_CONF, 'w') as f:
os.fchmod(f.fileno(), 0o644)
f.write(softhsm_conf_txt)
# setting up named and ipa-dnskeysyncd to use our softhsm2 and
# openssl configs
self.setup_named_openssl_conf()
self.setup_named_sysconfig()
self.setup_ipa_dnskeysyncd_sysconfig()
if (token_dir_exists and os.path.exists(paths.DNSSEC_SOFTHSM_PIN) and
os.path.exists(paths.DNSSEC_SOFTHSM_PIN_SO)):
# there is initialized softhsm
return
# remove old tokens
if token_dir_exists:
logger.debug('Removing old tokens directory %s',
paths.DNSSEC_TOKENS_DIR)
shutil.rmtree(paths.DNSSEC_TOKENS_DIR)
# create tokens subdirectory
logger.debug('Creating tokens %s directory', paths.DNSSEC_TOKENS_DIR)
# setgid bit is required by the daemon
os.mkdir(paths.DNSSEC_TOKENS_DIR)
os.chmod(paths.DNSSEC_TOKENS_DIR, 0o770 | stat.S_ISGID)
# chown to ods:named
constants.ODS_USER.chown(paths.DNSSEC_TOKENS_DIR,
gid=constants.NAMED_GROUP.gid)
# generate PINs for softhsm
pin_length = 30  # BIND allows at most 32 bytes including the terminating '\0'
pin = ipautil.ipa_generate_password(
entropy_bits=0, special=None, min_len=pin_length)
pin_so = ipautil.ipa_generate_password(
entropy_bits=0, special=None, min_len=pin_length)
logger.debug("Saving user PIN to %s", paths.DNSSEC_SOFTHSM_PIN)
with open(paths.DNSSEC_SOFTHSM_PIN, 'w') as f:
# chown to ods:named
constants.ODS_USER.chown(f.fileno(), gid=constants.NAMED_GROUP.gid)
os.fchmod(f.fileno(), 0o660)
f.write(pin)
logger.debug("Saving SO PIN to %s", paths.DNSSEC_SOFTHSM_PIN_SO)
with open(paths.DNSSEC_SOFTHSM_PIN_SO, 'w') as f:
# owner must be root
os.fchmod(f.fileno(), 0o400)
f.write(pin_so)
# initialize SoftHSM
command = [
paths.SOFTHSM2_UTIL,
'--init-token',
'--free', # use random free slot
'--label', SOFTHSM_DNSSEC_TOKEN_LABEL,
'--pin', pin,
'--so-pin', pin_so,
]
logger.debug("Initializing tokens")
os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
ipautil.run(command, nolog=(pin, pin_so,))
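# Illustration: the command assembled above amounts to
#
#   SOFTHSM2_CONF=<paths.DNSSEC_SOFTHSM2_CONF> softhsm2-util --init-token \
#       --free --label <token label> --pin <pin> --so-pin <so pin>
#
# with both PINs kept out of the logs via nolog.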
def __setup_replica_keys(self):
keylabel = replica_keylabel_template % DNSName(self.fqdn).\
make_absolute().canonicalize().ToASCII()
ldap = api.Backend.ldap2
dn_base = DN(('cn', 'keys'), ('cn', 'sec'), ('cn', 'dns'), api.env.basedn)
with open(paths.DNSSEC_SOFTHSM_PIN, "r") as f:
pin = f.read()
os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
p11 = _ipap11helper.P11_Helper(
SOFTHSM_DNSSEC_TOKEN_LABEL, pin, paths.LIBSOFTHSM2_SO)
try:
# generate replica keypair
logger.debug("Creating replica's key pair")
key_id = None
while True:
# check whether a key with this ID already exists in SoftHSM
key_id = _ipap11helper.gen_key_id()
replica_pubkey_dn = DN(('ipk11UniqueId', 'autogenerate'), dn_base)
pub_keys = p11.find_keys(_ipap11helper.KEY_CLASS_PUBLIC_KEY,
label=keylabel,
id=key_id)
if pub_keys:
# a public key with this ID already exists, try another
continue
priv_keys = p11.find_keys(_ipap11helper.KEY_CLASS_PRIVATE_KEY,
label=keylabel,
id=key_id)
if not priv_keys:
break # we found unique id
public_key_handle, _privkey_handle = p11.generate_replica_key_pair(
keylabel, key_id,
pub_cka_verify=False,
pub_cka_verify_recover=False,
pub_cka_wrap=True,
priv_cka_unwrap=True,
priv_cka_sensitive=True,
priv_cka_extractable=False)
# export public key
public_key_blob = p11.export_public_key(public_key_handle)
# save key to LDAP
replica_pubkey_objectclass = [
'ipk11Object', 'ipk11PublicKey', 'ipaPublicKeyObject', 'top'
]
kw = {
'objectclass': replica_pubkey_objectclass,
'ipk11UniqueId': [u'autogenerate'],
'ipk11Label': [keylabel],
'ipaPublicKey': [public_key_blob],
'ipk11Id': [key_id],
'ipk11Wrap': [True],
'ipk11Verify': [False],
'ipk11VerifyRecover': [False],
}
logger.debug("Storing replica public key to LDAP, %s",
replica_pubkey_dn)
entry = ldap.make_entry(replica_pubkey_dn, **kw)
ldap.add_entry(entry)
logger.debug("Replica public key stored")
logger.debug("Setting CKA_WRAP=False for old replica keys")
# the new keys are created first; old keys must not be disabled
# until the new keys exist in both SoftHSM and LDAP
# get replica pub keys with CKA_WRAP=True
replica_pub_keys = p11.find_keys(_ipap11helper.KEY_CLASS_PUBLIC_KEY,
label=keylabel,
cka_wrap=True)
# old keys in softHSM
for handle in replica_pub_keys:
# don't disable wrapping for the new key
# (compare key IDs, not handles)
if key_id != p11.get_attribute(handle, _ipap11helper.CKA_ID):
p11.set_attribute(handle, _ipap11helper.CKA_WRAP, False)
# get old keys from LDAP
search_kw = {
'objectclass': u"ipaPublicKeyObject",
'ipk11Label': keylabel,
'ipk11Wrap': True,
}
filter = ldap.make_filter(search_kw, rules=ldap.MATCH_ALL)
entries, _truncated = ldap.find_entries(filter=filter,
base_dn=dn_base)
for entry in entries:
# don't disable wrapping for new key
if entry.single_value['ipk11Id'] != key_id:
entry['ipk11Wrap'] = [False]
ldap.update_entry(entry)
finally:
p11.finalize()
# change tokens mod/owner
logger.debug("Changing ownership of token files")
for (root, dirs, files) in os.walk(paths.DNSSEC_TOKENS_DIR):
for directory in dirs:
dir_path = os.path.join(root, directory)
os.chmod(dir_path, 0o770 | stat.S_ISGID)
# chown to ods:named
constants.ODS_USER.chown(dir_path,
gid=constants.NAMED_GROUP.gid)
for filename in files:
file_path = os.path.join(root, filename)
os.chmod(file_path, 0o660 | stat.S_ISGID)
# chown to ods:named
constants.ODS_USER.chown(file_path,
gid=constants.NAMED_GROUP.gid)
def __enable(self):
try:
self.ldap_configure('DNSKeySync', self.fqdn, None,
self.suffix, self.extra_config)
except errors.DuplicateEntry:
logger.error("DNSKeySync service already exists")
def __setup_principal(self):
ipautil.remove_keytab(self.keytab)
installutils.kadmin_addprinc(self.principal)
# Store the keytab on disk
installutils.create_keytab(self.keytab, self.principal)
p = self.move_service(self.principal)
if p is None:
# the service has already been moved, perhaps we're doing a DNS reinstall
dnssynckey_principal_dn = DN(
('krbprincipalname', self.principal),
('cn', 'services'), ('cn', 'accounts'), self.suffix)
else:
dnssynckey_principal_dn = p
# Make sure access is strictly reserved to the named user
os.chown(self.keytab, 0, constants.ODS_GROUP.gid)
os.chmod(self.keytab, 0o440)
dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'),
('cn', 'pbac'), self.suffix)
mod = [(ldap.MOD_ADD, 'member', dnssynckey_principal_dn)]
try:
api.Backend.ldap2.modify_s(dns_group, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
logger.critical("Could not modify principal's %s entry: %s",
dnssynckey_principal_dn, str(e))
raise
# bind-dyndb-ldap persistent search feature requires both size and time
# limit-free connection
mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),
(ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]
try:
api.Backend.ldap2.modify_s(dnssynckey_principal_dn, mod)
except Exception as e:
logger.critical("Could not set principal's %s LDAP limits: %s",
dnssynckey_principal_dn, str(e))
raise
def __start(self):
try:
self.restart()
except Exception as e:
print("Failed to start ipa-dnskeysyncd")
logger.debug("Failed to start ipa-dnskeysyncd: %s", e)
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring %s" % self.service_name)
# Just eat states
self.restore_state("running")
self.restore_state("enabled")
self.restore_state("configured")
# stop and disable service (IPA service, we do not need it anymore)
self.stop()
self.disable()
for f in [paths.SYSCONFIG_NAMED]:
try:
self.fstore.restore_file(f)
except ValueError as error:
logger.debug('%s', error)
# remove softhsm pin, to make sure new installation will generate
# new token database
# do not delete *so pin*, user can need it to get token data
ipautil.remove_file(paths.DNSSEC_SOFTHSM_PIN)
ipautil.remove_file(paths.DNSSEC_SOFTHSM2_CONF)
ipautil.remove_file(paths.DNSSEC_OPENSSL_CONF)
ipautil.rmtree(paths.IPA_DNSSEC_DIR)
try:
shutil.rmtree(paths.DNSSEC_TOKENS_DIR)
except OSError as e:
if e.errno != errno.ENOENT:
logger.exception(
"Failed to remove %s", paths.DNSSEC_TOKENS_DIR
)
ipautil.remove_keytab(self.keytab)
| 20,336
|
Python
|
.py
| 444
| 33.175676
| 85
| 0.580565
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,768
|
certs.py
|
freeipa_freeipa/ipaserver/install/certs.py
|
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import configparser
import logging
import os
import stat
import sys
import tempfile
import shutil
import xml.dom.minidom
import grp
import pwd
import base64
import fcntl
import time
import datetime
import six
from ipalib.install import certmonger, sysrestore
from ipapython import dogtag
from ipapython import ipautil
from ipapython.certdb import EMPTY_TRUST_FLAGS, IPA_CA_TRUST_FLAGS
from ipapython.certdb import get_ca_nickname, find_cert_from_txt, NSSDatabase
from ipapython.dn import DN
from ipalib import x509, api
from ipalib.errors import CertificateOperationError
from ipalib.install import certstore
from ipalib.util import strip_csr_header
from ipalib.text import _
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
logger = logging.getLogger(__name__)
def get_cert_nickname(cert):
"""
Using the subject from cert come up with a nickname suitable
for NSS. The caller can decide whether to use just the RDN
or the whole subject.
Returns a tuple of (rdn, subject_dn) where rdn is the string
representation of the first RDN in the subject and subject_dn
is a DN object.
"""
dn = DN(cert.subject)
return (str(dn[0]), dn)
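# Illustration (hypothetical subject): for a certificate whose subject is
# 'CN=ipa.example.test,O=EXAMPLE.TEST', get_cert_nickname() returns
# ('CN=ipa.example.test', DN('CN=ipa.example.test,O=EXAMPLE.TEST')).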
def install_pem_from_p12(p12_fname, p12_passwd, pem_fname):
pwd = ipautil.write_tmp_file(p12_passwd)
args = [paths.OPENSSL, "pkcs12", "-nokeys", "-clcerts",
"-in", p12_fname, "-out", pem_fname,
"-passin", "file:" + pwd.name]
# the PKCS12 MAC requires PKCS12KDF which is not an approved FIPS
# algorithm and cannot be supported by the FIPS provider.
# Do not require mac verification in FIPS mode
fips_enabled = tasks.is_fips_enabled()
if fips_enabled:
args.append('-nomacver')
ipautil.run(args)
def install_key_from_p12(
p12_fname, p12_passwd, pem_fname, out_passwd_fname=None):
pwd = ipautil.write_tmp_file(p12_passwd)
args = [
paths.OPENSSL, "pkcs12", "-nocerts",
"-in", p12_fname, "-out", pem_fname,
"-passin", "file:" + pwd.name]
if out_passwd_fname is not None:
args.extend(['-passout', 'file:{}'.format(out_passwd_fname)])
else:
args.append('-nodes')
# the PKCS12 MAC requires PKCS12KDF which is not an approved FIPS
# algorithm and cannot be supported by the FIPS provider.
# Do not require mac verification in FIPS mode
fips_enabled = tasks.is_fips_enabled()
if fips_enabled:
args.append('-nomacver')
ipautil.run(args, umask=0o077)
def pkcs12_to_certkeys(p12_fname, p12_passwd=None):
"""
Deserializes pkcs12 file to python objects
:param p12_fname: A PKCS#12 filename
:param p12_passwd: Optional password for the pkcs12_fname file
"""
args = [paths.OPENSSL, "pkcs12", "-in", p12_fname, "-nodes"]
if p12_passwd:
pwd = ipautil.write_tmp_file(p12_passwd)
args.extend(["-passin", "file:{fname}".format(fname=pwd.name)])
else:
args.extend(["-passin", "pass:"])
pems = ipautil.run(args).raw_output
certs = x509.load_certificate_list(pems)
priv_keys = x509.load_private_key_list(pems)
return (certs, priv_keys)
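# Usage sketch (file name and password illustrative):
#
#   certs, priv_keys = pkcs12_to_certkeys('/tmp/bundle.p12', 'Secret123')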
def is_ipa_issued_cert(api, cert):
"""
Return True if the certificate has been issued by IPA
Note that this method can only be executed if the api has been
initialized.
:param api: The pre-initialized IPA API
:param cert: The IPACertificate to test
"""
cacert_subject = certstore.get_ca_subject(
api.Backend.ldap2,
api.env.container_ca,
api.env.basedn)
return DN(cert.issuer) == cacert_subject
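# Usage sketch (assumes an initialized api with a connected ldap2 backend):
#
#   if is_ipa_issued_cert(api, cert):
#       ...  # the cert's issuer matches the IPA CA subject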
class CertDB:
"""An IPA-server-specific wrapper around NSS
This class knows IPA-specific details such as nssdir location, or the
CA cert name.
``subject_base``
Realm subject base DN. This argument is required when creating
server or object signing certs.
``ca_subject``
IPA CA subject DN. This argument is required when importing
CA certificates into the certificate database.
"""
# TODO: Remove all selfsign code
def __init__(self, realm, nssdir, fstore=None,
host_name=None, subject_base=None, ca_subject=None,
user=None, group=None, mode=None, create=False,
dbtype='auto', pwd_file=None):
self.nssdb = NSSDatabase(nssdir, dbtype=dbtype, pwd_file=pwd_file)
self.realm = realm
self.noise_fname = os.path.join(self.secdir, "noise.txt")
self.pk12_fname = os.path.join(self.secdir, "cacert.p12")
self.pin_fname = os.path.join(self.secdir, "pin.txt")
self.reqdir = None
self.certreq_fname = None
self.certder_fname = None
self.host_name = host_name
self.ca_subject = ca_subject
self.subject_base = subject_base
self.cacert_name = get_ca_nickname(self.realm)
self.user = user
self.group = group
self.mode = mode
self.uid = 0
self.gid = 0
if not create:
if os.path.isdir(self.secdir):
# We are going to set the owner of all of the cert
# files to the owner of the containing directory
# instead of that of the process. This works when
# this is called by root for a daemon that runs as
# a normal user
mode = os.stat(self.secdir)
self.uid = mode[stat.ST_UID]
self.gid = mode[stat.ST_GID]
else:
if user is not None:
pu = pwd.getpwnam(user)
self.uid = pu.pw_uid
self.gid = pu.pw_gid
if group is not None:
self.gid = grp.getgrnam(group).gr_gid
self.create_certdbs()
if fstore:
self.fstore = fstore
else:
self.fstore = sysrestore.FileStore(paths.SYSRESTORE)
ca_subject = ipautil.dn_attribute_property('_ca_subject')
subject_base = ipautil.dn_attribute_property('_subject_base')
# migration changes paths, just forward attribute lookup to nssdb
@property
def secdir(self):
return self.nssdb.secdir
@property
def dbtype(self):
return self.nssdb.dbtype
@property
def certdb_fname(self):
return self.nssdb.certdb
@property
def keydb_fname(self):
return self.nssdb.keydb
@property
def secmod_fname(self):
return self.nssdb.secmod
@property
def passwd_fname(self):
return self.nssdb.pwd_file
def exists(self):
"""
Checks whether all NSS database files + our pwd_file exist
"""
return self.nssdb.exists()
def __del__(self):
if self.reqdir is not None:
shutil.rmtree(self.reqdir, ignore_errors=True)
self.reqdir = None
self.nssdb.close()
def setup_cert_request(self):
"""
Create a temporary directory to store certificate requests and
certificates. This should be called before requesting certificates.
This is set outside of __init__ to avoid creating a temporary
directory every time we open a cert DB.
"""
if self.reqdir is not None:
return
self.reqdir = tempfile.mkdtemp('', 'ipa-', paths.VAR_LIB_IPA)
self.certreq_fname = self.reqdir + "/tmpcertreq"
self.certder_fname = self.reqdir + "/tmpcert.der"
def set_perms(self, fname, write=False):
perms = stat.S_IRUSR
if write:
perms |= stat.S_IWUSR
if hasattr(fname, 'fileno'):
os.fchown(fname.fileno(), self.uid, self.gid)
os.fchmod(fname.fileno(), perms)
else:
os.chown(fname, self.uid, self.gid)
os.chmod(fname, perms)
def run_certutil(self, args, stdin=None, **kwargs):
return self.nssdb.run_certutil(args, stdin, **kwargs)
def create_noise_file(self):
if os.path.isfile(self.noise_fname):
os.remove(self.noise_fname)
with open(self.noise_fname, "w") as f:
self.set_perms(f)
f.write(ipautil.ipa_generate_password())
def create_passwd_file(self, passwd=None):
ipautil.backup_file(self.passwd_fname)
with open(self.passwd_fname, "w") as f:
self.set_perms(f)
if passwd is not None:
f.write("%s\n" % passwd)
else:
f.write(ipautil.ipa_generate_password())
def create_certdbs(self):
self.nssdb.create_db(
user=self.user, group=self.group, mode=self.mode,
backup=True
)
self.set_perms(self.passwd_fname, write=True)
def restore(self):
self.nssdb.restore()
def list_certs(self):
"""
Return a tuple of tuples containing (nickname, trust)
"""
return self.nssdb.list_certs()
def has_nickname(self, nickname):
"""
Returns True if nickname exists in the certdb, False otherwise.
This could also be done directly with:
certutil -L -d -n <nickname> ...
"""
certs = self.list_certs()
for cert in certs:
if nickname == cert[0]:
return True
return False
def export_ca_cert(self, nickname, create_pkcs12=False):
"""create_pkcs12 tells us whether we should create a PKCS#12 file
of the CA or not. If we are running on a replica then we won't
have the private key to make a PKCS#12 file so we don't need to
do that step."""
cacert_fname = paths.IPA_CA_CRT
# export the CA cert for use with other apps
ipautil.backup_file(cacert_fname)
root_nicknames = self.find_root_cert(nickname)[:-1]
with open(cacert_fname, "w") as f:
os.fchmod(f.fileno(), 0o644)
for root in root_nicknames:
result = self.run_certutil(["-L", "-n", root, "-a"],
capture_output=True)
f.write(result.output)
if create_pkcs12:
ipautil.backup_file(self.pk12_fname)
self.nssdb.run_pk12util([
"-o", self.pk12_fname,
"-n", self.cacert_name,
"-k", self.passwd_fname,
"-w", self.passwd_fname,
])
self.set_perms(self.pk12_fname)
def load_cacert(self, cacert_fname, trust_flags):
"""
Load all the certificates from a given file. It is assumed that
this file contains CA certificates.
"""
with open(cacert_fname) as f:
certs = f.read()
st = 0
while True:
try:
(cert, st) = find_cert_from_txt(certs, st)
_rdn, subject_dn = get_cert_nickname(cert)
if subject_dn == self.ca_subject:
nick = get_ca_nickname(self.realm)
else:
nick = str(subject_dn)
self.nssdb.add_cert(cert, nick, trust_flags)
except RuntimeError:
break
def get_cert_from_db(self, nickname):
"""
Retrieve a certificate from the current NSS database for nickname.
"""
if ':' in nickname:
token = nickname.split(':', 1)[0]
else:
token = None
try:
args = ["-L", "-n", nickname, "-a"]
if token:
args.extend(['-h', token])
result = self.run_certutil(args, capture_output=True)
return x509.load_pem_x509_certificate(result.raw_output)
except ipautil.CalledProcessError:
return None
def track_server_cert(
self, nickname, principal,
password_file=None, command=None, profile=None):
"""
Tell certmonger to track the given certificate nickname.
"""
try:
request_id = certmonger.start_tracking(
self.secdir, nickname=nickname, pinfile=password_file,
post_command=command, profile=profile)
except RuntimeError as e:
logger.error("certmonger failed starting to track certificate: %s",
str(e))
return
cert = self.get_cert_from_db(nickname)
subject = str(DN(cert.subject))
certmonger.add_principal(request_id, principal)
certmonger.add_subject(request_id, subject)
def untrack_server_cert(self, nickname):
"""
Tell certmonger to stop tracking the given certificate nickname.
"""
try:
certmonger.stop_tracking(self.secdir, nickname=nickname)
except RuntimeError as e:
logger.error("certmonger failed to stop tracking certificate: %s",
str(e))
def create_server_cert(self, nickname, hostname, subject=None):
"""
If we are using a dogtag CA then other_certdb contains the RA agent key
that will issue our cert.
You can override the certificate Subject by specifying a subject.
Returns a certificate in DER format.
"""
if subject is None:
subject = DN(('CN', hostname), self.subject_base)
self.request_cert(subject, san_dnsnames=[hostname])
try:
self.issue_server_cert(self.certreq_fname, self.certder_fname)
self.import_cert(self.certder_fname, nickname)
with open(self.certder_fname, "rb") as f:
dercert = f.read()
return x509.load_der_x509_certificate(dercert)
finally:
for fname in (self.certreq_fname, self.certder_fname):
try:
os.unlink(fname)
except OSError:
pass
def request_cert(
self, subject, certtype="rsa", keysize="2048",
san_dnsnames=None):
assert isinstance(subject, DN)
self.create_noise_file()
self.setup_cert_request()
args = ["-R", "-s", str(subject),
"-o", self.certreq_fname,
"-k", certtype,
"-g", keysize,
"-z", self.noise_fname,
"-f", self.passwd_fname,
"-a"]
if san_dnsnames is not None and len(san_dnsnames) > 0:
args += ['-8', ','.join(san_dnsnames)]
result = self.run_certutil(args,
capture_output=True, capture_error=True)
os.remove(self.noise_fname)
return (result.output, result.error_output)
def issue_server_cert(self, certreq_fname, cert_fname):
self.setup_cert_request()
if self.host_name is None:
raise RuntimeError("CA Host is not set.")
with open(certreq_fname, "rb") as f:
csr = f.read()
# We just want the CSR bits; make sure there is nothing else
csr = strip_csr_header(csr).decode('utf8')
params = {'profileId': dogtag.DEFAULT_PROFILE,
'cert_request_type': 'pkcs10',
'requestor_name': 'IPA Installer',
'cert_request': csr,
'xmlOutput': 'true'}
# Send the request to the CA
result = dogtag.https_request(
self.host_name, 8443,
url="/ca/ee/ca/profileSubmitSSLClient",
cafile=api.env.tls_ca_cert,
client_certfile=paths.RA_AGENT_PEM,
client_keyfile=paths.RA_AGENT_KEY,
**params)
http_status, _http_headers, http_body = result
logger.debug("CA answer: %r", http_body)
if http_status != 200:
raise CertificateOperationError(
error=_('Unable to communicate with CMS (status %d)') % http_status)
# The result is an XML blob. Pull the certificate out of that
doc = xml.dom.minidom.parseString(http_body)
item_node = doc.getElementsByTagName("b64")
try:
try:
cert = item_node[0].childNodes[0].data
except IndexError:
raise RuntimeError("Certificate issuance failed")
finally:
doc.unlink()
# base64-decode the result for uniformity
cert = base64.b64decode(cert)
# Write the certificate to a file; it will be read back and
# imported into the NSS database in a later step.
with open(cert_fname, "wb") as f:
f.write(cert)
def add_cert(self, cert, nick, flags):
self.nssdb.add_cert(cert, nick, flags)
def import_cert(self, cert_fname, nickname):
"""
        Load a certificate from a file and add minimal trust.
"""
args = ["-A", "-n", nickname,
"-t", "u,u,u",
"-i", cert_fname,
"-f", self.passwd_fname]
self.run_certutil(args)
def delete_cert(self, nickname):
self.nssdb.delete_cert(nickname)
def create_pin_file(self):
"""
        Create a pin file in the format expected by Directory Server.
"""
ipautil.backup_file(self.pin_fname)
with open(self.pin_fname, "w") as pinfile:
self.set_perms(pinfile)
pinfile.write("Internal (Software) Token:")
with open(self.passwd_fname) as pwdfile:
pinfile.write(pwdfile.read())
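    # The resulting pin file is a single token:password line, for example
    # (password illustrative):
    #
    #   Internal (Software) Token:Secret123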
def find_root_cert(self, nickname):
"""
Given a nickname, return a list of the certificates that make up
the trust chain.
"""
root_nicknames = self.nssdb.get_trust_chain(nickname)
return root_nicknames
def trust_root_cert(self, root_nickname, trust_flags):
if root_nickname is None:
logger.debug("Unable to identify root certificate to trust. "
"Continuing but things are likely to fail.")
return
try:
self.nssdb.trust_root_cert(root_nickname, trust_flags)
except RuntimeError:
pass
def find_server_certs(self):
return self.nssdb.find_server_certs()
def import_pkcs12(self, pkcs12_fname, pkcs12_passwd=None):
return self.nssdb.import_pkcs12(pkcs12_fname,
pkcs12_passwd=pkcs12_passwd)
def export_pkcs12(self, pkcs12_fname, pkcs12_pwd_fname, nickname=None):
if nickname is None:
nickname = get_ca_nickname(api.env.realm)
self.nssdb.run_pk12util([
"-o", pkcs12_fname,
"-n", nickname,
"-k", self.passwd_fname,
"-w", pkcs12_pwd_fname
])
def create_from_cacert(self):
"""
Ensure that a CA chain is in the NSS database.
If an NSS database already exists ensure that the CA chain
we want to load is in there and if not add it. If there is no
database then create an NSS database and load the CA chain.
"""
cacert_fname = paths.IPA_CA_CRT
if self.nssdb.exists():
# We already have a cert db, see if it is for the same CA.
# If it is we leave things as they are.
with open(cacert_fname, "r") as f:
newca = f.read()
            newca, _st = find_cert_from_txt(newca)
            # parse the PEM blob so we compare certificates, not raw text
            newca = x509.load_pem_x509_certificate(newca.encode('ascii'))
            cacert = self.get_cert_from_db(self.cacert_name)
if newca == cacert:
return
# The CA certificates are different or something went wrong. Start with
# a new certificate database.
self.create_passwd_file()
self.create_certdbs()
self.load_cacert(cacert_fname, IPA_CA_TRUST_FLAGS)
def create_from_pkcs12(self, pkcs12_fname, pkcs12_passwd,
ca_file, trust_flags):
"""Create a new NSS database using the certificates in a PKCS#12 file.
pkcs12_fname: the filename of the PKCS#12 file
pkcs12_pwd_fname: the file containing the pin for the PKCS#12 file
nickname: the nickname/friendly-name of the cert we are loading
The global CA may be added as well in case it wasn't included in the
PKCS#12 file. Extra certs won't hurt in any case.
The global CA may be specified in ca_file, as a PEM filename.
"""
self.create_noise_file()
self.create_passwd_file()
self.create_certdbs()
self.init_from_pkcs12(
pkcs12_fname,
pkcs12_passwd,
ca_file=ca_file,
trust_flags=trust_flags)
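    # Usage sketch (path and password illustrative): an installer seeding a
    # service NSS database from a third-party server certificate would call
    #
    #   db.create_from_pkcs12('/root/server.p12', 'Secret123',
    #                         ca_file=paths.IPA_CA_CRT,
    #                         trust_flags=IPA_CA_TRUST_FLAGS)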
def init_from_pkcs12(self, pkcs12_fname, pkcs12_passwd,
ca_file, trust_flags):
self.import_pkcs12(pkcs12_fname, pkcs12_passwd)
server_certs = self.find_server_certs()
if len(server_certs) == 0:
raise RuntimeError("Could not find a suitable server cert in import in %s" % pkcs12_fname)
if ca_file:
try:
with open(ca_file) as fd:
certs = fd.read()
except IOError as e:
raise RuntimeError(
"Failed to open %s: %s" % (ca_file, e.strerror))
st = 0
num = 1
while True:
try:
cert, st = find_cert_from_txt(certs, st)
except RuntimeError:
break
self.add_cert(cert, 'CA %s' % num, EMPTY_TRUST_FLAGS)
num += 1
# We only handle one server cert
nickname = server_certs[0][0]
ca_names = self.find_root_cert(nickname)[:-1]
if len(ca_names) == 0:
raise RuntimeError("Could not find a CA cert in %s" % pkcs12_fname)
self.cacert_name = ca_names[-1]
self.trust_root_cert(self.cacert_name, trust_flags)
self.export_ca_cert(nickname, False)
def export_pem_cert(self, nickname, location):
return self.nssdb.export_pem_cert(nickname, location)
def request_service_cert(self, nickname, principal, host,
resubmit_timeout=None):
if resubmit_timeout is None:
resubmit_timeout = api.env.certmonger_wait_timeout
return certmonger.request_and_wait_for_cert(
certpath=self.secdir,
storage='NSSDB',
nickname=nickname,
principal=principal,
subject=host,
dns=[host],
passwd_fname=self.passwd_fname,
resubmit_timeout=resubmit_timeout
)
def is_ipa_issued_cert(self, api, nickname):
"""
Return True if the certificate contained in the CertDB with the
provided nickname has been issued by IPA.
Note that this method can only be executed if the api has been
initialized.
This method needs to compare the cert issuer (from the NSS DB
and the subject from the CA (from LDAP), because nicknames are not
always aligned.
The cert can be issued directly by IPA. In this case, the cert
issuer is IPA CA subject.
"""
cert = self.get_cert_from_db(nickname)
if cert is None:
raise RuntimeError("Could not find the cert %s in %s"
% (nickname, self.secdir))
return is_ipa_issued_cert(api, cert)
def needs_upgrade_format(self):
"""Check if NSSDB file format needs upgrade
        Only upgrade if it's an existing dbm database and the default
        database type is not 'dbm'.
"""
return (
self.nssdb.dbtype == 'dbm' and
self.exists()
)
def upgrade_format(self):
"""Upgrade NSSDB to new file format
"""
self.nssdb.convert_db()
class _CrossProcessLock:
_DATETIME_FORMAT = '%Y%m%d%H%M%S%f'
def __init__(self, filename):
self._filename = filename
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_value, traceback):
self.release()
def acquire(self, owner=None):
self._do(self._acquire, owner)
def release(self, owner=None):
self._do(self._release, owner)
def _acquire(self, owner):
now = datetime.datetime.now(tz=datetime.timezone.utc)
if self._locked and now >= self._expire:
self._locked = False
if self._locked:
return False
self._locked = True
self._owner = owner
self._expire = now + datetime.timedelta(hours=1)
return True
def _release(self, owner):
if not self._locked or self._owner != owner:
raise RuntimeError("lock not acquired by %s" % owner)
self._locked = False
self._owner = None
self._expire = None
return True
def _do(self, func, owner):
if owner is None:
owner = '%s[%s]' % (os.path.basename(sys.argv[0]), os.getpid())
while True:
with open(self._filename, 'a+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(0)
self._read(f)
if func(owner):
f.seek(0)
f.truncate()
self._write(f)
return
time.sleep(10)
def _read(self, fileobj):
p = configparser.RawConfigParser()
if six.PY2:
p.readfp(fileobj) # pylint: disable=no-member
else:
p.read_file(fileobj)
try:
self._locked = p.getboolean('lock', 'locked')
if self._locked:
self._owner = p.get('lock', 'owner')
expire = p.get('lock', 'expire')
try:
self._expire = datetime.datetime.strptime(
expire, self._DATETIME_FORMAT).replace(
tzinfo=datetime.timezone.utc)
except ValueError:
raise configparser.Error
except configparser.Error:
self._locked = False
self._owner = None
self._expire = None
def _write(self, fileobj):
p = configparser.RawConfigParser()
p.add_section('lock')
locked = '1' if self._locked else '0'
p.set('lock', 'locked', locked)
if self._locked:
expire = self._expire.strftime(self._DATETIME_FORMAT)
p.set('lock', 'owner', self._owner)
p.set('lock', 'expire', expire)
p.write(fileobj)
renewal_lock = _CrossProcessLock(paths.IPA_RENEWAL_LOCK)
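# Usage sketch (owner string illustrative): callers serialize renewal work
# either through the context manager protocol,
#
#   with renewal_lock:
#       ...  # renew certificates, update NSS databases
#
# or with an explicit owner, so acquire and release may happen in different
# process invocations:
#
#   renewal_lock.acquire(owner='renew_ca_cert[1234]')
#   try:
#       ...
#   finally:
#       renewal_lock.release(owner='renew_ca_cert[1234]')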
| 27,376
|
Python
|
.py
| 679
| 30.223859
| 102
| 0.594651
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,769
|
custodiainstance.py
|
freeipa_freeipa/ipaserver/install/custodiainstance.py
|
# Copyright (C) 2015 FreeIPA Project Contributors, see 'COPYING' for license.
from __future__ import print_function, absolute_import
import enum
import logging
from ipalib import api
from ipaserver.secrets.kem import IPAKEMKeys, KEMLdap
from ipaserver.secrets.client import CustodiaClient
from ipaplatform.paths import paths
from ipaplatform.constants import constants
from ipaserver.install.service import SimpleServiceInstance
from ipapython import ipautil
from ipapython import ipaldap
from ipapython.certdb import NSSDatabase
from ipaserver.install import sysupgrade
from base64 import b64decode
from jwcrypto.common import json_decode
import os
import stat
import time
logger = logging.getLogger(__name__)
class CustodiaModes(enum.Enum):
# peer must have a CA
CA_PEER = 'Custodia CA peer'
# peer must have a CA, KRA preferred
KRA_PEER = 'Custodia KRA peer'
# any master will do
MASTER_PEER = 'Custodia master peer'
# local instance (first master)
FIRST_MASTER = 'Custodia on first master'
def get_custodia_instance(config, mode):
"""Create Custodia instance
:param config: configuration/installer object
:param mode: CustodiaModes member
:return: CustodiaInstance object
    The config object must have the following attributes:
*host_name*
FQDN of the new replica/master
*realm_name*
Kerberos realm
*master_host_name* (for *CustodiaModes.MASTER_PEER*)
hostname of a master (may not have a CA)
*ca_host_name* (for *CustodiaModes.CA_PEER*)
hostname of a master with CA
*kra_host_name* (for *CustodiaModes.KRA_PEER*)
hostname of a master with KRA or CA
    For replicas, the instance will upload new keys to and retrieve
    secrets from the same host. Therefore it uses *ca_host_name* instead
    of *master_host_name* to create a replica with CA.
"""
assert isinstance(mode, CustodiaModes)
logger.debug(
"Custodia client for '%r' with promotion %s.",
mode, 'yes' if mode != CustodiaModes.FIRST_MASTER else 'no'
)
if mode == CustodiaModes.CA_PEER:
# In case we install replica with CA, prefer CA host as source for
# all Custodia secret material.
custodia_peer = config.ca_host_name
elif mode == CustodiaModes.KRA_PEER:
custodia_peer = config.kra_host_name
elif mode == CustodiaModes.MASTER_PEER:
custodia_peer = config.master_host_name
elif mode == CustodiaModes.FIRST_MASTER:
custodia_peer = None
else:
raise RuntimeError("Unknown custodia mode %s" % mode)
if custodia_peer is None:
# use ldapi with local dirsrv instance
logger.debug("Custodia uses LDAPI.")
else:
logger.info("Custodia uses '%s' as master peer.", custodia_peer)
return CustodiaInstance(
host_name=config.host_name,
realm=config.realm_name,
custodia_peer=custodia_peer
)
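# Usage sketch (the config object and its attributes come from the installer
# framework, as documented above):
#
#   custodia = get_custodia_instance(config, CustodiaModes.CA_PEER)
#   custodia.create_instance()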
class CustodiaInstance(SimpleServiceInstance):
def __init__(self, host_name=None, realm=None, custodia_peer=None):
super(CustodiaInstance, self).__init__("ipa-custodia")
self.config_file = paths.IPA_CUSTODIA_CONF
self.server_keys = paths.IPA_CUSTODIA_KEYS
self.custodia_peer = custodia_peer
self.fqdn = host_name
self.realm = realm
@property
def ldap_uri(self):
if self.custodia_peer is None:
return ipaldap.realm_to_ldapi_uri(self.realm)
else:
return "ldap://{}".format(self.custodia_peer)
def __config_file(self):
template_file = os.path.basename(self.config_file) + '.template'
template = os.path.join(paths.USR_SHARE_IPA_DIR, template_file)
sub_dict = dict(
IPA_CUSTODIA_CONF_DIR=paths.IPA_CUSTODIA_CONF_DIR,
IPA_CUSTODIA_KEYS=paths.IPA_CUSTODIA_KEYS,
IPA_CUSTODIA_SOCKET=paths.IPA_CUSTODIA_SOCKET,
IPA_CUSTODIA_AUDIT_LOG=paths.IPA_CUSTODIA_AUDIT_LOG,
LDAP_URI=ipaldap.realm_to_ldapi_uri(self.realm),
UID=constants.HTTPD_USER.uid,
GID=constants.HTTPD_USER.pgid
)
conf = ipautil.template_file(template, sub_dict)
with open(self.config_file, "w") as f:
f.write(conf)
ipautil.flush_sync(f)
def create_instance(self):
if self.ldap_uri.startswith('ldapi://'):
# local case, ensure container exists
self.step("Making sure custodia container exists",
self.__create_container)
self.step("Generating ipa-custodia config file", self.__config_file)
self.step("Generating ipa-custodia keys", self.__gen_keys)
super(CustodiaInstance, self).create_instance(
gensvc_name='KEYS',
fqdn=self.fqdn,
ldap_suffix=ipautil.realm_to_suffix(self.realm),
realm=self.realm
)
sysupgrade.set_upgrade_state('custodia', 'installed', True)
def uninstall(self):
super(CustodiaInstance, self).uninstall()
keystore = IPAKEMKeys({
'server_keys': self.server_keys,
'ldap_uri': self.ldap_uri
})
keystore.remove_server_keys_file()
ipautil.remove_file(self.config_file)
ipautil.remove_file(paths.IPA_CUSTODIA_SOCKET)
sysupgrade.set_upgrade_state('custodia', 'installed', False)
def __gen_keys(self):
keystore = IPAKEMKeys({
'server_keys': self.server_keys,
'ldap_uri': self.ldap_uri
})
keystore.generate_server_keys()
def upgrade_instance(self):
installed = sysupgrade.get_upgrade_state("custodia", "installed")
if installed:
if (not os.path.isfile(self.server_keys)
or not os.path.isfile(self.config_file)):
logger.warning(
"Custodia server keys or config are missing, forcing "
"reinstallation of ipa-custodia."
)
installed = False
if not installed:
logger.info("Custodia service is being configured")
self.create_instance()
else:
            with open(self.config_file) as f:
                old_config = f.read()
            self.__config_file()
            with open(self.config_file) as f:
                new_config = f.read()
if new_config != old_config:
logger.info("Restarting Custodia")
self.restart()
mode = os.stat(self.server_keys).st_mode
if stat.S_IMODE(mode) != 0o600:
logger.info("Secure server.keys mode")
os.chmod(self.server_keys, 0o600)
def __create_container(self):
"""
        Run the custodia update file to ensure the custodia container exists.
"""
self._ldap_update(['73-custodia.update'])
def import_ra_key(self):
cli = self._get_custodia_client()
# please note that ipaCert part has to stay here for historical
# reasons (old servers expect you to ask for ra/ipaCert during
# replication as they store the RA agent cert in an NSS database
# with this nickname)
cli.fetch_key('ra/ipaCert')
def import_dm_password(self):
cli = self._get_custodia_client()
cli.fetch_key('dm/DMHash')
def _wait_keys(self):
timeout = api.env.replication_wait_timeout
deadline = int(time.time()) + timeout
logger.debug("Waiting up to %s seconds to see our keys "
"appear on host %s", timeout, self.ldap_uri)
konn = KEMLdap(self.ldap_uri)
saved_e = None
while True:
try:
return konn.check_host_keys(self.fqdn)
except Exception as e:
# Print message to console only once for first error.
if saved_e is None:
                    # FIXME: Change once there's a better way to show this
                    # message in installer output.
print(
" Waiting for keys to appear on host: {}, please "
"wait until this has completed.".format(
self.ldap_uri)
)
# log only once for the same error
if not isinstance(e, type(saved_e)):
logger.debug(
"Transient error getting keys: '%s'", e)
saved_e = e
if int(time.time()) > deadline:
raise RuntimeError("Timed out trying to obtain keys.")
time.sleep(1)
def _get_custodia_client(self):
if self.custodia_peer is None:
raise ValueError("Can't replicate secrets without Custodia peer")
# Before we attempt to fetch keys from this host, make sure our public
# keys have been replicated there.
self._wait_keys()
return CustodiaClient(
client_service='host@{}'.format(self.fqdn),
keyfile=self.server_keys, keytab=paths.KRB5_KEYTAB,
server=self.custodia_peer, realm=self.realm
)
def _get_keys(self, cacerts_file, cacerts_pwd, data):
# Fetch all needed certs one by one, then combine them in a single
# PKCS12 file
prefix = data['prefix']
certlist = data['list']
cli = self._get_custodia_client()
with NSSDatabase(None) as tmpdb:
tmpdb.create_db()
# Cert file password
crtpwfile = os.path.join(tmpdb.secdir, 'crtpwfile')
with open(crtpwfile, 'w+') as f:
f.write(cacerts_pwd)
for nickname in certlist:
value = cli.fetch_key(os.path.join(prefix, nickname), False)
v = json_decode(value)
pk12pwfile = os.path.join(tmpdb.secdir, 'pk12pwfile')
with open(pk12pwfile, 'w+') as f:
f.write(v['export password'])
pk12file = os.path.join(tmpdb.secdir, 'pk12file')
with open(pk12file, 'wb') as f:
f.write(b64decode(v['pkcs12 data']))
tmpdb.run_pk12util([
'-k', tmpdb.pwd_file,
'-n', nickname,
'-i', pk12file,
'-w', pk12pwfile
])
# Add CA certificates
self.export_ca_certs_nssdb(tmpdb, True)
# Now that we gathered all certs, re-export
ipautil.run([
paths.PKCS12EXPORT,
'-d', tmpdb.secdir,
'-p', tmpdb.pwd_file,
'-w', crtpwfile,
'-o', cacerts_file
])
def get_ca_keys(self, cacerts_file, cacerts_pwd):
certlist = ['caSigningCert cert-pki-ca',
'ocspSigningCert cert-pki-ca',
'auditSigningCert cert-pki-ca',
'subsystemCert cert-pki-ca']
data = {'prefix': 'ca',
'list': certlist}
self._get_keys(cacerts_file, cacerts_pwd, data)
def get_kra_keys(self, cacerts_file, cacerts_pwd):
certlist = ['auditSigningCert cert-pki-kra',
'storageCert cert-pki-kra',
'subsystemCert cert-pki-ca',
'transportCert cert-pki-kra']
data = {'prefix': 'ca',
'list': certlist}
self._get_keys(cacerts_file, cacerts_pwd, data)
| 11,407
|
Python
|
.py
| 267
| 32.179775
| 78
| 0.60409
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,770
|
ca.py
|
freeipa_freeipa/ipaserver/install/ca.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
CA installer module
"""
from __future__ import print_function, absolute_import
import enum
import logging
import os.path
import pki.util
import six
from ipalib.constants import IPA_CA_CN
from ipalib.install import certstore
from ipalib.install.service import enroll_only, master_install_only, replica_install_only
from ipaplatform.constants import constants
from ipaserver.install import sysupgrade
from ipapython.install import typing
from ipapython.install.core import group, knob, extend_knob
from ipaserver.install import acmeinstance, cainstance, bindinstance, dsinstance
from ipapython import ipautil, certdb
from ipapython import ipaldap
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
from ipaserver.install import installutils, certs
from ipaserver.install.replication import replica_conn_check
from ipalib import api, errors, x509
from ipapython.dn import DN
from . import conncheck, dogtag
if six.PY3:
unicode = str
VALID_SUBJECT_BASE_ATTRS = {
'st', 'o', 'ou', 'dnqualifier', 'c', 'serialnumber', 'l', 'title', 'sn',
'givenname', 'initials', 'generationqualifier', 'dc', 'mail', 'uid',
'postaladdress', 'postalcode', 'postofficebox', 'houseidentifier', 'e',
'street', 'pseudonym', 'incorporationlocality', 'incorporationstate',
'incorporationcountry', 'businesscategory',
}
VALID_SUBJECT_ATTRS = {'cn'} | VALID_SUBJECT_BASE_ATTRS
logger = logging.getLogger(__name__)
external_cert_file = None
external_ca_file = None
def subject_validator(valid_attrs, value):
if not isinstance(value, unicode):
v = unicode(value, 'utf-8')
else:
v = value
if any(ord(c) < 0x20 for c in v):
raise ValueError("must not contain control characters")
if '&' in v:
raise ValueError("must not contain an ampersand (\"&\")")
try:
dn = DN(v)
for rdn in dn:
if rdn.attr.lower() not in valid_attrs:
raise ValueError("invalid attribute: \"%s\"" % rdn.attr)
except ValueError as e:
raise ValueError("invalid DN: %s" % e)
def random_serial_numbers_version(enabled):
"""Return True if PKI supports RSNv3
The caller is responsible for raising the exception.
"""
if not enabled:
return None, None
pki_version = pki.util.Version(pki.specification_version())
return pki_version >= pki.util.Version("11.2.0"), pki_version
def random_serial_numbers_validator(enabled):
val, pki_version = random_serial_numbers_version(enabled)
if val is False:
raise ValueError(
"Random Serial Numbers are not supported in PKI version %s"
% pki_version
)
def lookup_ca_subject(api, subject_base):
dn = DN(('cn', IPA_CA_CN), api.env.container_ca, api.env.basedn)
try:
# we do not use api.Command.ca_show because it attempts to
# talk to the CA (to read certificate / chain), but the RA
# backend may be unavailable (ipa-replica-install) or unusable
# due to RA Agent cert not yet created (ipa-ca-install).
ca_subject = api.Backend.ldap2.get_entry(dn)['ipacasubjectdn'][0]
except errors.NotFound:
# if the entry doesn't exist, we are dealing with a pre-v4.4
# installation, where the default CA subject was always based
# on the subject_base.
#
# installutils.default_ca_subject_dn is NOT used here in
# case the default changes in the future.
ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
return str(ca_subject)
def lookup_random_serial_number_version(api):
"""
Retrieve the random serial number version number from the
remote server.
If the value is > 0 then RSN was enabled. Return the raw
value for future-proofing in case version-specific decisions
need to be made.
Returns 0 if RSN is not enabled or otherwise not available.
"""
dn = DN(('cn', IPA_CA_CN), api.env.container_ca, api.env.basedn)
version = 0
try:
# we do not use api.Command.ca_show because it attempts to
# talk to the CA (to read certificate / chain), but the RA
# backend may be unavailable (ipa-replica-install) or unusable
# due to RA Agent cert not yet created (ipa-ca-install).
entry = api.Backend.ldap2.get_entry(dn)
# If the attribute doesn't exist then the remote didn't
# enable RSN.
if 'ipacarandomserialnumberversion' in entry:
version = int(entry['ipacarandomserialnumberversion'][0])
except (errors.NotFound, KeyError):
# if the entry doesn't exist then the remote doesn't support
# RSN so there is nothing to do.
pass
return version
def lookup_hsm_configuration(api):
"""
If an HSM was configured on the initial install then return the
token name and PKCS#11 library path from that install.
Returns a tuple of (token_name, token_library_path) or (None, None)
"""
dn = DN(('cn', IPA_CA_CN), api.env.container_ca, api.env.basedn)
token_name = None
token_library_path = None
try:
# we do not use api.Command.ca_show because it attempts to
# talk to the CA (to read certificate / chain), but the RA
# backend may be unavailable (ipa-replica-install) or unusable
# due to RA Agent cert not yet created (ipa-ca-install).
entry = api.Backend.ldap2.get_entry(dn)
        # If the attribute doesn't exist then the remote didn't
        # configure an HSM.
if 'ipacahsmconfiguration' in entry:
val = entry['ipacahsmconfiguration'][0]
(token_name, token_library_path) = val.split(';')
except (errors.NotFound, KeyError):
# if the entry doesn't exist then the remote doesn't support
# HSM so there is nothing to do.
pass
return (token_name, token_library_path)
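# Example (values illustrative): with an LDAP entry containing
#
#   ipacahsmconfiguration: softhsm;/usr/lib64/pkcs11/libsofthsm2.so
#
# this returns ('softhsm', '/usr/lib64/pkcs11/libsofthsm2.so'); without the
# attribute it returns (None, None).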
def hsm_version():
"""Return True if PKI supports working HSM code
The caller is responsible for raising the exception.
"""
pki_version = pki.util.Version(pki.specification_version())
return pki_version >= pki.util.Version("11.5.0"), pki_version
def hsm_validator(token_name, token_library, token_password):
"""Do some basic validation of the HSM information provided.
- The local PKI server supports IPA HSM
- The token library exists
- The token name doesn't have a colon or semi-colon in it
- The token name exists after loading the library
- The token password works
- Super-simple test to see if the SELinux module is loaded
"""
if not token_name:
logger.debug("No token name, assuming not an HSM install")
return
if not token_password:
raise ValueError("No token password provided")
val, pki_version = hsm_version()
if val is False:
raise ValueError(
"HSM is not supported in PKI version %s" % pki_version
)
if ':' in token_name or ';' in token_name:
raise ValueError(
"Colon and semi-colon are not allowed in a token name."
)
if not os.path.exists(token_library):
raise ValueError(
"Token library path '%s' does not exist" % token_library
)
pkiuser = constants.PKI_USER
pkigroup = constants.PKI_GROUP
if 'libsofthsm' in token_library:
import grp
group = grp.getgrnam(constants.ODS_GROUP)
if str(constants.PKI_USER) in group.gr_mem:
pkigroup = constants.ODS_GROUP
with certdb.NSSDatabase() as tempnssdb:
tempnssdb.create_db(user=str(pkiuser), group=str(pkigroup))
# Try adding the token library to the temporary database in
# case it isn't already available. Ignore all errors.
command = [
paths.MODUTIL,
'-dbdir', '{}:{}'.format(tempnssdb.dbtype, tempnssdb.secdir),
'-nocertdb',
'-add', 'test',
'-libfile', token_library,
'-force',
]
        # It may fail if p11-kit has already registered the library; that's
        # ok.
ipautil.run(command, stdin='\n', cwd=tempnssdb.secdir,
runas=pkiuser, suplementary_groups=[pkigroup],
raiseonerr=False)
command = [
paths.MODUTIL,
'-dbdir', '{}:{}'.format(tempnssdb.dbtype, tempnssdb.secdir),
'-list',
'-force'
]
lines = ipautil.run(
command, cwd=tempnssdb.secdir, capture_output=True,
runas=pkiuser, suplementary_groups=[pkigroup]).output
found = False
token_line = f'token: {token_name}'
for line in lines.split('\n'):
if token_line in line.strip():
found = True
break
if not found:
raise ValueError(
"Token named '%s' was not found. Check permissions"
% token_name
)
pwdfile = ipautil.write_tmp_file(token_password)
os.fchown(pwdfile.fileno(), pkiuser.uid, pkigroup.gid)
args = [
paths.CERTUTIL,
"-d", '{}:{}'.format(tempnssdb.dbtype, tempnssdb.secdir),
"-K",
"-h", token_name,
"-f", pwdfile.name,
]
result = ipautil.run(args, cwd=tempnssdb.secdir,
runas=pkiuser,
suplementary_groups=[pkigroup],
capture_error=True, raiseonerr=False)
if result.returncode != 0 and len(result.error_output):
if 'SEC_ERROR_BAD_PASSWORD' in result.error_output:
raise ValueError('Invalid HSM token password')
else:
raise ValueError(
"Validating HSM password failed: %s" % result.error_output
)
# validate that the appropriate SELinux module is installed
# Only warn in case the expected paths don't match.
if 'nfast' in token_library:
module = 'ipa-nfast'
elif 'luna' in token_library:
module = 'ipa-luna'
else:
module = None
if module:
args = [paths.SEMODULE, "-l"]
result = ipautil.run(args, cwd=tempnssdb.secdir,
capture_output=True, raiseonerr=False)
if module not in result.output:
            logger.info('\nWARNING: The associated SELinux module, %s, '
                        'for this HSM was not detected.\nVerify '
                        'that the appropriate subpackage is installed '
                        'for this HSM.\n', module)
def set_subject_base_in_config(subject_base):
entry_attrs = api.Backend.ldap2.get_ipa_config()
entry_attrs['ipacertificatesubjectbase'] = [str(subject_base)]
try:
api.Backend.ldap2.update_entry(entry_attrs)
except errors.EmptyModlist:
pass
def print_ca_configuration(options):
"""Print info about how the CA will be configured.
    Does not print a trailing empty line.
"""
print("The CA will be configured with:")
print("Subject DN: {}".format(options.ca_subject))
print("Subject base: {}".format(options.subject_base))
if options.external_ca:
chaining = "externally signed (two-step installation)"
elif options.external_cert_files:
chaining = "externally signed"
else:
chaining = "self-signed"
print("Chaining: {}".format(chaining))
def uninstall_check(options):
"""IPA needs to be running so pkidestroy can unregister CA"""
ca = cainstance.CAInstance(api.env.realm)
if not ca.is_installed():
return
result = ipautil.run([paths.IPACTL, 'status'],
raiseonerr=False)
if result.returncode not in [0, 4]:
try:
logger.info(
"Starting services to unregister CA from security domain")
ipautil.run([paths.IPACTL, 'start'])
except Exception:
logger.info("Re-starting IPA failed, continuing uninstall")
def uninstall_crl_check(options):
"""Check if the host is CRL generation master"""
# Skip the checks if the host is not a CA instance
ca = cainstance.CAInstance(api.env.realm)
if not (api.Command.ca_is_enabled()['result'] and
cainstance.is_ca_installed_locally()):
return
# skip the checks if the host is the last master
ipa_config = api.Command.config_show()['result']
ipa_masters = ipa_config.get('ipa_master_server', [])
if len(ipa_masters) <= 1:
return
try:
crlgen_enabled = ca.is_crlgen_enabled()
except cainstance.InconsistentCRLGenConfigException:
# If config is inconsistent, let's be safe and act as if
# crl gen was enabled
crlgen_enabled = True
if crlgen_enabled:
print("Deleting this server will leave your installation "
"without a CRL generation master.")
if (options.unattended and not options.ignore_last_of_role) or \
not (options.unattended or ipautil.user_input(
"Are you sure you want to continue with the uninstall "
"procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
def install_check(standalone, replica_config, options):
global external_cert_file
global external_ca_file
realm_name = options.realm_name
host_name = options.host_name
if replica_config is None:
options._subject_base = options.subject_base
options._ca_subject = options.ca_subject
options._random_serial_numbers = options.random_serial_numbers
token_name = options.token_name
token_library_path = options.token_library_path
if "setup_ca" in options.__dict__:
setup_ca = options.setup_ca
else:
# We got here through ipa-ca-install
setup_ca = True
else:
# during replica install, this gets invoked before local DS is
# available, so use the remote api.
_api = api if standalone else options._remote_api
# for replica-install the knobs cannot be written, hence leading '_'
options._subject_base = str(replica_config.subject_base)
options._ca_subject = lookup_ca_subject(_api, options._subject_base)
options._random_serial_numbers = (
lookup_random_serial_number_version(_api) > 0
)
if options._random_serial_numbers and replica_config.setup_ca:
try:
random_serial_numbers_validator(
options._random_serial_numbers
)
except ValueError as e:
raise ScriptError(str(e))
(token_name, token_library_path) = lookup_hsm_configuration(_api)
# IPA version and dependency checking should prevent this but
# better to be safe and avoid a failed install.
if replica_config.setup_ca and token_name:
if not options.token_library_path:
options.token_library_path = token_library_path
setup_ca = replica_config.setup_ca
if setup_ca and token_name:
if (options.token_password_file and options.token_password):
raise ScriptError(
"token-password and token-password-file are mutually exclusive"
)
if options.token_password_file:
with open(options.token_password_file, "r") as fd:
options.token_password = fd.readline().strip()
if (
not options.token_password_file
and not options.token_password
):
if options.unattended:
raise ScriptError("HSM token password required")
token_password = installutils.read_password(
f"HSM token '{token_name}'", confirm=False
)
if token_password is None:
raise ScriptError("HSM token password required")
else:
options.token_password = token_password
try:
hsm_validator(
token_name, token_library_path,
options.token_password)
except ValueError as e:
raise ScriptError(str(e))
if replica_config is not None and not replica_config.setup_ca:
return
if replica_config is not None:
if standalone and api.env.ra_plugin == 'selfsign':
raise ScriptError('A selfsign CA can not be added')
if standalone and not options.skip_conncheck:
principal = options.principal
replica_conn_check(
replica_config.ca_host_name, host_name, realm_name, True,
replica_config.ca_ds_port, options.admin_password,
principal=principal, ca_cert_file=options.ca_cert_file)
if options.skip_schema_check:
logger.info("Skipping CA DS schema check")
return
if standalone:
if api.Command.ca_is_enabled()['result']:
raise ScriptError(
"One or more CA masters are already present in IPA realm "
"'%s'.\nIf you wish to replicate CA to this host, please "
"re-run 'ipa-ca-install'\nwith a replica file generated on "
"an existing CA master as argument." % realm_name
)
if options.external_cert_files:
if not cainstance.is_step_one_done():
# This can happen if someone passes external_ca_file without
# already having done the first stage of the CA install.
raise ScriptError(
"CA is not installed yet. To install with an external CA "
"is a two-stage process.\nFirst run the installer with "
"--external-ca.")
external_cert_file, external_ca_file = installutils.load_external_cert(
options.external_cert_files, options._ca_subject)
elif options.external_ca:
if cainstance.is_step_one_done():
raise ScriptError(
"CA is already installed.\nRun the installer with "
"--external-cert-file.")
if os.path.isfile(paths.ROOT_IPA_CSR):
raise ScriptError(
"CA CSR file %s already exists.\nIn order to continue "
"remove the file and run the installer again." %
paths.ROOT_IPA_CSR)
if not options.external_ca_type:
options.external_ca_type = x509.ExternalCAType.GENERIC.value
if options.external_ca_profile is not None:
# check that profile is valid for the external ca type
if options.external_ca_type \
not in options.external_ca_profile.valid_for:
raise ScriptError(
"External CA profile specification '{}' "
"cannot be used with external CA type '{}'."
.format(
options.external_ca_profile.unparsed_input,
options.external_ca_type)
)
if not options.external_cert_files:
if not cainstance.check_ports():
print(
"IPA requires ports 8080 and 8443 for PKI, but one or more "
"are currently in use."
)
raise ScriptError("Aborting installation")
if standalone:
dirname = dsinstance.config_dirname(
ipaldap.realm_to_serverid(realm_name))
cadb = certs.CertDB(realm_name, nssdir=paths.PKI_TOMCAT_ALIAS_DIR,
subject_base=options._subject_base)
dsdb = certs.CertDB(
realm_name, nssdir=dirname, subject_base=options._subject_base)
# Check that we can add our CA cert to DS and PKI NSS databases
for db in (cadb, dsdb):
if not db.exists():
continue
for nickname, _trust_flags in db.list_certs():
if nickname == certdb.get_ca_nickname(realm_name):
raise ScriptError(
"Certificate with nickname %s is present in %s, "
"cannot continue." % (nickname, db.secdir))
cert = db.get_cert_from_db(nickname)
if not cert:
continue
subject = DN(cert.subject)
if subject == DN(options._ca_subject):
raise ScriptError(
"Certificate with subject %s is present in %s, "
"cannot continue." % (subject, db.secdir))
def install(standalone, replica_config, options, custodia):
install_step_0(standalone, replica_config, options, custodia=custodia)
install_step_1(standalone, replica_config, options, custodia=custodia)
def install_step_0(standalone, replica_config, options, custodia):
realm_name = options.realm_name
dm_password = options.dm_password
host_name = options.host_name
ca_subject = options._ca_subject
subject_base = options._subject_base
external_ca_profile = None
if replica_config is None:
ca_signing_algorithm = options.ca_signing_algorithm
if options.external_ca:
ca_type = options.external_ca_type
external_ca_profile = options.external_ca_profile
csr_file = paths.ROOT_IPA_CSR
else:
ca_type = None
csr_file = None
if options.external_cert_files:
cert_file = external_cert_file.name
cert_chain_file = external_ca_file.name
else:
cert_file = None
cert_chain_file = None
token_name = options.token_name
pkcs12_info = None
master_host = None
master_replication_port = None
ra_p12 = None
ra_only = False
promote = False
else:
_api = api if standalone else options._remote_api
        (token_name, _token_library_path) = lookup_hsm_configuration(_api)
if not token_name:
cafile = os.path.join(replica_config.dir, 'cacert.p12')
if replica_config.setup_ca:
custodia.get_ca_keys(
cafile,
replica_config.dirman_password)
else:
cafile = None
ca_signing_algorithm = None
ca_type = None
csr_file = None
cert_file = None
cert_chain_file = None
pkcs12_info = (cafile,)
master_host = replica_config.ca_host_name
master_replication_port = replica_config.ca_ds_port
ra_p12 = os.path.join(replica_config.dir, 'ra.p12')
ra_only = not replica_config.setup_ca
promote = True
# if upgrading from CA-less to CA-ful, need to rewrite
# certmap.conf and subject_base configuration
#
set_subject_base_in_config(subject_base)
sysupgrade.set_upgrade_state(
'certmap.conf', 'subject_base', str(subject_base))
dsinstance.write_certmap_conf(realm_name, ca_subject)
# use secure ldaps when installing a replica or upgrading to CA-ful
# In both cases, 389-DS is already configured to have a trusted cert.
use_ldaps = standalone or replica_config is not None
ca = cainstance.CAInstance(
realm=realm_name, host_name=host_name, custodia=custodia
)
ca.configure_instance(
host_name, dm_password, dm_password,
subject_base=subject_base,
ca_subject=ca_subject,
ca_signing_algorithm=ca_signing_algorithm,
ca_type=ca_type,
external_ca_profile=external_ca_profile,
csr_file=csr_file,
cert_file=cert_file,
cert_chain_file=cert_chain_file,
pkcs12_info=pkcs12_info,
master_host=master_host,
master_replication_port=master_replication_port,
ra_p12=ra_p12,
ra_only=ra_only,
promote=promote,
use_ldaps=use_ldaps,
pki_config_override=options.pki_config_override,
random_serial_numbers=options._random_serial_numbers,
token_name=token_name,
token_library_path=options.token_library_path,
token_password=options.token_password,
)
def install_step_1(standalone, replica_config, options, custodia):
if replica_config is not None and not replica_config.setup_ca:
return
realm_name = options.realm_name
host_name = options.host_name
subject_base = options._subject_base
basedn = ipautil.realm_to_suffix(realm_name)
ca = cainstance.CAInstance(
realm=realm_name, host_name=host_name, custodia=custodia
)
ca.stop('pki-tomcat')
# This is done within stopped_service context, which restarts CA
ca.enable_client_auth_to_db()
# Lightweight CA key retrieval is configured in step 1 instead
# of CAInstance.configure_instance (which is invoked from step
# 0) because kadmin_addprinc fails until krb5.conf is installed
# by krb.create_instance.
#
ca.setup_lightweight_ca_key_retrieval()
serverid = ipaldap.realm_to_serverid(realm_name)
if standalone and replica_config is None:
dirname = dsinstance.config_dirname(serverid)
# Store the new IPA CA cert chain in DS NSS database and LDAP
cadb = certs.CertDB(
realm_name, nssdir=paths.PKI_TOMCAT_ALIAS_DIR,
subject_base=subject_base)
dsdb = certs.CertDB(
realm_name, nssdir=dirname, subject_base=subject_base)
cacert = cadb.get_cert_from_db('caSigningCert cert-pki-ca')
nickname = certdb.get_ca_nickname(realm_name)
trust_flags = certdb.IPA_CA_TRUST_FLAGS
dsdb.add_cert(cacert, nickname, trust_flags)
certstore.put_ca_cert_nss(api.Backend.ldap2, api.env.basedn,
cacert, nickname, trust_flags,
config_ipa=True, config_compat=True)
# Store DS CA cert in Dogtag NSS database
trust_flags = dict(reversed(dsdb.list_certs()))
server_certs = dsdb.find_server_certs()
trust_chain = dsdb.find_root_cert(server_certs[0][0])[:-1]
nickname = trust_chain[-1]
cert = dsdb.get_cert_from_db(nickname)
cadb.add_cert(cert, nickname, trust_flags[nickname])
installutils.restart_dirsrv()
ca.start('pki-tomcat')
if standalone or replica_config is not None:
# We need to restart apache as we drop a new config file in there
services.knownservices.httpd.restart(capture_output=True)
if standalone:
# Install CA DNS records
if bindinstance.dns_container_exists(basedn):
bind = bindinstance.BindInstance()
bind.update_system_records()
def uninstall():
acme = acmeinstance.ACMEInstance(api.env.realm)
acme.uninstall()
ca_instance = cainstance.CAInstance(api.env.realm)
ca_instance.stop_tracking_certificates()
ipautil.remove_file(paths.RA_AGENT_PEM)
ipautil.remove_file(paths.RA_AGENT_KEY)
if ca_instance.is_configured():
ca_instance.uninstall()
class CASigningAlgorithm(enum.Enum):
SHA1_WITH_RSA = 'SHA1withRSA'
SHA_256_WITH_RSA = 'SHA256withRSA'
SHA_384_WITH_RSA = 'SHA384withRSA'
SHA_512_WITH_RSA = 'SHA512withRSA'
@group
class CAInstallInterface(dogtag.DogtagInstallInterface,
conncheck.ConnCheckInterface):
"""
Interface of the CA installer
Knobs defined here will be available in:
* ipa-server-install
* ipa-replica-prepare
* ipa-replica-install
* ipa-ca-install
"""
description = "Certificate system"
principal = conncheck.ConnCheckInterface.principal
principal = extend_knob(
principal,
description="User allowed to manage replicas",
cli_names=list(principal.cli_names) + ['-P'],
)
principal = enroll_only(principal)
principal = replica_install_only(principal)
admin_password = conncheck.ConnCheckInterface.admin_password
admin_password = extend_knob(
admin_password,
description="Admin user Kerberos password used for connection check",
cli_names=list(admin_password.cli_names) + ['-w'],
)
admin_password = enroll_only(admin_password)
external_ca = knob(
None,
description=("Generate a CSR for the IPA CA certificate to be signed "
"by an external CA"),
)
external_ca = master_install_only(external_ca)
external_ca_type = knob(
x509.ExternalCAType, None, description="Type of the external CA")
external_ca_type = master_install_only(external_ca_type)
external_ca_profile = knob(
type=x509.ExternalCAProfile,
default=None,
description=(
"Specify the certificate profile/template to use at the "
"external CA"),
)
external_ca_profile = master_install_only(external_ca_profile)
external_cert_files = knob(
typing.List[str], None,
description=("File containing the IPA CA certificate and the external "
"CA certificate chain"),
cli_names='--external-cert-file',
cli_deprecated_names=['--external_cert_file', '--external_ca_file'],
cli_metavar='FILE',
)
external_cert_files = master_install_only(external_cert_files)
@external_cert_files.validator
def external_cert_files(self, value):
if any(not os.path.isabs(path) for path in value):
raise ValueError("must use an absolute path")
subject_base = knob(
str, None,
description=(
"The certificate subject base (default O=<realm-name>). "
"RDNs are in LDAP order (most specific RDN first)."
),
cli_deprecated_names=['--subject'],
)
subject_base = master_install_only(subject_base)
@subject_base.validator
def subject_base(self, value):
subject_validator(VALID_SUBJECT_BASE_ATTRS, value)
ca_subject = knob(
str, None,
description=(
"The CA certificate subject DN "
"(default CN=Certificate Authority,O=<realm-name>). "
"RDNs are in LDAP order (most specific RDN first)."
),
)
ca_subject = master_install_only(ca_subject)
@ca_subject.validator
def ca_subject(self, value):
subject_validator(VALID_SUBJECT_ATTRS, value)
ca_signing_algorithm = knob(
CASigningAlgorithm, None,
description="Signing algorithm of the IPA CA certificate",
)
ca_signing_algorithm = master_install_only(ca_signing_algorithm)
skip_schema_check = knob(
None,
description="skip check for updated CA DS schema on the remote master",
)
skip_schema_check = enroll_only(skip_schema_check)
skip_schema_check = replica_install_only(skip_schema_check)
random_serial_numbers = knob(
None,
description="Enable random serial numbers",
)
random_serial_numbers = master_install_only(random_serial_numbers)
@random_serial_numbers.validator
def random_serial_numbers(self, value):
random_serial_numbers_validator(value)
| 31,230
|
Python
|
.py
| 726
| 33.769972
| 89
| 0.634978
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,771
|
ipa_cacert_manage.py
|
freeipa_freeipa/ipaserver/install/ipa_cacert_manage.py
|
# Authors: Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import datetime
import logging
import os
from optparse import OptionGroup # pylint: disable=deprecated-module
import gssapi
from ipalib.constants import (
RENEWAL_CA_NAME, RENEWAL_REUSE_CA_NAME, RENEWAL_SELFSIGNED_CA_NAME,
IPA_CA_CN)
from ipalib.install import certmonger, certstore
from ipapython import admintool, ipautil
from ipapython.certdb import (EMPTY_TRUST_FLAGS,
EXTERNAL_CA_TRUST_FLAGS,
TrustFlags,
parse_trust_flags,
get_ca_nickname)
from ipapython.dn import DN
from ipaplatform.paths import paths
from ipalib import api, errors, x509
from ipaserver.install import certs, cainstance, installutils
logger = logging.getLogger(__name__)
class CACertManage(admintool.AdminTool):
command_name = 'ipa-cacert-manage'
usage = "%prog renew [options]\n%prog install [options] CERTFILE\n" \
"%prog delete [options] NICKNAME\n%prog list\n%prog prune"
description = "Manage CA certificates."
cert_nickname = 'caSigningCert cert-pki-ca'
@classmethod
def add_options(cls, parser):
super(CACertManage, cls).add_options(parser)
parser.add_option(
"-p", "--password", dest='password',
help="Directory Manager password")
renew_group = OptionGroup(parser, "Renew options")
renew_group.add_option(
"--self-signed", dest='self_signed',
action='store_true',
help="Sign the renewed certificate by itself")
renew_group.add_option(
"--external-ca", dest='self_signed',
action='store_false',
help="Sign the renewed certificate by external CA")
ext_cas = tuple(x.value for x in x509.ExternalCAType)
renew_group.add_option(
"--external-ca-type", dest="external_ca_type",
type="choice", choices=ext_cas,
metavar="{{{0}}}".format(",".join(ext_cas)),
help="Type of the external CA. Default: generic")
renew_group.add_option(
"--external-ca-profile", dest="external_ca_profile",
type='constructor', constructor=x509.ExternalCAProfile,
default=None, metavar="PROFILE-SPEC",
help="Specify the certificate profile/template to use "
"at the external CA")
renew_group.add_option(
"--external-cert-file", dest="external_cert_files",
action="append", metavar="FILE",
help="File containing the IPA CA certificate and the external CA "
"certificate chain")
parser.add_option_group(renew_group)
install_group = OptionGroup(parser, "Install options")
install_group.add_option(
"-n", "--nickname", dest='nickname',
help="Nickname for the certificate")
install_group.add_option(
"-t", "--trust-flags", dest='trust_flags', default='C,,',
help="Trust flags for the certificate in certutil format")
parser.add_option_group(install_group)
delete_group = OptionGroup(parser, "Delete options")
delete_group.add_option(
"-f", "--force", action='store_true',
help="Force removing the CA even if chain validation fails")
parser.add_option_group(delete_group)
def validate_options(self):
super(CACertManage, self).validate_options(needs_root=True)
installutils.check_server_configuration()
parser = self.option_parser
if not self.args:
parser.error("command not provided")
command = self.command = self.args[0]
if command not in ('renew', 'list', 'install', 'delete', 'prune'):
parser.error("unknown command \"%s\"" % command)
elif command == 'install':
if len(self.args) < 2:
parser.error("certificate file name not provided")
elif command == 'delete':
if len(self.args) < 2:
parser.error("nickname not provided")
def run(self):
command = self.command
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
self.ldap_connect()
try:
if command == 'renew':
return self.renew()
elif command == 'install':
return self.install()
elif command == 'list':
return self.list()
elif command == 'delete':
return self.delete()
elif command == 'prune':
return self.prune()
else:
raise NotImplementedError
finally:
api.Backend.ldap2.disconnect()
def ldap_connect(self):
password = self.options.password
if not password:
try:
api.Backend.ldap2.connect(ccache=os.environ.get('KRB5CCNAME'))
except (gssapi.exceptions.GSSError, errors.ACIError):
pass
else:
return
password = installutils.read_password(
"Directory Manager", confirm=False, validate=False)
if password is None:
raise admintool.ScriptError(
"Directory Manager password required")
api.Backend.ldap2.connect(bind_pw=password)
def _get_ca_request_id(self, ca_name):
"""Lookup tracking request for IPA CA, using given ca-name."""
criteria = {
'cert-database': paths.PKI_TOMCAT_ALIAS_DIR,
'cert-nickname': self.cert_nickname,
'ca-name': ca_name,
}
return certmonger.get_request_id(criteria)
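    # For comparison (illustrative), the same tracking request can be
    # inspected manually with the certmonger CLI:
    #
    #   getcert list -d /etc/pki/pki-tomcat/alias \
    #       -n 'caSigningCert cert-pki-ca'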
def renew(self):
ca = cainstance.CAInstance(api.env.realm)
if not ca.is_configured():
raise admintool.ScriptError("CA is not configured on this system")
self.request_id = self._get_ca_request_id(RENEWAL_CA_NAME)
if self.request_id is None:
# if external CA renewal was interrupted, the request may have
# been left with the "dogtag-ipa-ca-renew-agent-reuse" CA;
# look for it too
self.request_id = self._get_ca_request_id(RENEWAL_REUSE_CA_NAME)
if self.request_id is None:
raise admintool.ScriptError(
"CA certificate is not tracked by certmonger")
logger.debug(
"Found certmonger request id %r", self.request_id)
db = certs.CertDB(api.env.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
cert = db.get_cert_from_db(self.cert_nickname)
options = self.options
if options.external_cert_files:
return self.renew_external_step_2(ca, cert)
if options.self_signed is not None:
self_signed = options.self_signed
else:
self_signed = cert.is_self_signed()
if self_signed:
return self.renew_self_signed(ca)
else:
return self.renew_external_step_1(ca)
def renew_self_signed(self, ca):
print("Renewing CA certificate, please wait")
msg = "You cannot specify {} when renewing a self-signed CA"
if self.options.external_ca_type:
raise admintool.ScriptError(msg.format("--external-ca-type"))
if self.options.external_ca_profile:
raise admintool.ScriptError(msg.format("--external-ca-profile"))
try:
ca.set_renewal_master()
except errors.NotFound:
raise admintool.ScriptError("CA renewal master not found")
self.resubmit_request(RENEWAL_SELFSIGNED_CA_NAME)
db = certs.CertDB(api.env.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
cert = db.get_cert_from_db(self.cert_nickname)
update_ipa_ca_entry(api, cert)
print("CA certificate successfully renewed")
def renew_external_step_1(self, ca):
print("Exporting CA certificate signing request, please wait")
options = self.options
if not options.external_ca_type:
options.external_ca_type = x509.ExternalCAType.GENERIC.value
if options.external_ca_type == x509.ExternalCAType.MS_CS.value \
and options.external_ca_profile is None:
options.external_ca_profile = x509.MSCSTemplateV1(u"SubCA")
if options.external_ca_profile is not None:
# check that profile is valid for the external ca type
if options.external_ca_type \
not in options.external_ca_profile.valid_for:
raise admintool.ScriptError(
"External CA profile specification '{}' "
"cannot be used with external CA type '{}'."
.format(
options.external_ca_profile.unparsed_input,
options.external_ca_type)
)
self.resubmit_request(
RENEWAL_REUSE_CA_NAME,
profile=options.external_ca_profile)
print(("The next step is to get %s signed by your CA and re-run "
"ipa-cacert-manage as:" % paths.IPA_CA_CSR))
print("ipa-cacert-manage renew "
"--external-cert-file=/path/to/signed_certificate "
"--external-cert-file=/path/to/external_ca_certificate")
def renew_external_step_2(self, ca, old_cert):
print("Importing the renewed CA certificate, please wait")
options = self.options
conn = api.Backend.ldap2
old_spki = old_cert.public_key_info_bytes
cert_file, ca_file = installutils.load_external_cert(
options.external_cert_files, DN(old_cert.subject))
with open(cert_file.name, 'rb') as f:
new_cert_data = f.read()
new_cert = x509.load_pem_x509_certificate(new_cert_data)
new_spki = new_cert.public_key_info_bytes
if new_cert.subject != old_cert.subject:
raise admintool.ScriptError(
"Subject name mismatch (visit "
"http://www.freeipa.org/page/Troubleshooting for "
"troubleshooting guide)")
if new_cert.subject_bytes != old_cert.subject_bytes:
raise admintool.ScriptError(
"Subject name encoding mismatch (visit "
"http://www.freeipa.org/page/Troubleshooting for "
"troubleshooting guide)")
if new_spki != old_spki:
raise admintool.ScriptError(
"Subject public key info mismatch (visit "
"http://www.freeipa.org/page/Troubleshooting for "
"troubleshooting guide)")
with certs.NSSDatabase() as tmpdb:
tmpdb.create_db()
tmpdb.add_cert(old_cert, 'IPA CA', EXTERNAL_CA_TRUST_FLAGS)
try:
tmpdb.add_cert(new_cert, 'IPA CA', EXTERNAL_CA_TRUST_FLAGS)
except ipautil.CalledProcessError as e:
raise admintool.ScriptError(
"Not compatible with the current CA certificate: %s" % e)
ca_certs = x509.load_certificate_list_from_file(ca_file.name)
for ca_cert in ca_certs:
tmpdb.add_cert(
ca_cert, str(DN(ca_cert.subject)), EXTERNAL_CA_TRUST_FLAGS)
try:
tmpdb.verify_ca_cert_validity('IPA CA')
except ValueError as e:
raise admintool.ScriptError(
"Not a valid CA certificate: %s (visit "
"http://www.freeipa.org/page/Troubleshooting for "
"troubleshooting guide)" % e)
trust_chain = tmpdb.get_trust_chain('IPA CA')[:-1]
for nickname in trust_chain:
try:
ca_cert = tmpdb.get_cert(nickname)
except RuntimeError:
break
certstore.put_ca_cert_nss(
conn,
api.env.basedn,
ca_cert,
nickname,
EMPTY_TRUST_FLAGS)
dn = DN(('cn', self.cert_nickname), ('cn', 'ca_renewal'),
('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
try:
entry = conn.get_entry(dn, ['usercertificate'])
entry['usercertificate'] = [new_cert]
conn.update_entry(entry)
except errors.NotFound:
entry = conn.make_entry(
dn,
objectclass=['top', 'pkiuser', 'nscontainer'],
cn=[self.cert_nickname],
usercertificate=[new_cert])
conn.add_entry(entry)
except errors.EmptyModlist:
pass
update_ipa_ca_entry(api, new_cert)
try:
ca.set_renewal_master()
except errors.NotFound:
raise admintool.ScriptError("CA renewal master not found")
self.resubmit_request(RENEWAL_REUSE_CA_NAME)
print("CA certificate successfully renewed")
def resubmit_request(self, ca=RENEWAL_CA_NAME, profile=None):
timeout = api.env.startup_timeout + 60
cm_profile = None
if isinstance(profile, x509.MSCSTemplateV1):
cm_profile = profile.unparsed_input
cm_template = None
if isinstance(profile, x509.MSCSTemplateV2):
cm_template = profile.unparsed_input
logger.debug("resubmitting certmonger request '%s'", self.request_id)
certmonger.resubmit_request(self.request_id, ca=ca, profile=cm_profile,
template_v2=cm_template, is_ca=True)
try:
state = certmonger.wait_for_request(self.request_id, timeout)
except RuntimeError:
raise admintool.ScriptError(
"Resubmitting certmonger request '%s' timed out, "
"please check the request manually" % self.request_id)
ca_error = certmonger.get_request_value(self.request_id, 'ca-error')
if state != 'MONITORING' or ca_error:
raise admintool.ScriptError(
"Error resubmitting certmonger request '%s', "
"please check the request manually" % self.request_id)
logger.debug("modifying certmonger request '%s'", self.request_id)
certmonger.modify(self.request_id,
ca=RENEWAL_CA_NAME,
profile='', template_v2='')
def install(self):
print("Installing CA certificate, please wait")
options = self.options
ca_certs = certstore.get_ca_certs_nss(api.Backend.ldap2,
api.env.basedn,
api.env.realm,
False)
with certs.NSSDatabase() as tmpdb:
tmpdb.create_db()
tmpdb.import_files(self.args[1:])
imported = tmpdb.list_certs()
logger.debug("loaded raw certs '%s'", imported)
if len(imported) > 1 and options.nickname:
raise admintool.ScriptError(
"Nickname can only be used if only a single "
"certificate is loaded")
# If a nickname was provided re-import the cert
if options.nickname:
(nickname, trust_flags) = imported[0]
cert = tmpdb.get_cert(nickname)
tmpdb.delete_cert(nickname)
tmpdb.add_cert(cert, options.nickname, EXTERNAL_CA_TRUST_FLAGS)
imported = tmpdb.list_certs()
for ca_cert, ca_nickname, ca_trust_flags in ca_certs:
tmpdb.add_cert(ca_cert, ca_nickname, ca_trust_flags)
for nickname, trust_flags in imported:
if trust_flags.has_key:
continue
tmpdb.trust_root_cert(nickname, EXTERNAL_CA_TRUST_FLAGS)
for nickname, trust_flags in imported:
try:
tmpdb.verify_ca_cert_validity(nickname)
except ValueError as e:
raise admintool.ScriptError(
"Not a valid CA certificate: %s (visit "
"http://www.freeipa.org/page/Troubleshooting for "
"troubleshooting guide)" % e)
else:
print("Verified %s" % nickname)
trust_flags = options.trust_flags.split(',')
if (set(options.trust_flags) - set(',CPTcgpuw') or
len(trust_flags) not in [3, 4]):
raise admintool.ScriptError("Invalid trust flags")
extra_flags = trust_flags[3:]
extra_usages = set()
if extra_flags:
if 'C' in extra_flags[0]:
extra_usages.add(x509.EKU_PKINIT_KDC)
if 'T' in extra_flags[0]:
extra_usages.add(x509.EKU_PKINIT_CLIENT_AUTH)
trust_flags = parse_trust_flags(','.join(trust_flags[:3]))
trust_flags = TrustFlags(trust_flags.has_key,
trust_flags.trusted,
trust_flags.ca,
trust_flags.usages | extra_usages)
for nickname, _trust_flags in imported:
try:
cert = tmpdb.get_cert(nickname)
certstore.put_ca_cert_nss(
api.Backend.ldap2, api.env.basedn, cert, nickname,
trust_flags)
except ValueError as e:
raise admintool.ScriptError(
"Failed to install the certificate: %s" % e)
print("CA certificate successfully installed")
def list(self):
ca_certs = certstore.get_ca_certs_nss(api.Backend.ldap2,
api.env.basedn,
api.env.realm,
False)
for _ca_cert, ca_nickname, _ca_trust_flags in ca_certs:
print(ca_nickname)
def _delete_by_nickname(self, nicknames, options):
conn = api.Backend.ldap2
ca_certs = certstore.get_ca_certs_nss(api.Backend.ldap2,
api.env.basedn,
api.env.realm,
False)
ipa_ca_nickname = get_ca_nickname(api.env.realm)
for nickname in nicknames:
found = False
for _ca_cert, ca_nickname, _ca_trust_flags in ca_certs:
if ca_nickname == nickname:
if ca_nickname == ipa_ca_nickname:
raise admintool.ScriptError(
'The IPA CA cannot be removed with this tool'
)
else:
found = True
break
if not found:
raise admintool.ScriptError(
'Unknown CA \'{}\''.format(nickname)
)
with certs.NSSDatabase() as tmpdb:
tmpdb.create_db()
for ca_cert, ca_nickname, ca_trust_flags in ca_certs:
tmpdb.add_cert(ca_cert, ca_nickname, ca_trust_flags)
loaded = tmpdb.list_certs()
logger.debug("loaded raw certs '%s'", loaded)
for nickname in nicknames:
tmpdb.delete_cert(nickname)
for ca_nickname, _trust_flags in loaded:
if ca_nickname in nicknames:
continue
if ipa_ca_nickname in nicknames:
raise admintool.ScriptError(
"The IPA CA cannot be removed")
logger.debug("Verifying %s", ca_nickname)
try:
tmpdb.verify_ca_cert_validity(ca_nickname)
except ValueError as e:
msg = "Verifying \'%s\' failed. Removing part of the " \
"chain? %s" % (nickname, e)
if options.force:
print(msg)
continue
raise admintool.ScriptError(msg)
else:
logger.debug("Verified %s", ca_nickname)
for _ca_cert, ca_nickname, _ca_trust_flags in ca_certs:
if ca_nickname in nicknames:
container_dn = DN(('cn', 'certificates'), ('cn', 'ipa'),
('cn', 'etc'), api.env.basedn)
dn = DN(('cn', nickname), container_dn)
logger.debug("Deleting %s", ca_nickname)
conn.delete_entry(dn)
return
def delete(self):
nickname = self.args[1]
self._delete_by_nickname([nickname], self.options)
def prune(self):
expired_certs = []
ca_certs = certstore.get_ca_certs_nss(api.Backend.ldap2,
api.env.basedn,
api.env.realm,
False)
now = datetime.datetime.now(tz=datetime.timezone.utc)
for ca_cert, ca_nickname, _ca_trust_flags in ca_certs:
if ca_cert.not_valid_after_utc < now:
expired_certs.append(ca_nickname)
if expired_certs:
self._delete_by_nickname(expired_certs, self.options)
print("Expired certificates deleted:")
for nickname in expired_certs:
print(nickname)
print("Run ipa-certupdate on enrolled machines to apply changes.")
else:
print("No certificates were deleted")
def update_ipa_ca_entry(api, cert):
"""
The Issuer DN of the IPA CA may have changed. Update the IPA CA entry.
:param api: finalised API object, with *connected* LDAP backend
:param cert: a python-cryptography Certificate object
"""
try:
entry = api.Backend.ldap2.get_entry(
DN(('cn', IPA_CA_CN), api.env.container_ca, api.env.basedn),
['ipacaissuerdn'])
entry['ipacaissuerdn'] = [DN(cert.issuer)]
api.Backend.ldap2.update_entry(entry)
except errors.EmptyModlist:
pass
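# Minimal usage sketch (assumes a finalised api with a connected ldap2
# backend; the certificate source below is illustrative):
#   cert = x509.load_certificate_from_file(paths.CA_CRT)
#   update_ipa_ca_entry(api, cert)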
| 23,135 | Python | .py | 488 | 33.534836 | 79 | 0.565827 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,772 | ipa_acme_manage.py | freeipa_freeipa/ipaserver/install/ipa_acme_manage.py |
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
import enum
import pki.util
import logging
from optparse import OptionGroup # pylint: disable=deprecated-module
from ipalib import api, errors, x509
from ipalib import _
from ipalib.facts import is_ipa_configured
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool
from ipapython import cookie, dogtag
from ipapython.ipautil import run
from ipapython.certdb import NSSDatabase, EXTERNAL_CA_TRUST_FLAGS
from ipaserver.install import cainstance
from ipaserver.install.ca import lookup_random_serial_number_version
from ipaserver.plugins.dogtag import RestClient
logger = logging.getLogger(__name__)
default_pruning_options = {
'certRetentionTime': '30',
'certRetentionUnit': 'day',
'certSearchSizeLimit': '1000',
'certSearchTimeLimit': '0',
'requestRetentionTime': 'day',
'requestRetentionUnit': '30',
'requestSearchSizeLimit': '1000',
'requestSearchTimeLimit': '0',
'cron': ''
}
pruning_labels = {
'certRetentionTime': 'Certificate Retention Time',
'certRetentionUnit': 'Certificate Retention Unit',
'certSearchSizeLimit': 'Certificate Search Size Limit',
'certSearchTimeLimit': 'Certificate Search Time Limit',
'requestRetentionTime': 'Request Retention Time',
'requestRetentionUnit': 'Request Retention Unit',
'requestSearchSizeLimit': 'Request Search Size Limit',
'requestSearchTimeLimit': 'Request Search Time Limit',
'cron': 'cron Schedule'
}
def validate_range(val, min, max):
"""dogtag appears to have no error checking in the cron
entry so do some minimum amount of validation. It is
left as an exercise for the user to do month/day
validation so requesting Feb 31 will be accepted.
Only * and a number within a min/max range are allowed.
"""
if val == '*':
return
if '-' in val or '/' in val:
raise ValueError(f"{val} ranges are not supported")
try:
int(val)
except ValueError:
# raise a clearer error
raise ValueError(f"{val} is not a valid integer")
if int(val) < min or int(val) > max:
raise ValueError(f"{val} not within the range {min}-{max}")
# Manages the FreeIPA ACME service on a per-server basis.
#
# This program is a stop-gap until the deployment-wide management of
# the ACME service is implemented. So we will eventually have API
# calls for managing the ACME service, e.g. `ipa acme-enable'.
# After that is implemented, we can either deprecate and eventually
# remove this program, or make it a wrapper for the API commands.
class acme_state(RestClient):
def _request(self, url, headers=None):
headers = headers or {}
return dogtag.https_request(
self.ca_host, 8443,
url=url,
cafile=self.ca_cert,
client_certfile=paths.RA_AGENT_PEM,
client_keyfile=paths.RA_AGENT_KEY,
headers=headers,
method='POST'
)
def __enter__(self):
status, resp_headers, _unused = self._request('/acme/login')
cookies = cookie.Cookie.parse(resp_headers.get('set-cookie', ''))
if status != 200 or len(cookies) == 0:
raise errors.RemoteRetrieveError(
reason=_('Failed to authenticate to CA REST API')
)
object.__setattr__(self, 'cookie', str(cookies[0]))
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Log out of the REST API"""
headers = dict(Cookie=self.cookie)
status, unused, _unused = self._request('/acme/logout', headers=headers)
object.__setattr__(self, 'cookie', None)
if status != 204:
raise RuntimeError('Failed to logout')
def enable(self):
headers = dict(Cookie=self.cookie)
status, unused, _unused = self._request('/acme/enable', headers=headers)
if status != 200:
raise RuntimeError('Failed to enable ACME')
def disable(self):
headers = dict(Cookie=self.cookie)
status, unused, _unused = self._request('/acme/disable',
headers=headers)
if status != 200:
raise RuntimeError('Failed to disable ACME')
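# Minimal usage sketch: the context manager logs in to the CA REST API on
# entry and logs out on exit (a finalised `api` object is assumed):
#   with acme_state(api) as st:
#       st.enable()   # or st.disable()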
class Command(enum.Enum):
ENABLE = 'enable'
DISABLE = 'disable'
STATUS = 'status'
PRUNE = 'pruning'
class IPAACMEManage(AdminTool):
command_name = "ipa-acme-manage"
usage = "%prog [enable|disable|status|pruning]"
description = "Manage the IPA ACME service"
@classmethod
def add_options(cls, parser):
group = OptionGroup(parser, 'Pruning')
group.add_option(
"--enable", dest="enable", action="store_true",
default=False, help="Enable certificate pruning")
group.add_option(
"--disable", dest="disable", action="store_true",
default=False, help="Disable certificate pruning")
group.add_option(
"--cron", dest="cron", action="store",
default=None, help="Configure the pruning cron job")
group.add_option(
"--certretention", dest="certretention", action="store",
default=None, help="Certificate retention time", type=int)
group.add_option(
"--certretentionunit", dest="certretentionunit", action="store",
choices=['minute', 'hour', 'day', 'year'],
default=None, help="Certificate retention units")
group.add_option(
"--certsearchsizelimit", dest="certsearchsizelimit",
action="store",
default=None, help="LDAP search size limit", type=int)
group.add_option(
"--certsearchtimelimit", dest="certsearchtimelimit", action="store",
default=None, help="LDAP search time limit", type=int)
group.add_option(
"--requestretention", dest="requestretention", action="store",
default=None, help="Request retention time", type=int)
group.add_option(
"--requestretentionunit", dest="requestretentionunit",
choices=['minute', 'hour', 'day', 'year'],
action="store", default=None, help="Request retention units")
group.add_option(
"--requestsearchsizelimit", dest="requestsearchsizelimit",
action="store",
default=None, help="LDAP search size limit", type=int)
group.add_option(
"--requestsearchtimelimit", dest="requestsearchtimelimit",
action="store",
default=None, help="LDAP search time limit", type=int)
group.add_option(
"--config-show", dest="config_show", action="store_true",
default=False, help="Show the current pruning configuration")
group.add_option(
"--run", dest="run", action="store_true",
default=False, help="Run the pruning job now")
parser.add_option_group(group)
super(IPAACMEManage, cls).add_options(parser, debug_option=True)
def validate_options(self):
super(IPAACMEManage, self).validate_options(needs_root=True)
if len(self.args) < 1:
self.option_parser.error('missing command argument')
if self.args[0] == "pruning":
if self.options.enable and self.options.disable:
self.option_parser.error("Cannot both enable and disable")
elif (
any(
[
self.options.enable,
self.options.disable,
self.options.cron,
self.options.certretention is not None,
self.options.certretentionunit,
self.options.requestretention is not None,
self.options.requestretentionunit,
self.options.certsearchsizelimit is not None,
self.options.certsearchtimelimit is not None,
self.options.requestsearchsizelimit is not None,
self.options.requestsearchtimelimit is not None,
]
)
and (self.options.config_show or self.options.run)
):
self.option_parser.error(
"Cannot change and show config or run at the same time"
)
elif self.options.cron:
if len(self.options.cron.split()) != 5:
self.option_parser.error("Invalid format for --cron")
# dogtag does no validation when setting this option so
# do the minimum. The dogtag cron is limited compared to
# crontab(5).
opt = self.options.cron.split()
validate_range(opt[0], 0, 59)
validate_range(opt[1], 0, 23)
validate_range(opt[2], 1, 31)
validate_range(opt[3], 1, 12)
validate_range(opt[4], 0, 6)
try:
self.command = Command(self.args[0])
except ValueError:
self.option_parser.error(f'unknown command "{self.args[0]}"')
def check_san_status(self):
"""
Require the Apache cert to have ipa-ca.$DOMAIN SAN
"""
cert = x509.load_certificate_from_file(paths.HTTPD_CERT_FILE)
cainstance.check_ipa_ca_san(cert)
def pruning(self):
def run_pki_server(command, directive, prefix, value=None):
"""Take a set of arguments to append to pki-server"""
args = [
'pki-server', command,
f'{prefix}.{directive}'
]
if value is not None:
args.extend([str(value)])
logger.debug(args)
result = run(args, raiseonerr=False, capture_output=True,
capture_error=True)
if result.returncode != 0:
# See if the parameter doesn't exist. If not then no
# user-specified value has been set.
# ERROR: No such parameter: jobsScheduler...
if 'No such parameter' in result.error_output:
return ''
raise RuntimeError(result.error_output)
return result.output.strip()
def ca_config_set(directive, value,
prefix='jobsScheduler.job.pruning'):
run_pki_server('ca-config-set', directive, prefix, value)
# ca-config-set always succeeds, even if the option is
# not supported.
newvalue = ca_config_show(directive)
if str(value) != newvalue.strip():
raise RuntimeError('Updating %s failed' % directive)
def ca_config_show(directive):
return run_pki_server('ca-config-show', directive,
prefix='jobsScheduler.job.pruning')
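# Illustrative round trip through the helpers above; ca_config_set() shells
# out to something like
#   pki-server ca-config-set jobsScheduler.job.pruning.cron "0 0 1 * *"
# and ca_config_show('cron') then returns '0 0 1 * *' (values assumed).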
def config_show():
status = ca_config_show('enabled')
if status.strip() == 'true':
print("Status: enabled")
else:
print("Status: disabled")
for option in (
'certRetentionTime', 'certRetentionUnit',
'certSearchSizeLimit', 'certSearchTimeLimit',
'requestRetentionTime', 'requestRetentionUnit',
'requestSearchSizeLimit', 'requestSearchTimeLimit',
'cron',
):
value = ca_config_show(option)
if value:
print("{}: {}".format(pruning_labels[option], value))
else:
print("{}: {}".format(pruning_labels[option],
default_pruning_options[option]))
def run_pruning():
"""Run the pruning job manually"""
with NSSDatabase() as tmpdb:
print("Preparing...")
tmpdb.create_db()
tmpdb.import_files((paths.RA_AGENT_PEM, paths.RA_AGENT_KEY),
import_keys=True)
tmpdb.import_files((paths.IPA_CA_CRT,))
for nickname, trust_flags in tmpdb.list_certs():
if trust_flags.has_key:
ra_nickname = nickname
continue
# external is sufficient for our purposes: C,,
tmpdb.trust_root_cert(nickname, EXTERNAL_CA_TRUST_FLAGS)
print("Starting job...")
args = ['pki', '-C', tmpdb.pwd_file, '-d', tmpdb.secdir,
'-n', ra_nickname,
'ca-job-start', 'pruning']
logger.debug(args)
run(args, stdin='y')
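# Equivalent command line (NSS DB paths and nickname are illustrative):
#   pki -C /path/to/pwdfile -d /path/to/nssdb -n <RA agent nickname> \
#       ca-job-start pruning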
pki_version = pki.util.Version(pki.specification_version())
if pki_version < pki.util.Version("11.3.0"):
raise RuntimeError(
'Certificate pruning is not supported in PKI version %s'
% pki_version
)
if lookup_random_serial_number_version(api) == 0:
raise RuntimeError(
'Certificate pruning requires random serial numbers'
)
if self.options.config_show:
config_show()
return
if self.options.run:
run_pruning()
return
# Don't play the enable/disable at the same time game
if self.options.enable:
ca_config_set('owner', 'ipara')
ca_config_set('enabled', 'true')
ca_config_set('enabled', 'true', 'jobsScheduler')
elif self.options.disable:
ca_config_set('enabled', 'false')
# pki-server ca-config-set can only set one option at a time so
# loop through all the options and set what is there.
if self.options.certretention is not None:
ca_config_set('certRetentionTime',
self.options.certretention)
if self.options.certretentionunit:
ca_config_set('certRetentionUnit',
self.options.certretentionunit)
if self.options.certsearchtimelimit is not None:
ca_config_set('certSearchTimeLimit',
self.options.certsearchtimelimit)
if self.options.certsearchsizelimit is not None:
ca_config_set('certSearchSizeLimit',
self.options.certsearchsizelimit)
if self.options.requestretention is not None:
ca_config_set('requestRetentionTime',
self.options.requestretention)
if self.options.requestretentionunit:
ca_config_set('requestRetentionUnit',
self.options.requestretentionunit)
if self.options.requestsearchsizelimit is not None:
ca_config_set('requestSearchSizeLimit',
self.options.requestsearchsizelimit)
if self.options.requestsearchtimelimit is not None:
ca_config_set('requestSearchTimeLimit',
self.options.requestsearchtimelimit)
if self.options.cron:
ca_config_set('cron', self.options.cron)
config_show()
print("The CA service must be restarted for changes to take effect")
def run(self):
if not is_ipa_configured():
print("IPA is not configured.")
return 2
if not cainstance.is_ca_installed_locally():
print("CA is not installed on this server.")
return 3
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
api.Backend.ldap2.connect()
state = acme_state(api)
with state as ca_api:
if self.command == Command.ENABLE:
self.check_san_status()
ca_api.enable()
elif self.command == Command.DISABLE:
ca_api.disable()
elif self.command == Command.STATUS:
status = "enabled" if dogtag.acme_status() else "disabled"
print("ACME is {}".format(status))
elif self.command == Command.PRUNE:
self.pruning()
else:
raise RuntimeError('programmer error: unhandled enum case')
return 0
| 16,359 | Python | .py | 361 | 33.218837 | 80 | 0.586501 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,773 | adtrustinstance.py | freeipa_freeipa/ipaserver/install/adtrustinstance.py |
# Authors: Sumit Bose <sbose@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import logging
import os
import errno
import ldap
import tempfile
import uuid
import struct
import re
import socket
import six
from ipaserver.install import service
from ipaserver.install import installutils
from ipaserver.install.replication import wait_for_task
from ipalib import errors, api
from ipalib.constants import SUBID_RANGE_START, ALLOWED_NETBIOS_CHARS
from ipalib.util import normalize_zone
from ipapython.dn import DN
from ipapython import ipachangeconf
from ipapython import ipaldap
from ipapython import ipautil
import ipapython.errors
from ipaplatform import services
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
UPGRADE_ERROR = """
Entry %(dn)s does not exist.
This means the upgrade from IPA 2.x to 3.x did not go well and the required
S4U2Proxy configuration was not set up properly. Please run ipa-ldap-updater
manually and re-run ipa-adtrust-install again afterwards.
"""
def check_inst():
for smbfile in [paths.SMBD, paths.NET]:
if not os.path.exists(smbfile):
logger.error("%s was not found on this system", smbfile)
logger.error("Please install the 'samba' packages and "
"start the installation again")
return False
# Check that ipa-server-trust-ad package is installed,
# by looking for the file /usr/share/ipa/smb.conf.empty
if not os.path.exists(os.path.join(paths.USR_SHARE_IPA_DIR,
"smb.conf.empty")):
logger.error("AD Trust requires the '%s' package",
constants.IPA_ADTRUST_PACKAGE_NAME)
logger.error(
"Please install the package and start the installation again")
return False
#TODO: Add check for needed samba4 libraries
return True
def ipa_smb_conf_exists():
try:
conf_fd = open(paths.SMB_CONF, 'r')
except IOError as err:
if err.errno == errno.ENOENT:
return False
else:
raise
lines = conf_fd.readlines()
conf_fd.close()
for line in lines:
if line.startswith('### Added by IPA Installer ###'):
return True
return False
def check_netbios_name(name):
# Empty NetBIOS name is not allowed
if not name:
return False
# NetBIOS names may not be longer than 15 allowed characters
invalid_netbios_name = any([
len(name) > 15,
''.join([c for c in name if c not in ALLOWED_NETBIOS_CHARS])
])
return not invalid_netbios_name
def make_netbios_name(s):
return ''.join([c for c in s.split('.')[0].upper() \
if c in ALLOWED_NETBIOS_CHARS])[:15]
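# Illustrative results (assuming ALLOWED_NETBIOS_CHARS is A-Z, 0-9 and '-'):
#   make_netbios_name('server.example.com')   # -> 'SERVER'
#   check_netbios_name('EXAMPLE')             # -> True
#   check_netbios_name('ANAMELONGERTHAN15')   # -> False, over 15 chars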
def map_Guests_to_nobody():
env = {'LC_ALL': 'C'}
args = [paths.NET, '-s', '/dev/null', 'groupmap', 'add',
'sid=S-1-5-32-546', 'unixgroup=nobody', 'type=builtin']
logger.debug("Map BUILTIN\\Guests to a group 'nobody'")
ipautil.run(args, env=env, raiseonerr=False, capture_error=True)
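# The command constructed above is equivalent to running:
#   net -s /dev/null groupmap add sid=S-1-5-32-546 unixgroup=nobody \
#       type=builtin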
def get_idmap_range(realm):
idrange = api.Command.idrange_show('{}_id_range'.format(realm))['result']
range_start = int(idrange['ipabaseid'][0])
range_size = int(idrange['ipaidrangesize'][0])
range_fmt = '{} - {}'.format(range_start, range_start + range_size)
return range_fmt
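# Returns a formatted range string such as '1861600000 - 1861800000'
# (illustrative numbers derived from ipabaseid and ipaidrangesize).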
class ADTRUSTInstance(service.Service):
ATTR_SID = "ipaNTSecurityIdentifier"
ATTR_FLAT_NAME = "ipaNTFlatName"
ATTR_GUID = "ipaNTDomainGUID"
ATTR_FALLBACK_GROUP = "ipaNTFallbackPrimaryGroup"
OBJC_USER = "ipaNTUserAttrs"
OBJC_GROUP = "ipaNTGroupAttrs"
OBJC_DOMAIN = "ipaNTDomainAttrs"
FALLBACK_GROUP_NAME = u'Default SMB Group'
SERVER_ROLE_OLD = "CLASSIC PRIMARY DOMAIN CONTROLLER"
SERVER_ROLE_NEW = "IPA PRIMARY DOMAIN CONTROLLER"
def __init__(self, fstore=None, fulltrust=True):
self.netbios_name = None
self.reset_netbios_name = None
self.add_sids = None
self.smbd_user = None
self.smb_dn_pwd = None
self.trust_dn = None
self.smb_dom_dn = None
self.sub_dict = None
self.rid_base = None
self.secondary_rid_base = None
self.fqdn = None
self.host_netbios_name = None
self.fulltrust = fulltrust
if self.fulltrust:
super(ADTRUSTInstance, self).__init__(
"smb", service_desc="CIFS", fstore=fstore,
service_prefix=u'cifs',
keytab=paths.SAMBA_KEYTAB)
else:
super(ADTRUSTInstance, self).__init__("SID generation")
self.__setup_default_attributes()
def __setup_default_attributes(self):
"""
This method setups default attributes that are either constants, or
based on api.env attributes, such as realm, hostname or domain name.
"""
# Constants
self.smb_conf = paths.SMB_CONF
self.cifs_hosts = []
# Values obtained from API.env
self.fqdn = self.fqdn or api.env.host
self.host_netbios_name = make_netbios_name(self.fqdn)
self.realm = self.realm or api.env.realm
if not self.netbios_name:
self.netbios_name = make_netbios_name(self.realm)
self.suffix = ipautil.realm_to_suffix(self.realm)
self.ldapi_socket = "%%2fvar%%2frun%%2fslapd-%s.socket" % \
ipaldap.realm_to_serverid(self.realm)
# DN definitions
self.trust_dn = DN(api.env.container_trusts, self.suffix)
self.smb_dn = DN(('cn', 'adtrust agents'),
api.env.container_sysaccounts,
self.suffix)
self.smb_dom_dn = DN(('cn', api.env.domain),
api.env.container_cifsdomains,
self.suffix)
if self.fulltrust:
self.cifs_agent = DN(('krbprincipalname', self.principal.lower()),
api.env.container_service,
self.suffix)
self.host_princ = DN(('fqdn', self.fqdn),
api.env.container_host,
self.suffix)
def __gen_sid_string(self):
sub_ids = struct.unpack("<LLL", os.urandom(12))
return "S-1-5-21-%d-%d-%d" % (sub_ids[0], sub_ids[1], sub_ids[2])
def __add_admin_sids(self):
"""
The IPA admin and the IPA admins group will get the well-known SIDs
used by AD for the administrator and the administrator group.
By default new users belong only to a user private group (UPG) and no
other Posix group since ipausers is not a Posix group anymore. To be
able to add a RID to the primary RID attribute in a PAC a fallback
group is added.
"""
admin_dn = DN(('uid', 'admin'), api.env.container_user,
self.suffix)
admin_group_dn = DN(('cn', 'admins'), api.env.container_group,
self.suffix)
try:
dom_entry = api.Backend.ldap2.get_entry(self.smb_dom_dn)
except errors.NotFound:
self.print_msg("Samba domain object not found")
return
dom_sid = dom_entry.single_value.get(self.ATTR_SID)
if not dom_sid:
self.print_msg("Samba domain object does not have a SID")
return
try:
admin_entry = api.Backend.ldap2.get_entry(admin_dn)
except errors.NotFound:
self.print_msg("IPA admin object not found")
return
try:
admin_group_entry = api.Backend.ldap2.get_entry(admin_group_dn)
except errors.NotFound:
self.print_msg("IPA admin group object not found")
return
if admin_entry.single_value.get(self.ATTR_SID):
self.print_msg("Admin SID already set, nothing to do")
else:
try:
api.Backend.ldap2.modify_s(
admin_dn,
[(ldap.MOD_ADD, "objectclass", self.OBJC_USER),
(ldap.MOD_ADD, self.ATTR_SID, dom_sid + "-500")])
except Exception:
self.print_msg("Failed to modify IPA admin object")
if admin_group_entry.single_value.get(self.ATTR_SID):
self.print_msg("Admin group SID already set, nothing to do")
else:
try:
api.Backend.ldap2.modify_s(
admin_group_dn,
[(ldap.MOD_ADD, "objectclass", self.OBJC_GROUP),
(ldap.MOD_ADD, self.ATTR_SID, dom_sid + "-512")])
except Exception:
self.print_msg("Failed to modify IPA admin group object")
def __add_default_trust_view(self):
default_view_dn = DN(('cn', 'Default Trust View'),
api.env.container_views, self.suffix)
try:
api.Backend.ldap2.get_entry(default_view_dn)
except errors.NotFound:
try:
self._ldap_mod('default-trust-view.ldif', self.sub_dict)
except Exception as e:
self.print_msg("Failed to add default trust view.")
raise e
else:
self.print_msg("Default Trust View already exists.")
# _ldap_mod does not return useful error codes, so we must check again
# if the default trust view was created properly.
try:
api.Backend.ldap2.get_entry(default_view_dn)
except errors.NotFound:
self.print_msg("Failed to add Default Trust View.")
def __add_fallback_group(self):
"""
By default new users belong only to a user private group (UPG) and no
other Posix group since ipausers is not a Posix group anymore. To be
able to add a RID to the primary RID attribute in a PAC a fallback
group is added.
Since this method must be run after a restart of the directory server
to enable the sidgen plugin we have to reconnect to the directory
server.
"""
try:
dom_entry = api.Backend.ldap2.get_entry(self.smb_dom_dn)
except errors.NotFound:
self.print_msg("Samba domain object not found")
return
if dom_entry.single_value.get(self.ATTR_FALLBACK_GROUP):
self.print_msg("Fallback group already set, nothing to do")
return
fb_group_dn = DN(('cn', self.FALLBACK_GROUP_NAME),
api.env.container_group, self.suffix)
try:
api.Backend.ldap2.get_entry(fb_group_dn)
except errors.NotFound:
try:
self._ldap_mod('default-smb-group.ldif', self.sub_dict)
except Exception as e:
self.print_msg("Failed to add fallback group.")
raise e
# _ldap_mod does not return useful error codes, so we must check again
# if the fallback group was created properly.
try:
# Remove entry from cache otherwise get_entry won't find it
api.Backend.ldap2.remove_cache_entry(fb_group_dn)
api.Backend.ldap2.get_entry(fb_group_dn)
except errors.NotFound:
self.print_msg("Failed to add fallback group.")
return
try:
mod = [(ldap.MOD_ADD, self.ATTR_FALLBACK_GROUP, fb_group_dn)]
api.Backend.ldap2.modify_s(self.smb_dom_dn, mod)
except Exception:
self.print_msg("Failed to add fallback group to domain object")
def __add_rid_bases(self):
"""
Add RID bases to the range object for the local ID range.
TODO: handle missing or multiple ranges more gracefully.
"""
try:
# Get the ranges
ranges = api.Backend.ldap2.get_entries(
DN(api.env.container_ranges, self.suffix),
ldap.SCOPE_ONELEVEL, "(objectclass=ipaDomainIDRange)")
ranges_with_no_rid_base = []
for entry in ranges:
sv = entry.single_value
if sv.get('ipaBaseRID') or sv.get('ipaSecondaryBaseRID'):
# skip range where RID base is already set
continue
if sv.get('ipaRangeType') == 'ipa-local-subid':
# ignore subid ranges
continue
ranges_with_no_rid_base.append(entry)
logger.debug(repr(ranges))
logger.debug(repr(ranges_with_no_rid_base))
# Return if no range is without RID base
if len(ranges_with_no_rid_base) == 0:
self.print_msg("RID bases already set, nothing to do")
return
# Abort if RID base needs to be added to more than one range
if len(ranges_with_no_rid_base) != 1:
logger.critical("Found more than one local domain ID "
"range with no RID base set.")
raise RuntimeError("Too many ID ranges\n")
# Abort if RID bases are too close
local_range = ranges_with_no_rid_base[0]
size_value = local_range.single_value.get('ipaIDRangeSize')
try:
size = int(size_value)
except (ValueError, TypeError):
raise RuntimeError(
"ipaIDRangeSize is set to a non-integer value or is not set"
f" at all (got {size_value!r})"
) from None
if abs(self.rid_base - self.secondary_rid_base) < size:
self.print_msg("Primary and secondary RID base are too close. "
"They have to differ at least by %d." % size)
raise RuntimeError("RID bases too close.\n")
# values above
if any(
v + size >= SUBID_RANGE_START
for v in (self.rid_base, self.secondary_rid_base)
):
self.print_msg(
"Ceiling of primary or secondary base is larger than "
f"start of subordinate id range {SUBID_RANGE_START}."
)
raise RuntimeError("RID bases overlap with SUBID range.\n")
# Modify the range
# If the RID bases would cause overlap with some other range,
# this will be detected by ipa-range-check DS plugin
try:
api.Backend.ldap2.modify_s(local_range.dn,
[(ldap.MOD_ADD, "ipaBaseRID",
str(self.rid_base)),
(ldap.MOD_ADD, "ipaSecondaryBaseRID",
str(self.secondary_rid_base))])
except ldap.CONSTRAINT_VIOLATION as e:
self.print_msg("Failed to add RID bases to the local range "
"object:\n %s" % e[0]['info'])
raise RuntimeError("Constraint violation.\n")
except errors.NotFound as e:
logger.critical("ID range of the local domain not found, "
"define it and run again.")
raise e
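# Worked example with the common installer defaults (values illustrative):
# rid_base=1000 and secondary_rid_base=100000000 with ipaIDRangeSize=200000
# differ by far more than 200000, and both 1000+200000 and 100000000+200000
# stay below SUBID_RANGE_START, so the checks above pass.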
def __reset_netbios_name(self):
"""
Set the NetBIOS domain name to a new value.
"""
self.print_msg("Reset NetBIOS domain name")
try:
api.Backend.ldap2.modify_s(self.smb_dom_dn,
[(ldap.MOD_REPLACE, self.ATTR_FLAT_NAME,
self.netbios_name)])
except ldap.LDAPError:
self.print_msg("Failed to reset the NetBIOS domain name")
def __create_samba_domain_object(self):
try:
api.Backend.ldap2.get_entry(self.smb_dom_dn)
if self.reset_netbios_name:
self.__reset_netbios_name()
else:
self.print_msg("Samba domain object already exists")
return
except errors.NotFound:
pass
for new_dn in (self.trust_dn, \
DN(('cn', 'ad'), self.trust_dn), \
DN(api.env.container_cifsdomains, self.suffix)):
try:
api.Backend.ldap2.get_entry(new_dn)
except errors.NotFound:
try:
name = new_dn[1].attr
except Exception as e:
self.print_msg('Cannot extract RDN attribute value from "%s": %s' % \
(new_dn, e))
return
entry = api.Backend.ldap2.make_entry(
new_dn, objectclass=['nsContainer'], cn=[name])
api.Backend.ldap2.add_entry(entry)
entry = api.Backend.ldap2.make_entry(
self.smb_dom_dn,
{
'objectclass': [self.OBJC_DOMAIN, "nsContainer"],
'cn': [api.env.domain],
self.ATTR_FLAT_NAME: [self.netbios_name],
self.ATTR_SID: [self.__gen_sid_string()],
self.ATTR_GUID: [str(uuid.uuid4())],
}
)
#TODO: which MAY attributes do we want to set ?
api.Backend.ldap2.add_entry(entry)
def __write_smb_conf(self):
template = os.path.join(
paths.USR_SHARE_IPA_DIR, "smb.conf.template"
)
conf = ipautil.template_file(template, self.sub_dict)
with open(self.smb_conf, "w") as f:
f.write(conf)
def __add_plugin_conf(self, name, plugin_cn, ldif_file):
"""
Add directory server plugin configuration if it does not
already exist.
"""
try:
plugin_dn = DN(('cn', plugin_cn), ('cn', 'plugins'),
('cn', 'config'))
api.Backend.ldap2.get_entry(plugin_dn)
self.print_msg('%s plugin already configured, nothing to do' % name)
except errors.NotFound:
try:
self._ldap_mod(ldif_file, self.sub_dict)
except Exception:
pass
def __add_cldap_module(self):
"""
Add cldap directory server plugin configuration if it does not
already exist.
"""
self.__add_plugin_conf('CLDAP', 'ipa_cldap', 'ipa-cldap-conf.ldif')
def __add_sidgen_task(self):
"""
Add sidgen directory server plugin configuration and the related task
if they do not already exist.
"""
self.__add_plugin_conf('Sidgen task', 'ipa-sidgen-task',
'ipa-sidgen-task-conf.ldif')
def __add_sids(self):
"""
Add SIDs for existing users and groups. Make sure the task is finished
before continuing.
"""
try:
# Start the sidgen task
self._ldap_mod("ipa-sidgen-task-run.ldif", self.sub_dict)
# Notify the user about the possible delay
self.print_msg("This step may take considerable amount of time, please wait..")
# Wait for the task to complete
task_dn = DN('cn=sidgen,cn=ipa-sidgen-task,cn=tasks,cn=config')
wait_for_task(api.Backend.ldap2, task_dn)
except Exception as e:
logger.warning("Exception occured during SID generation: %s",
str(e))
def __add_s4u2proxy_target(self):
"""
Add CIFS principal to S4U2Proxy target
"""
targets_dn = DN(('cn', 'ipa-cifs-delegation-targets'), ('cn', 's4u2proxy'),
('cn', 'etc'), self.suffix)
try:
current = api.Backend.ldap2.get_entry(targets_dn)
members = current.get('memberPrincipal', [])
if self.principal not in members:
current["memberPrincipal"] = members + [self.principal]
api.Backend.ldap2.update_entry(current)
else:
self.print_msg('cifs principal already targeted, nothing to do.')
except errors.NotFound:
self.print_msg(UPGRADE_ERROR % dict(dn=targets_dn))
except errors.EmptyModlist:
pass
def __write_smb_registry(self):
"""Import IPA specific config into Samba registry
Configuration is imported after __write_smb_conf() has modified
smb.conf to include registry.
"""
template = os.path.join(
paths.USR_SHARE_IPA_DIR, "smb.conf.registry.template"
)
conf = ipautil.template_file(template, self.sub_dict)
with tempfile.NamedTemporaryFile(mode='w') as tmp_conf:
tmp_conf.write(conf)
tmp_conf.flush()
try:
ipautil.run([paths.NET, "conf", "import", tmp_conf.name])
except ipautil.CalledProcessError as e:
if e.returncode == 255:
# We have old Samba that doesn't support IPA DC server role
# re-try again with the older variant, upgrade code will
# take care to change the role later when Samba is upgraded
# as well.
self.sub_dict['SERVER_ROLE'] = self.SERVER_ROLE_OLD
self.__write_smb_registry()
def __map_Guests_to_nobody(self):
map_Guests_to_nobody()
def __setup_group_membership(self):
# Add the CIFS and host principals to the 'adtrust agents' group
# as 389-ds only operates with GroupOfNames, we have to use
# the principal's proper dn as defined in self.cifs_agent
service.add_principals_to_group(
api.Backend.ldap2, self.smb_dn, "member",
[self.cifs_agent, self.host_princ])
def clean_previous_keytab(self, keytab=None):
"""
Purge old CIFS keys from samba and clean up samba ccache
"""
self.clean_samba_keytab()
ipautil.remove_ccache(paths.KRB5CC_SAMBA)
def set_keytab_owner(self, keytab=None, owner=None):
"""
Do not re-set ownership of samba keytab
"""
def clean_samba_keytab(self):
if os.path.exists(self.keytab):
try:
ipautil.run([
paths.IPA_RMKEYTAB, "--principal", self.principal,
"-k", self.keytab
])
except ipautil.CalledProcessError as e:
if e.returncode != 5:
logger.critical("Failed to remove old key for %s",
self.principal)
def __configure_selinux_for_smbd(self):
try:
tasks.set_selinux_booleans(constants.SELINUX_BOOLEAN_ADTRUST,
self.backup_state)
except ipapython.errors.SetseboolError as e:
self.print_msg(e.format_service_warning('adtrust service'))
def __mod_krb5_conf(self):
"""
Set dns_lookup_kdc to true and master_kdc in /etc/krb5.conf
"""
if not self.fqdn or not self.realm:
self.print_msg("Cannot modify /etc/krb5.conf")
krbconf = (
ipachangeconf.IPAChangeConf("IPA Installer"))
krbconf.setOptionAssignment((" = ", " "))
krbconf.setSectionNameDelimiters(("[", "]"))
krbconf.setSubSectionDelimiters(("{", "}"))
krbconf.setIndent(("", " ", " "))
libopts = [{'name':'dns_lookup_kdc', 'type':'option', 'action':'set',
'value':'true'}]
master_kdc = self.fqdn + ":88"
kropts = [{'name':'master_kdc', 'type':'option', 'action':'set',
'value':master_kdc}]
ropts = [{'name':self.realm, 'type':'subsection', 'action':'set',
'value':kropts}]
opts = [{'name':'libdefaults', 'type':'section', 'action':'set',
'value':libopts},
{'name':'realms', 'type':'section', 'action':'set',
'value':ropts}]
krbconf.changeConf(paths.KRB5_CONF, opts)
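# The resulting krb5.conf additions look roughly like this (realm and host
# names illustrative):
#   [libdefaults]
#     dns_lookup_kdc = true
#   [realms]
#     EXAMPLE.COM = {
#       master_kdc = server.example.com:88
#     }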
def __update_krb5_conf(self):
"""
Update /etc/krb5.conf if needed
"""
try:
krb5conf = open(paths.KRB5_CONF, 'r')
except IOError as e:
self.print_msg("Cannot open /etc/krb5.conf (%s)\n" % str(e))
return
has_dns_lookup_kdc_true = False
for line in krb5conf:
regex = r"^\s*dns_lookup_kdc\s*=\s*[Tt][Rr][Uu][Ee]\s*$"
if re.match(regex, line):
has_dns_lookup_kdc_true = True
break
krb5conf.close()
if not has_dns_lookup_kdc_true:
self.__mod_krb5_conf()
else:
self.print_msg("'dns_lookup_kdc' already set to 'true', "
"nothing to do.")
def __check_replica(self):
try:
cifs_services = DN(api.env.container_service, self.suffix)
# Search for cifs services which also belong to adtrust agents; these are our DCs
res = api.Backend.ldap2.get_entries(cifs_services,
ldap.SCOPE_ONELEVEL,
"(&(krbprincipalname=cifs/*@%s)(memberof=%s))" % (self.realm, str(self.smb_dn)))
if len(res) > 1:
# there are other CIFS services defined, we are not alone
for entry in res:
managedBy = entry.single_value.get('managedBy')
if managedBy:
fqdn = DN(managedBy)['fqdn']
if fqdn != unicode(self.fqdn):
# this is CIFS service of a different host in our
# REALM, we need to remember it to announce via
# SRV records for _msdcs
self.cifs_hosts.append(normalize_zone(fqdn))
except Exception as e:
logger.critical("Checking replicas for cifs principals failed "
"with error '%s'", e)
def __enable_compat_tree(self):
try:
compat_plugin_dn = DN("cn=Schema Compatibility,cn=plugins,cn=config")
lookup_nsswitch_name = "schema-compat-lookup-nsswitch"
for config in (("cn=users", "user"), ("cn=groups", "group")):
entry_dn = DN(config[0], compat_plugin_dn)
current = api.Backend.ldap2.get_entry(entry_dn)
lookup_nsswitch = current.get(lookup_nsswitch_name, [])
if not(config[1] in lookup_nsswitch):
current[lookup_nsswitch_name] = [config[1]]
api.Backend.ldap2.update_entry(current)
except Exception as e:
logger.critical("Enabling nsswitch support in slapi-nis failed "
"with error '%s'", e)
def __validate_server_hostname(self):
hostname = socket.gethostname()
if hostname != self.fqdn:
raise ValueError("Host reports different name than configured: "
"'%s' versus '%s'. Samba requires to have "
"the same hostname or Kerberos principal "
"'cifs/%s' will not be found in Samba keytab." %
(hostname, self.fqdn, self.fqdn))
def __start(self):
try:
self.start()
services.service('winbind', api).start()
except Exception:
logger.critical("CIFS services failed to start")
def __stop(self):
self.backup_state("running", self.is_running())
try:
services.service('winbind', api).stop()
self.stop()
except Exception:
pass
def __restart_dirsrv(self):
try:
installutils.restart_dirsrv()
except Exception:
pass
def __restart_smb(self):
try:
services.knownservices.smb.restart()
except Exception:
pass
def __enable(self):
self.backup_state("enabled", self.is_enabled())
# We do not let the system start IPA components on its own,
# Instead we rely on the IPA init script to start only enabled
# components as found in our LDAP configuration tree
# Note that self.dm_password is None for ADTrustInstance because
# we ensure to be called as root and using ldapi to use autobind
try:
self.ldap_configure('ADTRUST', self.fqdn, None, self.suffix)
except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
logger.info("ADTRUST Service startup entry already exists.")
try:
self.ldap_configure('EXTID', self.fqdn, None, self.suffix)
except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
logger.info("EXTID Service startup entry already exists.")
def __setup_sub_dict(self):
self.sub_dict = dict(
REALM=self.realm,
SUFFIX=self.suffix,
NETBIOS_NAME=self.netbios_name,
HOST_NETBIOS_NAME=self.host_netbios_name,
SMB_DN=self.smb_dn,
LDAPI_SOCKET=self.ldapi_socket,
FQDN=self.fqdn,
SAMBA_DIR=paths.SAMBA_DIR,
SERVER_ROLE=self.SERVER_ROLE_NEW,
)
def setup(self, fqdn, realm_name, netbios_name,
reset_netbios_name, rid_base, secondary_rid_base,
add_sids=False, smbd_user="samba",
enable_compat=False):
self.fqdn = fqdn
self.realm = realm_name
self.netbios_name = netbios_name
self.reset_netbios_name = reset_netbios_name
self.rid_base = rid_base
self.secondary_rid_base = secondary_rid_base
self.add_sids = add_sids
self.enable_compat = enable_compat
self.smbd_user = smbd_user
# Setup constants and attributes derived from the values above
self.__setup_default_attributes()
self.__setup_sub_dict()
def find_local_id_range(self):
if api.Backend.ldap2.get_entries(
DN(api.env.container_ranges, self.suffix),
ldap.SCOPE_ONELEVEL,
"(objectclass=ipaDomainIDRange)"):
return
try:
entry = api.Backend.ldap2.get_entry(
DN(('cn', 'admins'), api.env.container_group, self.suffix))
except errors.NotFound:
raise ValueError("No local ID range and no admins group found.\n" \
"Add local ID range manually and try again!")
base_id = int(entry.single_value['gidNumber'])
id_range_size = 200000
id_filter = "(&" \
"(|(objectclass=posixAccount)" \
"(objectclass=posixGroup)" \
"(objectclass=ipaIDObject))" \
"(|(uidNumber<=%d)(uidNumber>=%d)" \
"(gidNumber<=%d)(gidNumner>=%d)))" % \
((base_id - 1), (base_id + id_range_size),
(base_id - 1), (base_id + id_range_size))
if api.Backend.ldap2.get_entries(DN(('cn', 'accounts'), self.suffix),
ldap.SCOPE_SUBTREE, id_filter):
raise ValueError("There are objects with IDs out of the expected" \
"range.\nAdd local ID range manually and try " \
"again!")
entry = api.Backend.ldap2.make_entry(
DN(
('cn', ('%s_id_range' % self.realm)),
api.env.container_ranges, self.suffix),
objectclass=['ipaDomainIDRange'],
cn=['%s_id_range' % self.realm],
ipaBaseID=[str(base_id)],
ipaIDRangeSize=[str(id_range_size)],
)
api.Backend.ldap2.add_entry(entry)
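# With e.g. base_id=1000 and id_range_size=200000, the id_filter above
# matches posix/IPA ID objects whose uidNumber or gidNumber fall outside
# the planned range, i.e. <= 999 or >= 201000 (illustrative values).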
def __retrieve_local_range(self):
"""Retrieves local IPA ID range to make sure
"""
self.sub_dict['IPA_LOCAL_RANGE'] = get_idmap_range(self.realm)
def create_instance(self):
if self.fulltrust:
self.step("validate server hostname",
self.__validate_server_hostname)
self.step("stopping smbd", self.__stop)
self.step("adding RID bases", self.__add_rid_bases)
self.step("creating samba domain object", \
self.__create_samba_domain_object)
if self.fulltrust:
self.step("retrieve local idmap range",
self.__retrieve_local_range)
self.step("writing samba config file", self.__write_smb_conf)
self.step("creating samba config registry",
self.__write_smb_registry)
self.step("adding cifs Kerberos principal",
self.request_service_keytab)
self.step("adding cifs and host Kerberos principals to the "
"adtrust agents group",
self.__setup_group_membership)
self.step("check for cifs services defined on other replicas",
self.__check_replica)
self.step("adding cifs principal to S4U2Proxy targets",
self.__add_s4u2proxy_target)
self.step("adding admin(group) SIDs", self.__add_admin_sids)
self.step("updating Kerberos config", self.__update_krb5_conf)
if self.fulltrust:
self.step("activating CLDAP plugin", self.__add_cldap_module)
self.step("activating sidgen task", self.__add_sidgen_task)
if self.fulltrust:
self.step("map BUILTIN\\Guests to nobody group",
self.__map_Guests_to_nobody)
self.step("configuring smbd to start on boot", self.__enable)
if self.enable_compat:
self.step("enabling trusted domains support for older clients via "
"Schema Compatibility plugin",
self.__enable_compat_tree)
self.step("restarting Directory Server to take MS PAC and LDAP "
"plugins changes into account",
self.__restart_dirsrv)
self.step("adding fallback group", self.__add_fallback_group)
if self.fulltrust:
self.step("adding Default Trust View",
self.__add_default_trust_view)
self.step("setting SELinux booleans",
self.__configure_selinux_for_smbd)
self.step("starting CIFS services", self.__start)
if self.add_sids:
self.step("adding SIDs to existing users and groups",
self.__add_sids)
if self.fulltrust:
self.step("restarting smbd", self.__restart_smb)
self.start_creation(show_service_name=False)
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring %s" % self.service_name)
# Call restore_state so that we do not leave mess in the statestore
# Otherwise this does nothing
self.restore_state("running")
self.restore_state("enabled")
winbind = services.service("winbind", api)
# Always try to stop and disable smb service, since we do not leave
# working configuration after uninstall
try:
self.stop()
self.disable()
winbind.stop()
winbind.disable()
except Exception:
pass
# Since we do not guarantee restoring back to working samba state,
# we should not restore smb.conf
# Restore the state of affected selinux booleans
boolean_states = {name: self.restore_state(name)
for name in constants.SELINUX_BOOLEAN_ADTRUST}
try:
tasks.set_selinux_booleans(boolean_states)
except ipapython.errors.SetseboolError as e:
self.print_msg('WARNING: ' + str(e))
# Remove samba's credentials cache
ipautil.remove_ccache(ccache_path=paths.KRB5CC_SAMBA)
# Remove samba's configuration file
ipautil.remove_file(self.smb_conf)
# Remove samba's persistent and temporary tdb files
# in /var/lib/samba and /var/lib/samba/private
for smbpath in (paths.SAMBA_DIR,
os.path.join(paths.SAMBA_DIR, "private"),
os.path.join(paths.SAMBA_DIR, "lock")):
if os.path.isdir(smbpath):
tdb_files = [
os.path.join(smbpath, tdb_file)
for tdb_file in os.listdir(smbpath)
if tdb_file.endswith(".tdb")
]
for tdb_file in tdb_files:
ipautil.remove_file(tdb_file)
# Remove our keys from samba's keytab
self.clean_samba_keytab()
| 37,574 | Python | .py | 830 | 32.881928 | 96 | 0.571257 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,774 | ipa_replica_install.py | freeipa_freeipa/ipaserver/install/ipa_replica_install.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
from ipapython.install import cli
from ipapython.install.core import knob, extend_knob
from ipaplatform.paths import paths
from ipaserver.install.server import ServerReplicaInstall
class CompatServerReplicaInstall(ServerReplicaInstall):
ca_cert_files = None
all_ip_addresses = False
no_wait_for_dns = True
nisdomain = None
no_nisdomain = False
no_sudo = False
request_cert = False
ca_file = None
zonemgr = None
replica_install = True # Used in ServerInstallInterface.__init__
auto_password = knob(
str, None,
description="Password to join the IPA realm. Assumes bulk password "
"unless principal is also set. (domain level 1+) "
"Directory Manager (existing master) password. (domain "
"level 0)",
sensitive=True,
cli_names=['--password', '-p'],
cli_metavar='PASSWORD',
)
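# Illustrative invocations of the knob above (flags as defined in cli_names):
#   ipa-replica-install --password <one-time-password>    # bulk enrolment
#   ipa-replica-install -p <Directory Manager password>   # domain level 0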
@property
def dm_password(self):
try:
return self.__dm_password
except AttributeError:
pass
return super(CompatServerReplicaInstall, self).dm_password
@dm_password.setter
def dm_password(self, value):
self.__dm_password = value
ip_addresses = extend_knob(
ServerReplicaInstall.ip_addresses,
description="Replica server IP Address. This option can be used "
"multiple times",
)
admin_password = ServerReplicaInstall.admin_password
admin_password = extend_knob(
admin_password,
cli_names=list(admin_password.cli_names) + ['-w'],
)
@admin_password.default_getter
def admin_password(self):
if self.principal:
return self.auto_password
return super(CompatServerReplicaInstall, self).admin_password
@property
def host_password(self):
admin_password = (
super(CompatServerReplicaInstall, self).admin_password)
if not self.principal or admin_password:
return self.auto_password
return super(CompatServerReplicaInstall, self).host_password
ReplicaInstall = cli.install_tool(
CompatServerReplicaInstall,
command_name='ipa-replica-install',
log_file_name=paths.IPAREPLICA_INSTALL_LOG,
console_format='%(message)s',
debug_option=True,
verbose=True,
)
def run():
ReplicaInstall.run_cli()
| 2,488 | Python | .py | 71 | 27.985915 | 76 | 0.673614 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,775 | httpinstance.py | freeipa_freeipa/ipaserver/install/httpinstance.py |
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from __future__ import absolute_import
import logging
import os
import glob
import shlex
import shutil
import tempfile
from augeas import Augeas
import dbus
from ipalib.install import certmonger
from ipapython import ipaldap
from ipaserver.install import replication
from ipaserver.install import service
from ipaserver.install import certs
from ipaserver.install import installutils
from ipapython import directivesetter
from ipapython import dogtag
from ipapython import ipautil
from ipapython.dn import DN
import ipapython.errors
from ipaserver.install import sysupgrade
from ipalib import api, x509
from ipalib.constants import IPAAPI_USER, MOD_SSL_VERIFY_DEPTH, IPA_CA_RECORD
from ipaplatform.constants import constants
from ipaplatform.tasks import tasks
from ipaplatform.paths import paths
from ipaplatform import services
logger = logging.getLogger(__name__)
HTTPD_USER = constants.HTTPD_USER
KDCPROXY_USER = constants.KDCPROXY_USER
OCSP_DIRECTIVE = 'SSLOCSPEnable'
OCSP_ENABLED = 'ocsp_enabled'
class WebGuiInstance(service.SimpleServiceInstance):
def __init__(self):
service.SimpleServiceInstance.__init__(self, "ipa_webgui")
class HTTPInstance(service.Service):
def __init__(self, fstore=None, cert_nickname='Server-Cert',
api=api):
super(HTTPInstance, self).__init__(
"httpd",
service_desc="the web interface",
fstore=fstore,
api=api,
service_prefix=u'HTTP',
service_user=HTTPD_USER,
keytab=paths.HTTP_KEYTAB)
self.cacert_nickname = None
self.cert_nickname = cert_nickname
self.ca_is_configured = True
self.keytab_user = constants.GSSPROXY_USER
subject_base = ipautil.dn_attribute_property('_subject_base')
def create_instance(self, realm, fqdn, domain_name, dm_password=None,
pkcs12_info=None,
subject_base=None, auto_redirect=True, ca_file=None,
ca_is_configured=None, promote=False,
master_fqdn=None):
self.fqdn = fqdn
self.realm = realm
self.domain = domain_name
self.dm_password = dm_password
self.suffix = ipautil.realm_to_suffix(self.realm)
self.pkcs12_info = pkcs12_info
self.cert = None
self.subject_base = subject_base
self.sub_dict = dict(
REALM=realm,
FQDN=fqdn,
DOMAIN=self.domain,
AUTOREDIR='' if auto_redirect else '#',
CRL_PUBLISH_PATH=paths.PKI_CA_PUBLISH_DIR,
FONTS_DIR=paths.FONTS_DIR,
FONTS_OPENSANS_DIR=paths.FONTS_OPENSANS_DIR,
FONTS_FONTAWESOME_DIR=paths.FONTS_FONTAWESOME_DIR,
GSSAPI_SESSION_KEY=paths.GSSAPI_SESSION_KEY,
IPA_CUSTODIA_SOCKET=paths.IPA_CUSTODIA_SOCKET,
IPA_CCACHES=paths.IPA_CCACHES,
WSGI_PREFIX_DIR=paths.WSGI_PREFIX_DIR,
WSGI_PROCESSES=constants.WSGI_PROCESSES,
)
self.ca_file = ca_file
if ca_is_configured is not None:
self.ca_is_configured = ca_is_configured
self.promote = promote
self.master_fqdn = master_fqdn
self.step("stopping httpd", self.__stop)
self.step("backing up ssl.conf", self.backup_ssl_conf)
self.step("disabling nss.conf", self.disable_nss_conf)
self.step("configuring mod_ssl certificate paths",
self.configure_mod_ssl_certs)
self.step("setting mod_ssl protocol list",
self.set_mod_ssl_protocol)
self.step("configuring mod_ssl log directory",
self.set_mod_ssl_logdir)
self.step("disabling mod_ssl OCSP", self.disable_mod_ssl_ocsp)
self.step("adding URL rewriting rules", self.__add_include)
self.step("configuring httpd", self.__configure_http)
self.step("setting up httpd keytab", self.request_service_keytab)
self.step("configuring Gssproxy", self.configure_gssproxy)
self.step("setting up ssl", self.__setup_ssl)
if self.ca_is_configured:
self.step("configure certmonger for renewals",
self.configure_certmonger_renewal_guard)
self.step("publish CA cert", self.__publish_ca_cert)
self.step("clean up any existing httpd ccaches",
self.remove_httpd_ccaches)
self.step("enable ccache sweep",
self.enable_ccache_sweep)
self.step("configuring SELinux for httpd", self.configure_selinux_for_httpd)
if not self.is_kdcproxy_configured():
self.step("create KDC proxy config", self.create_kdcproxy_conf)
self.step("enable KDC proxy", self.enable_kdcproxy)
self.step("starting httpd", self.start)
self.step("configuring httpd to start on boot", self.__enable)
self.step("enabling oddjobd", self.enable_and_start_oddjobd)
self.start_creation()
def __stop(self):
self.backup_state("running", self.is_running())
self.stop()
def __enable(self):
self.backup_state("enabled", self.is_enabled())
# We do not let the system start IPA components on its own,
# Instead we rely on the IPA init script to start only enabled
# components as found in our LDAP configuration tree
self.ldap_configure('HTTP', self.fqdn, None, self.suffix)
def configure_selinux_for_httpd(self):
try:
tasks.set_selinux_booleans(constants.SELINUX_BOOLEAN_HTTPD,
self.backup_state)
except ipapython.errors.SetseboolError as e:
self.print_msg(e.format_service_warning('web interface'))
def remove_httpd_ccaches(self):
# Clean up existing ccaches
# Make sure that empty env is passed to avoid passing KRB5CCNAME from
# current env
shutil.rmtree(paths.IPA_CCACHES)
ipautil.run(
[paths.SYSTEMD_TMPFILES, '--create', '--prefix', paths.IPA_CCACHES]
)
def enable_ccache_sweep(self):
ipautil.run(
[paths.SYSTEMCTL, 'enable', 'ipa-ccache-sweep.timer']
)
def __configure_http(self):
self.update_httpd_service_ipa_conf()
self.update_httpd_wsgi_conf()
# create /etc/httpd/alias, see https://pagure.io/freeipa/issue/7529
session_dir = os.path.dirname(self.sub_dict['GSSAPI_SESSION_KEY'])
if not os.path.isdir(session_dir):
os.makedirs(session_dir)
# Must be world-readable / executable
os.chmod(session_dir, 0o755)
# Restore SELinux context of session_dir /etc/httpd/alias, see
# https://pagure.io/freeipa/issue/7662
tasks.restore_context(session_dir)
target_fname = paths.HTTPD_IPA_CONF
http_txt = ipautil.template_file(
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa.conf.template"),
self.sub_dict)
self.fstore.backup_file(paths.HTTPD_IPA_CONF)
http_fd = open(target_fname, "w")
http_fd.write(http_txt)
http_fd.close()
os.chmod(target_fname, 0o644)
target_fname = paths.HTTPD_IPA_REWRITE_CONF
http_txt = ipautil.template_file(
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa-rewrite.conf.template"),
self.sub_dict)
self.fstore.backup_file(paths.HTTPD_IPA_REWRITE_CONF)
http_fd = open(target_fname, "w")
http_fd.write(http_txt)
http_fd.close()
os.chmod(target_fname, 0o644)
def configure_gssproxy(self):
tasks.configure_http_gssproxy_conf(IPAAPI_USER)
services.knownservices.gssproxy.restart()
def get_mod_nss_nickname(self):
cert = directivesetter.get_directive(paths.HTTPD_NSS_CONF,
'NSSNickname')
nickname = directivesetter.unquote_directive_value(cert,
quote_char="'")
return nickname
def backup_ssl_conf(self):
self.fstore.backup_file(paths.HTTPD_SSL_CONF)
self.fstore.backup_file(paths.HTTPD_SSL_SITE_CONF)
def disable_nss_conf(self):
"""
Back up the original nss.conf file and replace it with an empty one.
The empty file avoids recreation of nss.conf in case the package is
reinstalled.
There is no safe way for mod_nss to co-exist with mod_ssl, since there
is no spare port for mod_nss to use, so disable it completely.
"""
if os.path.exists(paths.HTTPD_NSS_CONF):
# check that we don't have a backup already
# (mod_nss -> mod_ssl upgrade scenario)
if not self.fstore.has_file(paths.HTTPD_NSS_CONF):
self.fstore.backup_file(paths.HTTPD_NSS_CONF)
open(paths.HTTPD_NSS_CONF, 'w').close()
def set_mod_ssl_protocol(self):
tasks.configure_httpd_protocol()
def set_mod_ssl_logdir(self):
tasks.setup_httpd_logging()
def disable_mod_ssl_ocsp(self):
if sysupgrade.get_upgrade_state('http', OCSP_ENABLED) is None:
self.__disable_mod_ssl_ocsp()
sysupgrade.set_upgrade_state('http', OCSP_ENABLED, False)
def __disable_mod_ssl_ocsp(self):
aug = Augeas(flags=Augeas.NO_LOAD | Augeas.NO_MODL_AUTOLOAD)
aug.set('/augeas/load/Httpd/lens', 'Httpd.lns')
aug.set('/augeas/load/Httpd/incl', paths.HTTPD_SSL_CONF)
aug.load()
path = '/files{}/VirtualHost'.format(paths.HTTPD_SSL_CONF)
ocsp_path = '{}/directive[.="{}"]'.format(path, OCSP_DIRECTIVE)
ocsp_arg = '{}/arg'.format(ocsp_path)
ocsp_comment = '{}/#comment[.="{}"]'.format(path, OCSP_DIRECTIVE)
ocsp_dir = aug.get(ocsp_path)
# If the SSLOCSPEnable directive is present in the ssl.conf file, comment
# it out; otherwise do nothing.
if ocsp_dir is not None:
ocsp_state = aug.get(ocsp_arg)
aug.remove(ocsp_arg)
aug.rename(ocsp_path, '#comment')
aug.set(ocsp_comment, '{} {}'.format(OCSP_DIRECTIVE, ocsp_state))
aug.save()
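# Sketch of the transformation performed above (directive value assumed):
#   SSLOCSPEnable on   ->   # SSLOCSPEnable on
# i.e. the directive is renamed to a comment node that keeps its argument.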
def __add_include(self):
"""This should run after __set_mod_nss_port so is already backed up"""
if installutils.update_file(paths.HTTPD_SSL_SITE_CONF,
'</VirtualHost>',
'Include {path}\n'
'</VirtualHost>'.format(
path=paths.HTTPD_IPA_REWRITE_CONF)
) != 0:
self.print_msg("Adding Include conf.d/ipa-rewrite to "
"%s failed." % paths.HTTPD_SSL_SITE_CONF)
def configure_certmonger_renewal_guard(self):
certmonger = services.knownservices.certmonger
certmonger_stopped = not certmonger.is_running()
if certmonger_stopped:
certmonger.start()
try:
bus = dbus.SystemBus()
obj = bus.get_object('org.fedorahosted.certmonger',
'/org/fedorahosted/certmonger')
iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
path = iface.find_ca_by_nickname('IPA')
if path:
ca_obj = bus.get_object('org.fedorahosted.certmonger', path)
ca_iface = dbus.Interface(ca_obj,
'org.freedesktop.DBus.Properties')
helper = ca_iface.Get('org.fedorahosted.certmonger.ca',
'external-helper')
if helper:
args = shlex.split(helper)
if args[0] != paths.IPA_SERVER_GUARD:
self.backup_state('certmonger_ipa_helper', helper)
args = [paths.IPA_SERVER_GUARD] + args
helper = ' '.join(shlex.quote(a) for a in args)
ca_iface.Set('org.fedorahosted.certmonger.ca',
'external-helper', helper)
finally:
if certmonger_stopped:
certmonger.stop()
def __setup_ssl(self):
key_passwd_file = paths.HTTPD_PASSWD_FILE_FMT.format(host=api.env.host)
with open(key_passwd_file, 'wb') as f:
os.fchmod(f.fileno(), 0o600)
pkey_passwd = ipautil.ipa_generate_password().encode('utf-8')
f.write(pkey_passwd)
if self.pkcs12_info:
p12_certs, p12_priv_keys = certs.pkcs12_to_certkeys(
*self.pkcs12_info)
keys_dict = {
k.public_key().public_numbers(): k
for k in p12_priv_keys
}
certs_keys = [
(c, keys_dict.get(c.public_key().public_numbers()))
for c in p12_certs
]
server_certs_keys = [
(c, k) for c, k in certs_keys if k is not None
]
if not server_certs_keys:
raise RuntimeError(
"Could not find a suitable server cert in import in %s"
% self.pkcs12_info[0]
)
# We only handle one server cert
self.cert = server_certs_keys[0][0]
x509.write_certificate(self.cert, paths.HTTPD_CERT_FILE)
x509.write_pem_private_key(
server_certs_keys[0][1],
paths.HTTPD_KEY_FILE,
passwd=pkey_passwd
)
if self.ca_is_configured:
self.start_tracking_certificates()
self.add_cert_to_service()
else:
if not self.promote:
ca_args = [
paths.CERTMONGER_DOGTAG_SUBMIT,
'--ee-url', 'https://%s:8443/ca/ee/ca' % self.fqdn,
'--certfile', paths.RA_AGENT_PEM,
'--keyfile', paths.RA_AGENT_KEY,
'--cafile', paths.IPA_CA_CRT,
'--agent-submit'
]
helper = " ".join(ca_args)
prev_helper = certmonger.modify_ca_helper('IPA', helper)
else:
prev_helper = None
try:
# In migration case, if CA server is older version it may not
# have codepaths to support the ipa-ca.$DOMAIN dnsName in HTTP
# cert. Therefore if request fails, try again without the
# ipa-ca.$DOMAIN dnsName.
args = dict(
certpath=(paths.HTTPD_CERT_FILE, paths.HTTPD_KEY_FILE),
principal=self.principal,
subject=str(DN(('CN', self.fqdn), self.subject_base)),
ca='IPA',
profile=dogtag.DEFAULT_PROFILE,
dns=[self.fqdn, f'{IPA_CA_RECORD}.{api.env.domain}'],
post_command='restart_httpd',
storage='FILE',
passwd_fname=key_passwd_file,
resubmit_timeout=api.env.certmonger_wait_timeout,
stop_tracking_on_error=True,
)
try:
certmonger.request_and_wait_for_cert(**args)
except Exception:
args['dns'] = [self.fqdn] # remove ipa-ca.$DOMAIN
args['stop_tracking_on_error'] = False
certmonger.request_and_wait_for_cert(**args)
finally:
if prev_helper is not None:
certmonger.modify_ca_helper('IPA', prev_helper)
self.cert = x509.load_certificate_from_file(
paths.HTTPD_CERT_FILE
)
if prev_helper is not None:
self.add_cert_to_service()
with open(paths.HTTPD_KEY_FILE, 'rb') as f:
priv_key = x509.load_pem_private_key(
f.read(), pkey_passwd, backend=x509.default_backend())
# Verify we have a valid server cert
if (priv_key.public_key().public_numbers()
!= self.cert.public_key().public_numbers()):
raise RuntimeError(
"The public key of the issued HTTPD service certificate "
"does not match its private key.")
sysupgrade.set_upgrade_state('ssl.conf', 'migrated_to_mod_ssl', True)
def configure_mod_ssl_certs(self):
"""Configure the mod_ssl certificate directives"""
directivesetter.set_directive(paths.HTTPD_SSL_SITE_CONF,
'SSLCertificateFile',
paths.HTTPD_CERT_FILE, False)
directivesetter.set_directive(paths.HTTPD_SSL_SITE_CONF,
'SSLCertificateKeyFile',
paths.HTTPD_KEY_FILE, False)
directivesetter.set_directive(
paths.HTTPD_SSL_CONF,
'SSLPassPhraseDialog',
'exec:{passread}'.format(passread=paths.IPA_HTTPD_PASSWD_READER),
False)
directivesetter.set_directive(paths.HTTPD_SSL_SITE_CONF,
'SSLCACertificateFile',
paths.IPA_CA_CRT, False)
# set SSLVerifyDepth for external CA installations
directivesetter.set_directive(paths.HTTPD_SSL_CONF,
'SSLVerifyDepth',
MOD_SSL_VERIFY_DEPTH,
quotes=False)
def __publish_ca_cert(self):
ca_subject = self.cert.issuer
certlist = x509.load_certificate_list_from_file(paths.IPA_CA_CRT)
ca_certs = [c for c in certlist if c.subject == ca_subject]
if not ca_certs:
raise RuntimeError("HTTPD cert was issued by an unknown CA.")
# at this time we can assume any CA cert will be valid since this is
# only run during installation
x509.write_certificate_list(certlist, paths.CA_CRT, mode=0o644)
def is_kdcproxy_configured(self):
"""Check if KDC proxy has already been configured in the past"""
return os.path.isfile(paths.HTTPD_IPA_KDCPROXY_CONF)
def enable_kdcproxy(self):
"""Add ipaConfigString=kdcProxyEnabled to cn=KDC"""
service.set_service_entry_config(
'KDC', self.fqdn, [u'kdcProxyEnabled'], self.suffix)
def create_kdcproxy_conf(self):
"""Create ipa-kdc-proxy.conf in /etc/ipa/kdcproxy"""
target_fname = paths.HTTPD_IPA_KDCPROXY_CONF
sub_dict = dict(KDCPROXY_CONFIG=paths.KDCPROXY_CONFIG)
http_txt = ipautil.template_file(
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa-kdc-proxy.conf.template"),
sub_dict)
self.fstore.backup_file(target_fname)
with open(target_fname, 'w') as f:
f.write(http_txt)
os.chmod(target_fname, 0o644)
def enable_and_start_oddjobd(self):
oddjobd = services.service('oddjobd', api)
self.sstore.backup_state('oddjobd', 'running', oddjobd.is_running())
self.sstore.backup_state('oddjobd', 'enabled', oddjobd.is_enabled())
try:
oddjobd.enable()
oddjobd.start()
except Exception as e:
logger.critical("Unable to start oddjobd: %s", str(e))
def update_httpd_service_ipa_conf(self):
tasks.configure_httpd_service_ipa_conf()
def update_httpd_wsgi_conf(self):
tasks.configure_httpd_wsgi_conf()
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring web server")
running = self.restore_state("running")
enabled = self.restore_state("enabled")
# Restore oddjobd to its original state
oddjobd = services.service('oddjobd', api)
if not self.sstore.restore_state('oddjobd', 'running'):
try:
oddjobd.stop()
except Exception:
pass
if not self.sstore.restore_state('oddjobd', 'enabled'):
try:
oddjobd.disable()
except Exception:
pass
self.stop_tracking_certificates()
helper = self.restore_state('certmonger_ipa_helper')
if helper:
bus = dbus.SystemBus()
obj = bus.get_object('org.fedorahosted.certmonger',
'/org/fedorahosted/certmonger')
iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
path = iface.find_ca_by_nickname('IPA')
if path:
ca_obj = bus.get_object('org.fedorahosted.certmonger', path)
ca_iface = dbus.Interface(ca_obj,
'org.freedesktop.DBus.Properties')
ca_iface.Set('org.fedorahosted.certmonger.ca',
'external-helper', helper)
for f in [paths.HTTPD_IPA_CONF, paths.HTTPD_SSL_CONF,
paths.HTTPD_SSL_SITE_CONF, paths.HTTPD_NSS_CONF]:
try:
self.fstore.restore_file(f)
except ValueError as error:
logger.debug("%s", error)
# Remove the configuration files we create
ipautil.remove_keytab(self.keytab)
remove_files = [
paths.HTTPD_CERT_FILE,
paths.HTTPD_KEY_FILE,
paths.HTTPD_PASSWD_FILE_FMT.format(host=api.env.host),
paths.HTTPD_IPA_REWRITE_CONF,
paths.HTTPD_IPA_CONF,
paths.HTTPD_IPA_PKI_PROXY_CONF,
paths.HTTPD_IPA_KDCPROXY_CONF_SYMLINK,
paths.HTTPD_IPA_KDCPROXY_CONF,
paths.GSSPROXY_CONF,
paths.GSSAPI_SESSION_KEY,
paths.HTTPD_PASSWORD_CONF,
paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF,
]
# NSS DB backups
remove_files.extend(
glob.glob(os.path.join(paths.HTTPD_ALIAS_DIR, '*.ipasave'))
)
if paths.HTTPD_IPA_WSGI_MODULES_CONF is not None:
remove_files.append(paths.HTTPD_IPA_WSGI_MODULES_CONF)
for filename in remove_files:
ipautil.remove_file(filename)
ipautil.remove_file(paths.HTTPD_NSS_CONF, only_if_empty=True)
for d in (
paths.SYSTEMD_SYSTEM_HTTPD_D_DIR,
paths.HTTPD_ALIAS_DIR
):
ipautil.remove_directory(d)
# Restore SELinux boolean states
boolean_states = {name: self.restore_state(name)
for name in constants.SELINUX_BOOLEAN_HTTPD}
try:
tasks.set_selinux_booleans(boolean_states)
except ipapython.errors.SetseboolError as e:
self.print_msg('WARNING: ' + str(e))
if running:
self.restart()
# disabled by default, by ldap_configure()
if enabled:
self.enable()
ipautil.run(
[paths.SYSTEMCTL, 'disable', 'ipa-ccache-sweep.timer']
)
ipautil.remove_file(paths.IPA_CCACHE_SWEEPER_GSSPROXY_SOCK)
for filename in os.listdir(paths.IPA_CCACHES):
ipautil.remove_file(os.path.join(paths.IPA_CCACHES, filename))
def stop_tracking_certificates(self):
try:
certmonger.stop_tracking(certfile=paths.HTTPD_CERT_FILE)
except RuntimeError as e:
logger.error("certmonger failed to stop tracking certificate: %s",
str(e))
def start_tracking_certificates(self):
key_passwd_file = paths.HTTPD_PASSWD_FILE_FMT.format(host=api.env.host)
cert = x509.load_certificate_from_file(paths.HTTPD_CERT_FILE)
if certs.is_ipa_issued_cert(api, cert):
request_id = certmonger.start_tracking(
certpath=(paths.HTTPD_CERT_FILE, paths.HTTPD_KEY_FILE),
post_command='restart_httpd', storage='FILE',
profile=dogtag.DEFAULT_PROFILE,
pinfile=key_passwd_file,
dns=[self.fqdn, f'{IPA_CA_RECORD}.{api.env.domain}'],
)
subject = str(DN(cert.subject))
certmonger.add_principal(request_id, self.principal)
certmonger.add_subject(request_id, subject)
else:
logger.debug("Will not track HTTP server cert %s as it is not "
"issued by IPA", cert.subject)
def request_service_keytab(self):
super(HTTPInstance, self).request_service_keytab()
if self.master_fqdn is not None:
service_dn = DN(('krbprincipalname', self.principal),
api.env.container_service,
self.suffix)
ldap_uri = ipaldap.get_ldap_uri(self.master_fqdn)
with ipaldap.LDAPClient(ldap_uri,
start_tls=not self.promote,
cacert=paths.IPA_CA_CRT) as remote_ldap:
if self.promote:
remote_ldap.gssapi_bind()
else:
remote_ldap.simple_bind(ipaldap.DIRMAN_DN,
self.dm_password)
replication.wait_for_entry(
remote_ldap,
service_dn,
timeout=api.env.replication_wait_timeout
)
def migrate_to_mod_ssl(self):
"""For upgrades only, migrate from mod_nss to mod_ssl"""
db = certs.CertDB(api.env.realm, nssdir=paths.HTTPD_ALIAS_DIR)
nickname = self.get_mod_nss_nickname()
with tempfile.NamedTemporaryFile() as temp:
pk12_password = ipautil.ipa_generate_password()
pk12_pwdfile = ipautil.write_tmp_file(pk12_password)
db.export_pkcs12(temp.name, pk12_pwdfile.name, nickname)
certs.install_pem_from_p12(temp.name,
pk12_password,
paths.HTTPD_CERT_FILE)
passwd_fname = paths.HTTPD_PASSWD_FILE_FMT.format(
host=api.env.host)
with open(passwd_fname, 'wb') as passwd_file:
os.fchmod(passwd_file.fileno(), 0o600)
passwd_file.write(
ipautil.ipa_generate_password().encode('utf-8'))
certs.install_key_from_p12(temp.name,
pk12_password,
paths.HTTPD_KEY_FILE,
out_passwd_fname=passwd_fname)
self.backup_ssl_conf()
self.configure_mod_ssl_certs()
self.set_mod_ssl_protocol()
self.set_mod_ssl_logdir()
self.__add_include()
self.cert = x509.load_certificate_from_file(paths.HTTPD_CERT_FILE)
if self.ca_is_configured:
db.untrack_server_cert(nickname)
self.start_tracking_certificates()
# remove nickname and CA certs from NSS db
self.disable_nss_conf()
| 27,922 | Python | .py | 597 | 33.842546 | 84 | 0.583174 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,776 | schemaupdate.py | freeipa_freeipa/ipaserver/install/schemaupdate.py |
# Authors: Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import pprint
import ldap.schema
import ipapython.version
from ipalib import api
from ipalib.constants import FQDN
from ipapython.dn import DN
from ipaserver.install.ldapupdate import connect
SCHEMA_ELEMENT_CLASSES = (
# All schema model classes this tool can modify
# Depends on order, attributes first, then objectclasses
('attributetypes', ldap.schema.models.AttributeType),
('objectclasses', ldap.schema.models.ObjectClass),
)
ORIGIN = 'IPA v%s' % ipapython.version.VERSION
logger = logging.getLogger(__name__)
def _get_oid_dependency_order(schema, cls):
"""
    Return an ordered list of OID sets, in an order that respects LDAP
    inheritance: OIDs in the second set depend on the first set, and so on.
:return [set(1st-tree-level), set(2nd-tree-level), ...]
"""
top_node = '_'
ordered_oid_groups = []
tree = schema.tree(cls) # tree structure of schema
    # remove top_node from the tree, it breaks the ordering;
    # we don't need it, as the tree built from a file is not consistent
del tree[top_node]
unordered_oids = set(tree.keys())
    # split into two groups, parent and child nodes, and iterate until
    # no child nodes remain
while unordered_oids:
parent_nodes = set()
child_nodes = set()
for node in unordered_oids:
if node not in child_nodes:
                # once a node has been seen as a child, it must remain a child
parent_nodes.add(node)
for child_oid in tree[node]:
# iterate over all child nodes stored in tree[node] per node
# child node must be removed from parents
parent_nodes.discard(child_oid)
child_nodes.add(child_oid)
        ordered_oid_groups.append(parent_nodes)  # parent nodes are not dependent
assert len(child_nodes) < len(unordered_oids) # while iteration must be finite
unordered_oids = child_nodes # extract new parent nodes in next iteration
return ordered_oid_groups
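# A minimal sketch of the level ordering above, with hypothetical OIDs:
# given tree = {'1.1': ['1.2'], '1.2': ['1.3'], '1.3': [], '2.1': []}
# (after the top node is removed), the first pass keeps {'1.1', '2.1'} as
# parents and defers {'1.2', '1.3'}; the final result is
# [{'1.1', '2.1'}, {'1.2'}, {'1.3'}], so SUP parents are always added first.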
def update_schema(schema_files, ldapi=False):
"""Update schema to match the given ldif files
Schema elements present in the LDIF files but missing from the DS schema
are added.
Schema elements that differ between LDIF files and DS schema are updated
to match the LDIF files. The comparison ignores tags that python-ldap's
schema parser does not understand (such as X-ORIGIN).
Extra elements present only in the DS schema are left untouched.
An X-ORIGIN tag containing the current IPA version is added to all new
and updated schema elements.
:param schema_files: List of filenames to update from
:param ldapi: if true, use ldapi to connect
:return:
True if modifications were made
"""
SCHEMA_ELEMENT_CLASSES_KEYS = [x[0] for x in SCHEMA_ELEMENT_CLASSES]
conn = connect(ldapi=ldapi, realm=api.env.realm, fqdn=FQDN)
old_schema = conn.schema
schema_entry = conn.get_entry(DN(('cn', 'schema')),
SCHEMA_ELEMENT_CLASSES_KEYS)
modified = False
# The exact representation the DS gives us for each OID
# (for debug logging)
old_entries_by_oid = {cls(attr).oid: attr.decode('utf-8')
for (attrname, cls) in SCHEMA_ELEMENT_CLASSES
for attr in schema_entry[attrname]}
for filename in schema_files:
logger.debug('Processing schema LDIF file %s', filename)
url = "file://{}".format(filename)
_dn, new_schema = ldap.schema.subentry.urlfetch(url)
for attrname, cls in SCHEMA_ELEMENT_CLASSES:
for oids_set in _get_oid_dependency_order(new_schema, cls):
# Set of all elements of this class, as strings given by the DS
new_elements = []
for oid in oids_set:
new_obj = new_schema.get_obj(cls, oid)
old_obj = old_schema.get_obj(cls, oid)
# Compare python-ldap's sanitized string representations
# to see if the value is different
# This can give false positives, e.g. with case differences
# in case-insensitive names.
# But, false positives are harmless (and infrequent)
if not old_obj or str(new_obj) != str(old_obj):
# Note: An add will automatically replace any existing
# schema with the same OID. So, we only add.
value = add_x_origin(new_obj)
if old_obj:
old_attr = old_entries_by_oid.get(oid)
logger.debug('Replace: %s', old_attr)
logger.debug(' with: %s', value)
else:
logger.debug('Add: %s', value)
new_elements.append(value.encode('utf-8'))
modified = modified or new_elements
schema_entry[attrname].extend(new_elements)
# we need to iterate schema updates, due to dependencies (SUP)
# schema_entry doesn't respect order of objectclasses/attributes
# so updates must be executed with groups of independent OIDs
if new_elements:
modlist = schema_entry.generate_modlist()
logger.debug("Schema modlist:\n%s",
pprint.pformat(modlist))
conn.update_entry(schema_entry)
if not modified:
logger.debug('Not updating schema')
return modified
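# A hedged usage sketch (the LDIF path is illustrative, and the API must
# already be bootstrapped in server context):
#
#   from ipalib import api
#   api.bootstrap(in_server=True)
#   api.finalize()
#   if update_schema(['/usr/share/ipa/60basev2.ldif'], ldapi=True):
#       print('schema was modified')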
def add_x_origin(element):
"""Add X-ORIGIN tag to a schema element if it does not already contain one
"""
# Note that python-ldap drops X-ORIGIN when it parses schema elements,
# so we need to resort to string manipulation
element = str(element)
if 'X-ORIGIN' not in element:
assert element[-2:] == ' )'
element = element[:-1] + "X-ORIGIN '%s' )" % ORIGIN
return element
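# Example (sketch, hypothetical element): given
#   "( 2.16.840.1.113730.3.8.1 NAME 'ipaExample' )"
# add_x_origin returns
#   "( 2.16.840.1.113730.3.8.1 NAME 'ipaExample' X-ORIGIN 'IPA v4.x' )"
# The assertion relies on python-ldap always rendering elements with a
# trailing " )".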
| 6,884 | Python | .py | 142 | 38.816901 | 87 | 0.640012 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,777 | ipa_server_upgrade.py | freeipa_freeipa/ipaserver/install/ipa_server_upgrade.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
from ipalib import api
from ipaplatform.paths import paths
from ipapython import admintool
from ipaserver.install import installutils
from ipaserver.install import server
logger = logging.getLogger(__name__)
class ServerUpgrade(admintool.AdminTool):
log_file_name = paths.IPAUPGRADE_LOG
command_name = 'ipa-server-upgrade'
usage = "%prog [options]"
@classmethod
def add_options(cls, parser):
super(ServerUpgrade, cls).add_options(parser)
parser.add_option("--force", action="store_true",
dest="force", default=False,
help="force upgrade (alias for --skip-version-check)")
parser.add_option("--skip-version-check", action="store_true",
dest="skip_version_check", default=False,
help="skip version check. WARNING: this may break "
"your system")
def validate_options(self):
super(ServerUpgrade, self).validate_options(needs_root=True)
installutils.check_server_configuration()
if self.options.force:
self.options.skip_version_check = True
def setup_logging(self):
super(ServerUpgrade, self).setup_logging(log_file_mode='a')
def run(self):
super(ServerUpgrade, self).run()
api.bootstrap(in_server=True, context='updates', confdir=paths.ETC_IPA)
api.finalize()
try:
server.upgrade_check(self.options)
server.upgrade()
except RuntimeError as e:
raise admintool.ScriptError(str(e))
def handle_error(self, exception):
if not isinstance(exception, SystemExit):
# do not log this message when ipa is not installed
logger.error("IPA server upgrade failed: Inspect "
"/var/log/ipaupgrade.log and run command "
"ipa-server-upgrade manually.")
return installutils.handle_error(exception, self.log_file_name)
| 2,141 | Python | .py | 48 | 34.833333 | 80 | 0.645161 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
16,778 | installutils.py | freeipa_freeipa/ipaserver/install/installutils.py |
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import socket
import getpass
import gssapi
import ldif
import os
import re
import fileinput
import sys
import tempfile
import shutil
import traceback
import textwrap
import warnings
from contextlib import contextmanager
from configparser import ConfigParser as SafeConfigParser
from configparser import NoOptionError
from dns import rrset, rdatatype, rdataclass
from dns.exception import DNSException
import ldap
import six
from ipalib import facts
from ipalib.install.kinit import kinit_password
import ipaplatform
from ipapython import ipautil, admintool, version, ipaldap
from ipapython.admintool import ScriptError, SERVER_NOT_CONFIGURED # noqa: E402
from ipapython.certdb import EXTERNAL_CA_TRUST_FLAGS
from ipalib.constants import FQDN, MAXHOSTNAMELEN
from ipalib.util import validate_hostname
from ipalib import api, errors, x509
from ipalib.install import dnsforwarders
from ipapython.dn import DN
from ipapython.dnsutil import DNSName, resolve
from ipaserver.install import certs, sysupgrade
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
class BadHostError(Exception):
pass
class HostLookupError(BadHostError):
pass
class HostForwardLookupError(HostLookupError):
pass
class HostReverseLookupError(HostLookupError):
pass
class HostnameLocalhost(HostLookupError):
pass
class UpgradeVersionError(Exception):
pass
class UpgradePlatformError(UpgradeVersionError):
pass
class UpgradeDataOlderVersionError(UpgradeVersionError):
pass
class UpgradeDataNewerVersionError(UpgradeVersionError):
pass
class UpgradeMissingVersionError(UpgradeVersionError):
pass
class ReplicaConfig:
def __init__(self, top_dir=None):
self.realm_name = ""
self.domain_name = ""
self.master_host_name = ""
self.dirman_password = ""
self.host_name = ""
self.dir = ""
self.subject_base = None
self.setup_ca = False
self.version = 0
self.top_dir = top_dir
subject_base = ipautil.dn_attribute_property('_subject_base')
def get_fqdn():
"""Get fully qualified domain name of current host
:note: used by ansible_freeipa
:deprecated: use ipalib.constants.FQDN
:return: str
"""
return FQDN
def verify_fqdn(host_name, no_host_dns=False, local_hostname=True):
"""
Run fqdn checks for given host:
- test hostname format
- test that hostname is fully qualified
- test forward and reverse hostname DNS lookup
Raises `BadHostError` or derived Exceptions if there is an error
:param host_name: The host name to verify.
:param no_host_dns: If true, skip DNS resolution tests of the host name.
:param local_hostname: If true, run additional checks for local hostnames
"""
if len(host_name.split(".")) < 2 or host_name == "localhost.localdomain":
raise BadHostError("Invalid hostname '%s', must be fully-qualified." % host_name)
if host_name != host_name.lower():
raise BadHostError("Invalid hostname '%s', must be lower-case." % host_name)
if ipautil.valid_ip(host_name):
raise BadHostError("IP address not allowed as a hostname")
try:
# make sure that the host name meets the requirements in ipalib
validate_hostname(host_name, maxlen=MAXHOSTNAMELEN)
except ValueError as e:
raise BadHostError("Invalid hostname '%s', %s" % (host_name, unicode(e)))
if local_hostname:
try:
logger.debug('Check if %s is a primary hostname for localhost',
host_name)
ex_name = socket.gethostbyaddr(host_name)
logger.debug('Primary hostname for localhost: %s', ex_name[0])
if host_name != ex_name[0]:
raise HostLookupError("The host name %s does not match the primary host name %s. "\
"Please check /etc/hosts or DNS name resolution" % (host_name, ex_name[0]))
except socket.gaierror:
pass
except socket.error as e:
logger.debug(
'socket.gethostbyaddr() error: %d: %s',
e.errno, e.strerror)
if no_host_dns:
print("Warning: skipping DNS resolution of host", host_name)
return
try:
logger.debug('Search DNS for %s', host_name)
hostaddr = socket.getaddrinfo(host_name, None)
except Exception as e:
logger.debug('Search failed: %s', e)
raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
if len(hostaddr) == 0:
raise HostForwardLookupError("Unable to resolve host name, check /etc/hosts or DNS name resolution")
# Verify this is NOT a CNAME
try:
logger.debug('Check if %s is not a CNAME', host_name)
resolve(host_name, rdatatype.CNAME)
raise HostReverseLookupError("The IPA Server Hostname cannot be a CNAME, only A and AAAA names are allowed.")
except DNSException:
pass
# list of verified addresses to prevent multiple searches for the same address
verified = set()
for a in hostaddr:
address = a[4][0]
if address in verified:
continue
if address in ('127.0.0.1', '::1'):
raise HostForwardLookupError("The IPA Server hostname must not resolve to localhost (%s). A routable IP address must be used. Check /etc/hosts to see if %s is an alias for %s" % (address, host_name, address))
try:
logger.debug('Check reverse address of %s', address)
revname = socket.gethostbyaddr(address)[0]
except Exception as e:
logger.debug('Check failed: %s', e)
logger.error(
"Unable to resolve the IP address %s to a host name, "
"check /etc/hosts and DNS name resolution", address)
else:
logger.debug('Found reverse name: %s', revname)
if revname != host_name:
logger.error(
"The host name %s does not match the value %s obtained "
"by reverse lookup on IP address %s", host_name, revname,
address)
verified.add(address)
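# A hedged usage sketch: installers call verify_fqdn() early and convert
# failures into user-facing errors, e.g.
#
#   try:
#       verify_fqdn('ipa.example.test', no_host_dns=False)
#   except BadHostError as e:
#       raise ScriptError(str(e))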
def record_in_hosts(ip, host_name=None, conf_file=paths.HOSTS):
"""
Search record in /etc/hosts - static table lookup for hostnames
In case of match, returns a tuple of ip address and a list of
hostname aliases
When no record is matched, None is returned
:param ip: IP address
:param host_name: Optional hostname to search
:param conf_file: Optional path to the lookup table
"""
hosts = open(conf_file, 'r').readlines()
for line in hosts:
line = line.rstrip('\n')
fields = line.partition('#')[0].split()
if len(fields) == 0:
continue
try:
hosts_ip = fields[0]
names = fields[1:]
if hosts_ip != ip:
continue
if host_name is not None:
if host_name in names:
return (hosts_ip, names)
else:
return None
return (hosts_ip, names)
except IndexError:
print("Warning: Erroneous line '%s' in %s" % (line, conf_file))
continue
return None
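# Example (sketch): with an /etc/hosts line such as
#   "192.0.2.10  ipa.example.test ipa  # primary"
# record_in_hosts('192.0.2.10') returns
#   ('192.0.2.10', ['ipa.example.test', 'ipa'])
# because the trailing comment is stripped before the line is split.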
def add_record_to_hosts(ip, host_name, conf_file=paths.HOSTS):
hosts_fd = open(conf_file, 'r+')
hosts_fd.seek(0, 2)
hosts_fd.write(ip+'\t'+host_name+' '+host_name.split('.')[0]+'\n')
hosts_fd.close()
def read_ip_addresses():
ips = []
msg_first = "Please provide the IP address to be used for this host name"
msg_other = "Enter an additional IP address, or press Enter to skip"
while True:
msg = msg_other if ips else msg_first
ip = ipautil.user_input(msg, allow_empty=True)
if not ip:
break
try:
ip_parsed = ipautil.CheckedIPAddress(ip)
except Exception as e:
print("Error: Invalid IP Address %s: %s" % (ip, e))
continue
ips.append(ip_parsed)
return ips
def read_dns_forwarders():
addrs = []
if ipautil.user_input("Do you want to configure DNS forwarders?", True):
if dnsforwarders.detect_resolve1_resolv_conf():
servers = [
str(s) for s in dnsforwarders.get_resolve1_nameservers()
]
print(
"The following DNS servers are configured in "
"systemd-resolved: %s" % ", ".join(servers)
)
else:
servers = [
str(s) for s in dnsforwarders.get_dnspython_nameservers()
]
print(
"Following DNS servers are configured in /etc/resolv.conf: "
"%s" % ", ".join(servers)
)
if ipautil.user_input("Do you want to configure these servers as DNS "
"forwarders?", True):
addrs = servers[:]
print("All detected DNS servers were added. You can enter "
"additional addresses now:")
while True:
ip = ipautil.user_input("Enter an IP address for a DNS forwarder, "
"or press Enter to skip", allow_empty=True)
if not ip:
break
try:
ip_parsed = ipautil.CheckedIPAddress(ip, parse_netmask=False)
except Exception as e:
print("Error: Invalid IP Address %s: %s" % (ip, e))
print("DNS forwarder %s not added." % ip)
continue
if ip_parsed.is_loopback():
print("Error: %s is a loopback address" % ip)
print("DNS forwarder %s not added." % ip)
continue
print("DNS forwarder %s added. You may add another." % ip)
addrs.append(str(ip_parsed))
if not addrs:
print("No DNS forwarders configured")
else:
print("DNS forwarders: %s" % ", ".join(addrs))
return addrs
def get_password(prompt):
if os.isatty(sys.stdin.fileno()):
return getpass.getpass(prompt)
else:
sys.stdout.write(prompt)
sys.stdout.flush()
line = sys.stdin.readline()
if not line:
raise EOFError()
return line.rstrip()
def _read_password_default_validator(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
def validate_dm_password_ldap(password):
"""
Validate DM password by attempting to connect to LDAP. api.env has to
contain valid ldap_uri.
"""
client = ipaldap.LDAPClient(api.env.ldap_uri, cacert=paths.IPA_CA_CRT)
try:
client.simple_bind(ipaldap.DIRMAN_DN, password)
except errors.ACIError:
raise ValueError("Invalid Directory Manager password")
else:
client.unbind()
def read_password(user, confirm=True, validate=True, retry=True, validator=_read_password_default_validator):
correct = False
pwd = None
try:
while not correct:
if not retry:
correct = True
pwd = get_password(user + " password: ")
if not pwd:
continue
if validate:
try:
validator(pwd)
except ValueError as e:
print(str(e))
pwd = None
continue
if not confirm:
correct = True
continue
pwd_confirm = get_password("Password (confirm): ")
if pwd != pwd_confirm:
print("Password mismatch!")
print("")
pwd = None
else:
correct = True
except EOFError:
return None
finally:
print("")
return pwd
def update_file(filename, orig, subst):
if os.path.exists(filename):
st = os.stat(filename)
pattern = "%s" % re.escape(orig)
p = re.compile(pattern)
for line in fileinput.input(filename, inplace=1):
if not p.search(line):
sys.stdout.write(line)
else:
sys.stdout.write(p.sub(subst, line))
fileinput.close()
os.chown(filename, st.st_uid, st.st_gid) # reset perms
return 0
else:
print("File %s doesn't exist." % filename)
return 1
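# Example (hedged sketch, illustrative path): this is how callers such as
# the HTTP instance setup append a directive by rewriting the matched line:
#
#   update_file('/etc/httpd/conf.d/ssl.conf', '</VirtualHost>',
#               'Include conf.d/ipa-rewrite.conf\n</VirtualHost>')
#
# Note that subst is a regex replacement template, and 0 is returned
# whenever the file exists, even if nothing matched.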
def kadmin(command):
return ipautil.run(
[
paths.KADMIN_LOCAL, "-q", command,
"-x", "ipa-setup-override-restrictions"
],
capture_output=True,
capture_error=True
)
def kadmin_addprinc(principal):
return kadmin("addprinc -randkey " + principal)
def kadmin_modprinc(principal, options):
return kadmin("modprinc " + options + " " + principal)
def create_keytab(path, principal):
try:
if os.path.isfile(path):
os.remove(path)
except os.error:
logger.critical("Failed to remove %s.", path)
return kadmin("ktadd -k " + path + " " + principal)
def resolve_ip_addresses_nss(fqdn):
"""Get list of IP addresses for given host (using NSS/getaddrinfo).
:returns:
list of IP addresses as UnsafeIPAddress objects
"""
    # it would be good to disable search-list processing from resolv.conf
    # to avoid cases where we get an IP address for a totally different
    # name, but there is no way to do this using getaddrinfo parameters
try:
addrinfos = socket.getaddrinfo(fqdn, None,
socket.AF_UNSPEC, socket.SOCK_STREAM)
except socket.error as ex:
if ex.errno in (socket.EAI_NODATA, socket.EAI_NONAME):
logger.debug('Name %s does not have any address: %s', fqdn, ex)
return set()
else:
raise
# accept whatever we got from NSS
ip_addresses = set()
for ai in addrinfos:
try:
ip = ipautil.UnsafeIPAddress(ai[4][0])
except ValueError as ex:
            # getaddrinfo may return a link-local address or similar
            # oddities which are not accepted by CheckedIPAddress - skip these
logger.warning('Name %s resolved to an unacceptable IP '
'address %s: %s', fqdn, ai[4][0], ex)
else:
ip_addresses.add(ip)
logger.debug('Name %s resolved to %s', fqdn, ip_addresses)
return ip_addresses
def resolve_rrsets_nss(fqdn):
"""Get list of dnspython RRsets from NSS"""
if not isinstance(fqdn, DNSName):
fqdn = DNSName.from_text(fqdn)
ip_addresses = resolve_ip_addresses_nss(fqdn.to_text())
# split IP addresses into IPv4 and IPv6
ipv4 = []
ipv6 = []
for ip_address in ip_addresses:
# Skip reserved or link-local addresses
try:
ipautil.CheckedIPAddress(ip_address)
except ValueError as e:
logger.warning("Invalid IP address %s for %s: %s",
ip_address, fqdn, unicode(e))
continue
if ip_address.version == 4:
ipv4.append(str(ip_address))
elif ip_address.version == 6:
ipv6.append(str(ip_address))
# construct an RRset for each address type. TTL is irrelevant
ttl = 3600
rrs = []
if ipv4:
rrs.append(
rrset.from_text_list(
fqdn, ttl, rdataclass.IN, rdatatype.A, ipv4
)
)
if ipv6:
rrs.append(
rrset.from_text_list(
fqdn, ttl, rdataclass.IN, rdatatype.AAAA, ipv6
)
)
return rrs
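# Example (sketch): if NSS resolves 'ipa.example.test' to 192.0.2.10 and
# 2001:db8::10, resolve_rrsets_nss returns two RRsets, roughly:
#
#   ipa.example.test. 3600 IN A 192.0.2.10
#   ipa.example.test. 3600 IN AAAA 2001:db8::10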
def get_server_ip_address(host_name, unattended, setup_dns, ip_addresses):
hostaddr = resolve_ip_addresses_nss(host_name)
if hostaddr.intersection(
{ipautil.UnsafeIPAddress(ip) for ip in ['127.0.0.1', '::1']}):
print("The hostname resolves to the localhost address (127.0.0.1/::1)", file=sys.stderr)
print("Please change your /etc/hosts file so that the hostname", file=sys.stderr)
print("resolves to the ip address of your network interface.", file=sys.stderr)
print("The KDC service does not listen on localhost", file=sys.stderr)
print("", file=sys.stderr)
print("Please fix your /etc/hosts file and restart the setup program", file=sys.stderr)
raise ScriptError()
ips = []
if len(hostaddr):
for ha in hostaddr:
try:
ips.append(ipautil.CheckedIPAddress(ha))
except ValueError as e:
logger.warning("Invalid IP address %s for %s: %s",
ha, host_name, unicode(e))
if not ips and not ip_addresses:
if not unattended:
ip_addresses = read_ip_addresses()
if ip_addresses:
if setup_dns:
ips = ip_addresses
else:
            # all specified addresses were resolved for this host
if set(ip_addresses) <= set(ips):
ips = ip_addresses
else:
print("Error: the hostname resolves to IP address(es) that are different", file=sys.stderr)
print("from those provided on the command line. Please fix your DNS", file=sys.stderr)
print("or /etc/hosts file and restart the installation.", file=sys.stderr)
print("Provided but not resolved address(es): %s" % \
", ".join(str(ip) for ip in (set(ip_addresses) - set(ips))), file=sys.stderr)
raise ScriptError()
if not ips:
print("No usable IP address provided nor resolved.", file=sys.stderr)
raise ScriptError()
for ip_address in ips:
# check /etc/hosts sanity
hosts_record = record_in_hosts(str(ip_address))
if hosts_record is not None:
primary_host = hosts_record[1][0]
if primary_host != host_name:
print("Error: there is already a record in /etc/hosts for IP address %s:" \
% ip_address, file=sys.stderr)
print(hosts_record[0], " ".join(hosts_record[1]), file=sys.stderr)
print("Chosen hostname %s does not match configured canonical hostname %s" \
% (host_name, primary_host), file=sys.stderr)
print("Please fix your /etc/hosts file and restart the installation.", file=sys.stderr)
raise ScriptError()
return ips
def update_hosts_file(ip_addresses, host_name, fstore):
"""
Update hosts with specified addresses
:param ip_addresses: list of IP addresses
:return:
"""
if not fstore.has_file(paths.HOSTS):
fstore.backup_file(paths.HOSTS)
for ip_address in ip_addresses:
if record_in_hosts(str(ip_address)):
continue
print("Adding [{address!s} {name}] to your /etc/hosts file".format(
address=ip_address, name=host_name))
add_record_to_hosts(str(ip_address), host_name)
def _ensure_nonempty_string(string, message):
if not isinstance(string, str) or not string:
raise ValueError(message)
def gpg_command(extra_args, password=None, workdir=None):
tempdir = tempfile.mkdtemp('', 'ipa-', workdir)
args = [
paths.GPG_AGENT,
'--batch',
'--homedir', tempdir,
'--daemon', paths.GPG2,
'--batch',
'--homedir', tempdir,
'--passphrase-fd', '0',
'--yes',
'--no-tty',
]
args.extend(extra_args)
try:
ipautil.run(args, stdin=password, skip_output=True)
finally:
shutil.rmtree(tempdir, ignore_errors=True)
# uses gpg to compress and encrypt a file
def encrypt_file(source, dest, password, workdir=None):
_ensure_nonempty_string(source, 'Missing Source File')
    # stat it so that we get back an exception if it does not exist
os.stat(source)
_ensure_nonempty_string(dest, 'Missing Destination File')
_ensure_nonempty_string(password, 'Missing Password')
extra_args = [
'-o', dest,
'-c', source,
]
gpg_command(extra_args, password, workdir)
def decrypt_file(source, dest, password, workdir=None):
_ensure_nonempty_string(source, 'Missing Source File')
    # stat it so that we get back an exception if it does not exist
os.stat(source)
_ensure_nonempty_string(dest, 'Missing Destination File')
_ensure_nonempty_string(password, 'Missing Password')
extra_args = [
'-o', dest,
'-d', source,
]
gpg_command(extra_args, password, workdir)
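# A hedged round-trip sketch of the two wrappers above (paths illustrative):
#
#   encrypt_file('/tmp/plain.txt', '/tmp/secret.gpg', 'Secret123')
#   decrypt_file('/tmp/secret.gpg', '/tmp/plain.copy', 'Secret123')
#
# Each call spawns a throwaway gpg-agent home directory, feeds the
# passphrase on stdin (--passphrase-fd 0) and removes the directory again.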
def expand_replica_info(filename, password):
"""
Decrypt and expand a replica installation file into a temporary
    location. The caller is responsible for removing this directory.
"""
top_dir = tempfile.mkdtemp("ipa")
tarfile = top_dir+"/files.tar"
dir_path = top_dir + "/realm_info"
decrypt_file(filename, tarfile, password, top_dir)
ipautil.run([paths.TAR, "xf", tarfile, "-C", top_dir])
os.remove(tarfile)
return top_dir, dir_path
def read_replica_info(dir_path, rconfig):
"""
Read the contents of a replica installation file.
rconfig is a ReplicaConfig object
"""
filename = os.path.join(dir_path, "realm_info")
config = SafeConfigParser()
config.read(filename)
rconfig.realm_name = config.get("realm", "realm_name")
rconfig.master_host_name = config.get("realm", "master_host_name")
rconfig.domain_name = config.get("realm", "domain_name")
rconfig.host_name = config.get("realm", "destination_host")
rconfig.subject_base = config.get("realm", "subject_base")
try:
rconfig.version = int(config.get("realm", "version"))
except NoOptionError:
pass
def read_replica_info_dogtag_port(config_dir):
portfile = config_dir + "/dogtag_directory_port.txt"
default_port = 7389
if not os.path.isfile(portfile):
dogtag_master_ds_port = default_port
else:
with open(portfile) as fd:
try:
dogtag_master_ds_port = int(fd.read())
except (ValueError, IOError) as e:
logger.debug('Cannot parse dogtag DS port: %s', e)
logger.debug('Default to %d', default_port)
dogtag_master_ds_port = default_port
return dogtag_master_ds_port
def check_server_configuration():
"""
Check if IPA server is configured on the system.
This is done by checking if there are system restore (uninstall) files
present on the system. Note that this check can only be run with root
privileges.
    When IPA is not configured, this function raises a ScriptError.
    The most convenient use of this function is in install tools that
    require a configured IPA in order to work.
"""
if not facts.is_ipa_configured():
raise ScriptError("IPA is not configured on this system.",
rval=SERVER_NOT_CONFIGURED)
def remove_file(filename):
"""Remove a file and log any exceptions raised.
"""
warnings.warn(
"Use 'ipapython.ipautil.remove_file'",
DeprecationWarning,
stacklevel=2
)
return ipautil.remove_file(filename)
def rmtree(path):
"""
Remove a directory structure and log any exceptions raised.
"""
warnings.warn(
"Use 'ipapython.ipautil.rmtree'",
DeprecationWarning,
stacklevel=2
)
return ipautil.rmtree(path)
def is_ipa_configured():
"""
Use the state to determine if IPA has been configured.
"""
warnings.warn(
"Use 'ipalib.facts.is_ipa_configured'",
DeprecationWarning,
stacklevel=2
)
return facts.is_ipa_configured()
def run_script(main_function, operation_name, log_file_name=None,
fail_message=None):
"""Run the given function as a command-line utility
This function:
- Runs the given function
- Formats any errors
- Exits with the appropriate code
:param main_function: Function to call
:param log_file_name: Name of the log file (displayed on unexpected errors)
:param operation_name: Name of the script
:param fail_message: Optional message displayed on failure
"""
logger.info('Starting script: %s', operation_name)
try:
try:
return_value = main_function()
except BaseException as e:
if (
isinstance(e, SystemExit) and
(e.code is None or e.code == 0) # pylint: disable=no-member
):
# Not an error after all
logger.info('The %s command was successful', operation_name)
else:
# Log at the DEBUG level, which is not output to the console
# (unless in debug/verbose mode), but is written to a logfile
# if one is open.
tb = sys.exc_info()[2]
logger.debug("%s", '\n'.join(traceback.format_tb(tb)))
logger.debug('The %s command failed, exception: %s: %s',
operation_name, type(e).__name__, e)
if fail_message and not isinstance(e, SystemExit):
print(fail_message)
raise
else:
if return_value:
logger.info('The %s command failed, return value %s',
operation_name, return_value)
else:
logger.info('The %s command was successful', operation_name)
sys.exit(return_value)
except BaseException as error:
message, exitcode = handle_error(error, log_file_name)
if message:
print(message, file=sys.stderr)
sys.exit(exitcode)
def handle_error(error, log_file_name=None):
"""Handle specific errors. Returns a message and return code"""
if isinstance(error, SystemExit):
if isinstance(error.code, int):
return None, error.code
elif error.code is None:
return None, 0
else:
return str(error), 1
if isinstance(error, RuntimeError):
return str(error), 1
if isinstance(error, KeyboardInterrupt):
return "Cancelled.", 1
if isinstance(error, admintool.ScriptError):
return error.msg, error.rval
if isinstance(error, socket.error):
return error, 1
if isinstance(error, errors.ACIError):
return str(error), 1
if isinstance(error, ldap.INVALID_CREDENTIALS):
return "Invalid password", 1
if isinstance(error, ldap.INSUFFICIENT_ACCESS):
return "Insufficient access", 1
if isinstance(error, ldap.LOCAL_ERROR):
return error.args[0].get('info', ''), 1
if isinstance(error, ldap.SERVER_DOWN):
return error.args[0]['desc'], 1
if isinstance(error, ldap.LDAPError):
message = 'LDAP error: %s\n%s\n%s' % (
type(error).__name__,
error.args[0]['desc'].strip(),
error.args[0].get('info', '').strip()
)
return message, 1
if isinstance(error, errors.LDAPError):
return "An error occurred while performing operations: %s" % error, 1
if isinstance(error, HostnameLocalhost):
message = textwrap.dedent("""
The hostname resolves to the localhost address (127.0.0.1/::1)
Please change your /etc/hosts file so that the hostname
resolves to the ip address of your network interface.
Please fix your /etc/hosts file and restart the setup program
""").strip()
return message, 1
if log_file_name:
message = "Unexpected error - see %s for details:" % log_file_name
else:
message = "Unexpected error"
message += '\n%s: %s' % (type(error).__name__, error)
return message, 1
def load_pkcs12(cert_files, key_password, key_nickname, ca_cert_files,
host_name=None, realm_name=None):
"""
Load and verify server certificate and private key from multiple files
The files are accepted in PEM and DER certificate, PKCS#7 certificate
chain, PKCS#8 and raw private key and PKCS#12 formats.
:param cert_files: Names of server certificate and private key files to
import
:param key_password: Password to decrypt private keys
:param key_nickname: Nickname of the private key to import from PKCS#12
files
:param ca_cert_files: Names of CA certificate files to import
:param host_name: Host name of the server
:returns: Temporary PKCS#12 file with the server certificate, private key
and CA certificate chain, password to unlock the PKCS#12 file and
the CA certificate of the CA that issued the server certificate
"""
with certs.NSSDatabase() as nssdb:
nssdb.create_db()
try:
nssdb.import_files(cert_files, True, key_password, key_nickname)
except RuntimeError as e:
raise ScriptError(str(e))
if ca_cert_files:
try:
nssdb.import_files(ca_cert_files)
except RuntimeError as e:
raise ScriptError(str(e))
for nickname, trust_flags in nssdb.list_certs():
if trust_flags.has_key:
key_nickname = nickname
continue
nssdb.trust_root_cert(nickname, EXTERNAL_CA_TRUST_FLAGS)
# Check we have the whole cert chain & the CA is in it
trust_chain = list(reversed(nssdb.get_trust_chain(key_nickname)))
ca_cert = None
for nickname in trust_chain[1:]:
cert = nssdb.get_cert(nickname)
if ca_cert is None:
ca_cert = cert
subject = DN(cert.subject)
issuer = DN(cert.issuer)
if subject == issuer:
break
else:
raise ScriptError(
"The full certificate chain is not present in %s" %
(", ".join(cert_files)))
# verify CA validity and pathlen. The trust_chain list is in reverse
# order. trust_chain[1] is the first intermediate CA cert and must
# have pathlen >= 0.
for minpathlen, nickname in enumerate(trust_chain[1:], start=0):
try:
nssdb.verify_ca_cert_validity(nickname, minpathlen)
except ValueError as e:
raise ScriptError(
"CA certificate %s in %s is not valid: %s" %
(subject, ", ".join(cert_files), e))
if host_name is not None:
try:
nssdb.verify_server_cert_validity(key_nickname, host_name)
except ValueError as e:
raise ScriptError(
"The server certificate in %s is not valid: %s" %
(", ".join(cert_files), e))
if realm_name is not None:
try:
nssdb.verify_kdc_cert_validity(key_nickname, realm_name)
except ValueError as e:
raise ScriptError(
"The KDC certificate in %s is not valid: %s" %
(", ".join(cert_files), e))
out_file = tempfile.NamedTemporaryFile()
out_password = ipautil.ipa_generate_password()
out_pwdfile = ipautil.write_tmp_file(out_password)
args = [
paths.PK12UTIL,
'-o', out_file.name,
'-n', key_nickname,
'-d', nssdb.secdir,
'-k', nssdb.pwd_file,
'-w', out_pwdfile.name,
]
ipautil.run(args)
return out_file, out_password, ca_cert
@contextmanager
def stopped_service(service, instance_name=""):
"""
Ensure that the specified service is stopped while the commands within
this context are executed.
Service is started at the end of the execution.
"""
if instance_name:
log_instance_name = "@{instance}".format(instance=instance_name)
else:
log_instance_name = ""
logger.debug('Ensuring that service %s%s is not running while '
'the next set of commands is being executed.', service,
log_instance_name)
service_obj = services.service(service, api)
# Figure out if the service is running, if not, yield
if not service_obj.is_running(instance_name):
logger.debug('Service %s%s is not running, continue.', service,
log_instance_name)
yield
else:
# Stop the service, do the required stuff and start it again
logger.debug('Stopping %s%s.', service, log_instance_name)
service_obj.stop(instance_name)
try:
yield
finally:
logger.debug('Starting %s%s.', service, log_instance_name)
service_obj.start(instance_name)
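# Usage sketch (instance name illustrative): keep a service down while
# reconfiguring it; the context manager only restarts it if it was running
# when we entered:
#
#   with stopped_service('dirsrv', instance_name='EXAMPLE-TEST'):
#       ...  # edit the instance configuration here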
def check_entropy():
"""
Checks if the system has enough entropy, if not, displays warning message
"""
from . import service # pylint: disable=cyclic-import
try:
with open(paths.ENTROPY_AVAIL, 'r') as efname:
if int(efname.read()) < 200:
emsg = 'WARNING: Your system is running out of entropy, ' \
'you may experience long delays'
service.print_msg(emsg)
logger.debug("%s", emsg)
except IOError as e:
logger.debug(
"Could not open %s: %s", paths.ENTROPY_AVAIL, e)
except ValueError as e:
logger.debug("Invalid value in %s %s", paths.ENTROPY_AVAIL, e)
def is_hidepid():
"""Determine if /proc is mounted with hidepid=1/2 option"""
try:
os.lstat('/proc/1/stat')
except (FileNotFoundError, PermissionError):
return True
return False
def in_container():
"""Determine if we're running in a container.
virt-what will return the underlying machine information so
isn't usable here.
    systemd-detect-virt requires the whole systemd subsystem, which
    isn't a reasonable requirement in a container.
"""
data_sched = None
if not is_hidepid():
try:
with open('/proc/1/sched', 'r') as sched:
data_sched = sched.readline()
except (FileNotFoundError, PermissionError):
pass
data_cgroup = None
try:
with open('/proc/self/cgroup', 'r') as cgroup:
data_cgroup = cgroup.readline()
except (FileNotFoundError, PermissionError):
pass
checks = [
data_sched and data_sched.split()[0] not in ('systemd', 'init',),
        data_cgroup is not None and 'libpod' in data_cgroup,
os.path.exists('/.dockerenv'),
os.path.exists('/.dockerinit'),
os.getenv('container', None) is not None
]
return any(checks)
def check_available_memory(ca=False):
"""
Raise an exception if there isn't enough memory for IPA to install.
In a container then psutil will most likely return the host memory
and not the container. If in a container use the cgroup values which
also may not be constrained but it's the best approximation.
2GB is the rule-of-thumb minimum but the server is installable with
much less.
The CA uses ~150MB in a fresh install.
Use Kb instead of KiB to leave a bit of slush for the OS
"""
available = None
minimum_suggested = 1000 * 1000 * 1000 * 1.2
if not ca:
minimum_suggested -= 150 * 1000 * 1000
if in_container():
logger.debug("container detected")
# cgroup v1
if os.path.exists(
'/sys/fs/cgroup/memory/memory.limit_in_bytes'
) and os.path.exists('/sys/fs/cgroup/memory/memory.usage_in_bytes'):
logger.debug("cgroup v1")
limit_file = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
usage_file = '/sys/fs/cgroup/memory/memory.usage_in_bytes'
# cgroup v2
elif os.path.exists(
'/sys/fs/cgroup/memory.current'
) and os.path.exists('/sys/fs/cgroup/memory.max'):
logger.debug("cgroup v2")
limit_file = '/sys/fs/cgroup/memory.max'
usage_file = '/sys/fs/cgroup/memory.current'
else:
raise ScriptError(
"Unable to determine the amount of available RAM"
)
with open(limit_file) as fd:
limit = fd.readline().rstrip()
with open(usage_file) as fd:
used = int(fd.readline())
# In cgroup v2 if there is no limit on the container then
# the maximum host memory is available. Fall back to the psutil
# method for determining availability.
if limit != 'max':
available = int(limit) - used
logger.debug("Max RAM %s, used RAM %s", limit, used)
if available is None:
# delay import of psutil. On import it opens files in /proc and
# can trigger a SELinux violation.
import psutil
available = psutil.virtual_memory().available
logger.debug(psutil.virtual_memory())
if available is None:
raise ScriptError(
"Unable to determine the amount of available RAM"
)
logger.debug("Available memory is %sB", available)
if available < minimum_suggested:
raise ScriptError(
"Less than the minimum 1.2GB of RAM is available, "
"%.2fGB available. Use --skip-mem-check to suppress this check."
% (available / (1024 * 1024 * 1024))
)
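# The container branch above, as a standalone sketch (cgroup v2 paths):
#
#   with open('/sys/fs/cgroup/memory.max') as fd:
#       limit = fd.readline().rstrip()
#   with open('/sys/fs/cgroup/memory.current') as fd:
#       used = int(fd.readline())
#   available = int(limit) - used if limit != 'max' else None  # None: no cap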
def load_external_cert(files, ca_subject):
"""
Load and verify external CA certificate chain from multiple files.
The files are accepted in PEM and DER certificate and PKCS#7 certificate
chain formats.
:param files: Names of files to import
:param ca_subject: IPA CA subject DN
:returns: Temporary file with the IPA CA certificate and temporary file
with the external CA certificate chain
"""
with certs.NSSDatabase() as nssdb:
nssdb.create_db()
try:
nssdb.import_files(files)
except RuntimeError as e:
raise ScriptError(str(e))
ca_subject = DN(ca_subject)
ca_nickname = None
cache = {}
for nickname, _trust_flags in nssdb.list_certs():
cert = nssdb.get_cert(nickname)
subject = DN(cert.subject)
issuer = DN(cert.issuer)
cache[nickname] = (cert, subject, issuer)
if subject == ca_subject:
ca_nickname = nickname
nssdb.trust_root_cert(nickname, EXTERNAL_CA_TRUST_FLAGS)
if ca_nickname is None:
raise ScriptError(
"IPA CA certificate with subject '%s' "
"was not found in %s." % (ca_subject, (",".join(files))))
trust_chain = list(reversed(nssdb.get_trust_chain(ca_nickname)))
ca_cert_chain = []
for nickname in trust_chain:
cert, subject, issuer = cache[nickname]
ca_cert_chain.append(cert)
if subject == issuer:
break
else:
raise ScriptError(
"CA certificate chain in %s is incomplete: "
"missing certificate with subject '%s'" %
(", ".join(files), issuer))
# verify CA validity and pathlen. The trust_chain list is in reverse
# order. The first entry is the signed IPA-CA and must have a
# pathlen of >= 0.
for minpathlen, nickname in enumerate(trust_chain, start=0):
try:
nssdb.verify_ca_cert_validity(nickname, minpathlen)
except ValueError as e:
cert, subject, issuer = cache[nickname]
raise ScriptError(
"CA certificate %s in %s is not valid: %s" %
(subject, ", ".join(files), e))
cert_file = tempfile.NamedTemporaryFile()
cert_file.write(ca_cert_chain[0].public_bytes(x509.Encoding.PEM) + b'\n')
cert_file.flush()
ca_file = tempfile.NamedTemporaryFile()
x509.write_certificate_list(
ca_cert_chain[1:],
ca_file.name,
mode=0o644
)
ca_file.flush()
return cert_file, ca_file
def get_current_platform():
"""Get current platform (without container suffix)
'fedora' and 'fedora_container' are considered the same platform. This
normalization ensures that older freeipa-container images can be upgraded
without a platform mismatch.
"""
platform = ipaplatform.NAME
if platform.endswith('_container'):
platform = platform[:-10]
return platform
def store_version():
"""Store current data version and platform. This is required for check if
upgrade is required.
"""
sysupgrade.set_upgrade_state('ipa', 'data_version',
version.VENDOR_VERSION)
sysupgrade.set_upgrade_state('ipa', 'platform', get_current_platform())
def check_version():
"""
:raise UpgradePlatformError: if platform is not the same
:raise UpgradeDataOlderVersionError: if data needs to be upgraded
    :raise UpgradeDataNewerVersionError: if data comes from a newer version of IPA
:raise UpgradeMissingVersionError: if platform or version is missing
"""
state_platform = sysupgrade.get_upgrade_state('ipa', 'platform')
current_platform = get_current_platform()
if state_platform is not None:
if state_platform != current_platform:
raise UpgradePlatformError(
"platform mismatch (expected '%s', current '%s')" % (
state_platform, current_platform
)
)
else:
raise UpgradeMissingVersionError("no platform stored")
data_version = sysupgrade.get_upgrade_state('ipa', 'data_version')
if data_version is not None:
parsed_data_ver = tasks.parse_ipa_version(data_version)
parsed_ipa_ver = tasks.parse_ipa_version(version.VENDOR_VERSION)
if parsed_data_ver < parsed_ipa_ver:
raise UpgradeDataOlderVersionError(
"data needs to be upgraded (expected version '%s', current "
"version '%s')" % (version.VENDOR_VERSION, data_version)
)
elif parsed_data_ver > parsed_ipa_ver:
raise UpgradeDataNewerVersionError(
"data are in newer version than IPA (data version '%s', IPA "
"version '%s')" % (data_version, version.VENDOR_VERSION)
)
else:
raise UpgradeMissingVersionError("no data_version stored")
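# A hedged sketch of how a caller might consume these exceptions during an
# upgrade (the exact flow in ipa-server-upgrade may differ):
#
#   try:
#       check_version()
#   except UpgradeDataOlderVersionError:
#       pass                    # older data: proceed with the upgrade
#   except UpgradeVersionError as e:
#       raise RuntimeError(e)   # platform mismatch or newer data: abort
#   ...
#   store_version()             # record the new data version afterwards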
def realm_to_serverid(realm_name):
warnings.warn(
"Use 'ipapython.ipaldap.realm_to_serverid'",
DeprecationWarning,
stacklevel=2
)
return ipaldap.realm_to_serverid(realm_name)
def realm_to_ldapi_uri(realm_name):
warnings.warn(
"Use 'ipapython.ipaldap.realm_to_ldapi_uri'",
DeprecationWarning,
stacklevel=2
)
return ipaldap.realm_to_ldapi_uri(realm_name)
def check_creds(options, realm_name):
# Check if ccache is available
default_cred = None
try:
logger.debug('KRB5CCNAME set to %s',
os.environ.get('KRB5CCNAME', None))
# get default creds, will raise if none found
default_cred = gssapi.creds.Credentials()
principal = str(default_cred.name)
except gssapi.raw.misc.GSSError as e:
logger.debug('Failed to find default ccache: %s', e)
principal = None
# Check if the principal matches the requested one (if any)
if principal is not None and options.principal is not None:
op = options.principal
if op.find('@') == -1:
op = '%s@%s' % (op, realm_name)
if principal != op:
logger.debug('Specified principal %s does not match '
'available credentials (%s)',
options.principal, principal)
principal = None
if principal is None:
(ccache_fd, ccache_name) = tempfile.mkstemp()
os.close(ccache_fd)
options.created_ccache_file = ccache_name
if options.principal is not None:
principal = options.principal
else:
principal = 'admin'
stdin = None
if principal.find('@') == -1:
principal = '%s@%s' % (principal, realm_name)
if options.admin_password is not None:
stdin = options.admin_password
else:
if not options.unattended:
try:
stdin = getpass.getpass("Password for %s: " % principal)
except EOFError:
stdin = None
if not stdin:
logger.error(
"Password must be provided for %s.", principal)
raise ScriptError("Missing password for %s" % principal)
else:
if sys.stdin.isatty():
logger.error("Password must be provided in "
"non-interactive mode.")
logger.info("This can be done via "
"echo password | ipa-client-install "
"... or with the -w option.")
raise ScriptError("Missing password for %s" % principal)
else:
stdin = sys.stdin.readline()
# set options.admin_password for future use
options.admin_password = stdin
try:
kinit_password(principal, stdin, ccache_name)
except RuntimeError as e:
logger.error("Kerberos authentication failed: %s", e)
raise ScriptError("Invalid credentials: %s" % e)
os.environ['KRB5CCNAME'] = ccache_name
class ModifyLDIF(ldif.LDIFParser):
"""
    Allows modification of an LDIF file.
    Operations are applied in the order in which they were specified per DN.
Warning: only modifications of existing DNs are supported
"""
def __init__(self, input_file, output_file):
"""
:param input_file: an LDIF
:param output_file: an LDIF file
"""
ldif.LDIFParser.__init__(self, input_file)
self.writer = ldif.LDIFWriter(output_file)
self.dn_updated = set()
self.modifications = {} # keep modify operations in original order
def add_value(self, dn, attr, values):
"""
Add value to LDIF.
        :param dn: DN of entry (must exist)
        :param attr: attribute name
        :param values: list of values to be added
"""
assert isinstance(values, list)
self.modifications.setdefault(dn, []).append(
dict(
op="add",
attr=attr,
values=values,
)
)
def remove_value(self, dn, attr, values=None):
"""
Remove value from LDIF.
:param dn: DN of entry
:param attr: attribute name
        :param values: list of values to be removed; if None, the whole
            attribute will be removed
"""
assert values is None or isinstance(values, list)
self.modifications.setdefault(dn, []).append(
dict(
op="del",
attr=attr,
values=values,
)
)
def replace_value(self, dn, attr, values):
"""
Replace values in LDIF with new value.
:param dn: DN of entry
:param attr: attribute name
        :param values: list of new values for the attribute
"""
assert isinstance(values, list)
self.remove_value(dn, attr)
self.add_value(dn, attr, values)
def modifications_from_ldif(self, ldif_file):
"""
Parse ldif file. Default operation is add, only changetypes "add"
and "modify" are supported.
:param ldif_file: an opened file for read
:raises: ValueError
"""
parser = ldif.LDIFRecordList(ldif_file)
parser.parse()
last_dn = None
for dn, entry in parser.all_records:
if dn is None:
                # the ldif parser returns None when a record belongs to the previous DN
dn = last_dn
else:
last_dn = dn
if "replace" in entry:
for attr in entry["replace"]:
attr = attr.decode('utf-8')
try:
self.replace_value(dn, attr, entry[attr])
except KeyError:
raise ValueError("replace: {dn}, {attr}: values are "
"missing".format(dn=dn, attr=attr))
elif "delete" in entry:
for attr in entry["delete"]:
attr = attr.decode('utf-8')
self.remove_value(dn, attr, entry.get(attr, None))
elif "add" in entry:
for attr in entry["add"]:
attr = attr.decode('utf-8')
try:
                        self.add_value(dn, attr, entry[attr])
except KeyError:
raise ValueError("add: {dn}, {attr}: values are "
"missing".format(dn=dn, attr=attr))
else:
logger.error("Ignoring entry: %s : only modifications "
"are allowed (missing \"changetype: "
"modify\")", dn)
def handle(self, dn, entry):
if dn in self.modifications:
self.dn_updated.add(dn)
for mod in self.modifications.get(dn, []):
attr_name = mod["attr"]
values = mod["values"]
if mod["op"] == "del":
# delete
attribute = entry.setdefault(attr_name, [])
if values is None:
attribute = []
else:
attribute = [v for v in attribute if v not in values]
if not attribute: # empty
del entry[attr_name]
elif mod["op"] == "add":
# add
attribute = entry.setdefault(attr_name, [])
attribute.extend([v for v in values if v not in attribute])
else:
assert False, "Unknown operation: %r" % mod["op"]
self.writer.unparse(dn, entry)
def parse(self):
ldif.LDIFParser.parse(self)
# check if there are any remaining modifications
remaining_changes = set(self.modifications.keys()) - self.dn_updated
for dn in remaining_changes:
logger.error(
"DN: %s does not exists or haven't been updated", dn)
def remove_keytab(keytab_path):
"""
Remove Kerberos keytab and issue a warning if the procedure fails
:param keytab_path: path to the keytab file
"""
warnings.warn(
"Use 'ipapython.ipautil.remove_keytab'",
DeprecationWarning,
stacklevel=2
)
return ipautil.remove_keytab(keytab_path)
def remove_ccache(ccache_path=None, run_as=None):
"""
remove Kerberos credential cache, essentially a wrapper around kdestroy.
:param ccache_path: path to the ccache file
:param run_as: run kdestroy as this user
"""
warnings.warn(
"Use 'ipapython.ipautil.remove_ccache'",
DeprecationWarning,
stacklevel=2
)
return ipautil.remove_ccache(ccache_path=ccache_path, run_as=run_as)
def restart_dirsrv(instance_name="", capture_output=True):
"""
Restart Directory server and perform ldap reconnect.
"""
api.Backend.ldap2.disconnect()
services.knownservices.dirsrv.restart(instance_name=instance_name,
capture_output=capture_output,
wait=True, ldapi=True)
api.Backend.ldap2.connect()
def default_subject_base(realm_name):
return DN(('O', realm_name))
def default_ca_subject_dn(subject_base):
return DN(('CN', 'Certificate Authority'), subject_base)
def validate_mask():
mask = os.umask(0)
os.umask(mask)
mask_str = None
if mask & 0b111101101 > 0:
mask_str = "{:04o}".format(mask)
return mask_str
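# Worked example for the mask check above, with assumed common umasks:
# 0o022 shares no bits with 0b111101101 (0o755), so validate_mask()
# returns None; 0o027 shares the 0o005 bits, so it returns "0027".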
def get_replication_plugin_name(dirsrv_get_entry):
# Support renaming of a replication plugin in 389-ds
# IPA topology plugin depends on the replication plugin but
# 389-ds cannot handle older alias querying in the plugin
# configuration with 'nsslapd-plugin-depends-on-named: ..' attribute
#
# dirsrv_get_entry: function (dn, attrs) can return different types
# depending on the function. The 389-ds connection returns bytes
# and ipaldap will return a list of string values.
try:
entry = dirsrv_get_entry(
DN('cn=Multisupplier Replication Plugin,cn=plugins,cn=config'),
['cn'])
except (errors.NotFound, ldap.NO_SUCH_OBJECT):
return 'Multimaster Replication Plugin'
else:
cn = entry['cn']
if isinstance(cn, list):
return cn[0]
elif isinstance(cn, bytes):
return cn.decode('utf-8')
else:
raise RuntimeError(
'LDAP query returned unknown type for cn %s: %s' %
(cn, type(cn))
)
| 54,233
|
Python
|
.py
| 1,345
| 30.810409
| 220
| 0.607926
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,779
|
ipa_idrange_fix.py
|
freeipa_freeipa/ipaserver/install/ipa_idrange_fix.py
|
"""Tool to analyze and fix IPA ID ranges"""
#
# Copyright (C) 2024 FreeIPA Contributors see COPYING for license
#
import logging
import ldap
from ipalib import api, errors
from ipapython.admintool import AdminTool
from ipapython.dn import DN
from ipapython import ipautil
from typing import List, Tuple
logger = logging.getLogger(__name__)
class IDRange:
"""Class for ID range entity"""
def __init__(self):
self.last_id: int = None
self.last_base_rid: int = None
self.last_secondary_rid: int = None
self.name: str = None
self.size: int = None
self.first_id: int = None
self.base_rid: int = None
self.secondary_base_rid: int = None
self.type: str = None
self.suffix: str = None
self.dn: str = None
def _count(self) -> None:
"""Function to calculate last IDs for the range"""
self.last_id = self.first_id + self.size - 1
if self.type == "ipa-local":
self.last_base_rid = (
self.base_rid + self.size
if self.base_rid is not None
else None
)
self.last_secondary_rid = (
self.secondary_base_rid + self.size
if self.secondary_base_rid is not None
else None
)
def __repr__(self):
return (
f"IDRange(name='{self.name}', "
f"type={self.type}, "
f"size={self.size}, "
f"first_id={self.first_id}, "
f"base_rid={self.base_rid}, "
f"secondary_base_rid={self.secondary_base_rid})"
)
def __eq__(self, other):
return self.first_id == other.first_id
class IDentity:
"""A generic class for ID entity - users or groups"""
def __init__(self, **kwargs):
self.dn: str = kwargs.get('dn')
self.name: str = kwargs.get('name')
self.user: str = kwargs.get('user')
self.number: int = kwargs.get('number')
def __str__(self):
if self.user:
return (f"user '{self.name}', uid={self.number}")
return (f"group '{self.name}', gid={self.number}")
def debug(self):
if self.user:
return (
f"user(username='{self.name}', "
f"uid={self.number}, "
f"{self.dn})"
)
return (
f"group(groupname='{self.name}', "
f"gid={self.number}, "
f"{self.dn})"
)
def __eq__(self, other):
return self.number == other.number and self.user == other.user
class IPAIDRangeFix(AdminTool):
"""Tool to analyze and fix IPA ID ranges"""
command_name = "ipa-idrange-fix"
log_file_name = "/var/log/ipa-idrange-fix.log"
usage = "%prog"
description = "Analyze and fix IPA ID ranges"
@classmethod
def add_options(cls, parser, debug_option=False):
super(IPAIDRangeFix, cls).add_options(parser)
parser.add_option(
"--ridoffset",
dest="ridoffset",
type=int,
default=100000,
metavar=100000,
help="Offset for a next base RID from previous RID range. \
Needed for future range size expansions. Has to be > 0",
)
parser.add_option(
"--rangegap",
dest="rangegap",
type=int,
default=200000,
metavar=200000,
help="Threshold for a gap between out-of-range IDs to be \
considered a different range. Has to be > 0",
)
parser.add_option(
"--minrange",
dest="minrange",
type=int,
default=10,
metavar=10,
help="Minimal considered range size for out-of-range IDs.\
All ranges with fewer IDs than this number will be discarded and their \
IDs will be listed to be moved manually. Has to be > 1",
)
parser.add_option(
"--allowunder1000",
dest="allowunder1000",
action="store_true",
default=False,
help="Allow idranges to start below 1000. Be careful to not \
overlap IPA users/groups with existing system-local ones!",
)
parser.add_option(
"--norounding",
dest="norounding",
action="store_true",
default=False,
help="Disable IDrange rounding attempt in order to get ranges \
exactly covering just IDs provided",
)
parser.add_option(
"--unattended",
dest="unattended",
action="store_true",
default=False,
help="Automatically fix all range issues found without asking \
for confirmation",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.realm: str = None
self.suffix: DN = None
self.proposals_rid: List[IDRange] = []
self.proposals_new: List[IDRange] = []
self.outliers: List[IDentity] = []
self.under1000: List[IDentity] = []
self.id_ranges: List[IDRange] = []
def validate_options(self, needs_root=True):
super().validate_options(needs_root)
def run(self):
api.bootstrap(in_server=True)
api.finalize()
self.realm = api.env.realm
self.suffix = ipautil.realm_to_suffix(self.realm)
try:
api.Backend.ldap2.connect()
# Reading range data
self.id_ranges = read_ranges(self.suffix)
# Evaluating existing ranges, if something is off, exit
if self.evaluate_ranges() != 0:
return 1
# reading out of range IDs
ids_out_of_range = read_outofrange_identities(
self.suffix, self.id_ranges
)
# Evaluating out of range IDs
self.evaluate_identities(ids_out_of_range)
# Print the proposals
self.print_intentions()
# If there are no proposals, we have nothing to do, exiting
if (len(self.proposals_rid) == 0
and len(self.proposals_new) == 0):
logger.info("\nNo changes proposed, nothing to do.")
return 0
logger.info("\nID ranges table after proposed changes:")
draw_ascii_table(self.id_ranges)
if self.options.unattended:
logger.info(
"Unattended mode, proceeding with applying changes!"
)
else:
response = ipautil.user_input('Enter "yes" to proceed')
if response.lower() != "yes":
logger.info("Not proceeding.")
return 0
logger.info("Proceeding.")
# Applying changes
for id_range in self.proposals_rid:
apply_ridbases(id_range)
for id_range in self.proposals_new:
create_range(id_range)
logger.info("All changes applied successfully!")
finally:
if api.Backend.ldap2.isconnected():
api.Backend.ldap2.disconnect()
return 0
def evaluate_ranges(self) -> int:
"""Function to evaluate existing ID ranges"""
if len(self.id_ranges) == 0:
logger.error("No ID ranges found!")
return 1
draw_ascii_table(self.id_ranges)
if not ranges_overlap_check(self.id_ranges):
logger.error(
"Ranges overlap detected, cannot proceed! Please adjust \
existing ranges manually."
)
return 1
# Checking RID bases for existing ranges
id_ranges_nobase = get_ranges_no_base(self.id_ranges)
if len(id_ranges_nobase) > 0:
logger.info(
"Found %s ranges without base RIDs", len(id_ranges_nobase)
)
for id_range in id_ranges_nobase:
logger.debug(
"Range '%s' has RID base %s and secondary RID base %s",
id_range.name,
id_range.base_rid,
id_range.secondary_base_rid,
)
propose_rid_ranges(
self.id_ranges,
self.options.ridoffset,
self.proposals_rid
)
else:
logger.info(
"All ID ranges have base RIDs set, RID adjustments are \
not needed."
)
return 0
def evaluate_identities(self, ids_out_of_range: List[IDentity]) -> None:
"""Function to evaluate out of range IDs"""
if len(ids_out_of_range) == 0:
logger.info("No out of range IDs found!")
else:
logger.info(
"Found overall %s IDs out of existing ID ranges.\n",
len(ids_out_of_range),
)
# ruling out IDs under 1000 if flag is not set
if not self.options.allowunder1000:
self.under1000, ids_out_of_range = separate_under1000(
ids_out_of_range
)
if len(self.under1000) > 0:
logger.info(
"Found IDs under 1000, which is not recommeneded \
(if you definitely need ranges proposed for those, use --allowunder1000):"
)
for identity in self.under1000:
logger.info("%s", identity)
# Get initial divide of IDs into groups
groups = group_identities_by_threshold(
ids_out_of_range, self.options.rangegap
)
# Get outliers from too small groups and clean groups for
# further processing
self.outliers, cleangroups = separate_ranges_and_outliers(
groups, self.options.minrange
)
# Print the outliers, they have to be moved manually
if len(self.outliers) > 0:
logger.info(
"\nIdentities that don't fit the criteria to get a new "
"range found! Current attributes:\n"
"Minimal range size: %s\n"
"Maximum gap between IDs: %s\n"
"Try adjusting --minrange, --rangegap or move the "
"following identities into already existing ranges:",
self.options.minrange,
self.options.rangegap
)
for identity in self.outliers:
logger.info("%s", identity)
if len(cleangroups) > 0:
# Get IDrange name base
basename = get_rangename_base(self.id_ranges)
# Create proposals for new ranges from groups
for group in cleangroups:
newrange = propose_range(
group,
self.id_ranges,
self.options.ridoffset,
basename,
self.options.norounding,
self.options.allowunder1000
)
if newrange is not None:
self.proposals_new.append(newrange)
self.id_ranges.append(newrange)
self.id_ranges.sort(key=lambda x: x.first_id)
else:
logger.info(
"\nNo IDs fit the criteria for a new ID range to propose!"
)
def print_intentions(self) -> None:
"""Function to print out the summary of the proposed changes"""
logger.info("\nSummary:")
if len(self.outliers) > 0:
logger.info("Outlier IDs that are too far away to get a range:")
for identity in self.outliers:
logger.info("%s", identity)
if len(self.under1000) > 0:
if self.options.allowunder1000:
logger.info("IDs under 1000 were treated like normal IDs.")
else:
logger.info("IDs under 1000:")
for identity in self.under1000:
logger.info("%s", identity)
else:
logger.info("No IDs under 1000 found.")
if len(self.proposals_rid) > 0:
logger.info("Proposed changes to existing ranges:")
for id_range in self.proposals_rid:
logger.info(
"Range '%s' - base RID: %s, secondary base RID: %s",
id_range.name,
id_range.base_rid,
id_range.secondary_base_rid,
)
else:
logger.info("No changes proposed for existing ranges.")
if len(self.proposals_new) > 0:
logger.info("Proposed new ranges:")
for id_range in self.proposals_new:
logger.info("%s", id_range)
else:
logger.info("No new ranges proposed.")
# Working with output
# region
def draw_ascii_table(id_ranges: List[IDRange], stdout: bool = False) -> None:
"""Function to draw a table with ID ranges in ASCII"""
table: str = "\n"
# Calculate the maximum width required for each column using column names
max_widths = {
column: max(
len(str(column)),
max(
(
len(str(getattr(id_range, column)))
if getattr(id_range, column) is not None
else 0
)
for id_range in id_ranges
),
)
for column in [
"name",
"type",
"size",
"first_id",
"last_id",
"base_rid",
"last_base_rid",
"secondary_base_rid",
"last_secondary_rid",
]
}
# Draw the table header
header = "| "
for column, width in max_widths.items():
header += f"{column.ljust(width)} | "
horizontal_line = "-" * (len(header) - 1)
table += horizontal_line + "\n"
table += header + "\n"
table += horizontal_line + "\n"
# Draw the table rows
for id_range in id_ranges:
row = "| "
for column, width in max_widths.items():
value = getattr(id_range, column)
if value is not None:
row += f"{str(value).rjust(width)} | "
else:
# Adding the separator
row += " " * (width + 1) + "| "
table += row + "\n"
table += horizontal_line + "\n"
if stdout:
print(table)
else:
logger.info(table)
# endregion
# Reading from LDAP
# region
def read_ranges(suffix) -> List[IDRange]:
"""Function to read ID ranges from LDAP"""
    id_ranges: List[IDRange] = []
try:
ranges = api.Backend.ldap2.get_entries(
DN(api.env.container_ranges, suffix),
ldap.SCOPE_ONELEVEL,
"(objectclass=ipaIDRange)",
)
except errors.NotFound:
logger.error("LDAPError: No ranges found!")
except errors.ExecutionError as e:
logger.error("Exception while reading users: %s", e)
else:
for entry in ranges:
sv = entry.single_value
id_range = IDRange()
id_range.name = sv.get("cn")
id_range.size = int(sv.get("ipaidrangesize"))
id_range.first_id = int(sv.get("ipabaseid"))
id_range.base_rid = (
int(sv.get("ipabaserid")) if sv.get("ipabaserid") else None
)
id_range.secondary_base_rid = (
int(sv.get("ipasecondarybaserid"))
if sv.get("ipasecondarybaserid")
else None
)
id_range.suffix = suffix
id_range.type = sv.get("iparangetype")
id_range.dn = entry.dn
id_range._count()
logger.debug("ID range found: %s", id_range)
id_ranges.append(id_range)
id_ranges.sort(key=lambda x: x.first_id)
return id_ranges
def read_outofrange_identities(suffix, id_ranges) -> List[IDentity]:
"""Function to read out of range users and groups from LDAP"""
users_outofrange = read_ldap_ids(
DN(api.env.container_user, suffix),
True,
id_ranges
)
logger.info("Users out of range found: %s", len(users_outofrange))
del_outofrange = read_ldap_ids(
DN(api.env.container_deleteuser, suffix),
True,
id_ranges
)
logger.info("Preserved users out of range found: %s", len(del_outofrange))
groups_outofrange = read_ldap_ids(
DN(api.env.container_group, suffix),
False,
id_ranges
)
logger.info("Groups out of range found: %s", len(groups_outofrange))
outofrange = users_outofrange + del_outofrange + groups_outofrange
outofrange.sort(key=lambda x: x.number)
return outofrange
def read_ldap_ids(container_dn, user: bool, id_ranges) -> List[IDentity]:
"""Function to read IDs from containter in LDAP"""
id_entities = []
if user:
id_name = "user"
ldap_filter = get_outofrange_filter(
id_ranges,
"posixaccount",
"uidNumber"
)
else:
id_name = "group"
ldap_filter = get_outofrange_filter(
id_ranges,
"posixgroup",
"gidNumber"
)
logger.debug("Searching %ss in %s with filter: %s", id_name, container_dn,
ldap_filter)
try:
identities = api.Backend.ldap2.get_entries(
container_dn,
ldap.SCOPE_ONELEVEL,
ldap_filter,
)
for entry in identities:
id_entities.append(read_identity(entry, user))
except errors.NotFound:
logger.debug("No out of range %ss found in %s!", id_name, container_dn)
except errors.ExecutionError as e:
logger.error("Exception while reading %s: %s", container_dn, e)
return id_entities
def read_identity(ldapentry, user: bool = True) -> IDentity:
"""Function to convert LDAP entry to IDentity object"""
sv = ldapentry.single_value
id_entity = IDentity()
id_entity.dn = ldapentry.dn
id_entity.name = sv.get("cn")
id_entity.number = (
int(sv.get("uidNumber")) if user else int(sv.get("gidNumber"))
)
id_entity.user = user
logger.debug("Out of range found: %s", id_entity.debug())
return id_entity
def get_outofrange_filter(
id_ranges_all: List[IDRange], object_class: str, posix_id: str
) -> str:
"""Function to create LDAP filter for out of range users and groups"""
# we need to look only for ipa-local ranges
id_ranges = get_ipa_local_ranges(id_ranges_all)
ldap_filter = f"(&(objectClass={object_class})(|"
# adding gaps in ranges to the filter
for i in range(len(id_ranges) + 1):
if i == 0:
start_condition = f"({posix_id}>=1)"
else:
start_condition = f"({posix_id}>={id_ranges[i - 1].last_id + 1})"
if i < len(id_ranges):
end_condition = f"({posix_id}<={id_ranges[i].first_id - 1})"
else:
end_condition = f"({posix_id}<=2147483647)"
ldap_filter += f"(&{start_condition}{end_condition})"
ldap_filter += "))"
return ldap_filter
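# A worked sketch of the filter shape, with assumed numbers: one ipa-local
# range covering IDs 10000..199999 and posix_id "uidNumber" yields
#   (&(objectClass=posixaccount)(|
#       (&(uidNumber>=1)(uidNumber<=9999))
#       (&(uidNumber>=200000)(uidNumber<=2147483647))))
# (wrapped here; the function returns it as one string) -- one AND-term per
# gap before, between and after the known ranges, OR-ed together.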
# endregion
# Writing to LDAP
# region
def apply_ridbases(id_range: IDRange) -> None:
"""Funtion to apply RID bases to the range in LDAP"""
try:
api.Backend.ldap2.modify_s(
id_range.dn,
[
(ldap.MOD_ADD, "ipaBaseRID", str(id_range.base_rid)),
(
ldap.MOD_ADD,
"ipaSecondaryBaseRID",
str(id_range.secondary_base_rid),
),
],
)
logger.info("RID bases updated for range '%s'", id_range.name)
except ldap.CONSTRAINT_VIOLATION as e:
logger.error(
"Failed to add RID bases to the range '%s': %s",
id_range.name,
e
)
raise RuntimeError("Constraint violation.\n") from e
except Exception as e:
logger.error(
"Exception while updating RID bases for range '%s': %s",
id_range.name,
e,
)
raise RuntimeError("Failed to update RID bases.\n") from e
def create_range(id_range: IDRange) -> None:
"""Function to create a new range in LDAP"""
try:
logger.info("Creating range '%s'...", id_range.name)
entry = api.Backend.ldap2.make_entry(
DN(id_range.dn),
objectclass=["ipaIDRange", "ipaDomainIDRange"],
ipaidrangesize=[str(id_range.size)],
ipabaseid=[str(id_range.first_id)],
ipabaserid=[str(id_range.base_rid)],
ipasecondarybaserid=[str(id_range.secondary_base_rid)],
iparangetype=[id_range.type],
)
api.Backend.ldap2.add_entry(entry)
logger.info("Range '%s' created successfully", id_range.name)
except Exception as e:
logger.error(
"Exception while creating range '%s': %s",
id_range.name,
e
)
raise RuntimeError("Failed to create range.\n") from e
# endregion
# Working with ranges
# region
def get_ipa_local_ranges(id_ranges: List[IDRange]) -> List[IDRange]:
"""Function to get only ipa-local ranges from the list of ranges"""
ipa_local_ranges = []
for id_range in id_ranges:
if id_range.type == "ipa-local":
ipa_local_ranges.append(id_range)
return ipa_local_ranges
def range_overlap_check(
range1_start: int, range1_end: int, range2_start: int, range2_end: int
) -> bool:
"""Function to check if two ranges overlap"""
# False when overlapping
return not (range1_start <= range2_end and range2_start <= range1_end)
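# Sketch of the inverted semantics, with assumed numbers:
#   range_overlap_check(1000, 1999, 2000, 2999) -> True   (disjoint)
#   range_overlap_check(1000, 2500, 2000, 2999) -> False  (overlap)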
def range_overlap_check_idrange(range1: IDRange, range2: IDRange) -> bool:
"""Function to check if two ranges overlap"""
# False when overlapping
return range_overlap_check(
range1.first_id, range1.last_id, range2.first_id, range2.last_id)
def newrange_overlap_check(
id_ranges: List[IDRange], newrange: IDRange
) -> bool:
"""Function to check if proposed range overlaps with existing ones"""
for id_range in id_ranges:
if not range_overlap_check_idrange(id_range, newrange):
return False
return True
def ranges_overlap_check(id_ranges: List[IDRange]) -> bool:
"""Function to check if any of the existing ranges overlap"""
if len(id_ranges) < 2:
return True
for i in range(len(id_ranges) - 1):
for j in range(i + 1, len(id_ranges)):
if not range_overlap_check_idrange(id_ranges[i], id_ranges[j]):
logger.error(
"Ranges '%s' and '%s' overlap!",
id_ranges[i].name,
id_ranges[j].name,
)
return False
return True
# endregion
# Working with RID bases
# region
def propose_rid_ranges(
id_ranges: List[IDRange], delta: int, proposals: List[IDRange]
) -> None:
"""
Function to propose RID bases for ranges that don't have them set.
- delta represents how far we start new base off existing range,
used in order to allow for future expansion of existing ranges up
to [delta] IDs.
"""
ipa_local_ranges = get_ipa_local_ranges(id_ranges)
for id_range in ipa_local_ranges:
proposed_base_rid = 0
proposed_secondary_base_rid = 0
# Calculate proposed base RID and secondary base RID
if id_range.base_rid is None:
result, proposed_base_rid = propose_rid_base(
id_range, ipa_local_ranges, delta, True
)
if result:
id_range.base_rid = proposed_base_rid
id_range.last_base_rid = proposed_base_rid + id_range.size
else:
# if this fails too, we print the warning and abandon the idea
logger.warning(
"Warning: Proposed base RIDs %s for '%s' both failed, \
please adjust manually",
proposed_base_rid,
id_range.name,
)
continue
if id_range.secondary_base_rid is None:
result, proposed_secondary_base_rid = propose_rid_base(
id_range, ipa_local_ranges, delta, False, proposed_base_rid
)
if result:
id_range.secondary_base_rid = proposed_secondary_base_rid
id_range.last_secondary_rid = (
proposed_secondary_base_rid + id_range.size
)
else:
# if this fails too, we print the warning and abandon the idea
logger.warning(
"Warning: Proposed secondary base RIDs %s for '%s' \
both failed, please adjust manually",
proposed_secondary_base_rid,
id_range.name,
)
continue
# Add range to the proposals if we changed something successfully
if proposed_base_rid > 0 or proposed_secondary_base_rid > 0:
logger.debug(
"Proposed RIDs for range '%s': pri %s, sec %s",
id_range.name,
proposed_base_rid,
proposed_secondary_base_rid,
)
proposals.append(id_range)
def propose_rid_base(
idrange: IDRange,
ipa_local_ranges: List[IDRange],
delta: int,
primary: bool = True,
previous_base_rid: int = -1
) -> Tuple[bool, str]:
"""
Function to propose a base RID for a range, primary or secondary.
    We take the biggest last RID of the given kind plus delta and check
    whether it is viable; if not, we retry with the other kind.
"""
proposed_base_rid = max_rid(ipa_local_ranges, primary) + delta
if proposed_base_rid == previous_base_rid:
proposed_base_rid += idrange.size + delta
if check_rid_base(ipa_local_ranges, proposed_base_rid, idrange.size):
return True, proposed_base_rid
# if we fail, we try the same with biggest of a different kind
proposed_base_rid_orig = proposed_base_rid
proposed_base_rid = max_rid(ipa_local_ranges, not primary) + delta
if proposed_base_rid == previous_base_rid:
proposed_base_rid += idrange.size + delta
if check_rid_base(ipa_local_ranges, proposed_base_rid, idrange.size):
return True, proposed_base_rid
# if it fails, we return both RID proposals for the range
return False, f"{proposed_base_rid_orig} and {proposed_base_rid}"
def max_rid(id_ranges: List[IDRange], primary: bool = True) -> int:
"""Function to get maximum RID of primary or secondary RIDs"""
maximum_rid = 0
for id_range in id_ranges:
# looking only for primary RIDs
if primary:
if id_range.last_base_rid is not None:
maximum_rid = max(maximum_rid, id_range.last_base_rid)
# looking only for secondary RIDs
else:
if id_range.last_secondary_rid is not None:
maximum_rid = max(maximum_rid, id_range.last_secondary_rid)
return maximum_rid
def check_rid_base(id_ranges: List[IDRange], base: int, size: int) -> bool:
"""Function to check if proposed RID base is viable"""
end = base + size + 1
# Checking sanity of RID range
if base + size > 2147483647:
return False
if base < 1000:
return False
# Checking RID range overlaps
for id_range in id_ranges:
# we are interested only in ipa-local ranges
if id_range.type != "ipa-local":
continue
# if there is no base rid set, there is no secondary base rid set,
# so nothing to overlap with
if id_range.base_rid is None:
continue
# checking for an overlap
if not range_overlap_check(
base, end, id_range.base_rid, id_range.last_base_rid
):
logger.debug(
"RID check failure: proposed Primary %s + %s, \
intersects with %s-%s from range '%s'",
base,
size,
id_range.base_rid,
id_range.last_base_rid,
id_range.name,
)
return False
# if there is no secondary base rid set, nothing to overlap with
if id_range.secondary_base_rid is None:
continue
        # if either the start or the end of the range falls inside an
        # existing range, or the existing range is inside the proposed one,
        # we have an overlap
if not range_overlap_check(
base, end, id_range.secondary_base_rid, id_range.last_secondary_rid
):
logger.debug(
"RID check failure: proposed Secondary %s + %s, \
intersects with %s-%s from range '%s'",
base,
size,
id_range.secondary_base_rid,
id_range.last_secondary_rid,
id_range.name,
)
return False
return True
def get_ranges_no_base(id_ranges: List[IDRange]) -> List[IDRange]:
"""Function to get ranges without either of base RIDs set"""
ipa_local_ranges = get_ipa_local_ranges(id_ranges)
ranges_no_base = []
for id_range in ipa_local_ranges:
if id_range.base_rid is None or id_range.secondary_base_rid is None:
ranges_no_base.append(id_range)
return ranges_no_base
# endregion
# Working with IDentities out of range
# region
def group_identities_by_threshold(
identities: List[IDentity], threshold: int
) -> List[List[IDentity]]:
"""Function to group out of range IDs by threshold"""
groups: List[List[IDentity]] = []
currentgroup: List[IDentity] = []
if len(identities) == 0:
return groups
for i in range(len(identities) - 1):
# add id to current group
currentgroup.append(identities[i])
# If the difference with the next one is greater than the threshold,
# start a new group
if identities[i + 1].number - identities[i].number > threshold:
groups.append(currentgroup)
currentgroup = []
# Add the last ID number to the last group
currentgroup.append(identities[-1])
groups.append(currentgroup)
return groups
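# Worked example, with assumed ID numbers: [10, 12, 500, 505] and a
# threshold of 100 split into [[10, 12], [500, 505]] -- a new group starts
# whenever the gap to the next ID exceeds the threshold.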
def separate_under1000(
identities: List[IDentity],
) -> Tuple[List[IDentity], List[IDentity]]:
"""Function to separate IDs under 1000, expects sorted list"""
for i, identity in enumerate(identities):
if identity.number >= 1000:
return identities[:i], identities[i:]
return identities, []
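# E.g. assumed sorted numbers [150, 900, 1000, 2500] split at the first
# ID >= 1000, giving ([150, 900], [1000, 2500]).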
def separate_ranges_and_outliers(
    groups: List[List[IDentity]], minrangesize: int
) -> Tuple[List[IDentity], List[List[IDentity]]]:
"""Function to separate IDs into outliers and IDs that can get ranges"""
outliers = []
cleangroups = []
for group in groups:
        # if the group is smaller than minrangesize, add its members to
        # the outliers
if group[-1].number - group[0].number + 1 < minrangesize:
for identity in group:
outliers.append(identity)
# if the group is OK, add it to cleaned groups
else:
cleangroups.append(group)
return outliers, cleangroups
def round_idrange(start: int, end: int, under1000: bool) -> Tuple[int, int]:
"""Function to round up range margins to look pretty"""
# calculating power of the size
sizepower = len(str(end - start + 1))
# multiplier for the nearest rounded number
multiplier = 10 ** (sizepower - 1)
# getting rounded range margins
rounded_start = (start // multiplier) * multiplier
if not under1000:
rounded_start = max(rounded_start, 1000)
else:
rounded_start = max(rounded_start, 1)
rounded_end = ((end + multiplier) // multiplier) * multiplier - 1
return rounded_start, rounded_end
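# Worked example with assumed margins: round_idrange(1234, 5678, False) has
# size 4445, so sizepower == 4 and multiplier == 1000; the start floors to
# max(1000, 1000) == 1000 and the end rounds up to
# ((5678 + 1000) // 1000) * 1000 - 1 == 5999, giving (1000, 5999).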
def get_rangename_base(id_ranges: List[IDRange]) -> str:
"""Function to get a base name for new range proposals"""
base_name = ""
# we want to use default range name as a base for new ranges
for id_range in id_ranges:
if id_range.base_rid == 1000:
base_name = id_range.name
# if we didn't find it, propose generic name
if base_name == "":
base_name = "Auto_added_range"
return base_name
def get_rangename(id_ranges: List[IDRange], basename: str) -> str:
"""
Function to get a new range name, we add the counter as 3-digit number
extension and make sure it's unique
"""
counter = 1
full_name = f"{basename}_{counter:03}"
while any(id_range.name == full_name for id_range in id_ranges):
counter += 1
full_name = f"{basename}_{counter:03}"
return full_name
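# E.g. with basename "Auto_added_range" and no existing clash the proposal
# is "Auto_added_range_001"; if that name is taken, "Auto_added_range_002".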
def propose_range(
group: List[IDentity],
id_ranges: List[IDRange],
delta: int,
basename: str,
norounding: bool,
allowunder1000: bool
) -> IDRange:
"""Function to propose a new range for group of IDs out of ranges"""
startid = group[0].number
endid = group[-1].number
logger.debug(
"Proposing a range for existing IDs out of ranges with start id %s \
and end id %s...",
startid,
endid,
)
# creating new range
newrange = IDRange()
newrange.type = "ipa-local"
newrange.name = get_rangename(id_ranges, basename)
newrange.suffix = id_ranges[0].suffix
newrange.dn = f"cn={newrange.name},cn=ranges,cn=etc,{newrange.suffix}"
if norounding:
newrange.first_id = startid
newrange.last_id = endid
newrange.size = newrange.last_id - newrange.first_id + 1
else:
# first trying to round up ranges to look pretty
newrange.first_id, newrange.last_id = round_idrange(
startid,
endid,
allowunder1000
)
newrange.size = newrange.last_id - newrange.first_id + 1
# if this creates an overlap, try without rounding
if not newrange_overlap_check(id_ranges, newrange):
newrange.first_id = startid
newrange.last_id = endid
newrange.size = newrange.last_id - newrange.first_id + 1
# if we still failed, abandon idea
if not newrange_overlap_check(id_ranges, newrange):
logger.error(
"ERROR! Failed to create idrange for existing IDs out of \
ranges with start id %s and end id %s, it overlaps with existing range!",
startid,
endid,
)
return None
# creating RID bases
ipa_local_ranges = get_ipa_local_ranges(id_ranges)
result, proposed_base_rid = propose_rid_base(
newrange, ipa_local_ranges, delta, True
)
if result:
newrange.base_rid = proposed_base_rid
newrange.last_base_rid = proposed_base_rid + newrange.size
else:
# if this fails we print the warning
logger.warning(
"Warning! Proposed base RIDs %s for new range start id %s and \
end id %s both failed, please adjust manually",
proposed_base_rid,
newrange.first_id,
newrange.last_id,
)
result, proposed_secondary_base_rid = propose_rid_base(
newrange, ipa_local_ranges, delta, False, proposed_base_rid
)
if result:
newrange.secondary_base_rid = proposed_secondary_base_rid
newrange.last_secondary_rid = (
proposed_secondary_base_rid + newrange.size
)
else:
# if this fails we print the warning
logger.warning(
"Warning! Proposed secondary base RIDs %s for new range start id \
%s and end id %s both failed, please adjust manually",
proposed_secondary_base_rid,
newrange.first_id,
newrange.last_id,
)
logger.debug("Proposed range: %s", newrange)
return newrange
# endregion
| 36,032
|
Python
|
.py
| 931
| 28.629431
| 79
| 0.578476
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,780
|
ipa_winsync_migrate.py
|
freeipa_freeipa/ipaserver/install/ipa_winsync_migrate.py
|
# Authors: Tomas Babej <tbabej@redhat.com>
#
# Copyright (C) 2015 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import logging
import gssapi
import sys
import six
from ipalib import api
from ipalib import errors
from ipaplatform.paths import paths
from ipapython import admintool
from ipapython.dn import DN
from ipapython.ipautil import realm_to_suffix, posixify
from ipaserver.install import replication, installutils
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
DEFAULT_TRUST_VIEW_NAME = u'Default Trust View'
class WinsyncMigrate(admintool.AdminTool):
"""
Tool to migrate winsync users.
"""
command_name = 'ipa-winsync-migrate'
usage = "ipa-winsync-migrate"
description = (
"This tool creates user ID overrides for all the users "
"that were previously synced from AD domain using the "
"winsync replication agreement. It requires that trust "
"with the AD forest has already been established and "
"the users in question are resolvable using SSSD. "
"For more information, see `man ipa-winsync-migrate`."
)
@classmethod
def add_options(cls, parser):
"""
Adds command line options to the tool.
"""
super(WinsyncMigrate, cls).add_options(parser)
parser.add_option(
"--realm",
dest="realm",
help="The AD realm the winsynced users belong to")
parser.add_option(
"--server",
dest="server",
help="The AD DC the winsync agreement is established with")
parser.add_option(
"-U", "--unattended",
dest="interactive",
action="store_false",
default=True,
help="Never prompt for user input")
def validate_options(self):
"""
Validates the options passed by the user:
- Checks that trust has been established with
the realm passed via --realm option
"""
super(WinsyncMigrate, self).validate_options(needs_root=True)
if self.options.realm is None:
raise admintool.ScriptError(
"AD realm the winsynced users belong to needs to be "
"specified.")
else:
try:
api.Command['trust_show'](unicode(self.options.realm))
except errors.NotFound:
raise admintool.ScriptError(
"Trust with the given realm %s could not be found. "
"Please establish the trust prior to migration."
% self.options.realm)
except Exception as e:
raise admintool.ScriptError(
"An error occured during detection of the established "
"trust with %s: %s" % (self.options.realm, str(e)))
if self.options.server is None:
raise admintool.ScriptError(
"The AD DC the winsync agreement is established with "
"needs to be specified.")
else:
# Validate the replication agreement between given host and localhost
try:
manager = replication.ReplicationManager(
api.env.realm,
api.env.host,
None) # Use GSSAPI instead of raw directory manager access
replica_type = manager.get_agreement_type(self.options.server)
except errors.ACIError as e:
raise admintool.ScriptError(
"Used Kerberos account does not have privileges to access "
"the replication agreement info: %s" % str(e))
except errors.NotFound as e:
raise admintool.ScriptError(
"The replication agreement between %s and %s could not "
"be detected" % (api.env.host, self.options.server))
# Check that the replication agreement is indeed WINSYNC
if replica_type != replication.WINSYNC:
raise admintool.ScriptError(
"Replication agreement between %s and %s is not winsync."
% (api.env.host, self.options.server))
# Save the reference to the replication manager in the object
self.manager = manager
def delete_winsync_agreement(self):
"""
Deletes the winsync agreement between the current master and the
given AD server.
"""
try:
self.manager.delete_agreement(self.options.server)
self.manager.delete_referral(self.options.server)
dn = DN(('cn', self.options.server),
('cn', 'replicas'),
('cn', 'ipa'),
('cn', 'etc'),
realm_to_suffix(api.env.realm))
entries = self.manager.conn.get_entries(dn,
self.ldap.SCOPE_SUBTREE)
if entries:
entries.sort(key=len, reverse=True)
for entry in entries:
self.ldap.delete_entry(entry)
except Exception as e:
raise admintool.ScriptError(
"Deletion of the winsync agreement failed: %s" % str(e))
def create_id_user_override(self, entry):
"""
Creates ID override corresponding to this user entry.
"""
user_identifier = u"%s@%s" % (entry['uid'][0], self.options.realm)
kwargs = {
'uid': entry['uid'][0],
'uidnumber': entry['uidnumber'][0],
'gidnumber': entry['gidnumber'][0],
'gecos': entry['gecos'][0],
'loginshell': entry['loginshell'][0]
}
try:
api.Command['idoverrideuser_add'](
DEFAULT_TRUST_VIEW_NAME,
user_identifier,
**kwargs
)
except Exception as e:
logger.warning("Migration failed: %s (%s)",
user_identifier, str(e))
else:
logger.debug("Migrated: %s", user_identifier)
def find_winsync_users(self):
"""
Finds all users that were mirrored from AD using winsync.
"""
user_filter = "(&(objectclass=ntuser)(ntUserDomainId=*))"
user_base = DN(api.env.container_user, api.env.basedn)
entries, _truncated = self.ldap.find_entries(
filter=user_filter,
base_dn=user_base,
paged_search=True)
for entry in entries:
logger.debug("Discovered entry: %s", entry)
return entries
def migrate_memberships(self, user_entry, winsync_group_prefix,
object_membership_command,
object_info_command,
user_dn_attribute,
object_group_membership_key,
object_container_dn):
"""
        Migrates user memberships to their external identities.
All migrated users for the given object are migrated to a common
external group which is then assigned to the given object as a
(user) member group.
"""
def winsync_group_name(object_entry):
"""
Returns the generated name of group containing migrated external
users.
The group name is of the form:
"<prefix>_<object name>_winsync_external"
Object name is converted to posix-friendly string by omitting
            and/or replacing characters. This may lead to collisions, e.g.
if both 'trust_admins' and 'trust admin' groups have winsync
users being migrated.
"""
return u"{0}_{1}_winsync_external".format(
winsync_group_prefix,
posixify(object_entry['cn'][0])
)
def create_winsync_group(object_entry, suffix=0):
"""
Creates the group containing migrated external users that were
previously available via winsync.
"""
name = winsync_group_name(object_entry)
# Only non-trivial suffix is appended at the end
if suffix != 0:
name += str(suffix)
try:
api.Command['group_add'](name, external=True)
except errors.DuplicateEntry:
# If there is a collision, let's try again with a higher suffix
create_winsync_group(object_entry, suffix=suffix+1)
else:
# In case of no collision, add the membership
api.Command[object_membership_command](object_entry['cn'][0], group=[name])
# Search for all objects containing the given user as a direct member
member_filter = self.ldap.make_filter_from_attr(user_dn_attribute,
user_entry.dn)
try:
objects, _truncated = self.ldap.find_entries(
member_filter,
base_dn=object_container_dn)
except errors.EmptyResult:
# If there's nothing to migrate, then let's get out of here
return
# The external user cannot be added directly as member of the IPA
# objects, hence we need to wrap all the external users into one
# new external group, which will be then added to the original IPA
# object as a member.
for obj in objects:
# Check for existence of winsync external group
name = winsync_group_name(obj)
info = api.Command[object_info_command](obj['cn'][0])['result']
# If it was not created yet, do it now
if name not in info.get(object_group_membership_key, []):
create_winsync_group(obj)
# Add the user to the external group. Membership is migrated
# at this point.
user_identifier = u"%s@%s" % (user_entry['uid'][0], self.options.realm)
api.Command['group_add_member'](name, ipaexternalmember=[user_identifier])
def migrate_group_memberships(self, user_entry):
return self.migrate_memberships(user_entry,
winsync_group_prefix="group",
user_dn_attribute="member",
object_membership_command="group_add_member",
object_info_command="group_show",
object_group_membership_key="member_group",
object_container_dn=DN(api.env.container_group, api.env.basedn),
)
def migrate_role_memberships(self, user_entry):
return self.migrate_memberships(user_entry,
winsync_group_prefix="role",
user_dn_attribute="member",
object_membership_command="role_add_member",
object_info_command="role_show",
object_group_membership_key="member_group",
object_container_dn=DN(api.env.container_rolegroup, api.env.basedn),
)
def migrate_hbac_memberships(self, user_entry):
return self.migrate_memberships(user_entry,
winsync_group_prefix="hbacrule",
user_dn_attribute="memberuser",
object_membership_command="hbacrule_add_user",
object_info_command="hbacrule_show",
object_group_membership_key="memberuser_group",
object_container_dn=DN(api.env.container_hbac, api.env.basedn),
)
def migrate_selinux_memberships(self, user_entry):
return self.migrate_memberships(user_entry,
winsync_group_prefix="selinux",
user_dn_attribute="memberuser",
object_membership_command="selinuxusermap_add_user",
object_info_command="selinuxusermap_show",
object_group_membership_key="memberuser_group",
object_container_dn=DN(api.env.container_selinux, api.env.basedn),
)
def warn_passsync(self):
logger.warning("Migration completed. Please note that if PassSync "
"was configured on the given Active Directory server, "
"it needs to be manually removed, otherwise it may try "
"to reset password for accounts that are no longer "
"existent.")
@classmethod
def main(cls, argv):
"""
Sets up API and LDAP connection for the tool, then runs the rest of
the plumbing.
"""
# Check if the IPA server is configured before attempting to migrate
try:
installutils.check_server_configuration()
except admintool.ScriptError as e:
sys.exit(e)
# Finalize API
api.bootstrap(in_server=True, context='server', confdir=paths.ETC_IPA)
api.finalize()
# Setup LDAP connection
try:
api.Backend.ldap2.connect()
cls.ldap = api.Backend.ldap2
except gssapi.exceptions.GSSError as e:
sys.exit("Must have Kerberos credentials to migrate Winsync users. Error: %s" % e)
except errors.ACIError as e:
sys.exit("Outdated Kerberos credentials. Use kdestroy and kinit to update your ticket.")
except errors.DatabaseError as e:
sys.exit("Cannot connect to the LDAP database. Please check if IPA is running.")
super(WinsyncMigrate, cls).main(argv)
def run(self):
super(WinsyncMigrate, self).run()
# Stop winsync agreement with the given host
self.delete_winsync_agreement()
# Create ID overrides replacing the user winsync entries
entries = self.find_winsync_users()
for entry in entries:
self.create_id_user_override(entry)
self.migrate_group_memberships(entry)
self.migrate_role_memberships(entry)
self.migrate_hbac_memberships(entry)
self.migrate_selinux_memberships(entry)
self.ldap.delete_entry(entry)
self.warn_passsync()
| 14,726
|
Python
|
.py
| 326
| 33.395706
| 100
| 0.599916
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,781
|
ipactl.py
|
freeipa_freeipa/ipaserver/install/ipactl.py
|
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2008-2019 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import os
import json
import ldapurl
from ipaserver.install import service, installutils
from ipaserver.install.dsinstance import config_dirname
from ipaserver.install.installutils import ScriptError
from ipaserver.masters import ENABLED_SERVICE, HIDDEN_SERVICE
from ipalib import api, errors
from ipalib.facts import is_ipa_configured
from ipapython.ipaldap import LDAPClient, realm_to_serverid
from ipapython.ipautil import wait_for_open_ports, wait_for_open_socket
from ipapython.ipautil import run
from ipapython import config
from ipaplatform.tasks import tasks
from ipapython.dn import DN
from ipaplatform import services
from ipaplatform.paths import paths
MSG_HINT_IGNORE_SERVICE_FAILURE = (
"Hint: You can use --ignore-service-failure option for forced start in "
"case that a non-critical service failed"
)
class IpactlError(ScriptError):
pass
def check_IPA_configuration():
if not is_ipa_configured():
# LSB status code 6: program is not configured
raise IpactlError(
"IPA is not configured "
"(see man pages of ipa-server-install for help)",
6,
)
def deduplicate(lst):
"""Remove duplicates and preserve order.
Returns copy of list with preserved order and removed duplicates.
"""
new_lst = []
s = set(lst)
for i in lst:
if i in s:
s.remove(i)
new_lst.append(i)
return new_lst
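# E.g. deduplicate(["krb5kdc", "dirsrv", "krb5kdc"]) returns
# ["krb5kdc", "dirsrv"]: only the first occurrence of each item is kept,
# with the original order preserved.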
def is_dirsrv_debugging_enabled():
"""
Check the 389-ds instance to see if debugging is enabled.
If so we suppress that in our output.
returns True or False
"""
debugging = False
serverid = realm_to_serverid(api.env.realm)
dselist = [config_dirname(serverid)]
for dse in dselist:
try:
fd = open(dse + "dse.ldif", "r")
except IOError:
continue
lines = fd.readlines()
fd.close()
for line in lines:
if line.lower().startswith("nsslapd-errorlog-level"):
_option, value = line.split(":")
if int(value) > 0:
debugging = True
return debugging
def get_capture_output(service, debug):
"""
We want to display any output of a start/stop command with the
exception of 389-ds when debugging is enabled because it outputs
tons and tons of information.
"""
if service == "dirsrv" and not debug and is_dirsrv_debugging_enabled():
print(" debugging enabled, suppressing output.")
return True
else:
return False
def parse_options():
usage = "%prog start|stop|restart|status\n"
parser = config.IPAOptionParser(
usage=usage, formatter=config.IPAFormatter()
)
parser.add_option(
"-d",
"--debug",
action="store_true",
dest="debug",
help="Display debugging information",
)
parser.add_option(
"-f",
"--force",
action="store_true",
dest="force",
help="Force IPA to start. Combine options "
"--skip-version-check and --ignore-service-failures",
)
parser.add_option(
"--ignore-service-failures",
action="store_true",
dest="ignore_service_failures",
help="If any service start fails, do not rollback the "
"services, continue with the operation",
)
parser.add_option(
"--skip-version-check",
action="store_true",
dest="skip_version_check",
default=False,
help="skip version check",
)
options, args = parser.parse_args()
safe_options = parser.get_safe_opts(options)
if options.force:
options.ignore_service_failures = True
options.skip_version_check = True
return safe_options, options, args
def emit_err(err):
sys.stderr.write(err + "\n")
def version_check():
try:
installutils.check_version()
except (
installutils.UpgradeMissingVersionError,
installutils.UpgradeDataOlderVersionError,
) as exc:
emit_err("IPA version error: %s" % exc)
except installutils.UpgradeVersionError as e:
emit_err("IPA version error: %s" % e)
else:
return
emit_err(
"Automatically running upgrade, for details see {}".format(
paths.IPAUPGRADE_LOG
)
)
emit_err("Be patient, this may take a few minutes.")
# Fork out to call ipa-server-upgrade so that logging is sane.
result = run(
[paths.IPA_SERVER_UPGRADE], raiseonerr=False, capture_error=True
)
if result.returncode != 0:
emit_err("Automatic upgrade failed: %s" % result.error_output)
emit_err(
"See the upgrade log for more details and/or run {} again".format(
paths.IPA_SERVER_UPGRADE
)
)
raise IpactlError("Aborting ipactl")
def get_config(dirsrv):
base = DN(
("cn", api.env.host),
("cn", "masters"),
("cn", "ipa"),
("cn", "etc"),
api.env.basedn,
)
srcfilter = LDAPClient.combine_filters(
[
LDAPClient.make_filter({"objectClass": "ipaConfigObject"}),
LDAPClient.make_filter(
{"ipaConfigString": [ENABLED_SERVICE, HIDDEN_SERVICE]},
rules=LDAPClient.MATCH_ANY,
),
],
rules=LDAPClient.MATCH_ALL,
)
attrs = ["cn", "ipaConfigString"]
if not dirsrv.is_running():
raise IpactlError(
"Failed to get list of services to probe status:\n"
"Directory Server is stopped",
3,
)
try:
# The start/restart functions already wait for the server to be
# started. What we are doing with this wait is really checking to see
# if the server is listening at all.
lurl = ldapurl.LDAPUrl(api.env.ldap_uri)
if lurl.urlscheme == "ldapi":
wait_for_open_socket(
lurl.hostport, timeout=api.env.startup_timeout
)
else:
(host, port) = lurl.hostport.split(":")
wait_for_open_ports(
host, [int(port)], timeout=api.env.startup_timeout
)
con = LDAPClient(api.env.ldap_uri)
con.external_bind()
res = con.get_entries(
base,
filter=srcfilter,
attrs_list=attrs,
scope=con.SCOPE_SUBTREE,
time_limit=10,
)
except errors.NetworkError:
# LSB status code 3: program is not running
raise IpactlError(
"Failed to get list of services to probe status:\n"
"Directory Server is stopped",
3,
)
except errors.NotFound:
masters_list = []
dn = DN(
("cn", "masters"), ("cn", "ipa"), ("cn", "etc"), api.env.basedn
)
attrs = ["cn"]
try:
entries = con.get_entries(
dn, con.SCOPE_ONELEVEL, attrs_list=attrs
)
except Exception as e:
masters_list.append(
"No master found because of error: %s" % str(e)
)
else:
for master_entry in entries:
masters_list.append(master_entry.single_value["cn"])
masters = "\n".join(masters_list)
raise IpactlError(
"Failed to get list of services to probe status!\n"
"Configured hostname '%s' does not match any master server in "
"LDAP:\n%s"
% (api.env.host, masters)
)
except Exception as e:
raise IpactlError(
"Unknown error when retrieving list of services from LDAP: %s"
% str(e)
)
svc_list = []
for entry in res:
name = entry.single_value["cn"]
for p in entry["ipaConfigString"]:
if p.startswith("startOrder "):
try:
order = int(p.split()[1])
except ValueError:
raise IpactlError(
"Expected order as integer in: %s:%s" % (name, p)
)
svc_list.append([order, name])
ordered_list = []
for order, svc in sorted(svc_list):
if svc in service.SERVICE_LIST:
ordered_list.append(service.SERVICE_LIST[svc].systemd_name)
return deduplicate(ordered_list)
def get_config_from_file(rval):
"""
Get the list of configured services from the cached file.
:param rval: The return value for any exception that is raised.
"""
svc_list = []
try:
f = open(tasks.get_svc_list_file(), "r")
svc_list = json.load(f)
except Exception as e:
raise IpactlError(
"Unknown error when retrieving list of services from file: %s"
% str(e),
4
)
# the framework can start/stop a number of related services we are not
    # authoritative for, so filter the list through SERVICE_LIST and order it
# accordingly too.
def_svc_list = []
for svc in service.SERVICE_LIST:
s = service.SERVICE_LIST[svc]
def_svc_list.append([s[1], s[0]])
ordered_list = []
for _order, svc in sorted(def_svc_list):
if svc in svc_list:
ordered_list.append(svc)
return deduplicate(ordered_list)
def stop_services(svc_list):
for svc in svc_list:
svc_off = services.service(svc, api=api)
try:
svc_off.stop(capture_output=False)
except Exception:
pass
def stop_dirsrv(dirsrv):
try:
dirsrv.stop(capture_output=False)
except Exception:
pass
def ipa_start(options):
if not options.skip_version_check:
version_check()
else:
print("Skipping version check")
if os.path.isfile(tasks.get_svc_list_file()):
emit_err("Existing service file detected!")
emit_err("Assuming stale, cleaning and proceeding")
# remove file with list of started services
        # This is OK, as systemd simply skips services that are already
        # running, so the stop() method of the base class will fill in
        # the service file again
os.unlink(paths.SVC_LIST_FILE)
dirsrv = services.knownservices.dirsrv
try:
print("Starting Directory Service")
dirsrv.start(
capture_output=get_capture_output("dirsrv", options.debug)
)
except Exception as e:
raise IpactlError("Failed to start Directory Service: " + str(e))
try:
svc_list = get_config(dirsrv)
except Exception as e:
emit_err("Failed to read data from service file: " + str(e))
emit_err("Shutting down")
if not options.ignore_service_failures:
stop_dirsrv(dirsrv)
if isinstance(e, IpactlError):
# do not display any other error message
raise IpactlError(rval=e.rval)
else:
raise IpactlError()
if len(svc_list) == 0:
# no service to start
return
for svc in svc_list:
svchandle = services.service(svc, api=api)
try:
print("Starting %s Service" % svc)
svchandle.start(
capture_output=get_capture_output(svc, options.debug)
)
except Exception:
emit_err("Failed to start %s Service" % svc)
# if ignore_service_failures is specified, skip rollback and
# continue with the next service
if options.ignore_service_failures:
emit_err(
"Forced start, ignoring %s Service, "
"continuing normal operation"
% svc
)
continue
emit_err("Shutting down")
stop_services(svc_list)
stop_dirsrv(dirsrv)
emit_err(MSG_HINT_IGNORE_SERVICE_FAILURE)
raise IpactlError("Aborting ipactl")
def ipa_stop(options):
dirsrv = services.knownservices.dirsrv
try:
svc_list = get_config_from_file(rval=4)
except Exception as e:
        # Issue reading the file? Let's try to get data from LDAP as a
# fallback
try:
dirsrv.start(capture_output=False)
svc_list = get_config(dirsrv)
except Exception as e:
emit_err("Failed to read data from Directory Service: " + str(e))
emit_err("Shutting down")
try:
# just try to stop it, do not read a result
dirsrv.stop()
finally:
raise IpactlError()
for svc in reversed(svc_list):
svchandle = services.service(svc, api=api)
try:
print("Stopping %s Service" % svc)
svchandle.stop(capture_output=False)
except Exception:
emit_err("Failed to stop %s Service" % svc)
try:
print("Stopping Directory Service")
dirsrv.stop(capture_output=False)
except Exception:
raise IpactlError("Failed to stop Directory Service")
# remove file with list of started services
try:
os.unlink(paths.SVC_LIST_FILE)
except OSError:
pass
def ipa_restart(options):
if not options.skip_version_check:
try:
version_check()
except Exception as e:
try:
ipa_stop(options)
except Exception:
# We don't care about errors that happened while stopping.
# We need to raise the upgrade error.
pass
raise e
else:
print("Skipping version check")
dirsrv = services.knownservices.dirsrv
new_svc_list = []
dirsrv_restart = True
if not dirsrv.is_running():
try:
print("Starting Directory Service")
dirsrv.start(
capture_output=get_capture_output("dirsrv", options.debug)
)
dirsrv_restart = False
except Exception as e:
raise IpactlError("Failed to start Directory Service: " + str(e))
try:
new_svc_list = get_config(dirsrv)
except Exception as e:
emit_err("Failed to read data from Directory Service: " + str(e))
emit_err("Shutting down")
try:
dirsrv.stop(capture_output=False)
except Exception:
pass
if isinstance(e, IpactlError):
# do not display any other error message
raise IpactlError(rval=e.rval)
else:
raise IpactlError()
old_svc_list = []
try:
old_svc_list = get_config_from_file(rval=4)
except Exception as e:
emit_err("Failed to get service list from file: " + str(e))
# fallback to what's in LDAP
old_svc_list = new_svc_list
# match service to start/stop
svc_list = []
for s in new_svc_list:
if s in old_svc_list:
svc_list.append(s)
# remove commons
for s in svc_list:
if s in old_svc_list:
old_svc_list.remove(s)
for s in svc_list:
if s in new_svc_list:
new_svc_list.remove(s)
if len(old_svc_list) != 0:
# we need to definitely stop some services
for svc in reversed(old_svc_list):
svchandle = services.service(svc, api=api)
try:
print("Stopping %s Service" % svc)
svchandle.stop(capture_output=False)
except Exception:
emit_err("Failed to stop %s Service" % svc)
try:
if dirsrv_restart:
print("Restarting Directory Service")
dirsrv.restart(
capture_output=get_capture_output("dirsrv", options.debug)
)
except Exception as e:
emit_err("Failed to restart Directory Service: " + str(e))
emit_err("Shutting down")
if not options.ignore_service_failures:
stop_services(reversed(svc_list))
stop_dirsrv(dirsrv)
raise IpactlError("Aborting ipactl")
if len(svc_list) != 0:
# there are services to restart
for svc in svc_list:
svchandle = services.service(svc, api=api)
try:
print("Restarting %s Service" % svc)
svchandle.restart(
capture_output=get_capture_output(svc, options.debug)
)
except Exception:
emit_err("Failed to restart %s Service" % svc)
# if ignore_service_failures is specified,
# skip rollback and continue with the next service
if options.ignore_service_failures:
emit_err(
"Forced restart, ignoring %s Service, "
"continuing normal operation"
% svc
)
continue
emit_err("Shutting down")
stop_services(svc_list)
stop_dirsrv(dirsrv)
emit_err(MSG_HINT_IGNORE_SERVICE_FAILURE)
raise IpactlError("Aborting ipactl")
if len(new_svc_list) != 0:
# we still need to start some services
for svc in new_svc_list:
svchandle = services.service(svc, api=api)
try:
print("Starting %s Service" % svc)
svchandle.start(
capture_output=get_capture_output(svc, options.debug)
)
except Exception:
emit_err("Failed to start %s Service" % svc)
# if ignore_service_failures is specified, skip rollback and
# continue with the next service
if options.ignore_service_failures:
emit_err(
"Forced start, ignoring %s Service, "
"continuing normal operation"
% svc
)
continue
emit_err("Shutting down")
stop_services(svc_list)
stop_dirsrv(dirsrv)
emit_err(MSG_HINT_IGNORE_SERVICE_FAILURE)
raise IpactlError("Aborting ipactl")
def ipa_status(options):
"""Report status of IPA-owned processes
The LSB defines the possible status values as:
0 program is running or service is OK
1 program is dead and /var/run pid file exists
2 program is dead and /var/lock lock file exists
3 program is not running
4 program or service status is unknown
5-99 reserved for future LSB use
100-149 reserved for distribution use
150-199 reserved for application use
200-254 reserved
We only really care about 0, 3 and 4.
"""
socket_activated = ('ipa-ods-exporter', 'ipa-otpd',)
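    # Socket-activated services are started on demand by systemd, so a
    # STOPPED state is not counted as a failure for them in the loop
    # below.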
try:
dirsrv = services.knownservices.dirsrv
if dirsrv.is_running():
svc_list = get_config(dirsrv)
else:
svc_list = get_config_from_file(rval=1)
except IpactlError as e:
if os.path.exists(tasks.get_svc_list_file()):
raise e
else:
svc_list = []
except Exception as e:
raise IpactlError(
"Failed to get list of services to probe status: " + str(e),
4
)
stopped = 0
dirsrv = services.knownservices.dirsrv
try:
if dirsrv.is_running():
print("Directory Service: RUNNING")
else:
print("Directory Service: STOPPED")
stopped = 1
    except Exception:
        raise IpactlError("Failed to get Directory Service status", 4)
if len(svc_list) == 0:
raise IpactlError(
(
"Directory Service must be running in order to "
"obtain status of other services"
),
3,
)
for svc in svc_list:
svchandle = services.service(svc, api=api)
try:
if svchandle.is_running():
print("%s Service: RUNNING" % svc)
else:
print("%s Service: STOPPED" % svc)
if svc not in socket_activated:
stopped += 1
except Exception:
emit_err("Failed to get %s Service status" % svc)
if stopped > 0:
raise IpactlError("%d service(s) are not running" % stopped, 3)
def main():
    if os.getegid() != 0:
# LSB status code 4: user had insufficient privilege
raise IpactlError("You must be root to run ipactl.", 4)
_safe_options, options, args = parse_options()
if len(args) != 1:
# LSB status code 2: invalid or excess argument(s)
raise IpactlError("You must specify one action", 2)
elif args[0] not in ("start", "stop", "restart", "status"):
raise IpactlError("Unrecognized action [" + args[0] + "]", 2)
# check if IPA is configured at all
try:
check_IPA_configuration()
    except IpactlError as e:
        if args[0].lower() == "status":
            # Different LSB return code for the status command:
            # 4 - program or service status is unknown
            # This differentiates uninstalled IPA from status
            # code 3 - program is not running
            e.rval = 4
        raise e
api.bootstrap(
in_server=True,
context="ipactl",
confdir=paths.ETC_IPA,
debug=options.debug,
)
api.finalize()
if "." not in api.env.host:
raise IpactlError(
"Invalid hostname '%s' in IPA configuration!\n"
"The hostname must be fully-qualified" % api.env.host
)
if args[0].lower() == "start":
ipa_start(options)
elif args[0].lower() == "stop":
ipa_stop(options)
elif args[0].lower() == "restart":
ipa_restart(options)
elif args[0].lower() == "status":
ipa_status(options)
| 22,802
|
Python
|
.py
| 638
| 26.275862
| 78
| 0.589366
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,782
|
krbinstance.py
|
freeipa_freeipa/ipaserver/install/krbinstance.py
|
# Authors: Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import socket
import dbus
import dns.name
from pkg_resources import parse_version
from ipalib import x509
from ipalib.install import certstore
from ipaserver.install import service
from ipaserver.install import installutils
from ipapython import ipaldap
from ipapython import ipautil
from ipapython import kernel_keyring
from ipapython.version import KRB5_BUILD_VERSION
from ipalib import api, errors
from ipalib.constants import ANON_USER
from ipalib.install import certmonger
from ipapython.dn import DN
from ipapython.dogtag import KDC_PROFILE
from ipaserver.install import replication
from ipaserver.install import certs
from ipaserver.masters import (
find_providing_servers,
PAC_TKT_SIGN_SUPPORTED,
PKINIT_ENABLED,
)
from ipaplatform.constants import constants
from ipaplatform.tasks import tasks
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
MASTER_KEY_TYPE = 'aes256-sha2'
SUPPORTED_ENCTYPES = ('aes256-sha2:special', 'aes128-sha2:special',
'aes256-sha2:normal', 'aes128-sha2:normal',
'aes256-cts:special', 'aes128-cts:special',
'aes256-cts:normal', 'aes128-cts:normal',
'camellia256-cts:special', 'camellia128-cts:special',
'camellia256-cts:normal', 'camellia128-cts:normal')
def get_pkinit_request_ca():
"""
Return the certmonger CA name which is serving the PKINIT certificate
request. If the certificate is not tracked by Certmonger, return None
"""
pkinit_request_id = certmonger.get_request_id(
{'cert-file': paths.KDC_CERT})
if pkinit_request_id is None:
return None
return certmonger.get_request_value(pkinit_request_id, 'ca-name')
def is_pkinit_enabled():
"""
    Check whether PKINIT is enabled on the master by checking for the
    presence of the KDC certificate and its tracking CA
"""
if os.path.exists(paths.KDC_CERT):
pkinit_request_ca = get_pkinit_request_ca()
if pkinit_request_ca and pkinit_request_ca != "SelfSign":
return True
return False
class KpasswdInstance(service.SimpleServiceInstance):
def __init__(self):
service.SimpleServiceInstance.__init__(self, "kadmin")
class KrbInstance(service.Service):
def __init__(self, fstore=None):
super(KrbInstance, self).__init__(
"krb5kdc",
service_desc="Kerberos KDC",
fstore=fstore
)
self.fqdn = None
self.realm = None
self.domain = None
self.host = None
self.admin_password = None
self.master_password = None
self.suffix = None
self.subject_base = None
self.kdc_password = None
self.sub_dict = None
self.pkcs12_info = None
self.master_fqdn = None
self.config_pkinit = None
suffix = ipautil.dn_attribute_property('_suffix')
subject_base = ipautil.dn_attribute_property('_subject_base')
def init_info(self, realm_name, host_name, setup_pkinit=False,
subject_base=None):
self.fqdn = host_name
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(realm_name)
self.subject_base = subject_base
self.config_pkinit = setup_pkinit
def get_realm_suffix(self):
return DN(('cn', self.realm), ('cn', 'kerberos'), self.suffix)
def move_service_to_host(self, principal):
"""
Used to move a host/ service principal created by kadmin.local from
cn=kerberos to reside under the host entry.
"""
service_dn = DN(('krbprincipalname', principal), self.get_realm_suffix())
service_entry = api.Backend.ldap2.get_entry(service_dn)
api.Backend.ldap2.delete_entry(service_entry)
# Create a host entry for this master
host_dn = DN(
('fqdn', self.fqdn), ('cn', 'computers'), ('cn', 'accounts'),
self.suffix)
host_entry = api.Backend.ldap2.make_entry(
host_dn,
objectclass=[
'top', 'ipaobject', 'nshost', 'ipahost', 'ipaservice',
'pkiuser', 'krbprincipalaux', 'krbprincipal',
'krbticketpolicyaux', 'ipasshhost'],
krbextradata=service_entry['krbextradata'],
krblastpwdchange=service_entry['krblastpwdchange'],
krbprincipalname=service_entry['krbprincipalname'],
krbcanonicalname=service_entry['krbcanonicalname'],
krbprincipalkey=service_entry['krbprincipalkey'],
serverhostname=[self.fqdn.split('.',1)[0]],
cn=[self.fqdn],
fqdn=[self.fqdn],
ipauniqueid=['autogenerate'],
managedby=[host_dn],
)
if 'krbpasswordexpiration' in service_entry:
host_entry['krbpasswordexpiration'] = service_entry[
'krbpasswordexpiration']
if 'krbticketflags' in service_entry:
host_entry['krbticketflags'] = service_entry['krbticketflags']
api.Backend.ldap2.add_entry(host_entry)
# Add the host to the ipaserver host group
self._ldap_update(['20-ipaservers_hostgroup.update'])
def pac_tkt_sign_support_enable(self):
"""
Advertise PAC ticket signature support in master's KDC entry in LDAP
"""
service.set_service_entry_config(
'KDC', self.fqdn, [PAC_TKT_SIGN_SUPPORTED], self.suffix)
def __common_setup(self, realm_name, host_name, domain_name, admin_password):
self.fqdn = host_name
self.realm = realm_name.upper()
self.host = host_name.split(".")[0]
self.ip = socket.getaddrinfo(host_name, None, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][4][0]
self.domain = domain_name
self.suffix = ipautil.realm_to_suffix(self.realm)
self.kdc_password = ipautil.ipa_generate_password()
self.admin_password = admin_password
self.dm_password = admin_password
self.__setup_sub_dict()
self.backup_state("running", self.is_running())
try:
self.stop()
except Exception:
            # The service may not have been running
pass
def __common_post_setup(self):
self.step("creating anonymous principal", self.add_anonymous_principal)
self.step("starting the KDC", self.__start_instance)
self.step("configuring KDC to start on boot", self.__enable)
def create_instance(self, realm_name, host_name, domain_name, admin_password, master_password, setup_pkinit=False, pkcs12_info=None, subject_base=None):
self.master_password = master_password
self.pkcs12_info = pkcs12_info
self.subject_base = subject_base
self.config_pkinit = setup_pkinit
self.__common_setup(realm_name, host_name, domain_name, admin_password)
self.step("adding kerberos container to the directory", self.__add_krb_container)
self.step("configuring KDC", self.__configure_instance)
self.step("initialize kerberos container", self.__init_ipa_kdb)
self.step("adding default ACIs", self.__add_default_acis)
self.step("creating a keytab for the directory", self.__create_ds_keytab)
self.step("creating a keytab for the machine", self.__create_host_keytab)
self.step("adding the password extension to the directory", self.__add_pwd_extop_module)
self.__common_post_setup()
if KRB5_BUILD_VERSION >= parse_version('1.20'):
self.step("enable PAC ticket signature support",
self.pac_tkt_sign_support_enable)
self.start_creation()
self.kpasswd = KpasswdInstance()
self.kpasswd.create_instance('KPASSWD', self.fqdn, self.suffix,
realm=self.realm)
def create_replica(self, realm_name,
master_fqdn, host_name,
domain_name, admin_password,
setup_pkinit=False, pkcs12_info=None,
subject_base=None):
self.pkcs12_info = pkcs12_info
self.subject_base = subject_base
self.master_fqdn = master_fqdn
self.config_pkinit = setup_pkinit
self.__common_setup(realm_name, host_name, domain_name, admin_password)
self.step("configuring KDC", self.__configure_instance)
self.step("adding the password extension to the directory", self.__add_pwd_extop_module)
self.__common_post_setup()
if KRB5_BUILD_VERSION >= parse_version('1.20'):
self.step("enable PAC ticket signature support",
self.pac_tkt_sign_support_enable)
self.start_creation()
self.kpasswd = KpasswdInstance()
self.kpasswd.create_instance('KPASSWD', self.fqdn, self.suffix)
def __enable(self):
self.backup_state("enabled", self.is_enabled())
        # We do not let the system start IPA components on its own.
        # Instead we rely on the IPA init script to start only the
        # enabled components as found in our LDAP configuration tree.
self.ldap_configure('KDC', self.fqdn, None, self.suffix)
def __start_instance(self):
try:
self.start()
except Exception:
logger.critical("krb5kdc service failed to start")
def __setup_sub_dict(self):
if os.path.exists(paths.COMMON_KRB5_CONF_DIR):
includes = 'includedir {}'.format(paths.COMMON_KRB5_CONF_DIR)
else:
includes = ''
fips_enabled = tasks.is_fips_enabled()
self.sub_dict = dict(FQDN=self.fqdn,
IP=self.ip,
PASSWORD=self.kdc_password,
SUFFIX=self.suffix,
DOMAIN=self.domain,
HOST=self.host,
SERVER_ID=ipaldap.realm_to_serverid(self.realm),
REALM=self.realm,
KRB5KDC_KADM5_ACL=paths.KRB5KDC_KADM5_ACL,
DICT_WORDS=paths.DICT_WORDS,
KRB5KDC_KADM5_KEYTAB=paths.KRB5KDC_KADM5_KEYTAB,
KDC_CERT=paths.KDC_CERT,
KDC_KEY=paths.KDC_KEY,
CACERT_PEM=paths.CACERT_PEM,
KDC_CA_BUNDLE_PEM=paths.KDC_CA_BUNDLE_PEM,
CA_BUNDLE_PEM=paths.CA_BUNDLE_PEM,
INCLUDES=includes,
FIPS='#' if fips_enabled else '')
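        # Camellia encryption types are not approved in FIPS mode, so
        # drop them from the supported enctypes there.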
if fips_enabled:
supported_enctypes = list(
filter(lambda e: not e.startswith('camellia'),
SUPPORTED_ENCTYPES))
else:
supported_enctypes = SUPPORTED_ENCTYPES
self.sub_dict['SUPPORTED_ENCTYPES'] = ' '.join(supported_enctypes)
self.sub_dict['MASTER_KEY_TYPE'] = MASTER_KEY_TYPE
# IPA server/KDC is not a subdomain of default domain
# Proper domain-realm mapping needs to be specified
domain = dns.name.from_text(self.domain)
fqdn = dns.name.from_text(self.fqdn)
if not fqdn.is_subdomain(domain):
logger.debug("IPA FQDN '%s' is not located in default domain '%s'",
fqdn, domain)
server_domain = fqdn.parent().to_unicode(omit_final_dot=True)
logger.debug("Domain '%s' needs additional mapping in krb5.conf",
server_domain)
dr_map = " .%(domain)s = %(realm)s\n %(domain)s = %(realm)s\n" \
% dict(domain=server_domain, realm=self.realm)
else:
dr_map = ""
self.sub_dict['OTHER_DOMAIN_REALM_MAPS'] = dr_map
# Configure KEYRING CCACHE if supported
if kernel_keyring.is_persistent_keyring_supported():
logger.debug("Enabling persistent keyring CCACHE")
self.sub_dict['OTHER_LIBDEFAULTS'] = \
" default_ccache_name = KEYRING:persistent:%{uid}\n"
else:
logger.debug("Persistent keyring CCACHE is not enabled")
self.sub_dict['OTHER_LIBDEFAULTS'] = ''
# Create kadm5.acl if it doesn't exist
if not os.path.exists(paths.KRB5KDC_KADM5_ACL):
open(paths.KRB5KDC_KADM5_ACL, 'a').close()
os.chmod(paths.KRB5KDC_KADM5_ACL, 0o600)
def __add_krb_container(self):
self._ldap_mod("kerberos.ldif", self.sub_dict)
def __add_default_acis(self):
self._ldap_mod("default-aci.ldif", self.sub_dict)
def __template_file(self, path, chmod=0o644, client_template=False):
if client_template:
sharedir = paths.USR_SHARE_IPA_CLIENT_DIR
else:
sharedir = paths.USR_SHARE_IPA_DIR
template = os.path.join(
sharedir, os.path.basename(path) + ".template")
conf = ipautil.template_file(template, self.sub_dict)
self.fstore.backup_file(path)
with open(path, 'w') as f:
if chmod is not None:
os.fchmod(f.fileno(), chmod)
f.write(conf)
def __init_ipa_kdb(self):
# kdb5_util may take a very long time when entropy is low
installutils.check_entropy()
        # Populate the directory with the realm structure
args = ["kdb5_util", "create", "-s",
"-r", self.realm,
"-x", "ipa-setup-override-restrictions"]
dialogue = (
# Enter KDC database master key:
self.master_password + '\n',
# Re-enter KDC database master key to verify:
self.master_password + '\n',
)
try:
ipautil.run(args, nolog=(self.master_password,), stdin=''.join(dialogue))
except ipautil.CalledProcessError as error:
logger.debug("kdb5_util failed with %s", error)
raise RuntimeError("Failed to initialize kerberos container")
def __configure_instance(self):
self.__template_file(paths.KRB5KDC_KDC_CONF, chmod=None)
self.__template_file(paths.KRB5_CONF)
self.__template_file(paths.KRB5_FREEIPA_SERVER)
self.__template_file(paths.KRB5_FREEIPA, client_template=True)
self.__template_file(paths.HTML_KRB5_INI)
self.__template_file(paths.KRB_CON)
self.__template_file(paths.HTML_KRBREALM_CON)
MIN_KRB5KDC_WITH_WORKERS = "1.9"
cpus = os.sysconf('SC_NPROCESSORS_ONLN')
workers = False
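        # Probe the installed MIT krb5 version via `klist -V`; krb5kdc
        # supports worker processes (-w) from release 1.9 onward.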
result = ipautil.run([paths.KLIST, '-V'],
raiseonerr=False, capture_output=True)
if result.returncode == 0:
verstr = result.output.split()[-1]
ver = tasks.parse_ipa_version(verstr)
            minver = tasks.parse_ipa_version(MIN_KRB5KDC_WITH_WORKERS)
            if ver >= minver:
workers = True
# Write down config file
# We write realm and also number of workers (for multi-CPU systems)
        replacevars = {'KRB5REALM': self.realm}
appendvars = {}
if workers and cpus > 1:
appendvars = {'KRB5KDC_ARGS': "'-w %s'" % str(cpus)}
ipautil.backup_config_and_replace_variables(self.fstore, paths.SYSCONFIG_KRB5KDC_DIR,
replacevars=replacevars,
appendvars=appendvars)
tasks.restore_context(paths.SYSCONFIG_KRB5KDC_DIR)
    # add the password extop module
def __add_pwd_extop_module(self):
self._ldap_mod("pwd-extop-conf.ldif", self.sub_dict)
def __create_ds_keytab(self):
ldap_principal = "ldap/" + self.fqdn + "@" + self.realm
installutils.kadmin_addprinc(ldap_principal)
self.move_service(ldap_principal)
self.fstore.backup_file(paths.DS_KEYTAB)
installutils.create_keytab(paths.DS_KEYTAB, ldap_principal)
constants.DS_USER.chown(paths.DS_KEYTAB)
def __create_host_keytab(self):
host_principal = "host/" + self.fqdn + "@" + self.realm
installutils.kadmin_addprinc(host_principal)
self.fstore.backup_file(paths.KRB5_KEYTAB)
installutils.create_keytab(paths.KRB5_KEYTAB, host_principal)
# Make sure access is strictly reserved to root only for now
os.chown(paths.KRB5_KEYTAB, 0, 0)
os.chmod(paths.KRB5_KEYTAB, 0o600)
self.move_service_to_host(host_principal)
def _wait_for_replica_kdc_entry(self):
master_dn = self.api.Object.server.get_dn(self.fqdn)
kdc_dn = DN(('cn', 'KDC'), master_dn)
ldap_uri = ipaldap.get_ldap_uri(self.master_fqdn)
with ipaldap.LDAPClient(
ldap_uri, cacert=paths.IPA_CA_CRT, start_tls=True
) as remote_ldap:
remote_ldap.gssapi_bind()
replication.wait_for_entry(
remote_ldap,
kdc_dn,
timeout=api.env.replication_wait_timeout
)
def _call_certmonger(self, certmonger_ca='IPA'):
subject = str(DN(('cn', self.fqdn), self.subject_base))
krbtgt = "krbtgt/" + self.realm + "@" + self.realm
certpath = (paths.KDC_CERT, paths.KDC_KEY)
prev_helper = None
try:
# on the first CA-ful master without '--no-pkinit', we issue the
# certificate by contacting Dogtag directly
ca_instances = find_providing_servers(
'CA', conn=self.api.Backend.ldap2, api=self.api)
use_dogtag_submit = all(
[self.master_fqdn is None,
self.pkcs12_info is None,
self.config_pkinit,
len(ca_instances) == 0])
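            # Direct Dogtag submission applies only on the first CA-ful
            # master: not a replica, no externally provided PKCS#12
            # certs, PKINIT requested, and no CA instance registered in
            # LDAP yet.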
if use_dogtag_submit:
ca_args = [
paths.CERTMONGER_DOGTAG_SUBMIT,
'--ee-url', 'https://%s:8443/ca/ee/ca' % self.fqdn,
'--certfile', paths.RA_AGENT_PEM,
'--keyfile', paths.RA_AGENT_KEY,
'--cafile', paths.IPA_CA_CRT,
'--agent-submit'
]
helper = " ".join(ca_args)
prev_helper = certmonger.modify_ca_helper(
certmonger_ca, helper
)
certmonger.request_and_wait_for_cert(
certpath=certpath,
subject=subject,
principal=krbtgt,
ca=certmonger_ca,
dns=[self.fqdn],
storage='FILE',
profile=KDC_PROFILE,
post_command='renew_kdc_cert',
perms=(0o644, 0o600),
resubmit_timeout=api.env.certmonger_wait_timeout
)
except dbus.DBusException as e:
# if the certificate is already tracked, ignore the error
name = e.get_dbus_name()
if name != 'org.fedorahosted.certmonger.duplicate':
logger.error("Failed to initiate the request: %s", e)
return
finally:
if prev_helper is not None:
certmonger.modify_ca_helper(certmonger_ca, prev_helper)
def pkinit_enable(self):
"""
        Advertise the enabled PKINIT feature in the master's KDC entry in LDAP
"""
service.set_service_entry_config(
'KDC', self.fqdn, [PKINIT_ENABLED], self.suffix)
def pkinit_disable(self):
"""
        Remove the PKINIT feature advertisement from the master's KDC entry
        in LDAP
"""
ldap = api.Backend.ldap2
dn = DN(('cn', 'KDC'), ('cn', self.fqdn), api.env.container_masters,
self.suffix)
entry = ldap.get_entry(dn, ['ipaConfigString'])
config = entry.setdefault('ipaConfigString', [])
config = [value for value in config
if value.lower() != PKINIT_ENABLED.lower()]
entry['ipaConfigString'][:] = config
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
pass
def _install_pkinit_ca_bundle(self):
ca_certs = certstore.get_ca_certs(self.api.Backend.ldap2,
self.api.env.basedn,
self.api.env.realm,
False)
ca_certs = [c for c, _n, t, _u in ca_certs if t is not False]
x509.write_certificate_list(ca_certs, paths.CACERT_PEM, mode=0o644)
def issue_selfsigned_pkinit_certs(self):
self._call_certmonger(certmonger_ca="SelfSign")
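        # Truncate the PKINIT anchor file on purpose: with no trusted
        # anchors, the self-signed KDC certificate can only serve as a
        # FAST channel generator, not for full PKINIT.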
with open(paths.CACERT_PEM, 'w'):
pass
def issue_ipa_ca_signed_pkinit_certs(self):
try:
self._call_certmonger()
self._install_pkinit_ca_bundle()
self.pkinit_enable()
except RuntimeError as e:
logger.warning("PKINIT certificate request failed: %s", e)
logger.warning("Failed to configure PKINIT")
self.print_msg("Full PKINIT configuration did not succeed")
self.print_msg(
"The setup will only install bits "
"essential to the server functionality")
self.print_msg(
"You can enable PKINIT after the "
"setup completed using 'ipa-pkinit-manage'")
self.stop_tracking_certs()
self.issue_selfsigned_pkinit_certs()
def install_external_pkinit_certs(self):
certs.install_pem_from_p12(self.pkcs12_info[0],
self.pkcs12_info[1],
paths.KDC_CERT)
# The KDC cert needs to be readable by everyone
os.chmod(paths.KDC_CERT, 0o644)
certs.install_key_from_p12(self.pkcs12_info[0],
self.pkcs12_info[1],
paths.KDC_KEY)
self._install_pkinit_ca_bundle()
self.pkinit_enable()
def setup_pkinit(self):
if self.pkcs12_info:
self.install_external_pkinit_certs()
elif self.config_pkinit:
self.issue_ipa_ca_signed_pkinit_certs()
def enable_ssl(self):
"""
        Generate the PKINIT certificate for the KDC. If `--no-pkinit` was
        specified, only configure a local self-signed KDC certificate for use
        as a FAST channel generator for the WebUI. Do not advertise the
        installation steps in this case.
"""
if self.master_fqdn is not None:
self._wait_for_replica_kdc_entry()
if self.config_pkinit:
self.steps = []
self.step("installing X509 Certificate for PKINIT",
self.setup_pkinit)
self.start_creation()
else:
self.issue_selfsigned_pkinit_certs()
try:
self.restart()
except Exception:
logger.critical("krb5kdc service failed to restart")
raise
def get_anonymous_principal_name(self):
return "%s@%s" % (ANON_USER, self.realm)
def add_anonymous_principal(self):
# Create the special anonymous principal
princ_realm = self.get_anonymous_principal_name()
dn = DN(('krbprincipalname', princ_realm), self.get_realm_suffix())
try:
self.api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
installutils.kadmin_addprinc(princ_realm)
self._ldap_mod("anon-princ-aci.ldif", self.sub_dict)
try:
self.api.Backend.ldap2.set_entry_active(dn, True)
except errors.AlreadyActive:
pass
def stop_tracking_certs(self):
certmonger.stop_tracking(certfile=paths.KDC_CERT)
def delete_pkinit_cert(self):
ipautil.remove_file(paths.KDC_CERT)
ipautil.remove_file(paths.KDC_KEY)
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring %s" % self.service_name)
running = self.restore_state("running")
enabled = self.restore_state("enabled")
try:
self.stop()
except Exception:
pass
for f in [paths.KRB5KDC_KDC_CONF, paths.KRB5_CONF]:
try:
self.fstore.restore_file(f)
except ValueError as error:
logger.debug("%s", error)
# disabled by default, by ldap_configure()
if enabled:
self.enable()
# stop tracking and remove certificates
self.stop_tracking_certs()
ipautil.remove_file(paths.CACERT_PEM)
self.delete_pkinit_cert()
if running:
self.restart()
self.kpasswd = KpasswdInstance()
self.kpasswd.uninstall()
ipautil.remove_file(paths.KRB5_KEYTAB)
ipautil.remove_file(paths.KRB5_FREEIPA)
ipautil.remove_file(paths.KRB5_FREEIPA_SERVER)
| 25,787
|
Python
|
.py
| 565
| 34.269027
| 156
| 0.60168
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,783
|
ipa_kra_install.py
|
freeipa_freeipa/ipaserver/install/ipa_kra_install.py
|
# Authors: Ade Lee <alee@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import logging
import sys
import tempfile
from optparse import SUPPRESS_HELP # pylint: disable=deprecated-module
from textwrap import dedent
from ipalib import api
from ipalib.constants import DOMAIN_LEVEL_1
from ipaplatform.paths import paths
from ipapython import admintool
from ipaserver.install import service
from ipaserver.install import cainstance
from ipaserver.install import custodiainstance
from ipaserver.install import krainstance
from ipaserver.install import dsinstance
from ipaserver.install import installutils
from ipaserver.install import dogtaginstance
from ipaserver.install import kra
from ipaserver.install.installutils import ReplicaConfig
from ipaserver.masters import find_providing_server
logger = logging.getLogger(__name__)
class KRAInstall(admintool.AdminTool):
command_name = 'ipa-kra-install'
usage = "%prog [options]"
description = "Install a master or replica KRA."
@classmethod
def add_options(cls, parser, debug_option=True):
super(KRAInstall, cls).add_options(parser, debug_option=True)
parser.add_option(
"--no-host-dns", dest="no_host_dns", action="store_true",
default=False,
help="Do not use DNS for hostname lookup during installation")
parser.add_option(
"-p", "--password",
dest="password", sensitive=True,
help="Directory Manager (existing master) password")
parser.add_option(
"-U", "--unattended",
dest="unattended", action="store_true", default=False,
help="unattended installation never prompts the user")
parser.add_option(
"--uninstall",
dest="uninstall", action="store_true", default=False,
help=SUPPRESS_HELP)
parser.add_option(
"--pki-config-override", dest="pki_config_override",
default=None,
help="Path to ini file with config overrides.")
parser.add_option(
"--token-password", dest="token_password",
default=None,
sensitive=True,
help=(
"The password to the PKCS#11 token."))
parser.add_option(
"--token-password-file", dest="token_password_file",
default=None,
help=(
"The full path containing the PKCS#11 token "
"password."))
def validate_options(self, needs_root=True):
super(KRAInstall, self).validate_options(needs_root=True)
installutils.check_server_configuration()
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
@classmethod
def get_command_class(cls, options, args):
if options.uninstall:
sys.exit(
'ERROR: Standalone KRA uninstallation was removed in '
'IPA 4.5 as it had never worked properly and only caused '
'issues.')
else:
return KRAInstaller
class KRAInstaller(KRAInstall):
log_file_name = paths.IPASERVER_KRA_INSTALL_LOG
INSTALLER_START_MESSAGE = '''
===================================================================
    This program will set up Dogtag KRA for the IPA Server.
'''
FAIL_MESSAGE = '''
Your system may be partly configured.
If you run into issues, you may have to re-install IPA on this server.
'''
def validate_options(self, needs_root=True):
super(KRAInstaller, self).validate_options(needs_root=True)
if self.options.unattended and self.options.password is None:
self.option_parser.error(
"Directory Manager password must be specified using -p"
" in unattended mode"
)
if len(self.args) > 0:
self.option_parser.error("Too many arguments provided")
def ask_for_options(self):
super(KRAInstaller, self).ask_for_options()
if not self.options.unattended and self.options.password is None:
self.options.password = installutils.read_password(
"Directory Manager", confirm=False,
validate=False, retry=False)
if self.options.password is None:
raise admintool.ScriptError(
"Directory Manager password required")
cai = cainstance.CAInstance()
if (
not self.options.unattended
and cai.hsm_enabled
and self.options.token_password is None
and self.options.token_password_file is None
):
self.options.token_password = installutils.read_password(
f"HSM token '{cai.token_name}'", confirm=False,
validate=False, retry=False)
def run(self):
super(KRAInstaller, self).run()
# Verify DM password. This has to be called after ask_for_options(),
# so it can't be placed in validate_options().
try:
installutils.validate_dm_password_ldap(self.options.password)
except ValueError:
raise admintool.ScriptError(
"Directory Manager password is invalid")
if not cainstance.is_ca_installed_locally():
raise RuntimeError("Dogtag CA is not installed. "
"Please install a CA first with the "
"`ipa-ca-install` command.")
# check if KRA is not already installed
_kra = krainstance.KRAInstance(api)
if _kra.is_installed():
raise admintool.ScriptError("KRA already installed")
# this check can be done only when CA is installed
self.installing_replica = dogtaginstance.is_installing_replica("KRA")
if self.installing_replica:
domain_level = dsinstance.get_domain_level(api)
if domain_level < DOMAIN_LEVEL_1:
raise RuntimeError(
"Unsupported domain level %d." % domain_level)
if self.args:
raise RuntimeError("Too many parameters provided.")
self.options.dm_password = self.options.password
self.options.setup_ca = False
self.options.setup_kra = True
api.Backend.ldap2.connect()
if self.installing_replica:
config = ReplicaConfig()
config.kra_host_name = None
config.realm_name = api.env.realm
config.host_name = api.env.host
config.domain_name = api.env.domain
config.dirman_password = self.options.password
config.ca_ds_port = 389
config.top_dir = tempfile.mkdtemp("ipa")
config.dir = config.top_dir
config.setup_kra = True
if config.subject_base is None:
attrs = api.Backend.ldap2.get_ipa_config()
config.subject_base = attrs.get('ipacertificatesubjectbase')[0]
if config.kra_host_name is None:
config.kra_host_name = find_providing_server(
'KRA', api.Backend.ldap2, [api.env.ca_host]
)
if config.kra_host_name is None:
# all CA/KRA servers are down or unreachable.
raise admintool.ScriptError(
"Failed to find an active KRA server!"
)
custodia = custodiainstance.get_custodia_instance(
config, custodiainstance.CustodiaModes.KRA_PEER)
else:
config = None
custodia = None
try:
kra.install_check(api, config, self.options)
except RuntimeError as e:
raise admintool.ScriptError(str(e))
print(dedent(self.INSTALLER_START_MESSAGE))
try:
kra.install(api, config, self.options, custodia=custodia)
except BaseException:
logger.error('%s', dedent(self.FAIL_MESSAGE))
raise
# pki-spawn restarts 389-DS, reconnect
api.Backend.ldap2.close()
api.Backend.ldap2.connect()
# Enable configured services and update DNS SRV records
service.sync_services_state(api.env.host)
api.Command.dns_update_system_records()
api.Backend.ldap2.disconnect()
| 9,069
|
Python
|
.py
| 205
| 34.24878
| 79
| 0.631991
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,784
|
cainstance.py
|
freeipa_freeipa/ipaserver/install/cainstance.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
# Ade Lee <alee@redhat.com>
# Andrew Wnuk <awnuk@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import base64
import binascii
import enum
import logging
import dbus
import os
import re
import shutil
import sys
import syslog
import time
import tempfile
from configparser import RawConfigParser
from pkg_resources import parse_version
from ipalib import api
from ipalib import x509
from ipalib import errors
import ipalib.constants
from ipalib.install import certmonger
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipapython import directivesetter
from ipapython import dogtag
from ipapython import ipautil
from ipapython.certdb import get_ca_nickname
from ipapython.dn import DN, RDN
from ipapython.ipa_log_manager import standard_logging_setup
from ipaserver.secrets.kem import IPAKEMKeys
from ipaserver.install import certs
from ipaserver.install import dsinstance
from ipaserver.install import installutils
from ipaserver.install import replication
from ipaserver.install import sysupgrade
from ipaserver.install.dogtaginstance import DogtagInstance, INTERNAL_TOKEN
from ipaserver.masters import ENABLED_SERVICE
logger = logging.getLogger(__name__)
ADMIN_GROUPS = [
'Enterprise CA Administrators',
'Enterprise KRA Administrators',
'Security Domain Administrators'
]
ACME_AGENT_GROUP = 'Enterprise ACME Administrators'
PROFILES_DN = DN(('ou', 'certificateProfiles'), ('ou', 'ca'), ('o', 'ipaca'))
ACME_CONFIG_FILES = (
('pki-acme-configsources.conf.template',
paths.PKI_ACME_CONFIGSOURCES_CONF),
('pki-acme-database.conf.template', paths.PKI_ACME_DATABASE_CONF),
('pki-acme-engine.conf.template', paths.PKI_ACME_ENGINE_CONF),
('pki-acme-issuer.conf.template', paths.PKI_ACME_ISSUER_CONF),
('pki-acme-realm.conf.template', paths.PKI_ACME_REALM_CONF),
)
def check_ports():
"""Check that dogtag ports (8080, 8443) are available.
Returns True when ports are free, False if they are taken.
"""
return all([ipautil.check_port_bindable(8443),
ipautil.check_port_bindable(8080)])
def get_preop_pin(instance_root, instance_name):
# Only used for Dogtag 9
preop_pin = None
filename = instance_root + "/" + instance_name + "/conf/CS.cfg"
# read the config file and get the preop pin
    try:
        with open(filename) as f:
            data = f.read()
    except IOError as e:
        logger.error("Cannot open configuration file. %s", str(e))
        raise e
    data = data.split('\n')
pattern = re.compile("preop.pin=(.*)")
for line in data:
match = re.search(pattern, line)
if match:
preop_pin = match.group(1)
break
if preop_pin is None:
raise RuntimeError(
"Unable to find preop.pin in %s. Is your CA already configured?" %
filename)
return preop_pin
def import_pkcs12(input_file, input_passwd, cert_database,
cert_passwd):
ipautil.run([paths.PK12UTIL, "-d", cert_database,
"-i", input_file,
"-k", cert_passwd,
"-w", input_passwd])
def get_value(s):
"""
    Parse out a name/value pair from a JavaScript variable.
"""
try:
expr = s.split('=', 1)
value = expr[1]
value = value.replace('\"', '')
value = value.replace(';', '')
value = value.replace('\\n', '\n')
value = value.replace('\\r', '\r')
return value
except IndexError:
return None
def find_substring(data, value):
"""
Scan through a list looking for a string that starts with value.
"""
for d in data:
if d.startswith(value):
return get_value(d)
return None
def get_defList(data):
"""
Return a dictionary of defList name/value pairs.
A certificate signing request is specified as a series of these.
"""
varname = None
value = None
skip = False
defdict = {}
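    # Walk the JavaScript lines as a small state machine: a new defList
    # object resets the state, defId/defVal capture a name/value pair,
    # and a readonly defConstraint discards the pair being collected.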
for d in data:
if d.startswith("defList = new Object"):
varname = None
value = None
skip = False
if d.startswith("defList.defId"):
varname = get_value(d)
if d.startswith("defList.defVal"):
value = get_value(d)
if skip:
varname = None
value = None
skip = False
if d.startswith("defList.defConstraint"):
ctype = get_value(d)
if ctype == "readonly":
skip = True
if varname and value:
defdict[varname] = value
varname = None
value = None
return defdict
def get_outputList(data):
"""
Return a dictionary of outputList name/value pairs.
The output from issuing a certificate is a series of these.
"""
varname = None
value = None
outputdict = {}
for d in data:
if d.startswith("outputList = new"):
varname = None
value = None
if d.startswith("outputList.outputId"):
varname = get_value(d)
if d.startswith("outputList.outputVal"):
value = get_value(d)
if varname and value:
outputdict[varname] = value
varname = None
value = None
return outputdict
def get_crl_files(path=None):
"""
Traverse dogtag's CRL files in default CRL publish directory or in chosen
target directory.
@param path Custom target directory
"""
if path is None:
path = paths.PKI_CA_PUBLISH_DIR
if not os.path.exists(path):
return
files = os.listdir(path)
for f in files:
if f == "MasterCRL.bin":
yield os.path.join(path, f)
elif f.endswith(".der"):
yield os.path.join(path, f)
def is_step_one_done():
"""Read CS.cfg and determine if step one of an external CA install is done
"""
path = paths.CA_CS_CFG_PATH
if not os.path.exists(path):
return False
    test = directivesetter.get_directive(path, 'preop.ca.type', '=')
    return test == "otherca"
def is_ca_installed_locally():
"""Check if CA is installed locally by checking for existence of CS.cfg
:return:True/False
"""
return os.path.exists(paths.CA_CS_CFG_PATH)
class InconsistentCRLGenConfigException(Exception):
pass
class CAInstance(DogtagInstance):
"""
When using a dogtag CA the DS database contains just the
server cert for DS. The mod_nss database will contain the RA agent
cert that will be used to do authenticated requests against dogtag.
This is done because we use python-nss and will inherit the opened
NSS database in mod_python. In nsslib.py we do an nssinit but this will
return success if the database is already initialized. It doesn't care
if the database is different or not.
external is a state machine:
0 = not an externally signed CA
1 = generating CSR to be signed
2 = have signed cert, continue installation
"""
server_cert_name = 'Server-Cert cert-pki-ca'
# Mapping of nicknames for tracking requests, and the profile to
# use for that certificate. 'configure_renewal()' reads this
# dict. The profile MUST be specified.
tracking_reqs = {
'auditSigningCert cert-pki-ca': 'caSignedLogCert',
'ocspSigningCert cert-pki-ca': 'caOCSPCert',
'subsystemCert cert-pki-ca': 'caSubsystemCert',
'caSigningCert cert-pki-ca': 'caCACert',
server_cert_name: 'caServerCert',
}
token_names = {
# Server-Cert always on internal token
server_cert_name: INTERNAL_TOKEN,
}
# The following must be aligned with the RewriteRule defined in
# install/share/ipa-pki-proxy.conf.template
crl_rewrite_pattern = r"^\s*(RewriteRule\s+\^/ipa/crl/MasterCRL.bin\s.*)$"
crl_rewrite_comment = r"^#\s*RewriteRule\s+\^/ipa/crl/MasterCRL.bin\s.*$"
crl_rewriterule = "\nRewriteRule ^/ipa/crl/MasterCRL.bin " \
"http://{}/ca/ee/ca/getCRL?" \
"op=getCRL&crlIssuingPoint=MasterCRL " \
"[L,R=301,NC]"
def __init__(self, realm=None, host_name=None, custodia=None):
super(CAInstance, self).__init__(
realm=realm,
subsystem="CA",
service_desc="certificate server",
host_name=host_name,
service_prefix=ipalib.constants.PKI_GSSAPI_SERVICE_NAME,
config=paths.CA_CS_CFG_PATH,
)
# for external CAs
self.external = 0
self.csr_file = None
self.cert_file = None
self.cert_chain_file = None
self.basedn = DN(('o', 'ipaca'))
if realm is not None:
self.canickname = get_ca_nickname(realm)
else:
self.canickname = None
self.ra_cert = None
self.requestId = None
self.no_db_setup = False
self.keytab = os.path.join(
paths.PKI_TOMCAT, self.service_prefix + '.keytab')
# Custodia instance for RA key retrieval
self._custodia = custodia
def configure_instance(self, host_name, dm_password, admin_password,
pkcs12_info=None, master_host=None, csr_file=None,
cert_file=None, cert_chain_file=None,
master_replication_port=389,
subject_base=None, ca_subject=None,
ca_signing_algorithm=None,
ca_type=None, external_ca_profile=None,
ra_p12=None, ra_only=False,
promote=False, use_ldaps=False,
pki_config_override=None,
random_serial_numbers=False,
token_name=None, token_library_path=None,
token_password=None):
"""Create a CA instance.
To create a clone, pass in pkcs12_info.
Creating a CA with an external signer is a 2-step process. In
step 1 we generate a CSR. In step 2 we are given the cert and
chain and actually proceed to create the CA. For step 1 set
csr_file. For step 2 set cert_file and cert_chain_file.
"""
self.fqdn = host_name
self.dm_password = dm_password
self.admin_user = "admin"
self.admin_groups = ADMIN_GROUPS
# NOTE: "admin_password" refers to the password for PKI
# "admin" account. This is not necessarily the same as
# the IPA admin password. Indeed, ca.configure_instance
# gets called with admin_password=dm_password.
#
self.admin_password = admin_password
self.pkcs12_info = pkcs12_info
if self.pkcs12_info is not None:
self.clone = True
self.master_host = master_host
self.master_replication_port = master_replication_port
self.ra_p12 = ra_p12
self.subject_base = \
subject_base or installutils.default_subject_base(self.realm)
self.ca_subject = \
ca_subject or installutils.default_ca_subject_dn(self.subject_base)
self.ca_signing_algorithm = ca_signing_algorithm
if ca_type is not None:
self.ca_type = ca_type
else:
self.ca_type = x509.ExternalCAType.GENERIC.value
self.external_ca_profile = external_ca_profile
self.random_serial_numbers = random_serial_numbers
self.no_db_setup = promote
self.use_ldaps = use_ldaps
self.pki_config_override = pki_config_override
self.tokenname = token_name
self.token_library_path = token_library_path
self.token_password = token_password
# Determine if we are installing as an externally-signed CA and
# what stage we're in.
if csr_file is not None:
self.csr_file = csr_file
self.external = 1
elif cert_file is not None:
self.cert_file = cert_file
self.cert_chain_file = cert_chain_file
self.external = 2
if self.clone:
has_ra_cert = os.path.exists(paths.RA_AGENT_PEM)
else:
has_ra_cert = False
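        # A clone reuses the master's RA agent certificate; if it is
        # already present on disk there is no need to request or import
        # it again.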
if not ra_only:
if promote:
# Setup Database
self.step("creating certificate server db", self.__create_ds_db)
self.step("ignore time skew for initial replication",
self.replica_ignore_initial_time_skew)
self.step("setting up initial replication", self.__setup_replication)
self.step("revert time skew after initial replication",
self.replica_revert_time_skew)
self.step("creating ACIs for admin", self.add_ipaca_aci)
self.step("creating installation admin user", self.setup_admin)
self.step("configuring certificate server instance",
self.__spawn_instance)
# Config file and ACL modifications require either restart or
# offline update of Dogtag.
self.step("stopping certificate server instance to update CS.cfg",
self.stop_instance)
self.step("backing up CS.cfg", self.safe_backup_config)
self.step("Add ipa-pki-wait-running", self.add_ipa_wait)
self.step("secure AJP connector", self.secure_ajp_connector)
self.step("reindex attributes", self.reindex_task)
self.step("exporting Dogtag certificate store pin",
self.create_certstore_passwdfile)
self.step("disabling nonces", self.__disable_nonce)
self.step("set up CRL publishing", self.__enable_crl_publish)
self.step("enable PKIX certificate path discovery and validation",
self.enable_pkix)
self.step("authorizing RA to modify profiles",
configure_profiles_acl)
self.step("authorizing RA to manage lightweight CAs",
configure_lightweight_ca_acls)
self.step("Ensure lightweight CAs container exists",
ensure_lightweight_cas_container)
self.step("Enable lightweight CA monitor",
enable_lightweight_ca_monitor)
self.step(
"Ensuring backward compatibility",
self.__dogtag10_migration)
if promote:
self.step("destroying installation admin user",
self.teardown_admin)
# Materialize config changes and new ACLs
self.step("starting certificate server instance",
self.start_instance)
if promote:
self.step("Finalize replication settings",
self.finalize_replica_config)
# Step 1 of external is getting a CSR so we don't need to do these
# steps until we get a cert back from the external CA.
if self.external != 1:
if not has_ra_cert:
self.step("configure certmonger for renewals",
self.configure_certmonger_renewal_helpers)
if not self.clone:
self.step("requesting RA certificate from CA", self.__request_ra_certificate)
elif promote:
self.step("Importing RA key", self.__import_ra_key)
else:
self.step("importing RA certificate from PKCS #12 file",
self.__import_ra_cert)
if not ra_only:
if not self.clone:
self.step("publishing the CA certificate",
self.__export_ca_chain)
self.step("adding RA agent as a trusted user", self.__create_ca_agent)
self.step("configure certificate renewals", self.configure_renewal)
self.step("Configure HTTP to proxy connections",
self.http_proxy)
self.step("updating IPA configuration", update_ipa_conf)
self.step("enabling CA instance", self.__enable_instance)
if not promote:
if self.clone:
# DL0 workaround; see docstring of __expose_ca_in_ldap
self.step("exposing CA instance on LDAP",
self.__expose_ca_in_ldap)
self.step("importing IPA certificate profiles",
import_included_profiles)
self.step("migrating certificate profiles to LDAP",
migrate_profiles_to_ldap)
self.step("adding default CA ACL", ensure_default_caacl)
self.step("adding 'ipa' CA entry", ensure_ipa_authority_entry)
if not self.clone:
self.step("Recording random serial number state",
self.__store_random_serial_number_state)
self.step("Recording HSM configuration state",
self.__store_hsm_configuration_state)
else:
# Re-import profiles in the promote case to pick up any
# that will only be triggered by an upgrade.
self.step("importing IPA certificate profiles",
import_included_profiles)
self.step("configuring certmonger renewal for lightweight CAs",
self.add_lightweight_ca_tracking_requests)
if minimum_acme_support():
self.step("deploying ACME service", self.setup_acme)
if ra_only:
runtime = None
else:
runtime = 180
try:
self.start_creation(runtime=runtime)
finally:
if self.external == 1:
# Don't remove client DB in external CA step 1
# https://pagure.io/freeipa/issue/7742
logger.debug("Keep pkispawn files for step 2")
else:
self.clean_pkispawn_files()
def __spawn_instance(self):
"""
Create and configure a new CA instance using pkispawn.
Creates the config file with IPA specific parameters
and passes it to the base class to call pkispawn
"""
cfg = dict(
pki_ds_secure_connection=self.use_ldaps
)
if self.tokenname:
module_name = os.path.basename(
self.token_library_path
).split('.', 1)[0]
cfg['pki_hsm_enable'] = True
cfg['pki_hsm_modulename'] = module_name
cfg['pki_hsm_libfile'] = self.token_library_path
cfg['pki_token_name'] = self.tokenname
cfg['pki_token_password'] = self.token_password
cfg['pki_sslserver_token'] = 'internal'
if self.ca_signing_algorithm is not None:
cfg['ipa_ca_signing_algorithm'] = self.ca_signing_algorithm
cfg['pki_random_serial_numbers_enable'] = self.random_serial_numbers
if self.random_serial_numbers:
cfg['pki_request_id_generator'] = 'random'
cfg['pki_cert_id_generator'] = 'random'
else:
cfg['pki_request_id_generator'] = 'legacy'
cfg['pki_cert_id_generator'] = 'legacy'
if not (os.path.isdir(paths.PKI_TOMCAT_ALIAS_DIR) and
os.path.isfile(paths.PKI_TOMCAT_PASSWORD_CONF)):
# generate pin which we know can be used for FIPS NSS database
pki_pin = ipautil.ipa_generate_password()
cfg['pki_server_database_password'] = pki_pin
else:
pki_pin = None
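            # an NSS database and password file already exist, so do not
            # generate a new pin here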
# When spawning a CA instance, always point to IPA_CA_CRT if it
# exists. Later, when we're performing step 2 of an external CA
# installation, we'll overwrite this key to point to the real
# external CA.
if os.path.exists(paths.IPA_CA_CRT):
cfg['pki_cert_chain_path'] = paths.IPA_CA_CRT
# Use IP address instead of default localhost4 and localhost6
# because /etc/hosts does not always define them
cfg['pki_ajp_host_ipv4'] = "127.0.0.1"
cfg['pki_ajp_host_ipv6'] = "::1"
if self.clone:
if self.no_db_setup:
cfg.update(
pki_ds_create_new_db=False,
pki_clone_setup_replication=False,
pki_clone_reindex_data=True,
)
cafile = self.pkcs12_info[0]
            # if paths.TMP_CA_P12 exists and is not owned by root,
            # shutil.copy will fail when fs.protected_regular=1,
            # so remove the file first
if cafile:
ipautil.remove_file(paths.TMP_CA_P12)
shutil.copy(cafile, paths.TMP_CA_P12)
self.service_user.chown(paths.TMP_CA_P12)
clone_pkcs12_path = paths.TMP_CA_P12
else:
clone_pkcs12_path = None
if self.random_serial_numbers:
cfg.update(
pki_random_serial_numbers_enable=True,
pki_request_id_generator="random",
pki_cert_id_generator="random",
)
else:
cfg.update(
pki_random_serial_numbers_enable=False,
pki_request_id_generator="legacy",
pki_cert_id_generator="legacy",
)
self._configure_clone(
cfg,
security_domain_hostname=self.master_host,
clone_pkcs12_path=clone_pkcs12_path,
)
# External CA
if self.external == 1:
cfg.update(
pki_external=True,
pki_ca_signing_csr_path=self.csr_file,
)
if self.ca_type == x509.ExternalCAType.MS_CS.value:
# Include MS template name extension in the CSR
template = self.external_ca_profile
if template is None:
# default template name
template = x509.MSCSTemplateV1(u"SubCA")
ext_data = binascii.hexlify(template.get_ext_data())
cfg.update(
pki_req_ext_add=True,
pki_req_ext_oid=template.ext_oid,
pki_req_ext_critical=False,
pki_req_ext_data=ext_data.decode('ascii'),
)
elif self.external == 2:
cert_file = tempfile.NamedTemporaryFile()
with open(self.cert_file, 'rb') as f:
ext_cert = x509.load_unknown_x509_certificate(f.read())
cert_file.write(ext_cert.public_bytes(x509.Encoding.PEM))
ipautil.flush_sync(cert_file)
self.service_user.chown(cert_file.name)
result = ipautil.run(
[paths.OPENSSL, 'crl2pkcs7',
'-certfile', self.cert_chain_file,
'-nocrl'],
capture_output=True)
cert_chain = result.output
# Dogtag chokes on the header and footer, remove them
# https://bugzilla.redhat.com/show_bug.cgi?id=1127838
cert_chain = re.search(
r'(?<=-----BEGIN PKCS7-----).*?(?=-----END PKCS7-----)',
cert_chain, re.DOTALL).group(0)
cert_chain_file = ipautil.write_tmp_file(cert_chain)
cfg.update(
pki_external=True,
pki_ca_signing_cert_path=cert_file.name,
pki_cert_chain_path=cert_chain_file.name,
pki_external_step_two=True,
)
nolog_list = [self.dm_password, self.admin_password, pki_pin]
if self.token_password:
nolog_list.append(self.token_password)
config = self._create_spawn_config(cfg)
self.set_hsm_state(config)
with tempfile.NamedTemporaryFile('w') as f:
config.write(f)
f.flush()
self.service_user.chown(f.fileno())
self.backup_state('installed', True)
DogtagInstance.spawn_instance(
self, f.name,
nolog_list=nolog_list
)
if self.external == 1:
print("The next step is to get %s signed by your CA and re-run %s as:" % (self.csr_file, sys.argv[0]))
print("%s --external-cert-file=/path/to/signed_certificate --external-cert-file=/path/to/external_ca_certificate" % sys.argv[0])
sys.exit(0)
else:
if config.getboolean(
self.subsystem, 'pki_backup_keys', fallback=True
):
shutil.move(
paths.CA_BACKUP_KEYS_P12, paths.CACERT_P12
)
logger.debug("completed creating ca instance")
def add_ipa_wait(self):
"""Add ipa-pki-wait-running to pki-tomcatd service
"""
conf = paths.SYSTEMD_PKI_TOMCAT_IPA_CONF
directory = os.path.dirname(conf)
if not os.path.isdir(directory):
os.mkdir(directory)
with open(conf, 'w') as f:
os.fchmod(f.fileno(), 0o644)
f.write('[Service]\n')
f.write('Environment=LC_ALL=C.UTF-8\n')
f.write('ExecStartPost={}\n'.format(paths.IPA_PKI_WAIT_RUNNING))
tasks.systemd_daemon_reload()
def safe_backup_config(self):
"""
Safely handle exceptions if backup_config fails
The parent class raises an exception if the configuration
cannot be backed up. Catch that and log the message but
don't stop the current installer.
"""
try:
super(CAInstance, self).backup_config()
except Exception as e:
logger.warning("Failed to backup CS.cfg: %s", e)
def create_certstore_passwdfile(self):
"""
        This method creates a 'pwdfile.txt' file in the Dogtag certificate
        store so that the file can be used for NSSDatabase/CertDB
        operations in 'certutil' calls.
"""
passwd = None
token = INTERNAL_TOKEN
with open(paths.PKI_TOMCAT_PASSWORD_CONF, 'r') as f:
for line in f:
(tok, pin) = line.split('=', 1)
if token == tok:
passwd = pin.strip()
break
else:
raise RuntimeError(
"The password to the 'internal' token of the Dogtag "
"certificate store was not found.")
db = certs.CertDB(self.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
db.create_passwd_file(passwd)
def __update_topology(self):
self._ldap_update(
[paths.CA_TOPOLOGY_ULDIF],
basedir=None,
)
def __disable_nonce(self):
# Turn off Nonces
update_result = installutils.update_file(
self.config, 'ca.enableNonces=true',
'ca.enableNonces=false')
if update_result != 0:
raise RuntimeError("Disabling nonces failed")
self.service_user.chown(self.config)
def enable_pkix(self):
directivesetter.set_directive(paths.SYSCONFIG_PKI_TOMCAT,
'NSS_ENABLE_PKIX_VERIFY', '1',
quotes=False, separator='=')
def __import_ra_cert(self):
"""
Helper method for IPA domain level 0 replica install
"""
self.import_ra_cert(self.ra_p12, self.dm_password)
def import_ra_cert(self, rafile, password=''):
"""
Cloned RAs will use the same RA agent cert as the master so we
need to import from a PKCS#12 file.
Used when setting up replication
"""
with ipautil.write_tmp_file(password + '\n') as f:
pwdarg = 'file:{file}'.format(file=f.name)
# get the private key from the file
ipautil.run([paths.OPENSSL,
"pkcs12",
"-in", rafile,
"-nocerts", "-nodes",
"-out", paths.RA_AGENT_KEY,
"-passin", pwdarg])
# get the certificate from the pkcs12 file
ipautil.run([paths.OPENSSL,
"pkcs12",
"-in", rafile,
"-clcerts", "-nokeys",
"-out", paths.RA_AGENT_PEM,
"-passin", pwdarg])
self._set_ra_cert_perms()
self.configure_agent_renewal()
def __import_ra_key(self):
import_ra_key(self._custodia)
@staticmethod
def _set_ra_cert_perms():
"""
Sets the correct permissions for the RA_AGENT_PEM, RA_AGENT_KEY files
"""
group = ipalib.constants.IPAAPI_GROUP
for fname in (paths.RA_AGENT_PEM, paths.RA_AGENT_KEY):
group.chgrp(fname)
os.chmod(fname, 0o440)
tasks.restore_context(fname)
def __create_ca_agent(self):
"""
Create CA agent, assign a certificate, and add the user to
the appropriate groups for accessing CA services.
"""
conn = api.Backend.ldap2
# create ipara user with RA certificate
user_dn = DN(('uid', "ipara"), ('ou', 'People'), self.basedn)
entry = conn.make_entry(
user_dn,
objectClass=['top', 'person', 'organizationalPerson',
'inetOrgPerson', 'cmsuser'],
uid=["ipara"],
sn=["ipara"],
cn=["ipara"],
usertype=["agentType"],
userstate=["1"],
userCertificate=[self.ra_cert],
description=['2;%s;%s;%s' % (
self.ra_cert.serial_number,
DN(self.ca_subject),
DN(('CN', 'IPA RA'), self.subject_base))])
conn.add_entry(entry)
# add ipara user to Certificate Manager Agents group
group_dn = DN(('cn', 'Certificate Manager Agents'), ('ou', 'groups'),
self.basedn)
conn.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
# add ipara user to Registration Manager Agents group
group_dn = DN(('cn', 'Registration Manager Agents'), ('ou', 'groups'),
self.basedn)
conn.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
# add ipara user to Security Domain Administrators group
group_dn = DN(('cn', 'Security Domain Administrators'),
('ou', 'groups'), self.basedn)
conn.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
def __get_ca_chain(self):
try:
return dogtag.get_ca_certchain(ca_host=self.fqdn)
except Exception as e:
raise RuntimeError("Unable to retrieve CA chain: %s" % str(e))
def __export_ca_chain(self):
"""
Get the CA chain from Dogtag NSS DB and write it to paths.IPA_CA_CRT
"""
# Getting Dogtag CA chain
chain = self.__get_ca_chain()
# Convert to DER because the chain comes back as one long string which
# makes openssl throw up.
data = base64.b64decode(chain)
# Get list of PEM certificates
certlist = x509.pkcs7_to_certs(data, x509.DER)
# We need to append the certs to the existing file, so start by
# reading the file
if os.path.isfile(paths.IPA_CA_CRT):
ca_certs = x509.load_certificate_list_from_file(paths.IPA_CA_CRT)
certlist.extend(ca_certs)
# We have all the certificates in certlist, write them to a PEM file
for path in [paths.IPA_CA_CRT,
paths.KDC_CA_BUNDLE_PEM,
paths.CA_BUNDLE_PEM]:
x509.write_certificate_list(certlist, path, mode=0o644)
def __request_ra_certificate(self):
"""
Request the IPA RA certificate from dogtag.
dogtag automatically generates an admin certificate that
in a usual deployment would be used in the UI to handle
administrative duties. IPA does not use this certificate
except as a bootstrap to generate the RA.
To do this it bends over backwards a bit by modifying the
way typical certificates are retrieved using certmonger by
forcing it to call dogtag-submit directly.
"""
# create a temp PEM file storing the CA chain
chain_file = tempfile.NamedTemporaryFile(
mode="w", dir=paths.VAR_LIB_IPA, delete=False)
chain_file.close()
chain = self.__get_ca_chain()
data = base64.b64decode(chain)
ipautil.run(
[paths.OPENSSL,
"pkcs7",
"-inform",
"DER",
"-print_certs",
"-out", chain_file.name,
], stdin=data, capture_output=False)
# CA agent cert in PEM form
agent_cert = tempfile.NamedTemporaryFile(
mode="w", dir=paths.VAR_LIB_IPA, delete=False)
agent_cert.close()
# CA agent key in PEM form
agent_key = tempfile.NamedTemporaryFile(
mode="w", dir=paths.VAR_LIB_IPA, delete=False)
agent_key.close()
certs.install_pem_from_p12(paths.DOGTAG_ADMIN_P12,
self.dm_password,
agent_cert.name)
certs.install_key_from_p12(paths.DOGTAG_ADMIN_P12,
self.dm_password,
agent_key.name)
agent_args = [paths.CERTMONGER_DOGTAG_SUBMIT,
"--cafile", chain_file.name,
"--ee-url", 'http://%s:8080/ca/ee/ca/' % self.fqdn,
"--agent-url",
'https://%s:8443/ca/agent/ca/' % self.fqdn,
"--certfile", agent_cert.name,
"--keyfile", agent_key.name, ]
helper = " ".join(agent_args)
# configure certmonger renew agent to use temporary agent cert
old_helper = certmonger.modify_ca_helper(
ipalib.constants.RENEWAL_CA_NAME, helper)
try:
# The certificate must be requested using caSubsystemCert profile
# because this profile does not require agent authentication
reqId = certmonger.request_and_wait_for_cert(
certpath=(paths.RA_AGENT_PEM, paths.RA_AGENT_KEY),
principal='host/%s' % self.fqdn,
subject=str(DN(('CN', 'IPA RA'), self.subject_base)),
ca=ipalib.constants.RENEWAL_CA_NAME,
profile=ipalib.constants.RA_AGENT_PROFILE,
pre_command='renew_ra_cert_pre',
post_command='renew_ra_cert',
storage="FILE",
resubmit_timeout=api.env.certmonger_wait_timeout
)
self._set_ra_cert_perms()
self.requestId = str(reqId)
self.ra_cert = x509.load_certificate_from_file(
paths.RA_AGENT_PEM)
finally:
# we can restore the helper parameters
certmonger.modify_ca_helper(
ipalib.constants.RENEWAL_CA_NAME, old_helper)
# remove any temporary files
for f in (chain_file, agent_cert, agent_key):
try:
os.remove(f.name)
except OSError:
pass
def prepare_crl_publish_dir(self):
"""
Prepare target directory for CRL publishing
Returns a path to the CRL publishing directory
"""
publishdir = paths.PKI_CA_PUBLISH_DIR
if not os.path.exists(publishdir):
os.mkdir(publishdir)
os.chmod(publishdir, 0o775)
os.chown(publishdir, 0, self.service_user.pgid)
tasks.restore_context(publishdir)
return publishdir
def __enable_crl_publish(self):
"""
Enable file-based CRL publishing and disable LDAP publishing.
https://access.redhat.com/knowledge/docs/en-US/Red_Hat_Certificate_System/8.0/html/Admin_Guide/Setting_up_Publishing.html
"""
with directivesetter.DirectiveSetter(
self.config, quotes=False, separator='=') as ds:
# Enable file publishing, disable LDAP
ds.set('ca.publish.enable', 'true')
ds.set('ca.publish.cert.enable', 'false')
ds.set('ca.publish.ldappublish.enable', 'false')
# Create the file publisher, der only, not b64
ds.set(
'ca.publish.publisher.impl.FileBasedPublisher.class',
'com.netscape.cms.publish.publishers.FileBasedPublisher'
)
prefix = 'ca.publish.publisher.instance.FileBaseCRLPublisher.'
ds.set(prefix + 'crlLinkExt', 'bin')
ds.set(prefix + 'directory', self.prepare_crl_publish_dir())
ds.set(prefix + 'latestCrlLink', 'true')
ds.set(prefix + 'pluginName', 'FileBasedPublisher')
ds.set(prefix + 'timeStamp', 'LocalTime')
ds.set(prefix + 'zipCRLs', 'false')
ds.set(prefix + 'zipLevel', '9')
ds.set(prefix + 'Filename.b64', 'false')
ds.set(prefix + 'Filename.der', 'true')
# The publishing rule
ds.set('ca.publish.rule.instance.FileCrlRule.enable', 'true')
ds.set('ca.publish.rule.instance.FileCrlRule.mapper', 'NoMap')
ds.set('ca.publish.rule.instance.FileCrlRule.pluginName', 'Rule')
ds.set('ca.publish.rule.instance.FileCrlRule.predicate', '')
ds.set(
'ca.publish.rule.instance.FileCrlRule.publisher',
'FileBaseCRLPublisher'
)
ds.set('ca.publish.rule.instance.FileCrlRule.type', 'crl')
# Now disable LDAP publishing
ds.set('ca.publish.rule.instance.LdapCaCertRule.enable', 'false')
ds.set('ca.publish.rule.instance.LdapCrlRule.enable', 'false')
ds.set(
'ca.publish.rule.instance.LdapUserCertRule.enable',
'false'
)
ds.set('ca.publish.rule.instance.LdapXCertRule.enable', 'false')
# If we are the initial master then we are the CRL generator,
# otherwise we point to that master for CRLs.
if not self.clone:
# These next two are defaults, but I want to be explicit
# that the initial master is the CRL generator.
ds.set('ca.crl.MasterCRL.enableCRLCache', 'true')
ds.set('ca.crl.MasterCRL.enableCRLUpdates', 'true')
ds.set('ca.listenToCloneModifications', 'true')
else:
ds.set('ca.crl.MasterCRL.enableCRLCache', 'false')
ds.set('ca.crl.MasterCRL.enableCRLUpdates', 'false')
ds.set('ca.listenToCloneModifications', 'false')
def uninstall(self):
# just eat state
self.restore_state("enabled")
self.restore_hsm_state()
DogtagInstance.uninstall(self)
self.restore_state("installed")
# At one time we removed this user on uninstall. That can potentially
# orphan files, or worse, if another useradd runs in the interim,
# cause files to have a new owner.
self.restore_state("user_exists")
if not services.knownservices.dbus.is_running():
# some platforms protect dbus with RefuseManualStart=True
services.knownservices.dbus.start()
cmonger = services.knownservices.certmonger
cmonger.start()
bus = dbus.SystemBus()
obj = bus.get_object('org.fedorahosted.certmonger',
'/org/fedorahosted/certmonger')
iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
for suffix in ['', '-reuse', '-selfsigned']:
name = ipalib.constants.RENEWAL_CA_NAME + suffix
path = iface.find_ca_by_nickname(name)
if path:
iface.remove_known_ca(path)
cmonger.stop()
# remove ipa-pki-wait-running config
ipautil.remove_file(paths.SYSTEMD_PKI_TOMCAT_IPA_CONF)
try:
os.rmdir(os.path.dirname(paths.SYSTEMD_PKI_TOMCAT_IPA_CONF))
except OSError:
pass
tasks.systemd_daemon_reload()
# remove CRL files
logger.debug("Remove old CRL files")
try:
for f in get_crl_files():
logger.debug("Remove %s", f)
ipautil.remove_file(f)
except OSError as e:
logger.warning("Error while removing old CRL files: %s", e)
# remove CRL directory
logger.debug("Remove CRL directory")
if os.path.exists(paths.PKI_CA_PUBLISH_DIR):
try:
shutil.rmtree(paths.PKI_CA_PUBLISH_DIR)
except OSError as e:
logger.warning("Error while removing CRL publish "
"directory: %s", e)
ipautil.remove_file(paths.DOGTAG_ADMIN_P12)
ipautil.remove_file(paths.CACERT_P12)
ipautil.remove_file(paths.ADMIN_CERT_PATH)
def unconfigure_certmonger_renewal_guard(self):
if not self.is_configured():
return
helper = self.restore_state('certmonger_dogtag_helper')
if helper:
bus = dbus.SystemBus()
obj = bus.get_object('org.fedorahosted.certmonger',
'/org/fedorahosted/certmonger')
iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
path = iface.find_ca_by_nickname('dogtag-ipa-renew-agent')
if path:
ca_obj = bus.get_object('org.fedorahosted.certmonger', path)
ca_iface = dbus.Interface(ca_obj,
'org.freedesktop.DBus.Properties')
ca_iface.Set('org.fedorahosted.certmonger.ca',
'external-helper', helper)
@staticmethod
def configure_agent_renewal():
try:
certmonger.start_tracking(
certpath=(paths.RA_AGENT_PEM, paths.RA_AGENT_KEY),
ca=ipalib.constants.RENEWAL_CA_NAME,
profile=ipalib.constants.RA_AGENT_PROFILE,
pre_command='renew_ra_cert_pre',
post_command='renew_ra_cert',
storage='FILE')
except RuntimeError as e:
logger.error(
"certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self):
"""
Stop tracking our certificates. Called on uninstall. Also called
during upgrade to fix discrepancies.
"""
super(CAInstance, self).stop_tracking_certificates()
# stop tracking lightweight CA signing certs
for request_id in certmonger.get_requests_for_dir(self.nss_db):
nickname = certmonger.get_request_value(request_id, 'key-nickname')
if nickname.startswith('caSigningCert cert-pki-ca '):
certmonger.stop_tracking(self.nss_db, nickname=nickname)
try:
certmonger.stop_tracking(certfile=paths.RA_AGENT_PEM)
except RuntimeError as e:
logger.error(
"certmonger failed to stop tracking certificate: %s", e)
def is_renewal_master(self, fqdn=None):
if fqdn is None:
fqdn = api.env.host
dn = DN(('cn', 'CA'), ('cn', fqdn), api.env.container_masters,
api.env.basedn)
renewal_filter = '(ipaConfigString=caRenewalMaster)'
try:
api.Backend.ldap2.get_entries(base_dn=dn, filter=renewal_filter,
attrs_list=[])
except errors.NotFound:
return False
return True
def set_renewal_master(self, fqdn=None):
if fqdn is None:
fqdn = api.env.host
base_dn = DN(api.env.container_masters, api.env.basedn)
filter = '(&(cn=CA)(ipaConfigString=caRenewalMaster))'
try:
entries = api.Backend.ldap2.get_entries(
base_dn=base_dn, filter=filter, attrs_list=['ipaConfigString'])
except errors.NotFound:
entries = []
dn = DN(('cn', 'CA'), ('cn', fqdn), base_dn)
master_entry = api.Backend.ldap2.get_entry(dn, ['ipaConfigString'])
for entry in entries:
if master_entry is not None and entry.dn == master_entry.dn:
master_entry = None
continue
entry['ipaConfigString'] = [x for x in entry['ipaConfigString']
if x.lower() != 'carenewalmaster']
api.Backend.ldap2.update_entry(entry)
if master_entry is not None:
master_entry['ipaConfigString'].append('caRenewalMaster')
api.Backend.ldap2.update_entry(master_entry)
def update_cert_config(self, nickname, cert):
"""
When renewing a CA subsystem certificate the configuration file
needs to get the new certificate as well.
nickname is one of the known nicknames.
cert is a DER-encoded certificate.
"""
# The cert directive to update per nickname
directives = {'auditSigningCert cert-pki-ca': 'ca.audit_signing.cert',
'ocspSigningCert cert-pki-ca': 'ca.ocsp_signing.cert',
'caSigningCert cert-pki-ca': 'ca.signing.cert',
'subsystemCert cert-pki-ca': 'ca.subsystem.cert',
'Server-Cert cert-pki-ca': 'ca.sslserver.cert'}
try:
self.backup_config()
except Exception as e:
syslog.syslog(syslog.LOG_ERR, "Failed to backup CS.cfg: %s" % e)
if nickname in directives:
super(CAInstance, self).update_cert_cs_cfg(
directives[nickname], cert)
def __create_ds_db(self):
'''
Create the PKI database. Needed when the pkispawn option
pki_ds_create_new_db is set to False.
'''
backend = 'ipaca'
suffix = DN(('o', 'ipaca'))
# database
dn = DN(('cn', 'ipaca'), ('cn', 'ldbm database'), ('cn', 'plugins'),
('cn', 'config'))
entry = api.Backend.ldap2.make_entry(
dn,
objectclass=["top", "extensibleObject", "nsBackendInstance"],
cn=[backend],
)
entry['nsslapd-suffix'] = [suffix]
api.Backend.ldap2.add_entry(entry)
# replication
dn = DN(('cn', str(suffix)), ('cn', 'mapping tree'), ('cn', 'config'))
entry = api.Backend.ldap2.make_entry(
dn,
objectclass=["top", "extensibleObject", "nsMappingTree"],
cn=[suffix],
)
entry['nsslapd-state'] = ['Backend']
entry['nsslapd-backend'] = [backend]
api.Backend.ldap2.add_entry(entry)
def __setup_replication(self):
repl = replication.CAReplicationManager(self.realm, self.fqdn)
repl.setup_cs_replication(self.master_host)
# Activate Topology for o=ipaca segments
self.__update_topology()
def finalize_replica_config(self):
repl = replication.CAReplicationManager(self.realm, self.fqdn)
repl.finalize_replica_config(self.master_host)
def __enable_instance(self):
basedn = ipautil.realm_to_suffix(self.realm)
if not self.clone:
config = ['caRenewalMaster']
else:
config = []
self.ldap_configure('CA', self.fqdn, None, basedn, config)
def __expose_ca_in_ldap(self):
"""
When a replica is created on DL0 we need to make sure that the
query for the CA service record of this replica in LDAP succeeds
during installation.
This method is needed for successful replica installation on DL0
and should be removed along with the rest of the DL0 code.
To suppress the deprecation warning message this method invokes
_ldap_enable() instead of ldap_enable().
"""
basedn = ipautil.realm_to_suffix(self.realm)
if not self.clone:
config = ['caRenewalMaster']
else:
config = []
self._ldap_enable(ENABLED_SERVICE, "CA", self.fqdn, basedn, config)
def setup_lightweight_ca_key_retrieval(self):
# Important: there is a typo in the below string, which is known
# and should not be fixed as existing installations already use it
LWCA_KEY_RETRIEVAL = 'setup_lwca_key_retieval'
if sysupgrade.get_upgrade_state('dogtag', LWCA_KEY_RETRIEVAL):
return
logger.debug('Set up lightweight CA key retrieval')
self.__setup_lightweight_ca_key_retrieval_kerberos()
self.__setup_lightweight_ca_key_retrieval_custodia()
logger.debug('Configuring key retriever')
directives = [
('features.authority.keyRetrieverClass',
'com.netscape.ca.ExternalProcessKeyRetriever'),
('features.authority.keyRetrieverConfig.executable',
paths.IPA_PKI_RETRIEVE_KEY),
]
for k, v in directives:
directivesetter.set_directive(
self.config, k, v, quotes=False, separator='=')
sysupgrade.set_upgrade_state('dogtag', LWCA_KEY_RETRIEVAL, True)
def __setup_lightweight_ca_key_retrieval_kerberos(self):
logger.debug('Creating principal')
installutils.kadmin_addprinc(self.principal)
self.suffix = ipautil.realm_to_suffix(self.realm)
self.move_service(self.principal)
logger.debug('Retrieving keytab')
installutils.create_keytab(self.keytab, self.principal)
os.chmod(self.keytab, 0o600)
self.service_user.chown(self.keytab)
def __setup_lightweight_ca_key_retrieval_custodia(self):
logger.debug('Creating Custodia keys')
custodia_basedn = DN(
('cn', 'custodia'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
ensure_entry(
custodia_basedn,
objectclass=['top', 'nsContainer'],
cn=['custodia'],
)
ensure_entry(
DN(('cn', 'dogtag'), custodia_basedn),
objectclass=['top', 'nsContainer'],
cn=['dogtag'],
)
keyfile = os.path.join(paths.PKI_TOMCAT, self.service_prefix + '.keys')
keystore = IPAKEMKeys({'server_keys': keyfile})
keystore.generate_keys(self.service_prefix)
os.chmod(keyfile, 0o600)
self.service_user.chown(keyfile)
def add_lightweight_ca_tracking_requests(self):
try:
lwcas = api.Backend.ldap2.get_entries(
base_dn=api.env.basedn,
filter='(objectclass=ipaca)',
attrs_list=['cn', 'ipacaid'],
)
add_lightweight_ca_tracking_requests(lwcas)
except errors.NotFound:
# shouldn't happen, but don't fail if it does
logger.warning(
"Did not find any lightweight CAs; nothing to track")
def __dogtag10_migration(self):
self._ldap_update(['50-dogtag10-migration.update'])
def is_crlgen_enabled(self):
"""Check if the local CA instance is generating CRL
Three conditions must be met to consider that the local CA is CRL
generation master:
- in CS.cfg ca.crl.MasterCRL.enableCRLCache=true
- in CS.cfg ca.crl.MasterCRL.enableCRLUpdates=true
- in CS.cfg ca.listenToCloneModifications=true
- in CS.cfg ca.certStatusUpdateInterval != 0
- in /etc/httpd/conf.d/ipa-pki-proxy.conf the RewriteRule
^/ipa/crl/MasterCRL.bin is disabled (commented or removed)
If the values are inconsistent, an exception is raised
:returns: True/False
:raises: InconsistentCRLGenConfigException if the config is
inconsistent
"""
try:
cache = directivesetter.get_directive(
self.config, 'ca.crl.MasterCRL.enableCRLCache', '=')
enableCRLCache = cache.lower() == 'true'
updates = directivesetter.get_directive(
self.config, 'ca.crl.MasterCRL.enableCRLUpdates', '=')
enableCRLUpdates = updates.lower() == 'true'
listen = directivesetter.get_directive(
self.config, 'ca.listenToCloneModifications', '=')
enableToClone = listen.lower() == 'true'
updateinterval = directivesetter.get_directive(
self.config, 'ca.certStatusUpdateInterval', '=')
# If the values are different, the config is inconsistent
if not (enableCRLCache == enableCRLUpdates == enableToClone):
raise InconsistentCRLGenConfigException(
"Configuration is inconsistent, please check "
"ca.crl.MasterCRL.enableCRLCache, "
"ca.crl.MasterCRL.enableCRLUpdates and "
"ca.listenToCloneModifications in {} and "
"run ipa-crlgen-manage [enable|disable] to repair".format(
self.config))
# If they are the same then we are the CRL renewal master. Ensure
# the update task is configured.
if enableCRLCache and updateinterval == '0':
raise InconsistentCRLGenConfigException(
"Configuration is inconsistent, please check "
"ca.certStatusUpdateInterval in {}. It should "
"be either not present or not zero. Run "
"ipa-crlgen-manage [enable|disable] to repair".format(
self.config))
except IOError:
raise RuntimeError(
"Unable to read {}".format(self.config))
# At this point enableCRLCache and enableCRLUpdates have the same value
try:
rewriteRuleDisabled = True
p = re.compile(self.crl_rewrite_pattern)
with open(paths.HTTPD_IPA_PKI_PROXY_CONF) as f:
for line in f.readlines():
if p.search(line):
rewriteRuleDisabled = False
break
except IOError:
raise InconsistentCRLGenConfigException(
"Unable to read {}".format(paths.HTTPD_IPA_PKI_PROXY_CONF))
# if enableCRLUpdates and rewriteRuleDisabled are different, the config
# is inconsistent
if enableCRLUpdates != rewriteRuleDisabled:
raise InconsistentCRLGenConfigException(
"Configuration is inconsistent, please check "
"ca.crl.MasterCRL.enableCRLCache in {} and the "
"RewriteRule ^/ipa/crl/MasterCRL.bin in {} and "
"run ipa-crlgen-manage [enable|disable] to repair".format(
self.config, paths.HTTPD_IPA_PKI_PROXY_CONF))
return enableCRLUpdates
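# Illustrative summary (sketch, not from the original source) of a
# consistent configuration as checked above:
#   CRL generation master: enableCRLCache=true, enableCRLUpdates=true,
#       listenToCloneModifications=true, certStatusUpdateInterval unset
#       or non-zero, MasterCRL RewriteRule commented out.
#   Non-generating replica: the three CS.cfg flags false and the
#       MasterCRL RewriteRule active.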
def setup_crlgen(self, setup_crlgen):
"""Configure the local host for CRL generation
:param setup_crlgen: if True enable CRL generation, if False, disable
"""
try:
crlgen_enabled = self.is_crlgen_enabled()
if crlgen_enabled == setup_crlgen:
logger.info(
"Nothing to do, CRL generation already %s",
"enabled" if crlgen_enabled else "disabled")
return
except InconsistentCRLGenConfigException:
logger.warning("CRL generation is partially enabled, repairing...")
# Stop PKI
logger.info("Stopping %s", self.service_name)
self.stop_instance()
logger.debug("%s successfully stopped", self.service_name)
# Edit the CS.cfg directives
logger.info("Editing %s", self.config)
with directivesetter.DirectiveSetter(
self.config, quotes=False, separator='=') as ds:
# Convert the bool setup_crlgen to a lowercase string
str_value = str(setup_crlgen).lower()
ds.set('ca.crl.MasterCRL.enableCRLCache', str_value)
ds.set('ca.crl.MasterCRL.enableCRLUpdates', str_value)
ds.set('ca.listenToCloneModifications', str_value)
if setup_crlgen:
ds.set('ca.certStatusUpdateInterval', None)
else:
ds.set('ca.certStatusUpdateInterval', '0')
# Start pki-tomcat
logger.info("Starting %s", self.service_name)
self.start_instance()
logger.debug("%s successfully started", self.service_name)
# Edit the RewriteRule
def comment_rewriterule():
logger.info("Editing %s", paths.HTTPD_IPA_PKI_PROXY_CONF)
# look for the pattern RewriteRule ^/ipa/crl/MasterCRL.bin ..
# and comment out
p = re.compile(self.crl_rewrite_pattern, re.MULTILINE)
with open(paths.HTTPD_IPA_PKI_PROXY_CONF) as f:
content = f.read()
new_content = p.sub(r"#\1", content)
with open(paths.HTTPD_IPA_PKI_PROXY_CONF, 'w') as f:
f.write(new_content)
def uncomment_rewriterule():
logger.info("Editing %s", paths.HTTPD_IPA_PKI_PROXY_CONF)
# check if the pattern RewriteRule ^/ipa/crl/MasterCRL.bin ..
# is already present
present = False
p = re.compile(self.crl_rewrite_pattern, re.MULTILINE)
with open(paths.HTTPD_IPA_PKI_PROXY_CONF) as f:
content = f.read()
present = p.search(content)
# Remove the comment
p_comment = re.compile(self.crl_rewrite_comment, re.MULTILINE)
new_content = p_comment.sub("", content)
# If not already present, add RewriteRule
if not present:
new_content += self.crl_rewriterule.format(api.env.host)
# Finally write the file
with open(paths.HTTPD_IPA_PKI_PROXY_CONF, 'w') as f:
f.write(new_content)
try:
if setup_crlgen:
comment_rewriterule()
else:
uncomment_rewriterule()
except IOError:
raise RuntimeError(
"Unable to access {}".format(paths.HTTPD_IPA_PKI_PROXY_CONF))
# Restart httpd
http_service = services.knownservices.httpd
logger.info("Restarting %s", http_service.service_name)
http_service.restart()
logger.debug("%s successfully restarted", http_service.service_name)
# make sure a CRL is generated if setup_crlgen is True
if setup_crlgen:
logger.info("Forcing CRL update")
api.Backend.ra.override_port = 8443
result = api.Backend.ra.updateCRL(wait='true')
if result.get('crlUpdate', 'Failure') == 'Success':
logger.debug("Successfully updated CRL")
api.Backend.ra.override_port = None
@staticmethod
def acme_uid(fqdn: str) -> str:
"""Compute ACME RA account uid."""
return f'acme-{fqdn}'
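# Illustrative example (hypothetical host name):
#   CAInstance.acme_uid('replica1.example.test')
#       -> 'acme-replica1.example.test'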
def setup_acme(self) -> bool:
"""
Set up ACME service, if needed.
Return False if ACME service was already set up, otherwise True.
"""
# ACME LDAP database schema will be added by ipa-server-upgrade.
# It is fine if this subroutine runs *before* the schema update,
# because we only create the container objects.
if os.path.isdir(os.path.join(paths.PKI_TOMCAT, 'acme')):
logger.debug('ACME service is already deployed')
return False
if not minimum_acme_support():
logger.debug('Minimum ACME support not available')
return False
logger.debug('Deploying ACME')
self._ldap_mod('/usr/share/pki/acme/database/ds/schema.ldif')
configure_acme_acls()
# create ACME agent group (if not exist already) and user
self.ensure_group(ACME_AGENT_GROUP, "ACME RA accounts")
acme_user = self.acme_uid(self.fqdn)
result = self.create_user(
uid=acme_user,
cn=acme_user,
sn=acme_user,
user_type='agentType',
groups=[ACME_AGENT_GROUP],
force=True,
)
if result is None:
raise RuntimeError("Failed to add ACME RA user")
else:
password = result
# Add the IPA RA user as a member of the ACME admins for
# ipa-acme-manage.
user_dn = DN(('uid', "ipara"), ('ou', 'People'), self.basedn)
conn = api.Backend.ldap2
group_dn = DN(('cn', ACME_AGENT_GROUP), ('ou', 'groups'),
self.basedn)
try:
conn.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
except errors.AlreadyGroupMember:
pass
# create container object hierarchy in LDAP
ensure_acme_containers()
# create ACME service instance
ipautil.run(['pki-server', 'acme-create'])
# write configuration files
sub_dict = dict(
FQDN=self.fqdn,
USER=acme_user,
PASSWORD=password,
)
for template_name, target in ACME_CONFIG_FILES:
template_filename = \
os.path.join(paths.USR_SHARE_IPA_DIR, template_name)
filled = ipautil.template_file(template_filename, sub_dict)
with open(target, 'w') as f:
f.write(filled)
os.fchmod(f.fileno(), 0o600)
self.service_user.chown(f.fileno())
# deploy ACME Tomcat application
ipautil.run(['pki-server', 'acme-deploy'])
return True
def __store_random_serial_number_state(self):
"""
Save the Random Serial Number (RSN) version.
This is intended to add flexibility in case RSN bumps
another version in dogtag. For now we only support v3
or no randomization (0).
"""
if self.random_serial_numbers:
value = 3
else:
value = 0
dn = DN(('cn', ipalib.constants.IPA_CA_CN), api.env.container_ca,
api.env.basedn)
entry_attrs = api.Backend.ldap2.get_entry(dn)
entry_attrs['ipaCaRandomSerialNumberVersion'] = value
api.Backend.ldap2.update_entry(entry_attrs)
def __store_hsm_configuration_state(self):
"""
Save the HSM token configuration.
This data is used during replica install to determine whether
the remote server uses an HSM.
"""
if not self.token_name or self.token_name == 'internal':
return
dn = DN(('cn', ipalib.constants.IPA_CA_CN), api.env.container_ca,
api.env.basedn)
entry_attrs = api.Backend.ldap2.get_entry(dn)
entry_attrs['ipaCaHSMConfiguration'] = '{};{}'.format(
self.token_name, self.token_library_path)
api.Backend.ldap2.update_entry(entry_attrs)
def __update_entry_from_cert(make_filter, make_entry, cert):
"""
Given a certificate and functions to make a filter based on the
cert, and make a new entry based on the cert, update the database
accordingly.
:param make_filter:
function that takes a certificate in DER format and
returns an LDAP search filter
:param make_entry:
function that takes a certificate in DER format and an
LDAP entry, and returns the new state of the LDAP entry.
Return the input unchanged to skip an entry.
:param cert:
An IPACertificate object
Logging is done via syslog.
Return ``True`` if all updates were successful (zero updates is
vacuously successful) otherwise ``False``.
"""
base_dn = DN(('o', 'ipaca'))
conn = api.Backend.ldap2
attempts = 0
updated = False
while attempts < 10:
try:
db_filter = make_filter(cert)
try:
entries = conn.get_entries(base_dn, conn.SCOPE_SUBTREE, db_filter)
except errors.NotFound:
entries = []
updated = True
for entry in entries:
syslog.syslog(
syslog.LOG_NOTICE, 'Updating entry %s' % str(entry.dn))
try:
entry = make_entry(cert, entry)
conn.update_entry(entry)
except errors.EmptyModlist:
pass
except Exception as e:
syslog.syslog(
syslog.LOG_ERR,
'Updating entry %s failed: %s' % (str(entry.dn), e))
updated = False
break
except errors.NetworkError:
syslog.syslog(
syslog.LOG_ERR,
'Connection to %s failed, sleeping 30s' % api.env.ldap_uri)
time.sleep(30)
attempts += 1
except Exception as e:
syslog.syslog(syslog.LOG_ERR, 'Caught unhandled exception: %s' % e)
break
if not updated:
syslog.syslog(syslog.LOG_ERR, 'Update failed.')
return False
return True
def update_people_entry(cert):
"""
Update the userCertificate for an entry in the dogtag ou=People. This
is needed when a certificate is renewed.
"""
def make_filter(cert):
ldap = api.Backend.ldap2
subject = DN(cert.subject)
issuer = DN(cert.issuer)
return ldap.combine_filters(
[
ldap.make_filter({'objectClass': 'inetOrgPerson'}),
ldap.make_filter(
{'description': ';%s;%s' % (issuer, subject)},
exact=False, trailing_wildcard=False),
],
ldap.MATCH_ALL)
def make_entry(cert, entry):
serial_number = cert.serial_number
subject = DN(cert.subject)
issuer = DN(cert.issuer)
entry['usercertificate'].append(cert)
entry['description'] = '2;%d;%s;%s' % (serial_number, issuer, subject)
return entry
return __update_entry_from_cert(make_filter, make_entry, cert)
def update_authority_entry(cert):
"""
Find the authority entry for the given cert, and update the
serial number to match the given cert.
"""
def make_filter(cert):
ldap = api.Backend.ldap2
subject = str(DN(cert.subject))
return ldap.make_filter(
dict(objectclass='authority', authoritydn=subject),
rules=ldap.MATCH_ALL,
)
def make_entry(cert, entry):
entry['authoritySerial'] = cert.serial_number
return entry
return __update_entry_from_cert(make_filter, make_entry, cert)
def get_ca_renewal_nickname(subject_base, ca_subject_dn, sdn):
"""
Get the nickname for storage in the cn_renewal container.
:param subject_base: Certificate subject base
:param ca_subject_dn: IPA CA subject DN
:param sdn: Subject DN
:return: string, or None if nickname cannot be determined.
"""
assert isinstance(sdn, DN)
nickname_by_subject_dn = {
DN(ca_subject_dn): 'caSigningCert cert-pki-ca',
DN('CN=CA Audit', subject_base): 'auditSigningCert cert-pki-ca',
DN('CN=OCSP Subsystem', subject_base): 'ocspSigningCert cert-pki-ca',
DN('CN=CA Subsystem', subject_base): 'subsystemCert cert-pki-ca',
DN('CN=KRA Audit', subject_base): 'auditSigningCert cert-pki-kra',
DN('CN=KRA Transport Certificate', subject_base):
'transportCert cert-pki-kra',
DN('CN=KRA Storage Certificate', subject_base):
'storageCert cert-pki-kra',
DN('CN=IPA RA', subject_base): 'ipaCert',
}
return nickname_by_subject_dn.get(sdn)
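# Minimal usage sketch (not part of the original module); the subject
# base and DNs below are hypothetical example values.
def _example_renewal_nickname():
    subject_base = DN('O=EXAMPLE.TEST')
    ca_subject_dn = DN('CN=Certificate Authority', subject_base)
    # the IPA RA subject maps to the 'ipaCert' renewal nickname
    return get_ca_renewal_nickname(
        subject_base, ca_subject_dn, DN('CN=IPA RA', subject_base))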
def update_ca_renewal_entry(conn, nickname, cert):
"""
Update the ca_renewal entry for the given nickname.
:param conn: A *connected* LDAP handle
:param nickname: NSSDB nickname
:param cert: python-cryptography X509Certificate
"""
dn = DN(('cn', nickname), api.env.container_ca_renewal, api.env.basedn)
try:
entry = conn.get_entry(dn, ['usercertificate'])
entry['usercertificate'] = [cert]
conn.update_entry(entry)
except errors.NotFound:
entry = conn.make_entry(
dn,
objectclass=['top', 'pkiuser', 'nscontainer'],
cn=[nickname],
usercertificate=[cert])
conn.add_entry(entry)
except errors.EmptyModlist:
pass
def ensure_ldap_profiles_container():
ensure_entry(
PROFILES_DN,
objectclass=['top', 'organizationalUnit'],
ou=['certificateProfiles'],
)
def ensure_lightweight_cas_container():
return ensure_entry(
DN(('ou', 'authorities'), ('ou', 'ca'), ('o', 'ipaca')),
objectclass=['top', 'organizationalUnit'],
ou=['authorities'],
)
def enable_lightweight_ca_monitor():
# Check LWCA monitor
value = directivesetter.get_directive(
paths.CA_CS_CFG_PATH,
'ca.authorityMonitor.enable',
separator='=')
if value == 'true':
return False # already enabled; restart not needed
# Enable LWCA monitor
directivesetter.set_directive(
paths.CA_CS_CFG_PATH,
'ca.authorityMonitor.enable',
'true',
quotes=False,
separator='=')
return True # restart needed
def minimum_acme_support(data=None):
"""
ACME with global enable/disable is required.
This first shipped in dogtag version 10.10.0.
Parse the version string to determine if the minimum version
is met. If parsing fails return False.
:param: data: The string value to parse for version. Defaults to
reading from the filesystem.
"""
if not data:
with open('/usr/share/pki/VERSION', 'r') as fd:
data = fd.read()
groups = re.match(r'.*\nSpecification-Version: ([\d+\.]*)\n.*', data)
if groups:
version_string = groups.groups(0)[0]
minimum_version = parse_version('10.10.0')
return parse_version(version_string) >= minimum_version
else:
logger.debug('Unable to parse version from %s', data)
return False
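# Minimal sketch (not part of the original module) of the data format
# minimum_acme_support() parses; the version numbers are made-up examples.
def _example_minimum_acme_support():
    new_enough = 'Name: pki\nSpecification-Version: 10.11.2\nEnd\n'
    too_old = 'Name: pki\nSpecification-Version: 10.9.1\nEnd\n'
    # expected result: (True, False)
    return minimum_acme_support(new_enough), minimum_acme_support(too_old)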
def ensure_acme_containers():
"""
Create the ACME container objects under ou=acme,o=ipaca if
they do not exist.
"""
ou_acme = RDN(('ou', 'acme'))
rdns = [
DN(ou_acme),
DN(('ou', 'nonces'), ou_acme),
DN(('ou', 'accounts'), ou_acme),
DN(('ou', 'orders'), ou_acme),
DN(('ou', 'authorizations'), ou_acme),
DN(('ou', 'challenges'), ou_acme),
DN(('ou', 'certificates'), ou_acme),
]
extensible_rdns = [
DN(('ou', 'config'), ou_acme),
]
for rdn in rdns:
ensure_entry(
DN(rdn, ('o', 'ipaca')),
objectclass=['top', 'organizationalUnit'],
ou=[rdn[0][0].value],
)
for rdn in extensible_rdns:
ensure_entry(
DN(rdn, ('o', 'ipaca')),
objectclass=['top', 'organizationalUnit', 'extensibleObject'],
ou=[rdn[0][0].value],
)
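# Resulting subtree (sketch):
#   ou=acme,o=ipaca
#     ou=nonces, ou=accounts, ou=orders, ou=authorizations,
#     ou=challenges, ou=certificates
#     ou=config (extensibleObject, holds ACME configuration entries)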
def ensure_entry(dn, **attrs):
"""Ensure an entry exists.
If an entry with the given DN already exists, return ``False``,
otherwise add the entry and return ``True``.
"""
conn = api.Backend.ldap2
try:
conn.get_entry(dn)
return False
except errors.NotFound:
# entry doesn't exist; add it
entry = conn.make_entry(dn, **attrs)
conn.add_entry(entry)
return True
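# Minimal usage sketch (not part of the original module); the DN below
# is a hypothetical example. ensure_entry() is idempotent: the first
# call creates the entry and returns True, later calls return False.
def _example_ensure_container():
    return ensure_entry(
        DN(('cn', 'example-container'), api.env.basedn),
        objectclass=['top', 'nsContainer'],
        cn=['example-container'],
    )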
def configure_profiles_acl():
"""Allow the Certificate Manager Agents group to modify profiles."""
new_rules = [
'certServer.profile.configuration:read,modify' +
':allow (read,modify) group="Certificate Manager Agents"' +
':Certificate Manager agents may modify (create/update/delete) ' +
'and read profiles',
'certServer.ca.account:login,logout' +
':allow (login,logout) user="anybody"' +
':Anybody can login and logout',
]
return __add_acls(new_rules)
def configure_lightweight_ca_acls():
"""Allow Certificate Manager Agents to manage lightweight CAs."""
new_rules = [
'certServer.ca.authorities:list,read' +
':allow (list,read) user="anybody"' +
':Anybody may list and read lightweight authorities',
'certServer.ca.authorities:create,modify' +
':allow (create,modify) group="Administrators"' +
':Administrators may create and modify lightweight authorities',
'certServer.ca.authorities:delete' +
':allow (delete) group="Administrators"' +
':Administrators may delete lightweight authorities',
'certServer.ca.authorities:create,modify,delete' +
':allow (create,modify,delete) group="Certificate Manager Agents"' +
':Certificate Manager Agents may manage lightweight authorities',
]
return __add_acls(new_rules)
def configure_acme_acls():
"""Allow the ACME Agents to modify profiles."""
# The "execute" operation sounds scary, but it actually only allows
# revocation and unrevocation. See CertResource.java and
# base/ca/shared/conf/acl.properties in the Dogtag source.
new_rules = [
'certServer.ca.certs:execute'
f':allow (execute) group="{ACME_AGENT_GROUP}"'
':ACME Agents may execute cert operations',
]
return __add_acls(new_rules)
def __add_acls(new_rules):
"""Add the given Dogtag ACLs.
``new_rules``
Iterable of ACL rule values to add
Return ``True`` if any ACLs were added otherwise ``False``.
"""
updated = False
dn = DN(('cn', 'aclResources'), ('o', 'ipaca'))
conn = api.Backend.ldap2
entry = conn.get_entry(dn)
cur_rules = entry.get('resourceACLS', [])
add_rules = [rule for rule in new_rules if rule not in cur_rules]
if add_rules:
cur_rules.extend(add_rules)
conn.update_entry(entry)
updated = True
return updated
def __get_profile_config(profile_id):
sub_dict = dict(
DOMAIN=ipautil.format_netloc(api.env.domain),
IPA_CA_RECORD=ipalib.constants.IPA_CA_RECORD,
CRL_ISSUER='CN=Certificate Authority,o=ipaca',
SUBJECT_DN_O=dsinstance.DsInstance().find_subject_base(),
ACME_AGENT_GROUP=ACME_AGENT_GROUP,
)
# To work around lack of proper profile upgrade system, we ship
# two versions of some profiles - one for new installs only, and
# the other for upgrading to LDAP-based profiles in an existing
# deployment.
#
# Select UPGRADE version if we are in the 'updates' API context
# and an upgrade-specific version of the profile exists.
#
profile_filename = '/usr/share/ipa/profiles/{}.cfg'.format(profile_id)
profile_upg_filename = \
'/usr/share/ipa/profiles/{}.UPGRADE.cfg'.format(profile_id)
if api.env.context == 'updates' and os.path.isfile(profile_upg_filename):
profile_filename = profile_upg_filename
return ipautil.template_file(profile_filename, sub_dict)
def import_included_profiles():
conn = api.Backend.ldap2
ensure_entry(
DN(('cn', 'ca'), api.env.basedn),
objectclass=['top', 'nsContainer'],
cn=['ca'],
)
ensure_entry(
DN(api.env.container_certprofile, api.env.basedn),
objectclass=['top', 'nsContainer'],
cn=['certprofiles'],
)
# At this point Apache may or may not be running with a valid
# certificate. The local server is not yet recognized as a full
# CA yet so it isn't discoverable. So try to do some detection
# on what port to use, 443 (remote) or 8443 (local) for importing
# the profiles.
#
# api.Backend.ra_certprofile invokes the RestClient class
# which will discover and login to the CA REST API. We can
# use this information to detect where to import the profiles.
#
# If the login is successful (e.g. doesn't raise an exception)
# and it returns our hostname (it prefers the local host) then
# we override and talk locally.
#
# Otherwise a NetworkError means we can't connect on 443 (perhaps
# a firewall) or we get an HTTP error (valid TLS certificate on
# Apache but no CA, login fails with 404) so we override to the
# local server.
#
# When the override port was always set to 8443, the RestClient could
# pick a remote server, and since 8443 isn't in our firewall profile,
# setting up a new server would fail.
try:
with api.Backend.ra_certprofile as profile_api:
if profile_api.ca_host == api.env.host:
api.Backend.ra_certprofile.override_port = 8443
except (errors.NetworkError, errors.RemoteRetrieveError) as e:
logger.debug('Overriding CA port: %s', e)
api.Backend.ra_certprofile.override_port = 8443
for (profile_id, desc, store_issued) in dogtag.INCLUDED_PROFILES:
dn = DN(('cn', profile_id),
api.env.container_certprofile, api.env.basedn)
try:
conn.get_entry(dn)
except errors.NotFound:
# profile not found; add it
entry = conn.make_entry(
dn,
objectclass=['ipacertprofile'],
cn=[profile_id],
description=[desc],
ipacertprofilestoreissued=[store_issued],
)
conn.add_entry(entry)
# Create the profile, replacing any existing profile of same name
profile_data = __get_profile_config(profile_id)
try:
_create_dogtag_profile(profile_id, profile_data,
overwrite=True)
except errors.HTTPRequestError as e:
logger.warning("Failed to import profile '%s': %s. Running "
"ipa-server-upgrade when installation is "
"completed may resolve this issue.",
profile_id, e)
conn.delete_entry(entry)
else:
logger.debug("Imported profile '%s'", profile_id)
else:
logger.debug(
"Profile '%s' is already in LDAP; skipping", profile_id
)
api.Backend.ra_certprofile.override_port = None
def repair_profile_caIPAserviceCert():
"""
A regression caused replica installation to replace the FreeIPA
version of caIPAserviceCert with the version shipped by Dogtag.
This function detects and repairs occurrences of this problem.
"""
api.Backend.ra_certprofile.override_port = 8443
profile_id = 'caIPAserviceCert'
with api.Backend.ra_certprofile as profile_api:
try:
cur_config = profile_api.read_profile(profile_id).splitlines()
except errors.RemoteRetrieveError:
# no profile there to check/repair
api.Backend.ra_certprofile.override_port = None
return
indicators = [
(
b"policyset.serverCertSet.1.default.params.name="
b"CN=$request.req_subject_name.cn$, OU=pki-ipa, O=IPA "
),
(
b"policyset.serverCertSet.9.default.params."
b"crlDistPointsPointName_0="
b"https://ipa.example.com/ipa/crl/MasterCRL.bin"
),
]
need_repair = all(line in cur_config for line in indicators)
if need_repair:
logger.debug(
"Detected that profile '%s' has been replaced with "
"incorrect version; begin repair.", profile_id)
_create_dogtag_profile(
profile_id, __get_profile_config(profile_id), overwrite=True)
logger.debug("Repair of profile '%s' complete.", profile_id)
api.Backend.ra_certprofile.override_port = None
def migrate_profiles_to_ldap():
"""Migrate profiles from filesystem to LDAP.
This must be run *after* switching to the LDAPProfileSubsystem
and restarting the CA.
The profile might already exist, e.g. if a replica was already
upgraded, so this case is ignored. New/missing profiles are imported
into LDAP. Existing profiles are not modified. This means that they are
neither enabled nor updated when the file on disk has been changed.
"""
ensure_ldap_profiles_container()
api.Backend.ra_certprofile.override_port = 8443
with open(paths.CA_CS_CFG_PATH) as f:
cs_cfg = f.read()
match = re.search(r'^profile\.list=(\S*)', cs_cfg, re.MULTILINE)
profile_ids = match.group(1).split(',')
profile_states = _get_ldap_profile_states()
for profile_id in profile_ids:
state = profile_states.get(profile_id.lower(), ProfileState.MISSING)
if state != ProfileState.MISSING:
# We don't reconcile enabled/disabled state.
logger.debug(
"Profile '%s' is already in LDAP and %s; skipping",
profile_id, state.value
)
continue
logger.info("Migrating profile '%s'", profile_id)
match = re.search(
r'^profile\.{}\.config=(\S*)'.format(profile_id),
cs_cfg, re.MULTILINE
)
if match is None:
logger.info("No file for profile '%s'; skipping", profile_id)
continue
filename = match.group(1)
match = re.search(
r'^profile\.{}\.class_id=(\S*)'.format(profile_id),
cs_cfg, re.MULTILINE
)
if match is None:
logger.info("No class_id for profile '%s'; skipping", profile_id)
continue
class_id = match.group(1)
with open(filename) as f:
profile_data = f.read()
if profile_data[-1] != '\n':
profile_data += '\n'
profile_data += 'profileId={}\n'.format(profile_id)
profile_data += 'classId={}\n'.format(class_id)
# Import the profile, but do not replace it if it already exists.
# This prevents replicas from replacing IPA-managed profiles with
# Dogtag default profiles of same name.
#
_create_dogtag_profile(profile_id, profile_data, overwrite=False)
api.Backend.ra_certprofile.override_port = None
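# Illustrative CS.cfg fragment that the regexes above expect (profile
# name, path and class_id are example values):
#   profile.list=caIPAserviceCert,caServerCert
#   profile.caIPAserviceCert.config=/var/lib/pki/pki-tomcat/ca/profiles/ca/caIPAserviceCert.cfg
#   profile.caIPAserviceCert.class_id=caEnrollImpl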
class ProfileState(enum.Enum):
MISSING = "missing"
ENABLED = "enabled"
DISABLED = "disabled"
def _get_ldap_profile_states():
"""Get LDAP profile states
The function directly access LDAP for performance reasons. It's much
faster than Dogtag's REST API and it's easier to check profiles for all
subsystems.
:return: mapping of lowercase profile id to state enum member
"""
conn = api.Backend.ldap2
entries = conn.get_entries(
base_dn=PROFILES_DN,
scope=conn.SCOPE_SUBTREE,
filter="(objectClass=certProfile)",
attrs_list=["cn", "certProfileConfig"]
)
results = {}
for entry in entries:
single = entry.single_value
cn = single["cn"]
try:
cfg = single["certProfileConfig"]
except (ValueError, KeyError):
# certProfileConfig is neither mandatory nor single value
# skip entries with incomplete configuration
state = ProfileState.MISSING
else:
if isinstance(cfg, bytes):
# some profile configurations are marked as binary
cfg = cfg.decode("utf-8")
for line in cfg.split("\n"):
if line.lower() == "enable=true":
state = ProfileState.ENABLED
break
else:
state = ProfileState.DISABLED
results[cn.lower()] = state
return results
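# Illustrative return value (profile names are example values):
#   {'caipaservicecert': ProfileState.ENABLED,
#    'cacacert': ProfileState.DISABLED}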
def _create_dogtag_profile(profile_id, profile_data, overwrite):
with api.Backend.ra_certprofile as profile_api:
# import the profile
try:
profile_api.create_profile(profile_data)
logger.debug("Profile '%s' successfully migrated to LDAP",
profile_id)
except errors.RemoteRetrieveError as e:
logger.debug("Error migrating '%s': %s", profile_id, e)
# profile already exists
if overwrite:
try:
profile_api.disable_profile(profile_id)
except errors.RemoteRetrieveError:
logger.debug(
"Failed to disable profile '%s' "
"(it is probably already disabled)",
profile_id)
profile_api.update_profile(profile_id, profile_data)
# enable the profile
try:
profile_api.enable_profile(profile_id)
except errors.RemoteRetrieveError:
logger.debug(
"Failed to enable profile '%s' "
"(it is probably already enabled)",
profile_id)
def ensure_ipa_authority_entry():
"""Add the IPA CA ipaCa object if missing.
This requires the "host authority" authority entry to have been
created, which Dogtag will do automatically upon startup, if the
ou=authorities,ou=ca,o=ipaca container exists. Therefore, the
``ensure_lightweight_cas_container`` function must be executed,
and Dogtag restarted, before executing this function.
"""
# find out authority id, issuer DN and subject DN of IPA CA
api.Backend.ra_lightweight_ca.override_port = 8443
with api.Backend.ra_lightweight_ca as lwca:
data = lwca.read_ca('host-authority')
attrs = dict(
ipacaid=data['id'],
ipacaissuerdn=data['issuerDN'],
ipacasubjectdn=data['dn'],
)
api.Backend.ra_lightweight_ca.override_port = None
ensure_entry(
DN(api.env.container_ca, api.env.basedn),
objectclass=['top', 'nsContainer'],
cn=['cas'],
)
ensure_entry(
DN(('cn', ipalib.constants.IPA_CA_CN), api.env.container_ca, api.env.basedn),
objectclass=['top', 'ipaca'],
cn=[ipalib.constants.IPA_CA_CN],
description=['IPA CA'],
**attrs
)
def ensure_default_caacl():
"""Add the default CA ACL if missing."""
ensure_entry(
DN(('cn', 'ca'), api.env.basedn),
objectclass=['top', 'nsContainer'],
cn=['ca'],
)
ensure_entry(
DN(api.env.container_caacl, api.env.basedn),
objectclass=['top', 'nsContainer'],
cn=['certprofiles'],
)
if not api.Command.caacl_find()['result']:
api.Command.caacl_add(u'hosts_services_caIPAserviceCert',
hostcategory=u'all', servicecategory=u'all')
api.Command.caacl_add_profile(u'hosts_services_caIPAserviceCert',
certprofile=(u'caIPAserviceCert',))
def add_lightweight_ca_tracking_requests(lwcas):
"""Add tracking requests for the given lightweight CAs.
The entries must have the 'cn' and 'ipacaid' attributes.
The IPA CA, if present, is skipped.
"""
for entry in lwcas:
if ipalib.constants.IPA_CA_CN in entry['cn']:
continue
nickname = "{} {}".format(
ipalib.constants.IPA_CA_NICKNAME,
entry['ipacaid'][0])
criteria = {
'cert-database': paths.PKI_TOMCAT_ALIAS_DIR,
'cert-nickname': nickname,
'ca-name': ipalib.constants.RENEWAL_CA_NAME,
}
request_id = certmonger.get_request_id(criteria)
if request_id is None:
try:
certmonger.start_tracking(
certpath=paths.PKI_TOMCAT_ALIAS_DIR,
pin=certmonger.get_pin(INTERNAL_TOKEN),
nickname=nickname,
ca=ipalib.constants.RENEWAL_CA_NAME,
profile='caCACert',
pre_command='stop_pkicad',
post_command='renew_ca_cert "%s"' % nickname,
)
logger.debug(
'Lightweight CA renewal: '
'added tracking request for "%s"', nickname)
except RuntimeError as e:
logger.error(
'Lightweight CA renewal: Certmonger failed to '
'start tracking certificate: %s', e)
else:
logger.debug(
'Lightweight CA renewal: '
'already tracking certificate "%s"', nickname)
def update_ipa_conf(ca_host=None):
"""
Update IPA configuration file to ensure that RA plugins are enabled and
that CA host points to specified server (or localhost if ca_host=None).
"""
parser = RawConfigParser()
parser.read(paths.IPA_DEFAULT_CONF)
parser.set('global', 'enable_ra', 'True')
parser.set('global', 'ra_plugin', 'dogtag')
parser.set('global', 'dogtag_version', '10')
if ca_host is None:
parser.remove_option('global', 'ca_host')
else:
parser.set('global', 'ca_host', ca_host)
with open(paths.IPA_DEFAULT_CONF, 'w') as f:
parser.write(f)
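# Resulting [global] section, sketched with a hypothetical CA host:
#   enable_ra = True
#   ra_plugin = dogtag
#   dogtag_version = 10
#   ca_host = ca-master.example.test    (omitted when ca_host is None)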
def import_ra_key(custodia):
custodia.import_ra_key()
CAInstance._set_ra_cert_perms()
CAInstance.configure_agent_renewal()
def check_ipa_ca_san(cert):
"""
Test whether the certificate has an ipa-ca SAN
:param cert: x509.IPACertificate
This SAN is necessary for ACME.
The caller is responsible for initializing the api.
On success returns None, on failure raises ValidationError
"""
expect = f'{ipalib.constants.IPA_CA_RECORD}.' \
f'{ipautil.format_netloc(api.env.domain)}'
try:
cert.match_hostname(expect)
except x509.ssl_match_hostname.CertificateError:
raise errors.ValidationError(
name='certificate',
error='Does not have a \'{}\' SAN'.format(expect)
)
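# Minimal usage sketch (not part of the original module), assuming an
# initialized api and an x509.IPACertificate object:
def _example_has_acme_san(cert):
    try:
        check_ipa_ca_san(cert)
    except errors.ValidationError:
        return False
    return True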
if __name__ == "__main__":
standard_logging_setup("install.log")
ds = dsinstance.DsInstance()
ca = CAInstance("EXAMPLE.COM")
ca.configure_instance("catest.example.com", "password", "password")
| 91,273
|
Python
|
.py
| 2,102
| 32.4196
| 140
| 0.594447
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,785
|
upgradeinstance.py
|
freeipa_freeipa/ipaserver/install/upgradeinstance.py
|
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import logging
import ldif
import shutil
import random
import traceback
from ipalib import api
from ipaplatform.paths import paths
from ipaplatform import services
from ipapython import ipaldap
from ipaserver.install import installutils
from ipaserver.install import schemaupdate
from ipaserver.install import ldapupdate
from ipaserver.install import service
logger = logging.getLogger(__name__)
DSE = 'dse.ldif'
COMPAT_DN = "cn=Schema Compatibility,cn=plugins,cn=config"
REPL_PLUGIN_DN_TEMPLATE = "cn=Multi%s Replication Plugin,cn=plugins,cn=config"
class GetEntryFromLDIF(ldif.LDIFParser):
"""
LDIF parser.
To get results, the parse() method must be called first, then
get_results(), which returns the parsed entries.
"""
def __init__(self, input_file, entries_dn=[]):
"""
Parse LDIF file.
:param input_file: an LDIF file to be parsed
:param entries_dn: list of DNs which will be returned. All entries are
returned if list is empty.
"""
ldif.LDIFParser.__init__(self, input_file)
self.entries_dn = entries_dn
self.results = {}
def get_results(self):
"""
Returns results in dictionary {DN: entry, ...}
"""
return self.results
def handle(self, dn, entry):
if self.entries_dn and dn not in self.entries_dn:
return
self.results[dn] = entry
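# Minimal usage sketch (not part of the original module); the path is a
# hypothetical example. parse() must run before get_results().
def _example_read_config_entry(path):
    with open(path) as in_file:
        parser = GetEntryFromLDIF(in_file, entries_dn=["cn=config"])
        parser.parse()
    return parser.get_results().get("cn=config")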
class IPAUpgrade(service.Service):
"""
Update the LDAP data in an instance by turning off all network
listeners and updating over ldapi. This way we know the server is
quiet.
"""
def __init__(self, realm_name, files=[], schema_files=[]):
"""
realm_name: Kerberos realm name, used to determine the DS instance dir
files: list of update files to process. If empty, all files from
UPDATES_DIR are used.
schema_files: list of schema update files to process
"""
# random 16-character hex suffix for the dse.ldif backup file name
rand = random.Random()
ext = ''.join("%02x" % rand.randint(0, 255) for _ in range(8))
super(IPAUpgrade, self).__init__("dirsrv", realm_name=realm_name)
serverid = ipaldap.realm_to_serverid(realm_name)
self.filename = '%s/%s' % (paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid, DSE)
self.savefilename = '%s/%s.ipa.%s' % (paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid, DSE, ext)
self.files = files
self.modified = False
self.serverid = serverid
self.schema_files = schema_files
def __start(self):
srv = services.service(self.service_name, api)
srv.start(self.serverid, ldapi=True)
api.Backend.ldap2.connect()
def __stop_instance(self):
"""Stop only the main DS instance"""
if api.Backend.ldap2.isconnected():
api.Backend.ldap2.disconnect()
super(IPAUpgrade, self).stop(self.serverid)
def create_instance(self):
ds_running = super(IPAUpgrade, self).is_running()
if ds_running:
self.step("stopping directory server", self.__stop_instance)
self.step("saving configuration", self.__save_config)
self.step("disabling listeners", self.__disable_listeners)
self.step("enabling DS global lock", self.__enable_ds_global_write_lock)
self.step("disabling Schema Compat", self.__disable_schema_compat)
self.step("starting directory server", self.__start)
if self.schema_files:
self.step("updating schema", self.__update_schema)
self.step("upgrading server", self.__upgrade)
self.step("stopping directory server", self.__stop_instance,
run_after_failure=True)
self.step("restoring configuration", self.__restore_config,
run_after_failure=True)
if ds_running:
self.step("starting directory server", self.__start)
self.start_creation(start_message="Upgrading IPA:",
show_service_name=False,
runtime=90)
def __save_config(self):
shutil.copy2(self.filename, self.savefilename)
if self.get_state('upgrade-in-progress') is not None:
logger.debug('Previous upgrade in process, not saving config')
return
else:
self.backup_state('upgrade-in-progress', True)
with open(self.filename, "r") as in_file:
parser = GetEntryFromLDIF(in_file, entries_dn=["cn=config"])
parser.parse()
try:
config_entry = parser.get_results()["cn=config"]
except KeyError:
raise RuntimeError("Unable to find cn=config entry in %s" %
self.filename)
try:
port = config_entry['nsslapd-port'][0].decode('utf-8')
except KeyError:
pass
else:
self.backup_state('nsslapd-port', port)
try:
security = config_entry['nsslapd-security'][0].decode('utf-8')
except KeyError:
pass
else:
self.backup_state('nsslapd-security', security)
try:
global_lock = config_entry[
'nsslapd-global-backend-lock'][0].decode('utf-8')
except KeyError:
pass
else:
self.backup_state('nsslapd-global-backend-lock', global_lock)
with open(self.filename, "r") as in_file:
parser = GetEntryFromLDIF(in_file, entries_dn=[COMPAT_DN])
parser.parse()
try:
compat_entry = parser.get_results()[COMPAT_DN]
except KeyError:
return
schema_compat_enabled = compat_entry.get('nsslapd-pluginEnabled')
if schema_compat_enabled is None:
schema_compat_enabled = compat_entry.get('nsslapd-pluginenabled')
if schema_compat_enabled:
self.backup_state('schema_compat_enabled',
schema_compat_enabled[0].decode('utf-8'))
def __enable_ds_global_write_lock(self):
ldif_outfile = "%s.modified.out" % self.filename
with open(ldif_outfile, "w") as out_file:
with open(self.filename, "r") as in_file:
parser = installutils.ModifyLDIF(in_file, out_file)
parser.replace_value(
"cn=config", "nsslapd-global-backend-lock", [b"on"])
parser.parse()
shutil.copy2(ldif_outfile, self.filename)
def __restore_config(self):
# peek at the saved values during the restoration
port = self.get_state('nsslapd-port')
security = self.get_state('nsslapd-security')
global_lock = self.get_state('nsslapd-global-backend-lock')
schema_compat_enabled = self.get_state('schema_compat_enabled')
ldif_outfile = "%s.modified.out" % self.filename
with open(ldif_outfile, "w") as out_file:
with open(self.filename, "r") as in_file:
parser = installutils.ModifyLDIF(in_file, out_file)
if port is not None:
parser.replace_value(
"cn=config", "nsslapd-port", [port.encode('utf-8')])
if security is not None:
parser.replace_value("cn=config", "nsslapd-security",
[security.encode('utf-8')])
# disable global lock by default
parser.remove_value("cn=config", "nsslapd-global-backend-lock")
if global_lock is not None:
parser.add_value("cn=config", "nsslapd-global-backend-lock",
[global_lock.encode('utf-8')])
if schema_compat_enabled is not None:
parser.replace_value(
COMPAT_DN, "nsslapd-pluginEnabled",
[schema_compat_enabled.encode('utf-8')])
parser.parse()
shutil.copy2(ldif_outfile, self.filename)
# Now the restore is really done, remove upgrade-in-progress
self.restore_state('upgrade-in-progress')
# the values are restored, remove from the state file
self.restore_state('nsslapd-port')
self.restore_state('nsslapd-security')
self.restore_state('nsslapd-global-backend-lock')
self.restore_state('schema_compat_enabled')
def __disable_listeners(self):
ldif_outfile = "%s.modified.out" % self.filename
with open(ldif_outfile, "w") as out_file:
with open(self.filename, "r") as in_file:
parser = installutils.ModifyLDIF(in_file, out_file)
parser.replace_value("cn=config", "nsslapd-port", [b"0"])
parser.replace_value("cn=config", "nsslapd-security", [b"off"])
parser.remove_value("cn=config", "nsslapd-ldapientrysearchbase")
parser.parse()
shutil.copy2(ldif_outfile, self.filename)
def __disable_schema_compat(self):
ldif_outfile = "%s.modified.out" % self.filename
with open(self.filename, "r") as in_file:
parser = GetEntryFromLDIF(in_file, entries_dn=[COMPAT_DN])
parser.parse()
try:
compat_entry = parser.get_results()[COMPAT_DN]
except KeyError:
return
if not compat_entry.get('nsslapd-pluginEnabled'):
return
with open(ldif_outfile, "w") as out_file:
with open(self.filename, "r") as in_file:
parser = installutils.ModifyLDIF(in_file, out_file)
parser.remove_value(COMPAT_DN, "nsslapd-pluginEnabled")
parser.remove_value(COMPAT_DN, "nsslapd-pluginenabled")
parser.add_value(COMPAT_DN, "nsslapd-pluginEnabled",
[b"off"])
parser.parse()
shutil.copy2(ldif_outfile, self.filename)
def __update_schema(self):
self.modified = schemaupdate.update_schema(
self.schema_files, ldapi=True) or self.modified
def __upgrade(self):
try:
ld = ldapupdate.LDAPUpdate(api=self.api)
if len(self.files) == 0:
self.files = ld.get_all_files(ldapupdate.UPDATES_DIR)
self.modified = (ld.update(self.files) or self.modified)
except ldapupdate.BadSyntax as e:
logger.error('Bad syntax in upgrade %s', e)
raise
except Exception as e:
# Bad things happened, return gracefully
logger.error('Upgrade failed with %s', e)
logger.debug('%s', traceback.format_exc())
raise RuntimeError(e)
| 11,483
|
Python
|
.py
| 252
| 34.900794
| 108
| 0.608906
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,786
|
ipa_trust_enable_agent.py
|
freeipa_freeipa/ipaserver/install/ipa_trust_enable_agent.py
|
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import logging
from ipalib import api
from ipaplatform import services
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool, ScriptError
from ipapython.dn import DN
from ipapython.ipautil import CalledProcessError
from ipaserver.install import installutils
logger = logging.getLogger(__name__)
class IPATrustEnableAgent(AdminTool):
command_name = "ipa-trust-enable-agent"
log_file_name = paths.IPATRUSTENABLEAGENT_LOG
usage = "%prog"
description = "Enable this server as a trust agent"
@classmethod
def add_options(cls, parser):
super(IPATrustEnableAgent, cls).add_options(parser)
parser.add_option(
"--enable-compat",
dest="enable_compat", default=False, action="store_true",
help="Enable support for trusted domains for old clients")
def validate_options(self):
super(IPATrustEnableAgent, self).validate_options(needs_root=True)
installutils.check_server_configuration()
def _enable_compat_tree(self):
logger.info("Enabling Schema Compatibility plugin")
compat_plugin_dn = DN("cn=Schema Compatibility,cn=plugins,cn=config")
lookup_nsswitch_name = "schema-compat-lookup-nsswitch"
for config in (("cn=users", "user"), ("cn=groups", "group")):
entry_dn = DN(config[0], compat_plugin_dn)
current = api.Backend.ldap2.get_entry(entry_dn)
lookup_nsswitch = current.get(lookup_nsswitch_name, [])
if config[1] not in lookup_nsswitch:
logger.debug("Enabling Schema Compatibility plugin "
"for %s", config[0])
current[lookup_nsswitch_name] = [config[1]]
api.Backend.ldap2.update_entry(current)
else:
logger.debug("Schema Compatibility plugin already enabled "
"for %s", config[0])
def run(self):
api.bootstrap(in_server=True, confdir=paths.ETC_IPA)
api.finalize()
try:
api.Backend.ldap2.connect() # ensure DS is up
# If required, enable Schema compat plugin on users/groups
if self.options.enable_compat:
try:
self._enable_compat_tree()
except Exception as e:
raise ScriptError(
"Enabling Schema Compatibility plugin "
"failed: {}".format(e))
# Restart 389-ds and sssd
logger.info("Restarting Directory Server")
try:
services.knownservices.dirsrv.restart()
except Exception as e:
raise ScriptError(
"Directory Server restart was unsuccessful: {}".format(e))
logger.info("Restarting SSSD service")
try:
sssd = services.service('sssd', api)
sssd.restart()
except CalledProcessError as e:
raise ScriptError(
"SSSD service restart was unsuccessful: {}".format(e))
finally:
if api.Backend.ldap2.isconnected():
api.Backend.ldap2.disconnect()
return 0
| 3,353 | Python | .py | 75 | 33.52 | 78 | 0.616564 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,787 | dns.py | freeipa_freeipa/ipaserver/install/dns.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
DNS installer module
"""
from __future__ import absolute_import
from __future__ import print_function
import enum
import logging
import os
import sys
import six
from subprocess import CalledProcessError
from ipalib import api
from ipalib import errors
from ipalib import util
from ipalib.install import hostname, sysrestore
from ipalib.install.service import enroll_only, prepare_only
from ipalib.install import dnsforwarders
from ipaplatform.paths import paths
from ipaplatform.constants import constants
from ipaplatform import services
from ipapython import ipautil
from ipapython import dnsutil
from ipapython.dn import DN
from ipapython.dnsutil import check_zone_overlap
from ipapython.install import typing
from ipapython.install.core import group, knob
from ipapython.admintool import ScriptError
from ipapython.ipautil import user_input
from ipaserver.install.installutils import get_server_ip_address
from ipaserver.install.installutils import read_dns_forwarders
from ipaserver.install.installutils import update_hosts_file
from ipaserver.install import bindinstance
from ipaserver.install import dnskeysyncinstance
from ipaserver.install import odsexporterinstance
from ipaserver.install import opendnssecinstance
from ipaserver.install import service
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
ip_addresses = []
reverse_zones = []
def _find_dnssec_enabled_zones(conn):
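    """Return the idnsname of every zone with inline DNSSEC signing
    (idnssecinlinesigning) enabled, or an empty list."""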
search_kw = {'idnssecinlinesigning': True}
dnssec_enabled_filter = conn.make_filter(search_kw)
dn = DN('cn=dns', api.env.basedn)
try:
entries, _truncated = conn.find_entries(
base_dn=dn, filter=dnssec_enabled_filter, attrs_list=['idnsname'])
except errors.NotFound:
return []
else:
return [entry.single_value['idnsname'] for entry in entries
if 'idnsname' in entry]
def _is_master():
# test if server is DNSSEC key master
masters = opendnssecinstance.get_dnssec_key_masters(api.Backend.ldap2)
if api.env.host not in masters:
raise RuntimeError("Current server is not DNSSEC key master")
def _disable_dnssec():
fstore = sysrestore.FileStore(paths.SYSRESTORE)
ods = opendnssecinstance.OpenDNSSECInstance(fstore)
ods.realm = api.env.realm
ods_exporter = odsexporterinstance.ODSExporterInstance(fstore)
ods_exporter.realm = api.env.realm
# unconfigure services first
ods.uninstall() # needs keytab to flush the latest ods database
ods_exporter.uninstall()
ods.ldap_disable('DNSSEC', api.env.host, api.env.basedn)
ods.ldap_remove_service_container('DNSSEC', api.env.host, api.env.basedn)
ods_exporter.ldap_disable('DNSKeyExporter', api.env.host, api.env.basedn)
ods_exporter.remove_service()
ods_exporter.ldap_remove_service_container('DNSKeyExporter', api.env.host,
api.env.basedn)
conn = api.Backend.ldap2
dn = DN(('cn', 'DNSSEC'), ('cn', api.env.host),
api.env.container_masters, api.env.basedn)
try:
entry = conn.get_entry(dn)
except errors.NotFound:
pass
else:
ipa_config = entry.get('ipaConfigString', [])
if opendnssecinstance.KEYMASTER in ipa_config:
ipa_config.remove(opendnssecinstance.KEYMASTER)
conn.update_entry(entry)
def package_check(exception):
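    """Raise the given exception class when the integrated DNS package
    (constants.IPA_DNS_PACKAGE_NAME) is missing, detected via the
    presence of paths.IPA_DNS_INSTALL."""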
if not os.path.isfile(paths.IPA_DNS_INSTALL):
raise exception(
"Integrated DNS requires '%s' package"
% constants.IPA_DNS_PACKAGE_NAME
)
def install_check(standalone, api, replica, options, hostname):
global ip_addresses
global reverse_zones
fstore = sysrestore.FileStore(paths.SYSRESTORE)
package_check(RuntimeError)
# when installing first DNS instance we need to check zone overlap
if replica or standalone:
already_enabled = api.Command.dns_is_enabled()['result']
else:
already_enabled = False
if not already_enabled:
domain = dnsutil.DNSName(util.normalize_zone(api.env.domain))
try:
dnsutil.check_zone_overlap(domain, raise_on_error=False)
except dnsutil.DNSZoneAlreadyExists as e:
if options.force or options.allow_zone_overlap:
logger.warning("%s Please make sure that the domain is "
"properly delegated to this IPA server.",
e)
else:
hst = dnsutil.DNSName(hostname).make_absolute().to_text()
if hst not in e.kwargs['ns']:
raise ValueError(str(e))
for reverse_zone in options.reverse_zones:
try:
dnsutil.check_zone_overlap(reverse_zone)
except dnsutil.DNSZoneAlreadyExists as e:
if options.force or options.allow_zone_overlap:
logger.warning('%s', str(e))
else:
raise e
if standalone:
print("==============================================================================")
print("This program will setup DNS for the IPA Server.")
print("")
print("This includes:")
print(" * Configure DNS (bind)")
print(" * Configure SoftHSM (required by DNSSEC)")
print(" * Configure ipa-dnskeysyncd (required by DNSSEC)")
if options.dnssec_master:
print(" * Configure ipa-ods-exporter (required by DNSSEC key master)")
print(" * Configure OpenDNSSEC (required by DNSSEC key master)")
print(" * Generate DNSSEC master key (required by DNSSEC key master)")
elif options.disable_dnssec_master:
print(" * Unconfigure ipa-ods-exporter")
print(" * Unconfigure OpenDNSSEC")
print("")
print("No new zones will be signed without DNSSEC key master IPA server.")
print("")
print(("Please copy file from %s after uninstallation. This file is needed "
"on new DNSSEC key " % paths.IPA_KASP_DB_BACKUP))
print("master server")
print("")
print("NOTE: DNSSEC zone signing is not enabled by default")
print("")
if options.dnssec_master:
print("Plan carefully, replacing DNSSEC key master is not recommended")
print("")
print("")
print("To accept the default shown in brackets, press the Enter key.")
print("")
if (options.dnssec_master and not options.unattended and not
ipautil.user_input(
"Do you want to setup this IPA server as DNSSEC key master?",
False)):
sys.exit("Aborted")
elif (options.disable_dnssec_master and not options.unattended and not
ipautil.user_input(
"Do you want to disable current DNSSEC key master?",
False)):
sys.exit("Aborted")
if options.disable_dnssec_master:
_is_master()
if options.disable_dnssec_master or options.dnssec_master:
dnssec_zones = _find_dnssec_enabled_zones(api.Backend.ldap2)
if options.disable_dnssec_master:
if dnssec_zones and not options.force:
raise RuntimeError(
"Cannot disable DNSSEC key master, DNSSEC signing is still "
"enabled for following zone(s):\n"
"%s\n"
"It is possible to move DNSSEC key master role to a different "
"server by using --force option to skip this check.\n\n"
"WARNING: You have to immediately copy kasp.db file to a new "
"server and run command 'ipa-dns-install --dnssec-master "
"--kasp-db'.\n"
"Your DNS zones will become unavailable if you "
"do not reinstall the DNSSEC key master role immediately." %
", ".join([str(zone) for zone in dnssec_zones]))
elif options.dnssec_master:
ods = opendnssecinstance.OpenDNSSECInstance(fstore)
ods.realm = api.env.realm
dnssec_masters = ods.get_masters()
        # we can reinstall the current server if it is the DNSSEC master
if dnssec_masters and api.env.host not in dnssec_masters:
print("DNSSEC key master(s):", u','.join(dnssec_masters))
raise ScriptError(
"Only one DNSSEC key master is supported in current version.")
if options.kasp_db_file:
dnskeysyncd = services.service('ipa-dnskeysyncd', api)
if not dnskeysyncd.is_installed():
raise RuntimeError("ipa-dnskeysyncd is not configured on this "
"server, you cannot reuse OpenDNSSEC "
"database (kasp.db file)")
# check if replica can be the DNSSEC master
cmd = [paths.IPA_DNSKEYSYNCD_REPLICA]
environment = {
"SOFTHSM2_CONF": paths.DNSSEC_SOFTHSM2_CONF,
}
# stop dnskeysyncd before test
dnskeysyncd_running = dnskeysyncd.is_running()
dnskeysyncd.stop()
try:
ipautil.run(cmd, env=environment,
runas=constants.ODS_USER,
suplementary_groups=[constants.NAMED_GROUP])
except CalledProcessError as e:
logger.debug("%s", e)
raise RuntimeError("This IPA server cannot be promoted to "
"DNSSEC master role because some keys were "
"not replicated from the original "
"DNSSEC master server")
finally:
if dnskeysyncd_running:
dnskeysyncd.start()
elif dnssec_zones and not options.force:
            # some zones have --dnssec=true, make sure the user really wants
            # to install a new database
raise RuntimeError(
"DNSSEC signing is already enabled for following zone(s): %s\n"
"Installation cannot continue without the OpenDNSSEC database "
"file from the original DNSSEC master server.\n"
"Please use option --kasp-db to specify location "
"of the kasp.db file copied from the original "
"DNSSEC master server.\n"
"WARNING: Zones will become unavailable if you do not provide "
"the original kasp.db file." %
", ".join([str(zone) for zone in dnssec_zones]))
ip_addresses = get_server_ip_address(hostname, options.unattended,
True, options.ip_addresses)
util.no_matching_interface_for_ip_address_warning(ip_addresses)
if not options.forward_policy:
        # user did not specify policy, derive it: default is 'first' but
        # if any of the local IP addresses belongs to a private range use 'only'
options.forward_policy = 'first'
for ip in ip_addresses:
if dnsutil.inside_auto_empty_zone(dnsutil.DNSName(ip.reverse_dns)):
options.forward_policy = 'only'
logger.debug('IP address %s belongs to a private range, '
'using forward policy only', ip)
break
if options.no_forwarders:
options.forwarders = []
elif options.forwarders or options.auto_forwarders:
if not options.forwarders:
options.forwarders = []
if options.auto_forwarders:
options.forwarders.extend(dnsforwarders.get_nameservers())
elif standalone or not replica:
options.forwarders = read_dns_forwarders()
# test DNSSEC forwarders
if options.forwarders:
if not options.no_dnssec_validation \
and not bindinstance.check_forwarders(options.forwarders):
options.no_dnssec_validation = True
print("WARNING: DNSSEC validation will be disabled")
logger.debug("will use DNS forwarders: %s\n", options.forwarders)
if not standalone:
search_reverse_zones = False
else:
search_reverse_zones = True
if not standalone and replica:
reverse_zones_unattended_check = True
else:
reverse_zones_unattended_check = options.unattended
reverse_zones = bindinstance.check_reverse_zones(
ip_addresses, options.reverse_zones, options,
reverse_zones_unattended_check, search_reverse_zones
)
if reverse_zones:
print("Using reverse zone(s) %s" % ', '.join(reverse_zones))
def install(standalone, replica, options, api=api):
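    """Set up the integrated DNS stack: BIND, ipa-dnskeysyncd and, when
    this server becomes (or stops being) the DNSSEC key master,
    OpenDNSSEC and ipa-ods-exporter."""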
fstore = sysrestore.FileStore(paths.SYSRESTORE)
if standalone:
# otherwise this is done by server/replica installer
update_hosts_file(ip_addresses, api.env.host, fstore)
bind = bindinstance.BindInstance(fstore, api=api)
bind.setup(api.env.host, ip_addresses, api.env.realm, api.env.domain,
options.forwarders, options.forward_policy,
reverse_zones, zonemgr=options.zonemgr,
no_dnssec_validation=options.no_dnssec_validation)
if standalone and not options.unattended:
print("")
print("The following operations may take some minutes to complete.")
print("Please wait until the prompt is returned.")
print("")
bind.create_instance()
print("Restarting the web server to pick up resolv.conf changes")
services.knownservices.httpd.restart(capture_output=True)
# on dnssec master this must be installed last
dnskeysyncd = dnskeysyncinstance.DNSKeySyncInstance(fstore)
dnskeysyncd.create_instance(api.env.host, api.env.realm)
if options.dnssec_master:
ods = opendnssecinstance.OpenDNSSECInstance(fstore)
ods_exporter = odsexporterinstance.ODSExporterInstance(fstore)
ods_exporter.create_instance(api.env.host, api.env.realm)
ods.create_instance(api.env.host, api.env.realm,
kasp_db_file=options.kasp_db_file)
elif options.disable_dnssec_master:
_disable_dnssec()
dnskeysyncd.start_dnskeysyncd()
bind.start_named()
    # Enable configured services for standalone installs
if standalone:
service.enable_services(api.env.host)
# this must be done when bind is started and operational
bind.update_system_records()
if standalone:
print("==============================================================================")
print("Setup complete")
print("")
bind.check_global_configuration()
print("")
print("")
print("\tYou must make sure these network ports are open:")
print("\t\tTCP Ports:")
print("\t\t * 53: bind")
print("\t\tUDP Ports:")
print("\t\t * 53: bind")
elif not standalone and replica:
print("")
bind.check_global_configuration()
print("")
def uninstall_check(options):
# test if server is DNSSEC key master
masters = opendnssecinstance.get_dnssec_key_masters(api.Backend.ldap2)
if api.env.host in masters:
print("This server is active DNSSEC key master. Uninstall could break your DNS system.")
if not (options.unattended or user_input(
"Are you sure you want to continue with the uninstall "
"procedure?", False)):
print("")
print("Aborting uninstall operation.")
sys.exit(1)
def uninstall():
fstore = sysrestore.FileStore(paths.SYSRESTORE)
ods = opendnssecinstance.OpenDNSSECInstance(fstore)
if ods.is_configured():
ods.uninstall()
ods_exporter = odsexporterinstance.ODSExporterInstance(fstore)
if ods_exporter.is_configured():
ods_exporter.uninstall()
bind = bindinstance.BindInstance(fstore)
if bind.is_configured():
bind.uninstall()
dnskeysync = dnskeysyncinstance.DNSKeySyncInstance(fstore)
if dnskeysync.is_configured():
dnskeysync.uninstall()
class DNSForwardPolicy(enum.Enum):
ONLY = 'only'
FIRST = 'first'
@group
class DNSInstallInterface(hostname.HostNameInstallInterface):
"""
Interface of the DNS installer
Knobs defined here will be available in:
* ipa-server-install
* ipa-replica-prepare
* ipa-replica-install
* ipa-dns-install
"""
description = "DNS"
allow_zone_overlap = knob(
None,
description="Create DNS zone even if it already exists",
)
allow_zone_overlap = prepare_only(allow_zone_overlap)
reverse_zones = knob(
typing.List[str], [],
description=("The reverse DNS zone to use. This option can be used "
"multiple times"),
cli_names='--reverse-zone',
cli_metavar='REVERSE_ZONE',
)
reverse_zones = prepare_only(reverse_zones)
@reverse_zones.validator
def reverse_zones(self, values):
if not self.allow_zone_overlap:
for zone in values:
check_zone_overlap(zone)
no_reverse = knob(
None,
description="Do not create new reverse DNS zone",
)
no_reverse = prepare_only(no_reverse)
auto_reverse = knob(
None,
description="Create necessary reverse zones",
)
auto_reverse = prepare_only(auto_reverse)
zonemgr = knob(
str, None,
description=("DNS zone manager e-mail address. Defaults to "
"hostmaster@DOMAIN"),
)
zonemgr = prepare_only(zonemgr)
@zonemgr.validator
def zonemgr(self, value):
# validate the value first
if six.PY3:
bindinstance.validate_zonemgr_str(value)
else:
try:
# IDNA support requires unicode
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'utf-8'
# value is string in py2 and py3
if not isinstance(value, unicode):
value = value.decode(encoding)
bindinstance.validate_zonemgr_str(value)
except ValueError as e:
# FIXME we can do this in better way
# https://fedorahosted.org/freeipa/ticket/4804
# decode to proper stderr encoding
stderr_encoding = getattr(sys.stderr, 'encoding', None)
if stderr_encoding is None:
stderr_encoding = 'utf-8'
error = unicode(e).encode(stderr_encoding)
raise ValueError(error)
forwarders = knob(
typing.List[ipautil.CheckedIPAddressLoopback], None,
description=("Add a DNS forwarder. This option can be used multiple "
"times"),
cli_names='--forwarder',
)
forwarders = enroll_only(forwarders)
no_forwarders = knob(
None,
description="Do not add any DNS forwarders, use root servers instead",
)
no_forwarders = enroll_only(no_forwarders)
auto_forwarders = knob(
None,
description="Use DNS forwarders configured in /etc/resolv.conf",
)
auto_forwarders = enroll_only(auto_forwarders)
forward_policy = knob(
DNSForwardPolicy, None,
description=("DNS forwarding policy for global forwarders"),
)
forward_policy = enroll_only(forward_policy)
no_dnssec_validation = knob(
None,
description="Disable DNSSEC validation",
)
no_dnssec_validation = enroll_only(no_dnssec_validation)
dnssec_master = False
disable_dnssec_master = False
kasp_db_file = None
force = False
| 19,715 | Python | .py | 455 | 33.876923 | 96 | 0.636833 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,788 | upgrade.py | freeipa_freeipa/ipaserver/install/server/upgrade.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import errno
import itertools
import logging
import re
import os
import glob
import shutil
import fileinput
import stat
import sys
import tempfile
from contextlib import contextmanager
from augeas import Augeas
from pkg_resources import parse_version
from ipalib import api, x509
from ipalib.constants import RENEWAL_CA_NAME, RA_AGENT_PROFILE, IPA_CA_RECORD
from ipalib.install import certmonger
from ipalib import sysrestore
from ipalib.facts import is_ipa_configured
import SSSDConfig
import ipalib.util
import ipalib.errors
from ipaclient.install import timeconf
from ipaclient.install.client import sssd_enable_ifp
from ipalib.install.dnsforwarders import detect_resolve1_resolv_conf
from ipaplatform import services
from ipaplatform.tasks import tasks
from ipapython import ipautil, version
from ipapython import ipaldap
from ipapython import directivesetter
from ipapython.dn import DN
from ipapython.version import KRB5_BUILD_VERSION
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipaserver import servroles
from ipaserver.install import installutils
from ipaserver.install import dsinstance
from ipaserver.install import httpinstance
from ipaserver.install import bindinstance
from ipaserver.install import service
from ipaserver.install import cainstance
from ipaserver.install import krainstance
from ipaserver.install import certs
from ipaserver.install import otpdinstance
from ipaserver.install import schemaupdate
from ipaserver.install import custodiainstance
from ipaserver.install import sysupgrade
from ipaserver.install import dnskeysyncinstance
from ipaserver.install import dogtaginstance
from ipaserver.install import krbinstance
from ipaserver.install import adtrustinstance
from ipaserver.install import replication
from ipaserver.install.upgradeinstance import IPAUpgrade
from ipaserver.install.ldapupdate import BadSyntax
import six
# pylint: disable=import-error
if six.PY3:
# The SafeConfigParser class has been renamed to ConfigParser in Py3
from configparser import ConfigParser as SafeConfigParser
else:
from ConfigParser import SafeConfigParser
# pylint: enable=import-error
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
class KpasswdInstance(service.SimpleServiceInstance):
def __init__(self):
service.SimpleServiceInstance.__init__(self, "ipa_kpasswd")
def uninstall_ipa_kpasswd():
"""
We can't use the full service uninstaller because that will attempt
to stop and disable the service which by now doesn't exist. We just
want to clean up sysrestore.state to remove all references to
ipa_kpasswd.
"""
ipa_kpasswd = KpasswdInstance()
enabled = not ipa_kpasswd.restore_state("enabled")
if enabled is not None and not enabled:
ipa_kpasswd.remove()
def uninstall_ipa_memcached():
"""
We can't use the full service uninstaller because that will attempt
to stop and disable the service which by now doesn't exist. We just
want to clean up sysrestore.state to remove all references to
ipa_memcached.
"""
ipa_memcached = service.SimpleServiceInstance('ipa_memcached')
if ipa_memcached.is_configured():
ipa_memcached.uninstall()
def backup_file(filename, ext):
"""Make a backup of filename using ext as the extension. Do not overwrite
previous backups."""
if not os.path.isabs(filename):
raise ValueError("Absolute path required")
backupfile = filename + ".bak"
while os.path.exists(backupfile):
backupfile = backupfile + "." + str(ext)
try:
shutil.copy2(filename, backupfile)
except IOError as e:
        if e.errno == errno.ENOENT:  # No such file or directory
pass
else:
raise e
def update_conf(sub_dict, filename, template_filename):
template = ipautil.template_file(template_filename, sub_dict)
fd = open(filename, "w")
fd.write(template)
fd.close()
def find_autoredirect(fqdn):
"""
When upgrading ipa-rewrite.conf we need to see if the automatic redirect
was disabled during install time (or afterward). So sift through the
configuration file and see if we can determine the status.
Returns True if autoredirect is enabled, False otherwise
"""
filename = paths.HTTPD_IPA_REWRITE_CONF
if os.path.exists(filename):
pattern = r"^RewriteRule \^/\$ https://%s/ipa/ui \[L,NC,R=301\]" % fqdn
p = re.compile(pattern)
for line in fileinput.input(filename):
if p.search(line):
fileinput.close()
return True
fileinput.close()
return False
return True
def find_version(filename):
"""Find the version of a configuration file
If no VERSION entry exists in the file, returns 0.
If the file does not exist, returns -1.
"""
if os.path.exists(filename):
pattern = r"^[\s#]*VERSION\s+([0-9]+)\s+.*"
p = re.compile(pattern)
for line in fileinput.input(filename):
if p.search(line):
fileinput.close()
return p.search(line).group(1)
fileinput.close()
# no VERSION found
return 0
else:
return -1
def upgrade_file(sub_dict, filename, template, add=False, force=False):
"""
Get the version from the current and template files and update the
installed configuration file if there is a new template.
If add is True then create a new configuration file.
If force is True then the version comparison is skipped. This should
be used judiciously. It does not override add nor will it affect
files that don't exist (version == -1).
"""
old = int(find_version(filename))
new = int(find_version(template))
if old < 0 and not add:
logger.error("%s not found.", filename)
raise RuntimeError("%s not found." % filename)
if new < 0:
logger.error("%s not found.", template)
if new == 0:
logger.error("Template %s is not versioned.", template)
if old == 0:
# The original file does not have a VERSION entry. This means it's now
# managed by IPA, but previously was not.
logger.warning("%s is now managed by IPA. It will be "
"overwritten. A backup of the original will be made.",
filename)
if force:
logger.error("Forcing update of template %s", template)
if ((old < new) or (add and old == 0)) or force:
backup_file(filename, new)
update_conf(sub_dict, filename, template)
logger.info("Upgraded %s to version %d", filename, new)
def check_certs():
"""Check ca.crt is in the right place, and try to fix if not"""
logger.info('[Verifying that root certificate is published]')
if not os.path.exists(paths.CA_CRT):
ca_file = paths.IPA_CA_CRT
if os.path.exists(ca_file):
            old_umask = os.umask(0o22)  # make sure it's readable by httpd
try:
shutil.copyfile(ca_file, paths.CA_CRT)
finally:
os.umask(old_umask)
else:
logger.error("Missing Certification Authority file.")
logger.error("You should place a copy of the CA certificate in "
"/usr/share/ipa/html/ca.crt")
else:
logger.debug('Certificate file exists')
def update_dbmodules(realm, filename=paths.KRB5_CONF):
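    """Comment out any existing realm block under [dbmodules] in krb5.conf
    and append a new one that uses the ipadb.so KDC backend, e.g.:

        EXAMPLE.COM = {
          db_library = ipadb.so
        }

    (the realm name above is illustrative)."""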
newfile = []
found_dbrealm = False
found_realm = False
prefix = ''
logger.info('[Verifying that KDC configuration is using ipa-kdb backend]')
fd = open(filename)
lines = fd.readlines()
fd.close()
if ' db_library = ipadb.so\n' in lines:
logger.debug('dbmodules already updated in %s', filename)
return
for line in lines:
if line.startswith('[dbmodules]'):
found_dbrealm = True
if found_dbrealm and line.find(realm) > -1:
found_realm = True
prefix = '#'
if found_dbrealm and line.find('}') > -1 and found_realm:
found_realm = False
newfile.append('#%s' % line)
prefix = ''
continue
newfile.append('%s%s' % (prefix, line))
# Append updated dbmodules information
newfile.append(' %s = {\n' % realm)
newfile.append(' db_library = ipadb.so\n')
newfile.append(' }\n')
# Write out new file
fd = open(filename, 'w')
fd.write("".join(newfile))
fd.close()
logger.debug('%s updated', filename)
def cleanup_kdc(fstore):
"""
Clean up old KDC files if they exist. We need to remove the actual
file and any references in the uninstall configuration.
"""
logger.info('[Checking for deprecated KDC configuration files]')
for file in ['kpasswd.keytab', 'ldappwd']:
filename = os.path.join(paths.VAR_KERBEROS_KRB5KDC_DIR, file)
ipautil.remove_file(filename)
if fstore.has_file(filename):
fstore.untrack_file(filename)
logger.debug('Uninstalling %s', filename)
def cleanup_adtrust(fstore):
"""
Clean up any old Samba backup files that were deprecated.
"""
logger.info('[Checking for deprecated backups of Samba '
'configuration files]')
for backed_up_file in [paths.SMB_CONF]:
if fstore.has_file(backed_up_file):
fstore.untrack_file(backed_up_file)
logger.debug('Removing %s from backup', backed_up_file)
def cleanup_dogtag():
"""
pkispawn leaves some mess we were not cleaning up until recently. Try
to clean up what we can.
"""
subsystems = []
if api.Command.ca_is_enabled()['result']:
subsystems.append('CA')
if api.Command.kra_is_enabled()['result']:
subsystems.append('KRA')
for system in subsystems:
logger.debug(
"Cleaning up after pkispawn for the %s subsystem",
system)
instance = dogtaginstance.DogtagInstance(
api.env.realm, system, service_desc=None,
)
instance.clean_pkispawn_files()
def cleanup_kdcinfo():
""" Remove stale kdcinfo.*|kpasswdinfo.* files generated by SSSD """
for pattern in ('kdcinfo.*', 'kpasswdinfo.*'):
for fname in glob.glob(os.path.join(paths.SSSD_PUBCONF_DIR, pattern)):
logger.debug('Removing stale info file %s', fname)
os.unlink(fname)
def upgrade_adtrust_config():
"""
Upgrade 'dedicated keytab file' in smb.conf to omit FILE: prefix
"""
if not adtrustinstance.ipa_smb_conf_exists():
return
logger.info("[Remove FILE: prefix from 'dedicated keytab file' "
"in Samba configuration]")
args = [paths.NET, "conf", "setparm", "global",
"dedicated keytab file", paths.SAMBA_KEYTAB]
try:
ipautil.run(args)
except ipautil.CalledProcessError as e:
logger.warning("Error updating Samba registry: %s", e)
logger.info("[Update 'max smbd processes' in Samba configuration "
"to prevent unlimited SMBLoris attack amplification]")
args = [paths.NET, "conf", "getparm", "global", "max smbd processes"]
try:
ipautil.run(args)
except ipautil.CalledProcessError as e:
if e.returncode == 255:
# 'max smbd processes' does not exist
args = [paths.NET, "conf", "setparm", "global",
"max smbd processes", "1000"]
try:
ipautil.run(args)
except ipautil.CalledProcessError as e2:
logger.warning("Error updating Samba registry: %s", e2)
else:
logger.warning("Error updating Samba registry: %s", e)
logger.info("[Change 'server role' from "
"'CLASSIC PRIMARY DOMAIN CONTROLLER' "
"to 'IPA PRIMARY DOMAIN CONTROLLER' in Samba configuration]")
args = [paths.NET, "conf", "setparm", "global",
"server role", "IPA PRIMARY DOMAIN CONTROLLER"]
try:
ipautil.run(args)
except ipautil.CalledProcessError as e:
# Only report an error if return code is not 255
# which indicates that the new server role is not supported
# and we don't need to do anything
if e.returncode != 255:
logger.warning("Error updating Samba registry: %s", e)
def ca_configure_profiles_acl(ca):
logger.info('[Authorizing RA Agent to modify profiles]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
return cainstance.configure_profiles_acl()
def ca_configure_lightweight_ca_acls(ca):
logger.info('[Authorizing RA Agent to manage lightweight CAs]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
return cainstance.configure_lightweight_ca_acls()
def ca_enable_ldap_profile_subsystem(ca):
logger.info('[Ensuring CA is using LDAPProfileSubsystem]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
needs_update = False
directive = None
try:
i = 0
while True:
# find profile subsystem
directive = "subsystem.{}.id".format(i)
value = directivesetter.get_directive(
paths.CA_CS_CFG_PATH,
directive,
separator='=')
if not value:
logger.error('Unable to find profile subsystem in %s',
paths.CA_CS_CFG_PATH)
return False
if value != 'profile':
i = i + 1
continue
# check profile subsystem class name
directive = "subsystem.{}.class".format(i)
value = directivesetter.get_directive(
paths.CA_CS_CFG_PATH,
directive,
separator='=')
if value != 'com.netscape.cmscore.profile.LDAPProfileSubsystem':
needs_update = True
# break after finding profile subsystem
break
except OSError as e:
logger.error('Cannot read CA configuration file "%s": %s',
paths.CA_CS_CFG_PATH, e)
return False
if needs_update:
directivesetter.set_directive(
paths.CA_CS_CFG_PATH,
directive,
'com.netscape.cmscore.profile.LDAPProfileSubsystem',
quotes=False,
separator='=')
ca.restart('pki-tomcat')
logger.info('[Migrating certificate profiles to LDAP]')
cainstance.migrate_profiles_to_ldap()
return needs_update
def ca_import_included_profiles(ca):
logger.info('[Ensuring presence of included profiles]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
return cainstance.import_included_profiles()
def ca_ensure_lightweight_cas_container(ca):
logger.info('[Ensuring Lightweight CAs container exists in Dogtag '
'database]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
return cainstance.ensure_lightweight_cas_container()
def ca_enable_lightweight_ca_monitor(ca):
logger.info('[Enabling LWCA monitor]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
return cainstance.enable_lightweight_ca_monitor()
def ca_add_default_ocsp_uri(ca):
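    """Point ca.defaultOcspUri in CS.cfg at the ipa-ca OCSP endpoint
    unless a value is already set; returns True when pki-tomcat needs
    a restart."""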
logger.info('[Adding default OCSP URI configuration]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
value = directivesetter.get_directive(
paths.CA_CS_CFG_PATH,
'ca.defaultOcspUri',
separator='=')
if value:
return False # already set; restart not needed
directivesetter.set_directive(
paths.CA_CS_CFG_PATH,
'ca.defaultOcspUri',
'http://ipa-ca.%s/ca/ocsp' % ipautil.format_netloc(api.env.domain),
quotes=False,
separator='=')
return True # restart needed
def ca_disable_publish_cert(ca):
logger.info('[Disabling cert publishing]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
value = directivesetter.get_directive(
paths.CA_CS_CFG_PATH,
'ca.publish.cert.enable',
separator='=')
if value:
return False # already set; restart not needed
directivesetter.set_directive(
paths.CA_CS_CFG_PATH,
'ca.publish.cert.enable',
'false',
quotes=False,
separator='=')
return True # restart needed
def ca_initialize_hsm_state(ca):
"""Initializse HSM state as False / internal token
"""
if not ca.sstore.has_state(ca.hsm_sstore):
section_name = ca.subsystem.upper()
config = SafeConfigParser()
config.add_section(section_name)
config.set(section_name, 'pki_hsm_enable', 'False')
ca.set_hsm_state(config)
def dnssec_set_openssl_engine(dnskeysyncd):
"""
Setup OpenSSL engine for BIND
"""
if constants.NAMED_OPENSSL_ENGINE is None:
return False
if sysupgrade.get_upgrade_state('dns', 'openssl_engine'):
return False
logger.info('[Set OpenSSL engine for BIND]')
dnskeysyncd.setup_named_openssl_conf()
dnskeysyncd.setup_named_sysconfig()
dnskeysyncd.setup_ipa_dnskeysyncd_sysconfig()
sysupgrade.set_upgrade_state('dns', 'openssl_engine', True)
return True
def certificate_renewal_update(ca, kra, ds, http):
"""
Update certmonger certificate renewal configuration.
"""
# First ensure the renewal helpers are defined.
ca.configure_certmonger_renewal_helpers()
template = paths.CERTMONGER_COMMAND_TEMPLATE
serverid = ipaldap.realm_to_serverid(api.env.realm)
requests = []
dogtag_reqs = ca.tracking_reqs.items()
if kra.is_installed():
dogtag_reqs = itertools.chain(dogtag_reqs, kra.tracking_reqs.items())
for nick, profile in dogtag_reqs:
req = {
'cert-database': paths.PKI_TOMCAT_ALIAS_DIR,
'cert-nickname': nick,
'ca-name': RENEWAL_CA_NAME,
'cert-presave-command': template % 'stop_pkicad',
'cert-postsave-command':
(template % 'renew_ca_cert "{}"'.format(nick)),
'template-profile': profile,
}
requests.append(req)
requests.append(
{
'cert-file': paths.RA_AGENT_PEM,
'key-file': paths.RA_AGENT_KEY,
'ca-name': RENEWAL_CA_NAME,
'template-profile': RA_AGENT_PROFILE,
'cert-presave-command': template % 'renew_ra_cert_pre',
'cert-postsave-command': template % 'renew_ra_cert',
},
)
logger.info("[Update certmonger certificate renewal configuration]")
if not ca.is_configured():
logger.info('CA is not configured')
return False
# Check the http server cert if issued by IPA
cert = x509.load_certificate_from_file(paths.HTTPD_CERT_FILE)
if certs.is_ipa_issued_cert(api, cert):
requests.append(
{
'cert-file': paths.HTTPD_CERT_FILE,
'key-file': paths.HTTPD_KEY_FILE,
'ca-name': 'IPA',
'cert-postsave-command': template % 'restart_httpd',
'template-hostname': [
http.fqdn,
f'{IPA_CA_RECORD}.{ipautil.format_netloc(api.env.domain)}',
],
}
)
# Check the ldap server cert if issued by IPA
ds_nickname = ds.get_server_cert_nickname(serverid)
ds_db_dirname = dsinstance.config_dirname(serverid)
ds_db = certs.CertDB(api.env.realm, nssdir=ds_db_dirname)
if ds_db.is_ipa_issued_cert(api, ds_nickname):
requests.append(
{
'cert-database': ds_db_dirname[:-1],
'cert-nickname': ds_nickname,
'ca-name': 'IPA',
'cert-postsave-command':
'%s %s' % (template % 'restart_dirsrv', serverid),
}
)
db = certs.CertDB(api.env.realm, paths.PKI_TOMCAT_ALIAS_DIR)
for nickname, _trust_flags in db.list_certs():
if nickname.startswith('caSigningCert cert-pki-ca '):
requests.append(
{
'cert-database': paths.PKI_TOMCAT_ALIAS_DIR,
'cert-nickname': nickname,
'ca-name': RENEWAL_CA_NAME,
'cert-presave-command': template % 'stop_pkicad',
'cert-postsave-command':
(template % ('renew_ca_cert "%s"' % nickname)),
'template-profile': 'caCACert',
}
)
    # State not set, let's see if we are already configured
missing_or_misconfigured_requests = []
for request in requests:
request_id = certmonger.get_request_id(request)
if request_id is None:
missing_or_misconfigured_requests.append(request)
if len(missing_or_misconfigured_requests) == 0:
logger.info("Certmonger certificate renewal configuration already "
"up-to-date")
return False
# Print info about missing requests
logger.info("Missing or incorrect tracking request for certificates:")
for request in missing_or_misconfigured_requests:
cert = None
if 'cert-file' in request:
cert = request['cert-file']
elif 'cert-database' in request and 'cert-nickname' in request:
cert = '{cert-database}:{cert-nickname}'.format(**request)
if cert is not None:
logger.info(" %s", cert)
# Ok, now we need to stop tracking, then we can start tracking them
# again with new configuration:
ca.stop_tracking_certificates()
if kra.is_installed():
kra.stop_tracking_certificates()
ds.stop_tracking_certificates(serverid)
http.stop_tracking_certificates()
filename = paths.CERTMONGER_CAS_CA_RENEWAL
if os.path.exists(filename):
with installutils.stopped_service('certmonger'):
logger.info("Removing %s", filename)
ipautil.remove_file(filename)
ca.configure_renewal()
ca.configure_agent_renewal()
ca.add_lightweight_ca_tracking_requests()
if kra.is_installed():
kra.configure_renewal()
ds.start_tracking_certificates(serverid)
http.start_tracking_certificates()
logger.info("Certmonger certificate renewal configuration updated")
return True
def http_certificate_ensure_ipa_ca_dnsname(http):
"""
Ensure the HTTP service certificate has the ipa-ca.$DOMAIN SAN dNSName.
This subroutine should be executed *after* ``certificate_renewal_update``,
which adds the name to the tracking request. It assumes that the tracking
request already has the ipa-ca.$DOMAIN DNS name set, and all that is needed
is to resubmit the request.
If HTTP certificate is issued by a third party, print manual remediation
steps.
"""
logger.info('[Adding ipa-ca alias to HTTP certificate]')
expect = f'{IPA_CA_RECORD}.{ipautil.format_netloc(api.env.domain)}'
cert = x509.load_certificate_from_file(paths.HTTPD_CERT_FILE)
try:
cert.match_hostname(expect)
except x509.ssl_match_hostname.CertificateError:
if certs.is_ipa_issued_cert(api, cert):
request_id = certmonger.get_request_id(
{'cert-file': paths.HTTPD_CERT_FILE})
if request_id is None:
# shouldn't happen
logger.error('Could not find HTTP cert tracking request.')
else:
logger.info('Resubmitting HTTP cert tracking request')
certmonger.resubmit_request(request_id)
# NOTE: due to https://pagure.io/certmonger/issue/143, the
# resubmitted request, if it does not immediately succeed
# (fairly likely during ipa-server-upgrade) and if the notAfter
# date of the current cert is still far off (also likely), then
# Certmonger will wait 7 days before trying again (unless
# restarted). There is not much we can do about that here, in
# the middle of ipa-server-upgrade.
else:
logger.error('HTTP certificate is issued by third party.')
logger.error(
'Obtain a new certificate with the following DNS names, \n'
'and install via ipa-server-certinstall(1):\n'
' - %s\n'
' - %s',
http.fqdn,
expect,
)
else:
logger.info('Certificate is OK; nothing to do')
def copy_crl_file(old_path, new_path=None):
"""
Copy CRL to new location, update permissions and SELinux context
"""
if new_path is None:
filename = os.path.basename(old_path)
new_path = os.path.join(paths.PKI_CA_PUBLISH_DIR, filename)
logger.debug('copy_crl_file: %s -> %s', old_path, new_path)
if os.path.islink(old_path):
        # update symlink to the most recent CRL file
filename = os.path.basename(os.readlink(old_path))
realpath = os.path.join(paths.PKI_CA_PUBLISH_DIR, filename)
logger.debug('copy_crl_file: Create symlink %s -> %s',
new_path, realpath)
os.symlink(realpath, new_path)
else:
shutil.copy2(old_path, new_path)
constants.PKI_USER.chown(new_path)
tasks.restore_context(new_path)
def migrate_crl_publish_dir(ca):
"""
Move CRL publish dir from /var/lib/pki-ca/publish to IPA controlled tree:
/var/lib/ipa/pki-ca/publish
"""
logger.info('[Migrate CRL publish directory]')
if sysupgrade.get_upgrade_state('dogtag', 'moved_crl_publish_dir'):
logger.info('CRL tree already moved')
return False
if not ca.is_configured():
logger.info('CA is not configured')
return False
try:
old_publish_dir = directivesetter.get_directive(
paths.CA_CS_CFG_PATH,
'ca.publish.publisher.instance.FileBaseCRLPublisher.directory',
separator='=')
except OSError as e:
logger.error('Cannot read CA configuration file "%s": %s',
paths.CA_CS_CFG_PATH, e)
return False
# Prepare target publish dir (creation, permissions, SELinux context)
# Run this every update to ensure proper values
publishdir = ca.prepare_crl_publish_dir()
if old_publish_dir == paths.PKI_CA_PUBLISH_DIR:
# publish dir is already updated
logger.info('Publish directory already set to new location')
sysupgrade.set_upgrade_state('dogtag', 'moved_crl_publish_dir', True)
return False
# Copy all CRLs to new directory
logger.info('Copy all CRLs to new publish directory')
try:
crl_files_unsorted = cainstance.get_crl_files(old_publish_dir)
except OSError as e:
logger.error('Cannot move CRL files to new directory: %s', e)
else:
        # Sort symlinks to the end of the list so the actual CRL
        # files are copied before the links that point at them
crl_files = sorted(crl_files_unsorted,
key=lambda f: os.path.islink(f))
for f in crl_files:
try:
copy_crl_file(f)
except Exception as e:
logger.error('Cannot move CRL file to new directory: %s', e)
try:
directivesetter.set_directive(
paths.CA_CS_CFG_PATH,
'ca.publish.publisher.instance.FileBaseCRLPublisher.directory',
publishdir, quotes=False, separator='=')
except OSError as e:
logger.error('Cannot update CA configuration file "%s": %s',
paths.CA_CS_CFG_PATH, e)
return False
sysupgrade.set_upgrade_state('dogtag', 'moved_crl_publish_dir', True)
logger.info('CRL publish directory has been migrated, '
'request pki-tomcat restart')
return True
def ca_enable_pkix(ca):
logger.info('[Enable PKIX certificate path discovery and validation]')
if sysupgrade.get_upgrade_state('dogtag', 'pkix_enabled'):
logger.info('PKIX already enabled')
return False
if not ca.is_configured():
logger.info('CA is not configured')
return False
ca.enable_pkix()
sysupgrade.set_upgrade_state('dogtag', 'pkix_enabled', True)
return True
def add_ca_dns_records(bind):
logger.info('[Add missing CA DNS records]')
if sysupgrade.get_upgrade_state('dns', 'ipa_ca_records'):
logger.info('IPA CA DNS records already processed')
return False
ret = api.Command['dns_is_enabled']()
if not ret['result']:
logger.info('DNS is not configured')
sysupgrade.set_upgrade_state('dns', 'ipa_ca_records', True)
return False
bind.remove_ipa_ca_cnames(api.env.domain)
bind.update_system_records()
sysupgrade.set_upgrade_state('dns', 'ipa_ca_records', True)
return True
def find_subject_base():
"""
Try to find the current value of certificate subject base.
See the docstring in dsinstance.DsInstance for details.
"""
subject_base = dsinstance.DsInstance().find_subject_base()
if subject_base:
sysupgrade.set_upgrade_state(
'certmap.conf',
'subject_base',
subject_base
)
return subject_base
logger.error('Unable to determine certificate subject base. '
'certmap.conf will not be updated.')
return None
def uninstall_selfsign(ds, http):
    """Replace self-signed CA by a CA-less install"""
    logger.info('[Removing self-signed CA]')
if api.env.ra_plugin != 'selfsign':
logger.debug('Self-signed CA is not installed')
return
    logger.warning(
        'Removing self-signed CA. Certificates will need to be '
        'managed manually.')
p = SafeConfigParser()
p.read(paths.IPA_DEFAULT_CONF)
p.set('global', 'enable_ra', 'False')
p.set('global', 'ra_plugin', 'none')
with open(paths.IPA_DEFAULT_CONF, 'w') as f:
p.write(f)
ds.stop_tracking_certificates()
http.stop_tracking_certificates()
def uninstall_dogtag_9(ds, http):
logger.info('[Removing Dogtag 9 CA]')
if api.env.ra_plugin != 'dogtag':
logger.debug('Dogtag CA is not installed')
return
if api.env.dogtag_version >= 10:
logger.debug('Dogtag is version 10 or above')
return
dn = DN(('cn', 'CA'), ('cn', api.env.host), api.env.container_masters,
api.env.basedn)
try:
api.Backend.ldap2.delete_entry(dn)
except ipalib.errors.PublicError as e:
logger.error("Cannot delete %s: %s", dn, e)
p = SafeConfigParser()
p.read(paths.IPA_DEFAULT_CONF)
p.set('global', 'dogtag_version', '10')
with open(paths.IPA_DEFAULT_CONF, 'w') as f:
p.write(f)
sstore = sysrestore.StateFile(paths.SYSRESTORE)
sstore.restore_state('pkids', 'enabled')
sstore.restore_state('pkids', 'running')
sstore.restore_state('pkids', 'user_exists')
serverid = sstore.restore_state('pkids', 'serverid')
sstore.save()
ca = dogtaginstance.DogtagInstance(
api.env.realm, "CA", "certificate server",
nss_db=paths.VAR_LIB_PKI_CA_ALIAS_DIR)
ca.stop_tracking_certificates()
if serverid is not None:
# drop the trailing / off the config_dirname so the directory
# will match what is in certmonger
dirname = dsinstance.config_dirname(serverid)[:-1]
dsdb = certs.CertDB(api.env.realm, nssdir=dirname)
dsdb.untrack_server_cert("Server-Cert")
try:
services.service('pki-cad', api).disable('pki-ca')
except Exception as e:
logger.warning("Failed to disable pki-cad: %s", e)
try:
services.service('pki-cad', api).stop('pki-ca')
except Exception as e:
logger.warning("Failed to stop pki-cad: %s", e)
if serverid is not None:
try:
services.service('dirsrv', api).disable(serverid)
except Exception as e:
logger.warning("Failed to disable dirsrv: %s", e)
try:
services.service('dirsrv', api).stop(serverid)
except Exception as e:
logger.warning("Failed to stop dirsrv: %s", e)
http.restart()
def fix_schema_file_syntax():
"""Fix syntax errors in schema files
https://fedorahosted.org/freeipa/ticket/3578
"""
logger.info('[Fix DS schema file syntax]')
# This is not handled by normal schema updates, because pre-1.3.2 DS will
# ignore (auto-fix) these syntax errors, and 1.3.2 and above will choke on
# them before checking dynamic schema updates.
if sysupgrade.get_upgrade_state('ds', 'fix_schema_syntax'):
logger.info('Syntax already fixed')
return
serverid = ipaldap.realm_to_serverid(api.env.realm)
ds_dir = dsinstance.config_dirname(serverid)
# 1. 60ipadns.ldif: Add parenthesis to idnsRecord
filename = os.path.join(ds_dir, 'schema', '60ipadns.ldif')
result_lines = []
with open(filename) as file:
for line in file:
line = line.strip('\n')
if (line.startswith('objectClasses:') and
"NAME 'idnsRecord'" in line and
line.count('(') == 2 and
line.count(')') == 1):
logger.debug('Add closing parenthesis in idnsRecord')
line += ' )'
result_lines.append(line)
with open(filename, 'w') as file:
file.write('\n'.join(result_lines))
# 2. 65ipasudo.ldif: Remove extra dollar from ipaSudoRule
filename = os.path.join(ds_dir, 'schema', '65ipasudo.ldif')
result_lines = []
with open(filename) as file:
for line in file:
line = line.strip('\n')
if (line.startswith('objectClasses:') and
"NAME 'ipaSudoRule'" in line):
logger.debug('Remove extra dollar sign in ipaSudoRule')
line = line.replace('$$', '$')
result_lines.append(line)
with open(filename, 'w') as file:
file.write('\n'.join(result_lines))
# Done
sysupgrade.set_upgrade_state('ds', 'fix_schema_syntax', True)
def sssd_update():
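    """Enable ipa_server_mode for the IPA domain in sssd.conf, enable the
    IFP responder (granting Apache access when the HTTP service carries
    ok_to_auth_as_delegate), drop stale kdcinfo files and restart SSSD."""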
sssdconfig = SSSDConfig.SSSDConfig()
sssdconfig.import_config()
# upgrade domain
domain = sssdconfig.get_domain(str(api.env.domain))
domain.set_option('ipa_server_mode', 'True')
domain.set_option('ipa_server', api.env.host)
sssdconfig.save_domain(domain)
# check if service has ok_to_auth_as_delegate
service = 'HTTP/{}'.format(api.env.host)
result = api.Command.service_show(service, all=True)
flag = result['result'].get('ipakrboktoauthasdelegate', False)
if flag:
logger.debug(
"%s has ok_to_auth_as_delegate, allow Apache to access IFP",
            service
)
# enable and configure IFP plugin
sssd_enable_ifp(sssdconfig, allow_httpd=flag)
# clean stale files generated by sssd
cleanup_kdcinfo()
# write config and restart service
sssdconfig.write(paths.SSSD_CONF)
sssd = services.service('sssd', api)
sssd.restart()
def remove_ds_ra_cert(subject_base):
logger.info('[Removing RA cert from DS NSS database]')
if sysupgrade.get_upgrade_state('ds', 'remove_ra_cert'):
logger.info('RA cert already removed')
return
dbdir = dsinstance.config_dirname(
ipaldap.realm_to_serverid(api.env.realm))
dsdb = certs.CertDB(api.env.realm, nssdir=dbdir, subject_base=subject_base)
nickname = 'CN=IPA RA,%s' % subject_base
cert = dsdb.get_cert_from_db(nickname)
if cert:
dsdb.delete_cert(nickname)
sysupgrade.set_upgrade_state('ds', 'remove_ra_cert', True)
def migrate_to_mod_ssl(http):
logger.info('[Migrating from mod_nss to mod_ssl]')
if sysupgrade.get_upgrade_state('ssl.conf', 'migrated_to_mod_ssl'):
logger.info("Already migrated to mod_ssl")
return
http.migrate_to_mod_ssl()
sysupgrade.set_upgrade_state('ssl.conf', 'migrated_to_mod_ssl', True)
def update_ipa_httpd_service_conf(http):
logger.info('[Updating HTTPD service IPA configuration]')
http.update_httpd_service_ipa_conf()
def update_ipa_http_wsgi_conf(http):
logger.info('[Updating HTTPD service IPA WSGI configuration]')
http.update_httpd_wsgi_conf()
def update_http_keytab(http):
logger.info('[Moving HTTPD service keytab to gssproxy]')
if os.path.exists(paths.OLD_IPA_KEYTAB):
# ensure proper SELinux context by using copy operation
shutil.copy(paths.OLD_IPA_KEYTAB, http.keytab)
try:
os.remove(paths.OLD_IPA_KEYTAB)
except OSError as e:
logger.error(
'Cannot remove file %s (%s). Please remove the file manually.',
paths.OLD_IPA_KEYTAB, e
)
http.keytab_user.chown(http.keytab)
tasks.restore_context(http.keytab)
def ds_enable_sidgen_extdom_plugins(ds):
"""For AD trust agents, make sure we enable sidgen and extdom plugins
"""
logger.info('[Enable sidgen and extdom plugins by default]')
if sysupgrade.get_upgrade_state('ds', 'enable_ds_sidgen_extdom_plugins'):
logger.debug('sidgen and extdom plugins are enabled already')
return False
ds.add_sidgen_plugin(api.env.basedn)
ds.add_extdom_plugin(api.env.basedn)
sysupgrade.set_upgrade_state('ds', 'enable_ds_sidgen_extdom_plugins', True)
return True
def ds_enable_graceperiod_plugin(ds):
"""Graceperiod is a newer DS plugin so needs to be enabled on upgrade"""
if sysupgrade.get_upgrade_state('ds', 'enable_ds_graceperiod_plugin'):
logger.debug('graceperiod is enabled already')
return False
ds.config_graceperiod_module()
sysupgrade.set_upgrade_state('ds', 'enable_ds_graceperiod_plugin', True)
return True
def ca_upgrade_schema(ca):
logger.info('[Upgrading CA schema]')
if not ca.is_configured():
logger.info('CA is not configured')
return False
# ACME schema file moved in pki-server-10.9.0-0.3
    # ACME database connections were abstracted in pki-acme-10.10.0
for path in [
'/usr/share/pki/acme/conf/database/ds/schema.ldif',
'/usr/share/pki/acme/conf/database/ldap/schema.ldif',
'/usr/share/pki/acme/database/ldap/schema.ldif',
]:
if os.path.exists(path):
acme_schema_ldif = path
break
else:
logger.info('ACME schema is not available')
return False
    schema_files = [
'/usr/share/pki/server/conf/schema-certProfile.ldif',
'/usr/share/pki/server/conf/schema-authority.ldif',
acme_schema_ldif,
]
try:
modified = schemaupdate.update_schema(schema_files, ldapi=True)
except Exception as e:
logger.error("%s", e)
raise RuntimeError('CA schema upgrade failed.', 1)
else:
if modified:
logger.info('CA schema update complete')
return True
else:
logger.info('CA schema update complete (no changes)')
return False
def add_default_caacl(ca):
logger.info('[Add default CA ACL]')
if sysupgrade.get_upgrade_state('caacl', 'add_default_caacl'):
logger.info('Default CA ACL already added')
return
if ca.is_configured():
cainstance.ensure_default_caacl()
sysupgrade.set_upgrade_state('caacl', 'add_default_caacl', True)
def add_agent_to_security_domain_admins():
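    """Add the RA agent (uid=ipara) to Dogtag's Security Domain
    Administrators group, ignoring existing membership."""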
user_dn = DN(('uid', "ipara"), ('ou', 'People'), ('o', 'ipaca'))
group_dn = DN(('cn', 'Security Domain Administrators'), ('ou', 'groups'),
('o', 'ipaca'))
try:
api.Backend.ldap2.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
except ipalib.errors.AlreadyGroupMember:
pass
def setup_pkinit(krb):
logger.info("[Setup PKINIT]")
if not krbinstance.is_pkinit_enabled():
krb.issue_selfsigned_pkinit_certs()
aug = Augeas(flags=Augeas.NO_LOAD | Augeas.NO_MODL_AUTOLOAD,
loadpath=paths.USR_SHARE_IPA_DIR)
try:
aug.transform('IPAKrb5', paths.KRB5KDC_KDC_CONF)
aug.load()
path = '/files{}/realms/{}'.format(paths.KRB5KDC_KDC_CONF, krb.realm)
modified = False
value = 'FILE:{},{}'.format(paths.KDC_CERT, paths.KDC_KEY)
expr = '{}[count(pkinit_identity)=1][pkinit_identity="{}"]'.format(
path, value)
if not aug.match(expr):
aug.remove('{}/pkinit_identity'.format(path))
aug.set('{}/pkinit_identity'.format(path), value)
modified = True
for value in ['FILE:{}'.format(paths.KDC_CERT),
'FILE:{}'.format(paths.CACERT_PEM)]:
expr = '{}/pkinit_anchors[.="{}"]'.format(path, value)
if not aug.match(expr):
aug.set('{}/pkinit_anchors[last()+1]'.format(path), value)
modified = True
value = 'FILE:{}'.format(paths.CA_BUNDLE_PEM)
expr = '{}/pkinit_pool[.="{}"]'.format(path, value)
if not aug.match(expr):
aug.set('{}/pkinit_pool[last()+1]'.format(path), value)
modified = True
if modified:
try:
aug.save()
except IOError:
for error_path in aug.match('/augeas//error'):
logger.error('augeas: %s', aug.get(error_path))
raise
if krb.is_running():
krb.stop()
krb.start()
finally:
aug.close()
def setup_spake(krb):
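    """Pin spake_preauth_kdc_challenge to edwards25519 in kdc.conf via
    Augeas, restarting the KDC only when the setting had to be added."""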
logger.info("[Setup SPAKE]")
aug = Augeas(flags=Augeas.NO_LOAD | Augeas.NO_MODL_AUTOLOAD,
loadpath=paths.USR_SHARE_IPA_DIR)
try:
aug.transform("IPAKrb5", paths.KRB5KDC_KDC_CONF)
aug.load()
path = "/files{}/libdefaults/spake_preauth_kdc_challenge"
path = path.format(paths.KRB5KDC_KDC_CONF)
value = "edwards25519"
if aug.match(path):
return
aug.remove(path)
aug.set(path, value)
try:
aug.save()
except IOError:
for error_path in aug.match('/augeas//error'):
logger.error('augeas: %s', aug.get(error_path))
raise
if krb.is_running():
krb.stop()
krb.start()
finally:
aug.close()
# Currently, this doesn't support templating.
def enable_server_snippet():
logger.info("[Enable server krb5.conf snippet]")
template = os.path.join(
paths.USR_SHARE_IPA_DIR,
os.path.basename(paths.KRB5_FREEIPA_SERVER) + ".template"
)
shutil.copy(template, paths.KRB5_FREEIPA_SERVER)
os.chmod(paths.KRB5_FREEIPA_SERVER, 0o644)
tasks.restore_context(paths.KRB5_FREEIPA_SERVER)
def setup_kpasswd_server(krb):
logger.info("[Setup kpasswd_server]")
aug = Augeas(
flags=Augeas.NO_LOAD | Augeas.NO_MODL_AUTOLOAD,
loadpath=paths.USR_SHARE_IPA_DIR,
)
try:
aug.transform("IPAKrb5", paths.KRB5_CONF)
aug.load()
kpass_srv_path = "/files{}/realms/{}/kpasswd_server"
kpass_srv_path = kpass_srv_path.format(paths.KRB5_CONF, krb.realm)
if aug.match(kpass_srv_path):
return
aug.set(kpass_srv_path, f"{krb.fqdn}:464")
aug.save()
finally:
aug.close()
def ntpd_cleanup(fqdn, fstore):
sstore = sysrestore.StateFile(paths.SYSRESTORE)
timeconf.restore_forced_timeservices(sstore, 'ntpd')
if sstore.has_state('ntp'):
instance = services.service('ntpd', api)
sstore.restore_state(instance.service_name, 'enabled')
sstore.restore_state(instance.service_name, 'running')
sstore.restore_state(instance.service_name, 'step-tickers')
try:
instance.disable()
instance.stop()
except Exception:
logger.debug("Service ntpd was not disabled or stopped")
for ntpd_file in [paths.NTP_CONF, paths.NTP_STEP_TICKERS,
paths.SYSCONFIG_NTPD]:
try:
fstore.restore_file(ntpd_file)
except ValueError as e:
logger.debug(e)
try:
api.Backend.ldap2.delete_entry(DN(('cn', 'NTP'), ('cn', fqdn),
api.env.container_masters))
except ipalib.errors.NotFound:
logger.debug("NTP service entry was not found in LDAP.")
ntp_role_instance = servroles.ServiceBasedRole(
u"ntp_server_server",
u"NTP server",
component_services=['NTP']
)
updated_role_instances = tuple()
for role_instance in servroles.role_instances:
if role_instance is not ntp_role_instance:
updated_role_instances += tuple([role_instance])
servroles.role_instances = updated_role_instances
sysupgrade.set_upgrade_state('ntpd', 'ntpd_cleaned', True)
def update_replica_config(db_suffix):
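    """Apply replication.REPLICA_FINAL_SETTINGS to the replica entry of
    the given suffix; a missing entry simply means no replica has been
    installed yet."""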
dn = DN(
('cn', 'replica'), ('cn', db_suffix), ('cn', 'mapping tree'),
('cn', 'config')
)
try:
entry = api.Backend.ldap2.get_entry(dn)
except ipalib.errors.NotFound:
return # entry does not exist until a replica is installed
for key, value in replication.REPLICA_FINAL_SETTINGS.items():
entry[key] = value
try:
api.Backend.ldap2.update_entry(entry)
except ipalib.errors.EmptyModlist:
pass
else:
logger.info("Updated entry %s", dn)
def migrate_to_authselect():
logger.info('[Migrating to authselect profile]')
if sysupgrade.get_upgrade_state('authcfg', 'migrated_to_authselect'):
logger.info("Already migrated to authselect profile")
return
statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)
try:
tasks.migrate_auth_configuration(statestore)
except ipautil.CalledProcessError as e:
raise RuntimeError(
"Failed to migrate to authselect profile: %s" % e, 1)
sysupgrade.set_upgrade_state('authcfg', 'migrated_to_authselect', True)
def add_systemd_user_hbac():
logger.info('[Create systemd-user hbac service and rule]')
rule = u'allow_systemd-user'
service = u'systemd-user'
try:
api.Command.hbacsvc_add(
service,
description=u'pam_systemd and systemd user@.service'
)
except ipalib.errors.DuplicateEntry:
logger.info('hbac service %s already exists', service)
# Don't create hbac rule when hbacsvc already exists, so the rule
# does not get re-created after it has been deleted by an admin.
return
else:
logger.info('Created hbacsvc %s', service)
try:
api.Command.hbacrule_add(
rule,
description=(u'Allow pam_systemd to run user@.service to create '
'a system user session'),
usercategory=u'all',
hostcategory=u'all',
)
except ipalib.errors.DuplicateEntry:
logger.info('hbac rule %s already exists', rule)
else:
api.Command.hbacrule_add_service(
rule,
hbacsvc=(service,)
)
logger.info('Created hbac rule %s with hbacsvc=%s', rule, service)
def add_admin_root_alias():
"""Make root principal an alias of admin
Fix for CVE-2020-10747
"""
rootprinc = "root@{}".format(api.env.realm)
logger.info("[Add %s alias to admin account]", rootprinc)
try:
api.Command.user_add_principal("admin", rootprinc)
except ipalib.errors.DuplicateEntry:
results = api.Command.user_find(krbprincipalname=rootprinc)
uid = results["result"][0]["uid"][0]
logger.warning(
"WARN: '%s' alias is assigned to user '%s'!", rootprinc, uid
)
except ipalib.errors.AlreadyContainsValueError:
logger.info("Alias already exists")
else:
logger.info("Added '%s' alias to admin account", rootprinc)
def fix_permissions():
"""Fix permission of public accessible files and directories
In case IPA was installed with restricted umask, some public files and
directories may not be readable and accessible.
See https://pagure.io/freeipa/issue/7594
"""
candidates = [
os.path.dirname(paths.GSSAPI_SESSION_KEY),
paths.CA_BUNDLE_PEM,
paths.KDC_CA_BUNDLE_PEM,
paths.IPA_CA_CRT,
paths.IPA_P11_KIT,
]
for filename in candidates:
try:
s = os.stat(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
continue
mode = 0o755 if stat.S_ISDIR(s.st_mode) else 0o644
if mode != stat.S_IMODE(s.st_mode):
logger.debug("Fix permission of %s to %o", filename, mode)
os.chmod(filename, mode)
def upgrade_bind(fstore):
"""Update BIND named DNS server instance
"""
bind = bindinstance.BindInstance(fstore, api=api)
bind.setup_templating(
fqdn=api.env.host,
realm_name=api.env.realm,
domain_name=api.env.domain
)
# always executed
add_ca_dns_records(bind)
if not bindinstance.named_conf_exists():
logger.info("DNS service is not configured")
return False
bind_switch_service(bind)
# get rid of old states
bind_old_states(bind)
bind_old_upgrade_states()
    # only upgrade when the drop-in is missing and /etc/resolv.conf is a
    # link to systemd-resolved's stub resolver config file.
has_resolved_ipa_conf = os.path.isfile(paths.SYSTEMD_RESOLVED_IPA_CONF)
if not has_resolved_ipa_conf and detect_resolve1_resolv_conf():
ip_addresses = installutils.resolve_ip_addresses_nss(
api.env.host
)
bind.ip_addresses = ip_addresses
bind.setup_resolv_conf()
logger.info("Updated systemd-resolved configuration")
if bind.is_configured() and not bind.is_running():
# some upgrade steps may require bind running
bind_started = True
bind.start()
else:
bind_started = False
# create or update autobind entry
bind.setup_autobind()
try:
changed = bind.setup_named_conf(backup=True)
if changed:
logger.info("named.conf has been modified, restarting named")
try:
if bind.is_running():
bind.restart()
except ipautil.CalledProcessError as e:
logger.error("Failed to restart %s: %s", bind.service_name, e)
finally:
if bind_started:
bind.stop()
return changed
def bind_switch_service(bind):
"""
    Mask either named or named-pkcs11; only one may run at a time,
    as running both can cause unexpected errors.
"""
named_conflict_name = bind.named_conflict.systemd_name
named_conflict_old = sysupgrade.get_upgrade_state('dns', 'conflict_named')
# nothing changed
if named_conflict_old and named_conflict_old == named_conflict_name:
return False
bind.switch_service()
sysupgrade.set_upgrade_state('dns', 'conflict_named', named_conflict_name)
return True
def bind_old_states(bind):
"""Remove old states
"""
# no longer used states
old_states = [
"enabled",
"running",
"named-regular-enabled",
"named-regular-running",
]
for state in old_states:
bind.delete_state(state)
def bind_old_upgrade_states():
"""Remove old upgrade states
"""
named_conf_states = (
# old states before 4.8.7
"gssapi_updated",
"pid-file_updated",
"dnssec-enabled_remove",
"bindkey-file_removed",
"managed-keys-directory_updated",
"root_key_updated",
"forward_policy_conflict_with_empty_zones_handled",
"add_server_id",
"add_crypto_policy",
)
dns_states = (
"regular_named_masked",
"dyndb_ipa_workdir_perm"
)
for state in named_conf_states:
sysupgrade.remove_upgrade_state("named.conf", state)
for state in dns_states:
sysupgrade.remove_upgrade_state("dns", state)
def ca_update_acme_configuration(ca, fqdn):
"""
    Re-apply the templates in case anything has been updated.
"""
logger.info('[Updating ACME configuration]')
if not os.path.isdir(os.path.join(paths.PKI_TOMCAT, 'acme')):
logger.info('ACME is not deployed, skipping')
return
if not os.path.exists(paths.PKI_ACME_ISSUER_CONF):
logger.info('ACME configuration file %s is missing',
paths.PKI_ACME_ISSUER_CONF)
return
password = directivesetter.get_directive(
paths.PKI_ACME_ISSUER_CONF,
'password',
separator='=')
acme_user = ca.acme_uid(fqdn)
sub_dict = dict(
FQDN=fqdn,
USER=acme_user,
PASSWORD=password,
)
for template_name, target in cainstance.ACME_CONFIG_FILES:
upgrade_file(sub_dict, target,
os.path.join(paths.USR_SHARE_IPA_DIR,
template_name))
def set_default_grace_time():
dn = DN(
('cn', 'global_policy'), ('cn', api.env.realm),
('cn', 'kerberos'), api.env.basedn
)
entry = api.Backend.ldap2.get_entry(dn)
    for (a, _v) in entry.items():
if a.lower() == 'passwordgracelimit':
return
entry['objectclass'].append('ipapwdpolicy')
entry['passwordgracelimit'] = -1
api.Backend.ldap2.update_entry(entry)
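# For reference, after set_default_grace_time() runs, cn=global_policy
# carries the values set above:
#
#   objectClass: ... ipapwdpolicy
#   passwordgracelimit: -1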
def upgrade_configuration():
"""
Execute configuration upgrade of the IPA services
"""
logger.debug('IPA version %s', version.VENDOR_VERSION)
fstore = sysrestore.FileStore(paths.SYSRESTORE)
sstore = sysrestore.StateFile(paths.SYSRESTORE)
if not sstore.has_state('installation'):
if is_ipa_configured():
sstore.backup_state('installation', 'complete', True)
else:
sstore.backup_state('installation', 'complete', False)
fqdn = api.env.host
# Ok, we are an IPA server, do the additional tests
ds = dsinstance.DsInstance(realm_name=api.env.realm)
    # start DS; the CA will not start without a running DS and would error out
ds_running = ds.is_running()
if not ds_running:
ds.start(ds.serverid)
if not sysupgrade.get_upgrade_state('ntpd', 'ntpd_cleaned'):
ntpd_cleanup(fqdn, fstore)
if tasks.configure_pkcs11_modules(fstore):
print("Disabled p11-kit-proxy")
check_certs()
fix_permissions()
auto_redirect = find_autoredirect(fqdn)
sub_dict = dict(
REALM=api.env.realm,
FQDN=fqdn,
AUTOREDIR='' if auto_redirect else '#',
CRL_PUBLISH_PATH=paths.PKI_CA_PUBLISH_DIR,
DOGTAG_PORT=8009,
CLONE='#',
WSGI_PREFIX_DIR=paths.WSGI_PREFIX_DIR,
WSGI_PROCESSES=constants.WSGI_PROCESSES,
GSSAPI_SESSION_KEY=paths.GSSAPI_SESSION_KEY,
FONTS_DIR=paths.FONTS_DIR,
FONTS_OPENSANS_DIR=paths.FONTS_OPENSANS_DIR,
FONTS_FONTAWESOME_DIR=paths.FONTS_FONTAWESOME_DIR,
IPA_CCACHES=paths.IPA_CCACHES,
IPA_CUSTODIA_SOCKET=paths.IPA_CUSTODIA_SOCKET,
KDCPROXY_CONFIG=paths.KDCPROXY_CONFIG,
DOMAIN=api.env.domain,
)
subject_base = find_subject_base()
if subject_base:
sub_dict['ISSUER_DN'] = 'CN=Certificate Authority,' + subject_base
ca = cainstance.CAInstance(
api.env.realm, host_name=api.env.host)
ca_running = ca.is_running()
kra = krainstance.KRAInstance(api.env.realm)
    # create the pwdfile.txt file in PKI_TOMCAT_ALIAS_DIR if it does not
    # exist; this file is required for most actions on this NSS DB in FIPS
    # mode
if ca.is_configured() and not os.path.exists(os.path.join(
paths.PKI_TOMCAT_ALIAS_DIR, 'pwdfile.txt')):
ca.create_certstore_passwdfile()
with installutils.stopped_service('pki-tomcatd', 'pki-tomcat'):
# Dogtag must be stopped to be able to backup CS.cfg config
if ca.is_configured():
ca.backup_config()
# migrate CRL publish dir before the location in ipa.conf is updated
ca_restart = migrate_crl_publish_dir(ca)
if ca.is_configured():
crl = directivesetter.get_directive(
paths.CA_CS_CFG_PATH, 'ca.crl.MasterCRL.enableCRLUpdates', '=')
            sub_dict['CLONE'] = '#' if crl.lower() == 'true' else ''
ds_dirname = dsinstance.config_dirname(ds.serverid)
upgrade_file(sub_dict, paths.HTTPD_IPA_CONF,
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa.conf.template"))
upgrade_file(sub_dict, paths.HTTPD_IPA_REWRITE_CONF,
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa-rewrite.conf.template"))
upgrade_file(sub_dict, paths.HTTPD_IPA_KDCPROXY_CONF,
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa-kdc-proxy.conf.template"))
if ca.is_configured():
# Ensure that the drop-in file is present
if not os.path.isfile(paths.SYSTEMD_PKI_TOMCAT_IPA_CONF):
ca.add_ipa_wait()
# Handle upgrade of AJP connector configuration
rewrite = ca.secure_ajp_connector()
if ca.ajp_secret:
sub_dict['DOGTAG_AJP_SECRET'] = "secret={}".format(
ca.ajp_secret)
else:
sub_dict['DOGTAG_AJP_SECRET'] = ''
# force=True will ensure the secret is updated if it changes
if rewrite:
upgrade_file(
sub_dict,
paths.HTTPD_IPA_PKI_PROXY_CONF,
os.path.join(paths.USR_SHARE_IPA_DIR,
"ipa-pki-proxy.conf.template"),
add=True, force=True)
else:
if os.path.isfile(paths.HTTPD_IPA_PKI_PROXY_CONF):
os.remove(paths.HTTPD_IPA_PKI_PROXY_CONF)
if subject_base:
upgrade_file(
sub_dict,
os.path.join(ds_dirname, "certmap.conf"),
os.path.join(paths.USR_SHARE_IPA_DIR, "certmap.conf.template")
)
if kra.is_installed():
logger.info('[Ensuring ephemeralRequest is enabled in KRA]')
kra.backup_config()
value = directivesetter.get_directive(
paths.KRA_CS_CFG_PATH,
'kra.ephemeralRequests',
separator='=')
if value is None or value.lower() != 'true':
logger.info('Enabling ephemeralRequest')
kra.enable_ephemeral()
else:
logger.info('ephemeralRequest is already enabled')
if tasks.is_fips_enabled():
logger.info('[Ensuring KRA OAEP wrap algo is enabled in FIPS]')
value = directivesetter.get_directive(
paths.KRA_CS_CFG_PATH,
'keyWrap.useOAEP',
separator='=')
if value is None or value.lower() != 'true':
logger.info('Use the OAEP key wrap algo')
kra.enable_oaep_wrap_algo()
else:
logger.info('OAEP key wrap algo is already enabled')
    # several upgrade steps require a running CA. If the CA is configured,
    # always run ca.start() because we need to wait until the CA is really
    # ready, verified by checking its status over HTTP
if ca.is_configured():
ca.start('pki-tomcat')
if kra.is_installed() and not kra.is_running():
# This is for future-proofing in case the KRA is ever standalone.
kra.start('pki-tomcat')
certmonger_service = services.knownservices.certmonger
if ca.is_configured() and not certmonger_service.is_running():
certmonger_service.start()
ca.unconfigure_certmonger_renewal_guard()
update_dbmodules(api.env.realm)
uninstall_ipa_kpasswd()
uninstall_ipa_memcached()
removed_sysconfig_file = paths.SYSCONFIG_HTTPD
if fstore.has_file(removed_sysconfig_file):
logger.info('Restoring %s as it is no longer required',
removed_sysconfig_file)
fstore.restore_file(removed_sysconfig_file)
http = httpinstance.HTTPInstance(fstore)
http.fqdn = fqdn
http.realm = api.env.realm
http.suffix = ipautil.realm_to_suffix(api.env.realm)
http.configure_selinux_for_httpd()
http.set_mod_ssl_protocol()
http.configure_certmonger_renewal_guard()
http.enable_and_start_oddjobd()
ds.configure_systemd_ipa_env()
update_replica_config(ipautil.realm_to_suffix(api.env.realm))
if ca.is_configured():
update_replica_config(DN(('o', 'ipaca')))
ds.stop(ds.serverid)
fix_schema_file_syntax()
remove_ds_ra_cert(subject_base)
ds.start(ds.serverid)
ds.fqdn = fqdn
ds.realm = api.env.realm
ds.suffix = ipautil.realm_to_suffix(api.env.realm)
if any([
ds_enable_sidgen_extdom_plugins(ds),
ds_enable_graceperiod_plugin(ds)
]):
ds.restart(ds.serverid)
set_default_grace_time()
if not http.is_kdcproxy_configured():
logger.info('[Enabling KDC Proxy]')
http.create_kdcproxy_conf()
http.enable_kdcproxy()
http.stop()
update_ipa_httpd_service_conf(http)
update_ipa_http_wsgi_conf(http)
migrate_to_mod_ssl(http)
update_http_keytab(http)
http.configure_gssproxy()
http.start()
uninstall_selfsign(ds, http)
uninstall_dogtag_9(ds, http)
simple_service_list = (
(otpdinstance.OtpdInstance(), 'OTPD'),
)
for svc, ldap_name in simple_service_list:
try:
if not svc.is_configured():
svc.create_instance(ldap_name, fqdn,
ipautil.realm_to_suffix(api.env.realm),
realm=api.env.realm)
except ipalib.errors.DuplicateEntry:
pass
# install DNSKeySync service only if DNS is configured on server
if bindinstance.named_conf_exists():
dnskeysyncd = dnskeysyncinstance.DNSKeySyncInstance(fstore)
if not dnskeysyncd.is_configured():
dnskeysyncd.create_instance(fqdn, api.env.realm)
dnskeysyncd.start_dnskeysyncd()
else:
if dnssec_set_openssl_engine(dnskeysyncd):
dnskeysyncd.start_dnskeysyncd()
dnskeysyncd.set_dyndb_ldap_workdir_permissions()
cleanup_kdc(fstore)
cleanup_adtrust(fstore)
cleanup_dogtag()
upgrade_adtrust_config()
upgrade_bind(fstore)
custodia = custodiainstance.CustodiaInstance(api.env.host, api.env.realm)
custodia.upgrade_instance()
# Don't include schema upgrades in restart consideration, see
# https://pagure.io/freeipa/issue/9204
ca_upgrade_schema(ca)
ca_restart = any([
ca_restart,
certificate_renewal_update(ca, kra, ds, http),
ca_enable_pkix(ca),
ca_configure_profiles_acl(ca),
ca_configure_lightweight_ca_acls(ca),
ca_ensure_lightweight_cas_container(ca),
ca_enable_lightweight_ca_monitor(ca),
ca_add_default_ocsp_uri(ca),
ca_disable_publish_cert(ca),
])
if ca_restart:
logger.info(
'pki-tomcat configuration changed, restart pki-tomcat')
try:
ca.restart('pki-tomcat')
except ipautil.CalledProcessError as e:
logger.error("Failed to restart %s: %s", ca.service_name, e)
ca_enable_ldap_profile_subsystem(ca)
# This step MUST be done after ca_enable_ldap_profile_subsystem and
# ca_configure_profiles_acl, and the consequent restart, but does not
# itself require a restart.
#
ca_import_included_profiles(ca)
add_default_caacl(ca)
if ca.is_configured():
ca.reindex_task()
cainstance.repair_profile_caIPAserviceCert()
ca.setup_lightweight_ca_key_retrieval()
cainstance.ensure_ipa_authority_entry()
ca.setup_acme()
ca_update_acme_configuration(ca, fqdn)
ca_initialize_hsm_state(ca)
add_agent_to_security_domain_admins()
migrate_to_authselect()
add_systemd_user_hbac()
add_admin_root_alias()
sssd_update()
krb = krbinstance.KrbInstance(fstore)
krb.fqdn = fqdn
krb.realm = api.env.realm
krb.suffix = ipautil.realm_to_suffix(krb.realm)
krb.subject_base = subject_base
krb.sub_dict = dict(FQDN=krb.fqdn,
SUFFIX=krb.suffix,
DOMAIN=api.env.domain,
HOST=api.env.host,
SERVER_ID=ipaldap.realm_to_serverid(krb.realm),
REALM=krb.realm,
KRB5KDC_KADM5_ACL=paths.KRB5KDC_KADM5_ACL,
DICT_WORDS=paths.DICT_WORDS,
KRB5KDC_KADM5_KEYTAB=paths.KRB5KDC_KADM5_KEYTAB,
KDC_CERT=paths.KDC_CERT,
KDC_KEY=paths.KDC_KEY,
CACERT_PEM=paths.CACERT_PEM,
KDC_CA_BUNDLE_PEM=paths.KDC_CA_BUNDLE_PEM,
CA_BUNDLE_PEM=paths.CA_BUNDLE_PEM)
krb.add_anonymous_principal()
setup_spake(krb)
setup_pkinit(krb)
enable_server_snippet()
setup_kpasswd_server(krb)
if KRB5_BUILD_VERSION >= parse_version('1.20'):
krb.pac_tkt_sign_support_enable()
# Must be executed after certificate_renewal_update
# (see function docstring for details)
http_certificate_ensure_ipa_ca_dnsname(http)
# Convert configuredService to either enabledService or hiddenService
    # depending on the state of the server role. This fixes deployments
    # made before the introduction of hidden replicas, where those services
    # would stay as configuredService and not get started after upgrade,
    # rendering the system non-functional
service.sync_services_state(fqdn)
if not ds_running:
ds.stop(ds.serverid)
if ca.is_configured():
if ca_running and not ca.is_running():
ca.start('pki-tomcat')
elif not ca_running and ca.is_running():
ca.stop('pki-tomcat')
def upgrade_check(options):
try:
installutils.check_server_configuration()
tasks.check_ipv6_stack_enabled()
except RuntimeError as e:
logger.error("%s", e)
sys.exit(1)
if not options.skip_version_check:
# check IPA version and data version
try:
installutils.check_version()
except (installutils.UpgradePlatformError,
installutils.UpgradeDataNewerVersionError) as e:
raise RuntimeError(
'Unable to execute IPA upgrade: %s' % e, 1)
except installutils.UpgradeMissingVersionError as e:
logger.info("Missing version: %s", e)
except installutils.UpgradeVersionError:
# Ignore other errors
pass
else:
logger.info("Skipping version check")
logger.warning("Upgrade without version check may break your system")
@contextmanager
def empty_ccache():
# Create temporary directory and use it as a DIR: ccache collection
    # instead of whatever is the default in /etc/krb5.conf
#
# In Fedora 28 KCM: became a default credentials cache collection
# but if KCM daemon (part of SSSD) is not running, libkrb5 will fail
# to initialize. This causes kadmin.local to fail.
# Since we are in upgrade, we cannot kinit anyway (KDC is offline).
# Bug https://bugzilla.redhat.com/show_bug.cgi?id=1558818
kpath_dir = tempfile.mkdtemp(prefix="upgrade_ccaches",
dir=paths.IPA_CCACHES)
kpath = "DIR:{}".format(kpath_dir)
old_path = os.environ.get('KRB5CCNAME')
try:
os.environ['KRB5CCNAME'] = kpath
yield
finally:
if old_path:
os.environ['KRB5CCNAME'] = old_path
else:
os.environ.pop('KRB5CCNAME', None)
shutil.rmtree(kpath_dir)
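# A hedged usage sketch for empty_ccache(): inside the context every
# libkrb5 consumer sees the temporary DIR: collection. The path shown in
# the comment is illustrative; mkdtemp() randomizes the directory name.
def _demo_empty_ccache():
    import os
    with empty_ccache():
        # prints something like DIR:/var/lib/ipa/ccaches/upgrade_ccachesXXXXXX
        print(os.environ['KRB5CCNAME'])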
def upgrade():
realm = api.env.realm
schema_files = [os.path.join(paths.USR_SHARE_IPA_DIR, f) for f
in dsinstance.ALL_SCHEMA_FILES]
schema_files.extend(dsinstance.get_all_external_schema_files(
paths.EXTERNAL_SCHEMA_DIR))
data_upgrade = IPAUpgrade(realm, schema_files=schema_files)
try:
data_upgrade.create_instance()
except BadSyntax:
raise RuntimeError(
'Bad syntax detected in upgrade file(s).', 1)
except RuntimeError:
raise RuntimeError('IPA upgrade failed.', 1)
else:
if data_upgrade.modified:
logger.info('Update complete')
else:
logger.info('Update complete, no data were modified')
print('Upgrading IPA services')
logger.info('Upgrading the configuration of the IPA services')
with empty_ccache():
upgrade_configuration()
logger.info('The IPA services were upgraded')
# store new data version after upgrade
installutils.store_version()
| 69,591 | Python | .py | 1,719 | 31.951716 | 79 | 0.635968 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,789 | __init__.py | freeipa_freeipa/ipaserver/install/server/__init__.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
"""
Server installer module
"""
import os.path
import random
from ipaclient.install import client
from ipalib import constants
from ipalib.util import validate_domain_name
from ipalib.install import service
from ipalib.install.service import (enroll_only,
installs_master,
installs_replica,
master_install_only,
prepare_only,
replica_install_only)
from ipapython.install import typing
from ipapython.install.core import group, knob, extend_knob
from ipapython.install.common import step
from .install import validate_admin_password, validate_dm_password
from .install import get_min_idstart
from .install import init as master_init
from .install import install as master_install
from .install import install_check as master_install_check
from .install import uninstall, uninstall_check
from .replicainstall import init as replica_init
from .replicainstall import install as replica_install
from .replicainstall import promote_check as replica_promote_check
from .upgrade import upgrade_check, upgrade
from .. import adtrust, ca, conncheck, dns, kra
@group
class ServerUninstallInterface(service.ServiceInstallInterface):
description = "Uninstall"
ignore_topology_disconnect = knob(
None,
description="do not check whether server uninstall disconnects the "
"topology (domain level 1+)",
)
ignore_topology_disconnect = master_install_only(ignore_topology_disconnect)
ignore_last_of_role = knob(
None,
description="do not check whether server uninstall removes last "
"CA/DNS server or DNSSec master (domain level 1+)",
)
ignore_last_of_role = master_install_only(ignore_last_of_role)
@group
class ServerCertificateInstallInterface(service.ServiceInstallInterface):
description = "SSL certificate"
dirsrv_cert_files = knob(
typing.List[str], None,
description=("File containing the Directory Server SSL certificate "
"and private key"),
cli_names='--dirsrv-cert-file',
cli_deprecated_names='--dirsrv_pkcs12',
cli_metavar='FILE',
)
dirsrv_cert_files = prepare_only(dirsrv_cert_files)
http_cert_files = knob(
typing.List[str], None,
description=("File containing the Apache Server SSL certificate and "
"private key"),
cli_names='--http-cert-file',
cli_deprecated_names='--http_pkcs12',
cli_metavar='FILE',
)
http_cert_files = prepare_only(http_cert_files)
pkinit_cert_files = knob(
typing.List[str], None,
description=("File containing the Kerberos KDC SSL certificate and "
"private key"),
cli_names='--pkinit-cert-file',
cli_deprecated_names='--pkinit_pkcs12',
cli_metavar='FILE',
)
pkinit_cert_files = prepare_only(pkinit_cert_files)
dirsrv_pin = knob(
str, None,
sensitive=True,
description="The password to unlock the Directory Server private key",
cli_deprecated_names='--dirsrv_pin',
cli_metavar='PIN',
)
dirsrv_pin = prepare_only(dirsrv_pin)
http_pin = knob(
str, None,
sensitive=True,
description="The password to unlock the Apache Server private key",
cli_deprecated_names='--http_pin',
cli_metavar='PIN',
)
http_pin = prepare_only(http_pin)
pkinit_pin = knob(
str, None,
sensitive=True,
description="The password to unlock the Kerberos KDC private key",
cli_deprecated_names='--pkinit_pin',
cli_metavar='PIN',
)
pkinit_pin = prepare_only(pkinit_pin)
dirsrv_cert_name = knob(
str, None,
description="Name of the Directory Server SSL certificate to install",
cli_metavar='NAME',
)
dirsrv_cert_name = prepare_only(dirsrv_cert_name)
http_cert_name = knob(
str, None,
description="Name of the Apache Server SSL certificate to install",
cli_metavar='NAME',
)
http_cert_name = prepare_only(http_cert_name)
pkinit_cert_name = knob(
str, None,
description="Name of the Kerberos KDC SSL certificate to install",
cli_metavar='NAME',
)
pkinit_cert_name = prepare_only(pkinit_cert_name)
@group
class ServerHSMInstallInterface(service.ServiceInstallInterface):
description = "HSM"
token_name = knob(
str, None,
description=(
"The PKCS#11 token name if using an HSM to store and generate "
"private keys."
),
cli_metavar='NAME',
)
token_name = master_install_only(token_name)
token_library_path = knob(
str, None,
description=(
"The full path to the PKCS#11 shared library needed to"
"access an HSM device."
),
cli_metavar='NAME',
)
token_library_path = prepare_only(token_library_path)
token_password = knob(
str, None,
sensitive=True,
description=("The PKCS#11 token password for the HSM."),
cli_metavar='NAME',
)
token_password = prepare_only(token_password)
token_password_file = knob(
str, None,
description=("The full path to a file containing the password to "
"the PKCS#11 token password."),
cli_metavar='NAME',
)
token_password_file = prepare_only(token_password_file)
@group
class ServerInstallInterface(ServerCertificateInstallInterface,
ServerHSMInstallInterface,
client.ClientInstallInterface,
ca.CAInstallInterface,
kra.KRAInstallInterface,
dns.DNSInstallInterface,
adtrust.ADTrustInstallInterface,
conncheck.ConnCheckInterface,
ServerUninstallInterface):
"""
Interface of server installers
Knobs defined here will be available in:
* ipa-server-install
* ipa-replica-prepare
* ipa-replica-install
"""
description = "Server"
kinit_attempts = 1
fixed_primary = True
permit = False
enable_dns_updates = False
no_krb5_offline_passwords = False
preserve_sssd = False
no_sssd = False
domain_name = client.ClientInstallInterface.domain_name
domain_name = extend_knob(
domain_name,
cli_names=list(domain_name.cli_names) + ['-n'],
)
servers = extend_knob(
client.ClientInstallInterface.servers,
description="fully qualified name of IPA server to enroll to",
)
servers = enroll_only(servers)
realm_name = client.ClientInstallInterface.realm_name
realm_name = extend_knob(
realm_name,
cli_names=list(realm_name.cli_names) + ['-r'],
)
host_name = extend_knob(
client.ClientInstallInterface.host_name,
description="fully qualified name of this host",
)
ca_cert_files = extend_knob(
client.ClientInstallInterface.ca_cert_files,
description="File containing CA certificates for the service "
"certificate files",
cli_deprecated_names='--root-ca-file',
)
ca_cert_files = prepare_only(ca_cert_files)
dm_password = extend_knob(
client.ClientInstallInterface.dm_password,
description="Directory Manager password",
)
ip_addresses = extend_knob(
client.ClientInstallInterface.ip_addresses,
description="Server IP Address. This option can be used multiple "
"times",
)
principal = client.ClientInstallInterface.principal
principal = extend_knob(
principal,
description="User Principal allowed to promote replicas and join IPA "
"realm",
cli_names=list(principal.cli_names) + ['-P'],
)
principal = replica_install_only(principal)
admin_password = extend_knob(
client.ClientInstallInterface.admin_password,
)
master_password = knob(
str, None,
sensitive=True,
deprecated=True,
description="kerberos master password (normally autogenerated)",
)
master_password = master_install_only(master_password)
hidden_replica = knob(
None,
cli_names='--hidden-replica',
description="Install a hidden replica",
)
hidden_replica = replica_install_only(hidden_replica)
domain_level = knob(
int, constants.MAX_DOMAIN_LEVEL,
description="IPA domain level",
deprecated=True,
)
domain_level = master_install_only(domain_level)
@domain_level.validator
def domain_level(self, value):
# Check that Domain Level is within the allowed range
if value < constants.MIN_DOMAIN_LEVEL:
raise ValueError(
"Domain Level cannot be lower than {0}".format(
constants.MIN_DOMAIN_LEVEL))
elif value > constants.MAX_DOMAIN_LEVEL:
raise ValueError(
"Domain Level cannot be higher than {0}".format(
constants.MAX_DOMAIN_LEVEL))
setup_adtrust = knob(
None,
description="configure AD trust capability"
)
setup_ca = knob(
None,
description="configure a dogtag CA",
)
setup_ca = enroll_only(setup_ca)
setup_kra = knob(
None,
description="configure a dogtag KRA",
)
setup_kra = enroll_only(setup_kra)
setup_dns = knob(
None,
description="configure bind with our zone",
)
setup_dns = enroll_only(setup_dns)
@setup_dns.validator
def setup_dns(self, value):
if value:
dns.package_check(ValueError)
idstart = knob(
int, random.randint(1, 10000) * 200000,
description="The starting value for the IDs range (default random)",
)
idstart = master_install_only(idstart)
idmax = knob(
int,
description=("The max value for the IDs range (default: "
"idstart+199999)"),
)
idmax = master_install_only(idmax)
@idmax.default_getter
def idmax(self):
return self.idstart + 200000 - 1
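    # Worked example (values illustrative): idstart defaults to
    # random.randint(1, 10000) * 200000, e.g. 3 * 200000 = 600000; the
    # derived idmax default is then 600000 + 200000 - 1 = 799999, i.e. a
    # contiguous range of 200000 IDs.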
no_hbac_allow = knob(
None,
description="Don't install allow_all HBAC rule",
cli_deprecated_names='--no_hbac_allow',
)
no_hbac_allow = master_install_only(no_hbac_allow)
no_pkinit = knob(
None,
description="disables pkinit setup steps",
)
no_pkinit = prepare_only(no_pkinit)
no_ui_redirect = knob(
None,
description="Do not automatically redirect to the Web UI",
)
no_ui_redirect = enroll_only(no_ui_redirect)
dirsrv_config_file = knob(
str, None,
description="The path to LDIF file that will be used to modify "
"configuration of dse.ldif during installation of the "
"directory server instance",
cli_metavar='FILE',
)
dirsrv_config_file = enroll_only(dirsrv_config_file)
skip_mem_check = knob(
None,
description="Skip checking for minimum required memory",
)
skip_mem_check = enroll_only(skip_mem_check)
@dirsrv_config_file.validator
def dirsrv_config_file(self, value):
if not os.path.exists(value):
raise ValueError("File %s does not exist." % value)
def __init__(self, **kwargs):
super(ServerInstallInterface, self).__init__(**kwargs)
# If any of the key file options are selected, all are required.
cert_file_req = (self.dirsrv_cert_files, self.http_cert_files)
cert_file_opt = (self.pkinit_cert_files,)
if not self.no_pkinit:
cert_file_req += cert_file_opt
if self.no_pkinit and self.pkinit_cert_files:
raise RuntimeError(
"--no-pkinit and --pkinit-cert-file cannot be specified "
"together"
)
if any(cert_file_req + cert_file_opt) and not all(cert_file_req):
raise RuntimeError(
"--dirsrv-cert-file, --http-cert-file, and --pkinit-cert-file "
"or --no-pkinit are required if any key file options are used."
)
if not self.interactive:
if self.dirsrv_cert_files and self.dirsrv_pin is None:
raise RuntimeError(
"You must specify --dirsrv-pin with --dirsrv-cert-file")
if self.http_cert_files and self.http_pin is None:
raise RuntimeError(
"You must specify --http-pin with --http-cert-file")
if self.pkinit_cert_files and self.pkinit_pin is None:
raise RuntimeError(
"You must specify --pkinit-pin with --pkinit-cert-file")
if not self.setup_dns:
if self.forwarders:
raise RuntimeError(
"You cannot specify a --forwarder option without the "
"--setup-dns option")
if self.auto_forwarders:
raise RuntimeError(
"You cannot specify a --auto-forwarders option without "
"the --setup-dns option")
if self.no_forwarders:
raise RuntimeError(
"You cannot specify a --no-forwarders option without the "
"--setup-dns option")
if self.forward_policy:
raise RuntimeError(
"You cannot specify a --forward-policy option without the "
"--setup-dns option")
if self.reverse_zones:
raise RuntimeError(
"You cannot specify a --reverse-zone option without the "
"--setup-dns option")
if self.auto_reverse:
raise RuntimeError(
"You cannot specify a --auto-reverse option without the "
"--setup-dns option")
if self.no_reverse:
raise RuntimeError(
"You cannot specify a --no-reverse option without the "
"--setup-dns option")
if self.no_dnssec_validation:
raise RuntimeError(
"You cannot specify a --no-dnssec-validation option "
"without the --setup-dns option")
elif self.forwarders and self.no_forwarders:
raise RuntimeError(
"You cannot specify a --forwarder option together with "
"--no-forwarders")
elif self.auto_forwarders and self.no_forwarders:
raise RuntimeError(
"You cannot specify a --auto-forwarders option together with "
"--no-forwarders")
elif self.reverse_zones and self.no_reverse:
raise RuntimeError(
"You cannot specify a --reverse-zone option together with "
"--no-reverse")
elif self.auto_reverse and self.no_reverse:
raise RuntimeError(
"You cannot specify a --auto-reverse option together with "
"--no-reverse")
if not self.setup_adtrust:
if self.add_agents:
raise RuntimeError(
"You cannot specify an --add-agents option without the "
"--setup-adtrust option")
if self.enable_compat:
raise RuntimeError(
"You cannot specify an --enable-compat option without the "
"--setup-adtrust option")
if self.no_msdcs:
raise RuntimeError(
"You cannot specify a --no-msdcs option without the "
"--setup-adtrust option")
if not hasattr(self, 'replica_install'):
if self.external_cert_files and self.dirsrv_cert_files:
raise RuntimeError(
"Service certificate file options cannot be used with the "
"external CA options.")
if self.external_ca_type and not self.external_ca:
raise RuntimeError(
"You cannot specify --external-ca-type without "
"--external-ca")
if self.external_ca_profile and not self.external_ca:
raise RuntimeError(
"You cannot specify --external-ca-profile without "
"--external-ca")
if self.uninstalling: # pylint: disable=using-constant-test
if (self.realm_name or self.admin_password or
self.master_password):
raise RuntimeError(
"In uninstall mode, -a, -r and -P options are not "
"allowed")
elif not self.interactive:
if (not self.realm_name or not self.dm_password or
not self.admin_password):
raise RuntimeError(
"In unattended mode you need to provide at least -r, "
"-p and -a options")
if self.setup_dns:
if (not self.forwarders and
not self.no_forwarders and
not self.auto_forwarders):
raise RuntimeError(
"You must specify at least one of --forwarder, "
"--auto-forwarders, or --no-forwarders options")
any_ignore_option_true = any(
[self.ignore_topology_disconnect, self.ignore_last_of_role])
if any_ignore_option_true and not self.uninstalling:
raise RuntimeError(
"'--ignore-topology-disconnect/--ignore-last-of-role' "
"options can be used only during uninstallation")
min_idstart = get_min_idstart()
if self.idstart < min_idstart:
raise RuntimeError(
"idstart (%i) must be larger than UID_MAX/GID_MAX (%i) "
"setting in /etc/login.defs." % (
self.idstart, min_idstart
)
)
if self.idmax < self.idstart:
raise RuntimeError(
"idmax (%s) cannot be smaller than idstart (%s)" %
(self.idmax, self.idstart))
else:
# replica installers
if self.servers and not self.domain_name:
raise RuntimeError(
"The --server option cannot be used without providing "
"domain via the --domain option")
if self.setup_dns:
if (not self.forwarders and
not self.no_forwarders and
not self.auto_forwarders):
raise RuntimeError(
"You must specify at least one of --forwarder, "
"--auto-forwarders, or --no-forwarders options")
ServerMasterInstallInterface = installs_master(ServerInstallInterface)
class ServerMasterInstall(ServerMasterInstallInterface):
"""
Server master installer
"""
force_join = False
servers = None
no_wait_for_dns = True
host_password = None
keytab = None
setup_ca = True
domain_name = extend_knob(
ServerMasterInstallInterface.domain_name,
)
@domain_name.validator
def domain_name(self, value):
        # There might be an overlap but at this point we don't have a
        # complete installer object to verify that DNS is hosted
# by the same machine (i.e. we are already installed).
# Later, DNS.install_check will do its zone overlap check
# and will make sure to fail if overlap does really exist.
# At this point we only verify that value is a valid DNS syntax.
validate_domain_name(value)
dm_password = extend_knob(
ServerMasterInstallInterface.dm_password,
)
@dm_password.validator
def dm_password(self, value):
validate_dm_password(value)
admin_password = extend_knob(
ServerMasterInstallInterface.admin_password,
description="admin user kerberos password",
)
@admin_password.validator
def admin_password(self, value):
validate_admin_password(value)
# always run sidgen task and do not allow adding agents on first master
add_sids = True
add_agents = False
def __init__(self, **kwargs):
super(ServerMasterInstall, self).__init__(**kwargs)
master_init(self)
@step()
def main(self):
master_install_check(self)
yield
master_install(self)
@main.uninstaller
def main(self):
uninstall_check(self)
yield
uninstall(self)
ServerReplicaInstallInterface = installs_replica(ServerInstallInterface)
class ServerReplicaInstall(ServerReplicaInstallInterface):
"""
Server replica installer
"""
subject_base = None
ca_subject = None
admin_password = extend_knob(
ServerReplicaInstallInterface.admin_password,
description="Kerberos password for the specified admin principal",
)
def __init__(self, **kwargs):
super(ServerReplicaInstall, self).__init__(**kwargs)
replica_init(self)
@step()
def main(self):
replica_promote_check(self)
yield
replica_install(self)
| 21,786 | Python | .py | 548 | 29.182482 | 80 | 0.601088 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,790 | replicainstall.py | freeipa_freeipa/ipaserver/install/server/replicainstall.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import contextlib
import logging
import dns.exception as dnsexception
import dns.name as dnsname
import itertools
import os
import shutil
import socket
import sys
import tempfile
import textwrap
import traceback
from pkg_resources import parse_version
import six
from ipaclient.install.client import check_ldap_conf, sssd_enable_ifp
import ipaclient.install.timeconf
from ipalib.install import sysrestore
from ipalib.kinit import kinit_keytab
from ipapython import ipaldap, ipautil
from ipapython.dn import DN
from ipapython.dnsutil import DNSResolver
from ipapython.admintool import ScriptError
from ipapython.ipachangeconf import IPAChangeConf
from ipaplatform import services
from ipaplatform.tasks import tasks
from ipaplatform.paths import paths
from ipalib import api, constants, create_api, errors, rpc
from ipalib.config import Env
from ipalib.facts import is_ipa_configured, is_ipa_client_configured
from ipalib.util import no_matching_interface_for_ip_address_warning
from ipaclient.install.client import configure_krb5_conf, purge_host_keytab
from ipaserver.install.dogtaginstance import INTERNAL_TOKEN
from ipaserver.install import (
adtrust, bindinstance, ca, cainstance, dns, dsinstance, httpinstance,
installutils, kra, krainstance, krbinstance, otpdinstance,
custodiainstance, service,)
from ipaserver.install import certs
from ipaserver.install.installutils import (
ReplicaConfig, load_pkcs12, validate_mask)
from ipaserver.install.replication import (
ReplicationManager, replica_conn_check)
from ipaserver.masters import find_providing_servers, find_providing_server
import SSSDConfig
from subprocess import CalledProcessError
if six.PY3:
unicode = str
NoneType = type(None)
logger = logging.getLogger(__name__)
def get_dirman_password():
return installutils.read_password("Directory Manager (existing master)",
confirm=False, validate=False)
def make_pkcs12_info(directory, cert_name, password_name):
"""Make pkcs12_info
:param directory: Base directory (config.dir)
:param cert_name: Cert filename (e.g. "dscert.p12")
    :param password_name: Password filename (e.g. "dirsrv_pin.txt")
:return: a (full cert path, password) tuple, or None if cert is not found
"""
cert_path = os.path.join(directory, cert_name)
if os.path.isfile(cert_path):
password_file = os.path.join(directory, password_name)
password = open(password_file).read().strip()
return cert_path, password
else:
return None
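# A hedged usage sketch mirroring the callers below; the directory is
# hypothetical, the file names match the ones used in this module.
def _demo_make_pkcs12_info():
    pkcs12_info = make_pkcs12_info('/tmp/replica-info', 'dscert.p12',
                                   'dirsrv_pin.txt')
    if pkcs12_info is not None:
        cert_path, password = pkcs12_info
        print(cert_path, password)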
def install_replica_ds(config, options, ca_is_configured, remote_api,
ca_file, pkcs12_info=None, fstore=None):
dsinstance.check_ports()
# if we have a pkcs12 file, create the cert db from
# that. Otherwise the ds setup will create the CA
# cert
if pkcs12_info is None:
pkcs12_info = make_pkcs12_info(config.dir, "dscert.p12",
"dirsrv_pin.txt")
if ca_is_configured:
ca_subject = ca.lookup_ca_subject(remote_api, config.subject_base)
else:
ca_subject = installutils.default_ca_subject_dn(config.subject_base)
ds = dsinstance.DsInstance(
config_ldif=options.dirsrv_config_file,
fstore=fstore)
ds.create_replica(
realm_name=config.realm_name,
master_fqdn=config.master_host_name,
fqdn=config.host_name,
domain_name=config.domain_name,
dm_password=config.dirman_password,
subject_base=config.subject_base,
ca_subject=ca_subject,
pkcs12_info=pkcs12_info,
ca_is_configured=ca_is_configured,
ca_file=ca_file,
api=remote_api,
setup_pkinit=not options.no_pkinit,
)
return ds
def install_krb(config, setup_pkinit=False, pkcs12_info=None, fstore=None):
krb = krbinstance.KrbInstance(fstore=fstore)
# pkinit files
if pkcs12_info is None:
pkcs12_info = make_pkcs12_info(config.dir, "pkinitcert.p12",
"pkinit_pin.txt")
krb.create_replica(config.realm_name,
config.master_host_name, config.host_name,
config.domain_name, config.dirman_password,
setup_pkinit, pkcs12_info,
subject_base=config.subject_base)
return krb
def install_http(config, auto_redirect, ca_is_configured, ca_file,
pkcs12_info=None, fstore=None):
    # if we have a pkcs12 file, create the cert db from that.
    # Otherwise the instance setup will create the CA cert
if pkcs12_info is None:
pkcs12_info = make_pkcs12_info(config.dir, "httpcert.p12",
"http_pin.txt")
http = httpinstance.HTTPInstance(fstore=fstore)
http.create_instance(
config.realm_name, config.host_name, config.domain_name,
config.dirman_password, pkcs12_info,
auto_redirect=auto_redirect, ca_file=ca_file,
ca_is_configured=ca_is_configured, promote=True,
subject_base=config.subject_base, master_fqdn=config.master_host_name)
return http
def install_dns_records(config, options, remote_api, fstore=None):
if not bindinstance.dns_container_exists(
ipautil.realm_to_suffix(config.realm_name)):
return
try:
bind = bindinstance.BindInstance(api=remote_api, fstore=fstore)
for ip in config.ips:
reverse_zone = bindinstance.find_reverse_zone(ip, remote_api)
bind.add_master_dns_records(config.host_name,
[str(ip)],
config.realm_name,
config.domain_name,
reverse_zone)
except errors.NotFound as e:
logger.debug('Replica DNS records could not be added '
'on master: %s', str(e))
# we should not fail here no matter what
except Exception as e:
logger.info('Replica DNS records could not be added '
'on master: %s', str(e))
def create_ipa_conf(fstore, config, ca_enabled, master=None):
"""
Create /etc/ipa/default.conf master configuration
:param fstore: sysrestore file store used for backup and restore of
the server configuration
:param config: replica config
:param ca_enabled: True if the topology includes a CA
:param master: if set, the xmlrpc_uri parameter will use the provided
master instead of this host
"""
# Save client file on Domain Level 1
target_fname = paths.IPA_DEFAULT_CONF
fstore.backup_file(target_fname)
ipaconf = IPAChangeConf("IPA Replica Install")
ipaconf.setOptionAssignment(" = ")
ipaconf.setSectionNameDelimiters(("[", "]"))
if master:
xmlrpc_uri = 'https://{0}/ipa/xml'.format(
ipautil.format_netloc(master))
else:
xmlrpc_uri = 'https://{0}/ipa/xml'.format(
ipautil.format_netloc(config.host_name))
ldapi_uri = ipaldap.realm_to_ldapi_uri(config.realm_name)
# [global] section
gopts = [
ipaconf.setOption('basedn', str(config.basedn)),
ipaconf.setOption('host', config.host_name),
ipaconf.setOption('realm', config.realm_name),
ipaconf.setOption('domain', config.domain_name),
ipaconf.setOption('xmlrpc_uri', xmlrpc_uri),
ipaconf.setOption('ldap_uri', ldapi_uri),
ipaconf.setOption('mode', 'production')
]
if ca_enabled:
gopts.extend([
ipaconf.setOption('enable_ra', 'True'),
ipaconf.setOption('ra_plugin', 'dogtag'),
ipaconf.setOption('dogtag_version', '10')
])
if not config.setup_ca:
gopts.append(ipaconf.setOption('ca_host', config.ca_host_name))
else:
gopts.extend([
ipaconf.setOption('enable_ra', 'False'),
ipaconf.setOption('ra_plugin', 'None')
])
opts = [
ipaconf.setSection('global', gopts),
{'name': 'empty', 'type': 'empty'}
]
ipaconf.newConf(target_fname, opts)
# the new file must be readable for httpd
# Also, umask applies when creating a new file but we want 0o644 here
os.chmod(target_fname, 0o644)
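# For reference, on a replica with a CA in the topology the function above
# renders a /etc/ipa/default.conf along these lines. All values are
# illustrative, and the ldapi path is an assumption about the output of
# realm_to_ldapi_uri, not a verified literal:
#
#   [global]
#   basedn = dc=example,dc=test
#   host = replica.example.test
#   realm = EXAMPLE.TEST
#   domain = example.test
#   xmlrpc_uri = https://replica.example.test/ipa/xml
#   ldap_uri = ldapi://%2fvar%2frun%2fslapd-EXAMPLE-TEST.socket
#   mode = production
#   enable_ra = True
#   ra_plugin = dogtag
#   dogtag_version = 10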
def check_dirsrv():
(ds_unsecure, ds_secure) = dsinstance.check_ports()
if not ds_unsecure or not ds_secure:
msg = ("IPA requires ports 389 and 636 for the Directory Server.\n"
"These are currently in use:\n")
if not ds_unsecure:
msg += "\t389\n"
if not ds_secure:
msg += "\t636\n"
raise ScriptError(msg)
def check_dns_resolution(host_name, dns_servers):
"""Check forward and reverse resolution of host_name using dns_servers
"""
# Point the resolver at specified DNS server
server_ips = []
for dns_server in dns_servers:
try:
server_ips = list(
a[4][0] for a in socket.getaddrinfo(dns_server, None))
except socket.error:
pass
else:
break
if not server_ips:
logger.error(
'Could not resolve any DNS server hostname: %s', dns_servers)
return False
resolver = DNSResolver()
resolver.nameservers = server_ips
logger.debug('Search DNS server %s (%s) for %s',
dns_server, server_ips, host_name)
# Get IP addresses of host_name
addresses = set()
for rtype in 'A', 'AAAA':
try:
result = resolver.resolve(host_name, rtype)
except dnsexception.DNSException:
rrset = []
else:
rrset = result.rrset
if rrset:
addresses.update(r.address for r in result.rrset)
if not addresses:
logger.error(
'Could not resolve hostname %s using DNS. '
'Clients may not function properly. '
'Please check your DNS setup. '
'(Note that this check queries IPA DNS directly and '
'ignores /etc/hosts.)',
host_name)
return False
no_errors = True
# Check each of the IP addresses
checked = set()
for address in addresses:
if address in checked:
continue
checked.add(address)
try:
logger.debug('Check reverse address %s (%s)', address, host_name)
rrset = resolver.resolve_address(address).rrset
except Exception as e:
logger.debug('Check failed: %s %s', type(e).__name__, e)
logger.error(
'Reverse DNS resolution of address %s (%s) failed. '
'Clients may not function properly. '
'Please check your DNS setup. '
'(Note that this check queries IPA DNS directly and '
'ignores /etc/hosts.)',
address, host_name)
no_errors = False
else:
host_name_obj = dnsname.from_text(host_name)
if rrset:
names = [r.target.to_text() for r in rrset]
else:
names = []
logger.debug(
'Address %s resolves to: %s. ', address, ', '.join(names))
if not rrset or not any(
r.target == host_name_obj for r in rrset):
logger.error(
'The IP address %s of host %s resolves to: %s. '
'Clients may not function properly. '
'Please check your DNS setup. '
'(Note that this check queries IPA DNS directly and '
'ignores /etc/hosts.)',
address, host_name, ', '.join(names))
no_errors = False
return no_errors
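# Hedged usage sketch (host and name server are illustrative):
#
#   ok = check_dns_resolution('replica.example.test', ['ns1.example.test'])
#
# A False result means at least one forward or reverse lookup failed;
# the specific errors are logged by the function itself.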
def configure_certmonger():
dbus = services.knownservices.dbus
if not dbus.is_running():
# some platforms protect dbus with RefuseManualStart=True
try:
dbus.start()
except Exception as e:
raise ScriptError("dbus service unavailable: %s" % str(e),
rval=3)
# Ensure that certmonger has been started at least once to generate the
# cas files in /var/lib/certmonger/cas.
cmonger = services.knownservices.certmonger
try:
cmonger.restart()
except Exception as e:
raise ScriptError("Certmonger service unavailable: %s" % str(e),
rval=3)
try:
cmonger.enable()
except Exception as e:
raise ScriptError("Failed to enable Certmonger: %s" % str(e),
rval=3)
def remove_replica_info_dir(installer):
# always try to remove decrypted replica file
try:
if installer._top_dir is not None:
shutil.rmtree(installer._top_dir)
except OSError:
pass
def common_cleanup(func):
def decorated(installer):
try:
try:
func(installer)
except BaseException:
remove_replica_info_dir(installer)
raise
except KeyboardInterrupt:
raise ScriptError()
except Exception:
print(
"Your system may be partly configured.\n"
"Run /usr/sbin/ipa-server-install --uninstall to clean up.\n")
raise
return decorated
def preserve_enrollment_state(func):
"""
    Makes sure the machine is unenrolled if the decorated function
failed.
"""
def decorated(installer):
try:
func(installer)
except BaseException:
if installer._enrollment_performed:
uninstall_client()
raise
return decorated
def uninstall_client():
"""
Attempts to unenroll the IPA client using the ipa-client-install utility.
An unsuccessful attempt to uninstall is ignored (no exception raised).
"""
print("Removing client side components")
ipautil.run([paths.IPA_CLIENT_INSTALL, "--unattended", "--uninstall"],
raiseonerr=False, redirect_output=True)
print()
def promote_sssd(host_name):
sssdconfig = SSSDConfig.SSSDConfig()
sssdconfig.import_config()
domains = sssdconfig.list_active_domains()
for name in domains:
domain = sssdconfig.get_domain(name)
try:
hostname = domain.get_option('ipa_hostname')
if hostname == host_name:
break
except SSSDConfig.NoOptionError:
continue
else:
raise RuntimeError("Couldn't find IPA domain in sssd.conf")
domain.set_option('ipa_server', host_name)
domain.set_option('ipa_server_mode', True)
sssdconfig.save_domain(domain)
sssd_enable_ifp(sssdconfig)
sssdconfig.write()
sssd = services.service('sssd', api)
try:
sssd.restart()
except CalledProcessError:
logger.warning("SSSD service restart was unsuccessful.")
def promote_openldap_conf(hostname, master):
"""
    Reset the URI directive in the openldap client configuration file to
    point to the newly promoted replica. If this directive was set by a
    third party, replace the added comment with one pointing to the replica
:param hostname: replica FQDN
:param master: FQDN of remote master
"""
ldap_conf = paths.OPENLDAP_LDAP_CONF
ldap_change_conf = IPAChangeConf("IPA replica installer")
ldap_change_conf.setOptionAssignment((" ", "\t"))
new_opts = []
with open(ldap_conf, 'r') as f:
old_opts = ldap_change_conf.parse(f)
for opt in old_opts:
if opt['type'] == 'comment' and master in opt['value']:
continue
if (opt['type'] == 'option' and opt['name'] == 'URI' and
master in opt['value']):
continue
new_opts.append(opt)
change_opts = [
{'action': 'addifnotset',
'name': 'URI',
'type': 'option',
'value': 'ldaps://' + hostname}
]
try:
ldap_change_conf.newConf(ldap_conf, new_opts)
ldap_change_conf.changeConf(ldap_conf, change_opts)
except Exception as e:
logger.info("Failed to update %s: %s", ldap_conf, e)
@contextlib.contextmanager
def rpc_client(api):
"""
Context manager for JSON RPC client.
:param api: api to initiate the RPC client
"""
client = rpc.jsonclient(api)
client.finalize()
client.connect()
try:
yield client
finally:
client.disconnect()
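# Hedged usage sketch, mirroring check_remote_version() below:
#
#   with rpc_client(remote_api) as client:
#       env = client.forward(u'env', u'version')['result']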
def check_remote_fips_mode(client, local_fips_mode):
"""
Verify remote server's fips-mode is the same as this server's fips-mode
:param client: RPC client
:param local_fips_mode: boolean indicating whether FIPS mode is turned on
    :raises: ScriptError: if the check fails
"""
env = client.forward(u'env', u'fips_mode')['result']
remote_fips_mode = env.get('fips_mode', False)
if local_fips_mode != remote_fips_mode:
if local_fips_mode:
raise ScriptError(
"Cannot join FIPS-enabled replica into existing topology: "
"FIPS is not enabled on the master server.")
else:
raise ScriptError(
"Cannot join replica into existing FIPS-enabled topology: "
"FIPS has to be enabled locally first.")
def check_remote_version(client, local_version):
"""
Verify remote server's version is not higher than this server's version
:param client: RPC client
:param local_version: API version of local server
    :raises: ScriptError: if the check fails
"""
env = client.forward(u'env', u'version')['result']
remote_version = parse_version(env['version'])
if remote_version > local_version:
raise ScriptError(
"Cannot install replica of a server of higher version ({}) than "
"the local version ({})".format(remote_version, local_version))
def common_check(no_ntp, skip_mem_check, setup_ca):
if not skip_mem_check:
installutils.check_available_memory(ca=setup_ca)
tasks.check_ipv6_stack_enabled()
tasks.check_selinux_status()
check_ldap_conf()
mask_str = validate_mask()
if mask_str:
raise ScriptError(
"Unexpected system mask: %s, expected 0022" % mask_str)
if is_ipa_configured():
raise ScriptError(
"IPA server is already configured on this system.\n"
"If you want to reinstall the IPA server, please uninstall "
"it first using 'ipa-server-install --uninstall'.")
check_dirsrv()
if not no_ntp:
try:
ipaclient.install.timeconf.check_timedate_services()
except ipaclient.install.timeconf.NTPConflictingService as e:
print("WARNING: conflicting time&date synchronization service "
"'{svc}' will\nbe disabled in favor of chronyd\n"
.format(svc=e.conflicting_service))
except ipaclient.install.timeconf.NTPConfigurationError:
pass
def current_domain_level(api):
"""Return the current domain level.
"""
# Detect the current domain level
try:
return api.Command['domainlevel_get']()['result']
except errors.NotFound:
# If we're joining an older master, domain entry is not
# available
return constants.DOMAIN_LEVEL_0
def check_domain_level_is_supported(current):
"""Check that the given domain level is supported by this server version.
:raises: ScriptError if DL is out of supported range for this IPA version.
"""
under_lower_bound = current < constants.MIN_DOMAIN_LEVEL
above_upper_bound = current > constants.MAX_DOMAIN_LEVEL
if under_lower_bound or above_upper_bound:
message = ("This version of IPA does not support "
"the Domain Level which is currently set for "
"this domain. The Domain Level needs to be "
"raised before installing a replica with "
"this version is allowed to be installed "
"within this domain.")
logger.error("%s", message)
raise ScriptError(message, rval=3)
def enroll_dl0_replica(installer, fstore, remote_api, debug=False):
"""
Do partial host enrollment in DL0:
* add host entry to remote master
* request host keytab from remote master
* configure client-like /etc/krb5.conf to enable GSSAPI auth further
down the replica installation
"""
logger.info("Enrolling host to IPA domain")
config = installer._config
hostname = config.host_name
try:
installer._enrollment_performed = True
host_result = remote_api.Command.host_add(
unicode(config.host_name), force=installer.no_host_dns
)['result']
host_princ = unicode(host_result['krbcanonicalname'][0])
purge_host_keytab(config.realm_name)
getkeytab_args = [
paths.IPA_GETKEYTAB,
'-s', config.master_host_name,
'-p', host_princ,
'-D', unicode(ipaldap.DIRMAN_DN),
'-w', config.dirman_password,
'-k', paths.KRB5_KEYTAB,
'--cacert', os.path.join(config.dir, 'ca.crt')
]
ipautil.run(getkeytab_args, nolog=(config.dirman_password,))
_hostname, _sep, host_domain = hostname.partition('.')
fstore.backup_file(paths.KRB5_CONF)
configure_krb5_conf(
config.realm_name,
config.domain_name,
[config.master_host_name],
[config.master_host_name],
False,
paths.KRB5_CONF,
host_domain,
hostname,
configure_sssd=False
)
except CalledProcessError as e:
raise RuntimeError("Failed to fetch host keytab: {}".format(e))
def ensure_enrolled(installer):
args = [paths.IPA_CLIENT_INSTALL, "--unattended"]
stdin = None
nolog = []
if installer.domain_name:
args.extend(["--domain", installer.domain_name])
if installer.server:
args.extend(["--server", installer.server])
if installer.realm_name:
args.extend(["--realm", installer.realm_name])
if installer.host_name:
args.extend(["--hostname", installer.host_name])
if installer.password:
args.extend(["--password", installer.password])
nolog.append(installer.password)
else:
if installer.admin_password:
# Always set principal if password was set explicitly,
# the password itself gets passed directly via stdin
args.extend(["--principal", installer.principal or "admin"])
stdin = installer.admin_password
if installer.keytab:
args.extend(["--keytab", installer.keytab])
if installer.no_dns_sshfp:
args.append("--no-dns-sshfp")
if installer.ssh_trust_dns:
args.append("--ssh-trust-dns")
if installer.no_ssh:
args.append("--no-ssh")
if installer.no_sshd:
args.append("--no-sshd")
if installer.mkhomedir:
args.append("--mkhomedir")
if installer.subid:
args.append("--subid")
if installer.force_join:
args.append("--force-join")
if installer.no_ntp:
args.append("--no-ntp")
if installer.ip_addresses:
for ip in installer.ip_addresses:
# installer.ip_addresses is of type [CheckedIPAddress]
args.extend(("--ip-address", str(ip)))
if installer.ntp_servers:
for server in installer.ntp_servers:
args.extend(("--ntp-server", server))
if installer.ntp_pool:
args.extend(("--ntp-pool", installer.ntp_pool))
try:
# Call client install script
service.print_msg("Configuring client side components")
installer._enrollment_performed = True
ipautil.run(args, stdin=stdin, nolog=nolog, redirect_output=True)
print()
except ipautil.CalledProcessError:
raise ScriptError("Configuration of client side components failed!")
def promotion_check_ipa_domain(master_ldap_conn, basedn):
entry = master_ldap_conn.get_entry(basedn, ['associatedDomain'])
if 'associatedDomain' not in entry:
raise RuntimeError('IPA domain not found in LDAP.')
if len(entry['associatedDomain']) > 1:
logger.critical(
"Multiple IPA domains found. We are so sorry :-(, you are "
"probably experiencing this bug "
"https://fedorahosted.org/freeipa/ticket/5976. Please contact us "
"for help.")
raise RuntimeError(
'Multiple IPA domains found in LDAP database ({domains}). '
'Only one domain is allowed.'.format(
domains=u', '.join(entry['associatedDomain'])
))
if entry['associatedDomain'][0] != api.env.domain:
raise RuntimeError(
"Cannot promote this client to a replica. Local domain "
"'{local}' does not match IPA domain '{ipadomain}'. ".format(
local=api.env.domain,
ipadomain=entry['associatedDomain'][0]
))
def promotion_check_host_principal_auth_ind(conn, hostdn):
entry = conn.get_entry(hostdn, ['krbprincipalauthind'])
if 'krbprincipalauthind' in entry:
raise RuntimeError(
"Client cannot be promoted to a replica if the host principal "
"has an authentication indicator set."
)
def clean_up_hsm_nicknames(api):
"""Ensure that all of the nicknames on the token are visible on
the NSS softoken.
"""
    # Hardcode the token names. NSS tooling does not provide a
    # public way to determine them other than scraping modutil
    # output.
if tasks.is_fips_enabled():
dbname = 'NSS FIPS 140-2 Certificate DB'
else:
dbname = 'NSS Certificate DB'
api.Backend.ldap2.connect()
(token_name, _unused) = ca.lookup_hsm_configuration(api)
api.Backend.ldap2.disconnect()
if not token_name:
return
cai = cainstance.CAInstance(api.env.realm, host_name=api.env.host)
dogtag_reqs = cai.tracking_reqs.items()
kra = krainstance.KRAInstance(api.env.realm)
if kra.is_installed():
dogtag_reqs = itertools.chain(dogtag_reqs,
kra.tracking_reqs.items())
try:
tmpdir = tempfile.mkdtemp(prefix="tmp-")
pwd_file = os.path.join(tmpdir, "pwd_file")
with open(pwd_file, "w") as pwd:
with open(paths.PKI_TOMCAT_PASSWORD_CONF, 'r') as fd:
for line in fd:
(token, pin) = line.split('=', 1)
if token.startswith('hardware-'):
token = token.replace('hardware-', '')
pwd.write(f'{token}:{pin}')
elif token == INTERNAL_TOKEN:
pwd.write(f'{dbname}:{pin}')
pwd.flush()
db = certs.CertDB(api.env.realm,
nssdir=paths.PKI_TOMCAT_ALIAS_DIR,
pwd_file=pwd_file)
for (nickname, _unused) in dogtag_reqs:
try:
if nickname in (
'caSigningCert cert-pki-ca',
'Server-Cert cert-pki-ca'
):
continue
if nickname in (
'auditSigningCert cert-pki-ca',
'auditSigningCert cert-pki-kra',
):
trust = ',,P'
else:
trust = ',,'
db.run_certutil(['-M',
'-n', f"{token_name}:{nickname}",
'-t', trust])
except CalledProcessError as e:
logger.debug("Modifying trust on %s failed: %s",
nickname, e)
if db.has_nickname('Directory Server CA certificate'):
db.run_certutil(['--rename',
'-n', 'Directory Server CA certificate',
'--new-n', 'caSigningCert cert-pki-ca'],
raiseonerr=False)
finally:
shutil.rmtree(tmpdir)
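# For reference, the password-file translation above maps pki-tomcat
# password.conf entries into certutil's token:PIN format (token name and
# PIN are illustrative; <INTERNAL_TOKEN> stands for the imported constant):
#
#   hardware-nethsm=SECRET    ->  nethsm:SECRET
#   <INTERNAL_TOKEN>=SECRET   ->  NSS Certificate DB:SECRET  (non-FIPS dbname)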
def remote_connection(config):
logger.debug("Creating LDAP connection to %s", config.master_host_name)
ldapuri = 'ldaps://%s' % ipautil.format_netloc(config.master_host_name)
xmlrpc_uri = 'https://{}/ipa/xml'.format(
ipautil.format_netloc(config.master_host_name))
remote_api = create_api(mode=None)
remote_api.bootstrap(in_server=True,
context='installer',
confdir=paths.ETC_IPA,
ldap_uri=ldapuri,
xmlrpc_uri=xmlrpc_uri)
remote_api.finalize()
return remote_api
@common_cleanup
@preserve_enrollment_state
def promote_check(installer):
options = installer
installer._enrollment_performed = False
installer._top_dir = tempfile.mkdtemp("ipa")
# check selinux status, http and DS ports, NTP conflicting services
common_check(options.no_ntp, options.skip_mem_check, options.setup_ca)
if options.setup_ca and any([options.dirsrv_cert_files,
options.http_cert_files,
options.pkinit_cert_files]):
raise ScriptError("--setup-ca and --*-cert-file options are "
"mutually exclusive")
ipa_client_installed = is_ipa_client_configured(on_master=True)
if not ipa_client_installed:
# One-step replica installation
if options.password and options.admin_password:
raise ScriptError("--password and --admin-password options are "
"mutually exclusive")
ensure_enrolled(installer)
else:
if (options.domain_name or options.server or options.realm_name or
options.host_name or options.password or options.keytab):
print("IPA client is already configured on this system, ignoring "
"the --domain, --server, --realm, --hostname, --password "
"and --keytab options.")
# Make sure options.server is not used
options.server = None
        # The NTP configuration cannot be touched on a pre-installed client:
if options.no_ntp or options.ntp_servers or options.ntp_pool:
raise ScriptError(
"NTP configuration cannot be updated during promotion")
sstore = sysrestore.StateFile(paths.SYSRESTORE)
fstore = sysrestore.FileStore(paths.SYSRESTORE)
env = Env()
env._bootstrap(context='installer', confdir=paths.ETC_IPA, log=None)
env._finalize_core(**dict(constants.DEFAULT_CONFIG))
xmlrpc_uri = 'https://{}/ipa/xml'.format(ipautil.format_netloc(env.host))
api.bootstrap(in_server=True,
context='installer',
confdir=paths.ETC_IPA,
ldap_uri=ipaldap.realm_to_ldapi_uri(env.realm),
xmlrpc_uri=xmlrpc_uri)
api.finalize()
config = ReplicaConfig()
config.realm_name = api.env.realm
config.host_name = api.env.host
config.domain_name = api.env.domain
config.master_host_name = api.env.server
if not api.env.ca_host or api.env.ca_host == api.env.host:
# ca_host has not been configured explicitly, prefer source master
config.ca_host_name = api.env.server
else:
# default to ca_host from IPA config
config.ca_host_name = api.env.ca_host
config.kra_host_name = config.ca_host_name
config.ca_ds_port = 389
config.setup_ca = options.setup_ca
config.setup_kra = options.setup_kra
config.dir = installer._top_dir
config.basedn = api.env.basedn
config.hidden_replica = options.hidden_replica
http_pkcs12_file = None
http_pkcs12_info = None
http_ca_cert = None
dirsrv_pkcs12_file = None
dirsrv_pkcs12_info = None
dirsrv_ca_cert = None
pkinit_pkcs12_file = None
pkinit_pkcs12_info = None
pkinit_ca_cert = None
if options.http_cert_files:
if options.http_pin is None:
options.http_pin = installutils.read_password(
"Enter Apache Server private key unlock",
confirm=False, validate=False, retry=False)
if options.http_pin is None:
raise ScriptError(
"Apache Server private key unlock password required")
http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
cert_files=options.http_cert_files,
key_password=options.http_pin,
key_nickname=options.http_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=config.host_name)
http_pkcs12_info = (http_pkcs12_file.name, http_pin)
if options.dirsrv_cert_files:
if options.dirsrv_pin is None:
options.dirsrv_pin = installutils.read_password(
"Enter Directory Server private key unlock",
confirm=False, validate=False, retry=False)
if options.dirsrv_pin is None:
raise ScriptError(
"Directory Server private key unlock password required")
dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
cert_files=options.dirsrv_cert_files,
key_password=options.dirsrv_pin,
key_nickname=options.dirsrv_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=config.host_name)
dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, dirsrv_pin)
if options.pkinit_cert_files:
if options.pkinit_pin is None:
options.pkinit_pin = installutils.read_password(
"Enter Kerberos KDC private key unlock",
confirm=False, validate=False, retry=False)
if options.pkinit_pin is None:
raise ScriptError(
"Kerberos KDC private key unlock password required")
pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
cert_files=options.pkinit_cert_files,
key_password=options.pkinit_pin,
key_nickname=options.pkinit_cert_name,
ca_cert_files=options.ca_cert_files,
realm_name=config.realm_name)
pkinit_pkcs12_info = (pkinit_pkcs12_file.name, pkinit_pin)
if (options.http_cert_files and options.dirsrv_cert_files and
http_ca_cert != dirsrv_ca_cert):
raise RuntimeError("Apache Server SSL certificate and Directory "
"Server SSL certificate are not signed by the same"
" CA certificate")
if (options.http_cert_files and
options.pkinit_cert_files and
http_ca_cert != pkinit_ca_cert):
raise RuntimeError("Apache Server SSL certificate and PKINIT KDC "
"certificate are not signed by the same CA "
"certificate")
installutils.verify_fqdn(config.host_name, options.no_host_dns)
# Inside the container environment master's IP address does not
# resolve to its name. See https://pagure.io/freeipa/issue/6210
container_environment = tasks.detect_container() is not None
installutils.verify_fqdn(config.master_host_name, options.no_host_dns,
local_hostname=not container_environment)
if config.host_name.lower() == config.domain_name.lower():
raise ScriptError("hostname cannot be the same as the domain name")
ccache = os.environ['KRB5CCNAME']
kinit_keytab('host/{env.host}@{env.realm}'.format(env=api.env),
paths.KRB5_KEYTAB,
ccache)
if ipa_client_installed:
# host was already an IPA client, refresh client cert stores to
# ensure we have up to date CA certs.
try:
ipautil.run([paths.IPA_CERTUPDATE])
except ipautil.CalledProcessError:
raise RuntimeError("ipa-certupdate failed to refresh certs.")
remote_api = remote_connection(config)
installer._remote_api = remote_api
with rpc_client(remote_api) as client:
check_remote_version(client, parse_version(api.env.version))
check_remote_fips_mode(client, api.env.fips_mode)
conn = remote_api.Backend.ldap2
replman = None
try:
# Try out authentication
conn.connect(ccache=ccache)
replman = ReplicationManager(config.realm_name,
config.master_host_name, None)
promotion_check_ipa_domain(conn, remote_api.env.basedn)
hostdn = DN(('fqdn', api.env.host),
api.env.container_host,
api.env.basedn)
promotion_check_host_principal_auth_ind(conn, hostdn)
# Make sure that domain fulfills minimal domain level
# requirement
domain_level = current_domain_level(remote_api)
check_domain_level_is_supported(domain_level)
if domain_level < constants.MIN_DOMAIN_LEVEL:
raise RuntimeError(
"Cannot promote this client to a replica. The domain level "
"must be raised to {mindomainlevel} before the replica can be "
"installed".format(
mindomainlevel=constants.MIN_DOMAIN_LEVEL
))
# Check authorization
result = remote_api.Command['hostgroup_find'](
cn=u'ipaservers',
host=[unicode(api.env.host)]
)['result']
add_to_ipaservers = not result
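        # An empty result means this host is not yet in the 'ipaservers'
        # host group and must be added with sufficiently privileged
        # credentials.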
if add_to_ipaservers:
if options.password and not options.admin_password:
raise errors.ACIError(info="Not authorized")
if installer._ccache is None:
os.environ.pop('KRB5CCNAME', None)
else:
os.environ['KRB5CCNAME'] = installer._ccache
try:
installutils.check_creds(options, config.realm_name)
installer._ccache = os.environ.get('KRB5CCNAME')
finally:
os.environ['KRB5CCNAME'] = ccache
conn.disconnect()
conn.connect(ccache=installer._ccache)
try:
result = remote_api.Command['hostgroup_show'](
u'ipaservers',
all=True,
rights=True
)['result']
if 'w' not in result['attributelevelrights']['member']:
raise errors.ACIError(info="Not authorized")
finally:
conn.disconnect()
conn.connect(ccache=ccache)
# Check that we don't already have a replication agreement
if replman.get_replication_agreement(config.host_name):
msg = ("A replication agreement for this host already exists. "
"It needs to be removed.\n"
"Run this command on any working server:\n"
" %% ipa server-del {host} --force"
.format(host=config.host_name))
raise ScriptError(msg, rval=3)
# Detect if the other master can handle replication managers
# cn=replication managers,cn=sysaccounts,cn=etc,$SUFFIX
dn = DN(('cn', 'replication managers'),
api.env.container_sysaccounts,
ipautil.realm_to_suffix(config.realm_name))
try:
conn.get_entry(dn)
except errors.NotFound:
msg = ("The Replication Managers group is not available in "
"the domain. Replica promotion requires the use of "
"Replication Managers to be able to replicate data. "
"Upgrade the peer master or use the ipa-replica-prepare "
"command on the master and use a prep file to install "
"this replica.")
logger.error("%s", msg)
raise ScriptError(rval=3)
dns_masters = remote_api.Object['dnsrecord'].get_dns_masters()
if dns_masters:
if not options.no_host_dns:
logger.debug('Check forward/reverse DNS resolution')
resolution_ok = (
check_dns_resolution(config.master_host_name,
dns_masters) and
check_dns_resolution(config.host_name, dns_masters))
if not resolution_ok and installer.interactive:
if not ipautil.user_input("Continue?", False):
raise ScriptError(rval=0)
else:
logger.debug('No IPA DNS servers, '
'skipping forward/reverse resolution check')
entry_attrs = conn.get_ipa_config()
subject_base = entry_attrs.get('ipacertificatesubjectbase', [None])[0]
if subject_base is not None:
config.subject_base = DN(subject_base)
# Find any server with a CA
# The order of preference is
# 1. the first server specified in --server, if any
# 2. the server specified in the config file
# 3. any other
preferred_cas = [config.ca_host_name]
if options.server:
preferred_cas.insert(0, options.server)
ca_host = find_providing_server(
'CA', conn, preferred_cas
)
if ca_host is not None:
if options.setup_ca and config.master_host_name != ca_host:
conn.disconnect()
del remote_api
config.master_host_name = ca_host
remote_api = remote_connection(config)
installer._remote_api = remote_api
conn = remote_api.Backend.ldap2
conn.connect(ccache=installer._ccache)
config.ca_host_name = ca_host
            ca_enabled = True  # There is a CA somewhere in the topology
if options.dirsrv_cert_files:
logger.error("Certificates could not be provided when "
"CA is present on some master.")
raise ScriptError(rval=3)
if options.setup_ca and options.server and \
ca_host != options.server:
# Installer was provided with a specific master
# but this one doesn't provide CA
logger.error("The specified --server %s does not provide CA, "
"please provide a server with the CA role",
options.server)
raise ScriptError(rval=4)
else:
if options.setup_ca:
logger.error("The remote master does not have a CA "
"installed, can't set up CA")
raise ScriptError(rval=3)
ca_enabled = False
if not options.dirsrv_cert_files:
logger.error("Cannot issue certificates: a CA is not "
"installed. Use the --http-cert-file, "
"--dirsrv-cert-file options to provide "
"custom certificates.")
raise ScriptError(rval=3)
# Find any server with a KRA
# The order of preference is
# 1. the first server specified in --server, if any
# 2. the server specified in the config file
# 3. any other
preferred_kras = [config.kra_host_name]
if options.server:
preferred_kras.insert(0, options.server)
kra_host = find_providing_server(
'KRA', conn, preferred_kras
)
if kra_host is not None:
if options.setup_kra and config.master_host_name != kra_host:
conn.disconnect()
del remote_api
config.master_host_name = kra_host
remote_api = remote_connection(config)
installer._remote_api = remote_api
conn = remote_api.Backend.ldap2
conn.connect(ccache=installer._ccache)
config.kra_host_name = kra_host
            if options.setup_kra:  # only reset ca_host if KRA is requested
                config.ca_host_name = kra_host
            kra_enabled = True  # There is a KRA somewhere in the topology
if options.setup_kra and options.server and \
kra_host != options.server:
# Installer was provided with a specific master
# but this one doesn't provide KRA
logger.error("The specified --server %s does not provide KRA, "
"please provide a server with the KRA role",
options.server)
raise ScriptError(rval=4)
else:
if options.setup_kra:
logger.error("There is no active KRA server in the domain, "
"can't setup a KRA clone")
raise ScriptError(rval=3)
kra_enabled = False
if ca_enabled:
options.realm_name = config.realm_name
options.host_name = config.host_name
ca.install_check(False, config, options)
if kra_enabled:
try:
kra.install_check(remote_api, config, options)
except RuntimeError as e:
raise ScriptError(e)
if options.setup_dns:
dns.install_check(False, remote_api, True, options,
config.host_name)
config.ips = dns.ip_addresses
else:
config.ips = installutils.get_server_ip_address(
config.host_name, not installer.interactive,
False, options.ip_addresses)
# check addresses here, dns module is doing own check
no_matching_interface_for_ip_address_warning(config.ips)
# Always call adtrust.install_check
# if --setup-adtrust is not specified, only the SID part is executed
adtrust.install_check(False, options, remote_api)
except errors.ACIError:
logger.debug("%s", traceback.format_exc())
raise ScriptError("\nInsufficient privileges to promote the server."
"\nPossible issues:"
"\n- A user has insufficient privileges"
"\n- This client has insufficient privileges "
"to become an IPA replica")
except errors.LDAPError:
logger.debug("%s", traceback.format_exc())
raise ScriptError("\nUnable to connect to LDAP server %s" %
config.master_host_name)
finally:
if replman and replman.conn:
replman.conn.unbind()
if conn.isconnected():
conn.disconnect()
# check connection
if not options.skip_conncheck:
if add_to_ipaservers:
            # use the user's credentials when the host is not yet a
            # member of ipaservers
if installer._ccache is None:
os.environ.pop('KRB5CCNAME', None)
else:
os.environ['KRB5CCNAME'] = installer._ccache
try:
replica_conn_check(
config.master_host_name, config.host_name, config.realm_name,
options.setup_ca, 389,
options.admin_password, principal=options.principal,
ca_cert_file=paths.IPA_CA_CRT)
finally:
if add_to_ipaservers:
os.environ['KRB5CCNAME'] = ccache
installer._ca_enabled = ca_enabled
installer._kra_enabled = kra_enabled
installer._ca_file = paths.IPA_CA_CRT
installer._fstore = fstore
installer._sstore = sstore
installer._config = config
installer._add_to_ipaservers = add_to_ipaservers
installer._dirsrv_pkcs12_file = dirsrv_pkcs12_file
installer._dirsrv_pkcs12_info = dirsrv_pkcs12_info
installer._http_pkcs12_file = http_pkcs12_file
installer._http_pkcs12_info = http_pkcs12_info
installer._pkinit_pkcs12_file = pkinit_pkcs12_file
installer._pkinit_pkcs12_info = pkinit_pkcs12_info
@common_cleanup
def install(installer):
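    """Set up the replica using the state gathered by promote_check()."""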
options = installer
ca_enabled = installer._ca_enabled
kra_enabled = installer._kra_enabled
fstore = installer._fstore
sstore = installer._sstore
config = installer._config
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
remote_api = installer._remote_api
conn = remote_api.Backend.ldap2
ccache = os.environ['KRB5CCNAME']
# Be clear that the installation process is beginning but not done
sstore.backup_state('installation', 'complete', False)
if tasks.configure_pkcs11_modules(fstore):
print("Disabled p11-kit-proxy")
_hostname, _sep, host_domain = config.host_name.partition('.')
fstore.backup_file(paths.KRB5_CONF)
    # Write a new krb5.conf in case any values changed while finding the
    # right server to configure against (for CA, KRA).
logger.debug("Installing against server %s", config.master_host_name)
configure_krb5_conf(
cli_realm=api.env.realm,
cli_domain=api.env.domain,
cli_server=[config.master_host_name],
cli_kdc=[config.master_host_name],
dnsok=False,
filename=paths.KRB5_CONF,
client_domain=host_domain,
client_hostname=config.host_name,
configure_sssd=False
)
if installer._add_to_ipaservers:
try:
conn.connect(ccache=installer._ccache)
remote_api.Command['hostgroup_add_member'](
u'ipaservers',
host=[unicode(api.env.host)],
)
finally:
if conn.isconnected():
conn.disconnect()
os.environ['KRB5CCNAME'] = ccache
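    # Generate a fresh random Directory Manager password for the local
    # DS instance.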
config.dirman_password = ipautil.ipa_generate_password()
# FIXME: allow to use passed in certs instead
if ca_enabled:
configure_certmonger()
try:
conn.connect(ccache=ccache)
# Configure dirsrv
ds = install_replica_ds(config, options, ca_enabled,
remote_api,
ca_file=paths.IPA_CA_CRT,
pkcs12_info=dirsrv_pkcs12_info,
fstore=fstore)
# Always try to install DNS records
install_dns_records(config, options, remote_api, fstore=fstore)
finally:
if conn.isconnected():
conn.disconnect()
    # Create the management framework config file. Do this regardless of
    # the state of the DS installation. Even if it fails, we need a
    # master-like configuration in order to perform a successful
    # uninstallation.
    # The configuration has to be created here, otherwise the previous
    # call to configure certmonger would try to connect to the local
    # server.
create_ipa_conf(fstore, config, ca_enabled)
krb = install_krb(
config,
setup_pkinit=not options.no_pkinit,
pkcs12_info=pkinit_pkcs12_info,
fstore=fstore)
# We need to point to the master when certmonger asks for
# a DS or HTTP certificate.
# During http installation, the <service>/hostname principal is
# created locally then the installer waits for the entry to appear
# on the master selected for the installation.
    # In a later step, the installer requests an SSL certificate through
    # Certmonger (and the operation adds the principal if it does not
    # exist yet).
# If xmlrpc_uri points to the soon-to-be replica,
# the httpd service is not ready yet to handle certmonger requests
# and certmonger tries to find another master. The master can be
# different from the one selected for the installation, and it is
# possible that the principal has not been replicated yet. This
# may lead to a replication conflict.
# This is why we need to force the use of the same master by
# setting xmlrpc_uri
create_ipa_conf(fstore, config, ca_enabled,
master=config.master_host_name)
# we now need to enable ssl on the ds
ds.enable_ssl()
install_http(
config,
auto_redirect=not options.no_ui_redirect,
pkcs12_info=http_pkcs12_info,
ca_is_configured=ca_enabled,
ca_file=paths.IPA_CA_CRT,
fstore=fstore)
# Need to point back to ourself after the cert for HTTP is obtained
create_ipa_conf(fstore, config, ca_enabled)
otpd = otpdinstance.OtpdInstance()
otpd.create_instance('OTPD', config.host_name,
ipautil.realm_to_suffix(config.realm_name))
if options.setup_kra and kra_enabled:
# A KRA peer always provides a CA, too.
mode = custodiainstance.CustodiaModes.KRA_PEER
elif options.setup_ca and ca_enabled:
mode = custodiainstance.CustodiaModes.CA_PEER
else:
mode = custodiainstance.CustodiaModes.MASTER_PEER
custodia = custodiainstance.get_custodia_instance(config, mode)
custodia.create_instance()
if ca_enabled:
options.realm_name = config.realm_name
options.domain_name = config.domain_name
options.host_name = config.host_name
options.dm_password = config.dirman_password
# Always call ca.install() if there is a CA in the topology
# to ensure the RA agent is present.
ca.install(False, config, options, custodia=custodia)
# configure PKINIT now that all required services are in place
krb.enable_ssl()
# Apply any LDAP updates. Needs to be done after the replica is synced-up
service.print_msg("Applying LDAP updates")
ds.apply_updates()
service.print_msg("Finalize replication settings")
ds.finalize_replica_config()
if kra_enabled:
# The KRA installer checks for itself the status of setup_kra
kra.install(api, config, options, custodia=custodia)
service.print_msg("Restarting the KDC")
krb.restart()
custodia.import_dm_password()
promote_sssd(config.host_name)
promote_openldap_conf(config.host_name, config.master_host_name)
if options.setup_dns:
dns.install(False, True, options, api)
# Always call adtrust.install
# if --setup-adtrust is not specified, only the SID part is executed
adtrust.install(False, options, fstore, api)
if options.hidden_replica:
# Set services to hidden
service.hide_services(config.host_name)
else:
# Enable configured services
service.enable_services(config.host_name)
# update DNS SRV records. Although it's only really necessary in
# enabled-service case, also perform update in hidden replica case.
api.Command.dns_update_system_records()
if options.setup_adtrust:
dns_help = adtrust.generate_dns_service_records_help(api)
if dns_help:
for line in dns_help:
service.print_msg(line, sys.stdout)
ca_servers = find_providing_servers('CA', api.Backend.ldap2, api=api)
api.Backend.ldap2.disconnect()
# Everything installed properly, activate ipa service.
sstore.delete_state('installation', 'complete')
sstore.backup_state('installation', 'complete', True)
services.knownservices.ipa.enable()
# Print a warning if CA role is only installed on one server
if len(ca_servers) == 1:
msg = textwrap.dedent(u'''
WARNING: The CA service is only installed on one server ({}).
It is strongly recommended to install it on another server.
Run ipa-ca-install(1) on another master to accomplish this.
'''.format(ca_servers[0]))
print(msg, file=sys.stderr)
if options.setup_ca:
clean_up_hsm_nicknames(api)
def init(installer):
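    """Normalize options and initialize per-run installer state."""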
installer.unattended = not installer.interactive
if installer.servers:
installer.server = installer.servers[0]
else:
installer.server = None
installer.password = installer.host_password
installer._ccache = os.environ.get('KRB5CCNAME')
installer._top_dir = None
installer._config = None
installer._update_hosts_file = False
installer._dirsrv_pkcs12_file = None
installer._http_pkcs12_file = None
installer._pkinit_pkcs12_file = None
installer._dirsrv_pkcs12_info = None
installer._http_pkcs12_info = None
installer._pkinit_pkcs12_info = None
| 56,797
|
Python
|
.py
| 1,303
| 33.177283
| 79
| 0.617681
|
freeipa/freeipa
| 975
| 339
| 31
|
GPL-3.0
|
9/5/2024, 5:12:14 PM (Europe/Amsterdam)
|
16,791
|
install.py
|
freeipa_freeipa/ipaserver/install/server/install.py
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function, absolute_import
import errno
import logging
import os
import pickle
import re
import shutil
import sys
import time
import tempfile
import textwrap
import six
from ipaclient.install import timeconf
from ipaclient.install.client import (
check_ldap_conf, sync_time, restore_time_sync)
from ipapython.ipachangeconf import IPAChangeConf
from ipalib.install import certmonger, sysrestore
from ipapython import ipautil, version
from ipapython.ipautil import (
ipa_generate_password, run, user_input)
from ipapython import ipaldap
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipalib import api, errors, x509
from ipalib.constants import DOMAIN_LEVEL_0, FQDN
from ipalib.facts import is_ipa_configured, is_ipa_client_configured
from ipalib.util import (
validate_domain_name,
no_matching_interface_for_ip_address_warning,
)
from ipalib.facts import IPA_MODULES
from ipaserver.install import (
adtrust, adtrustinstance, bindinstance, ca, dns, dsinstance,
httpinstance, installutils, kra, krbinstance,
otpdinstance, custodiainstance, replication, service,
sysupgrade, cainstance)
from ipaserver.install.installutils import (
BadHostError, get_server_ip_address,
load_pkcs12, read_password, verify_fqdn, update_hosts_file,
validate_mask)
if six.PY3:
unicode = str
NoneType = type(None)
logger = logging.getLogger(__name__)
SYSRESTORE_DIR_PATH = paths.SYSRESTORE
def validate_dm_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
# TODO: Check https://fedorahosted.org/389/ticket/47849
# Actual behavior of setup-ds.pl is that it does not accept white
# space characters in password when called interactively but does when
# provided such password in INF file. But it ignores leading and trailing
# whitespaces in INF file.
    # Disallow leading/trailing whitespace
if password.strip() != password:
raise ValueError('Password must not start or end with whitespace.')
def validate_admin_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
def get_min_idstart(default_idstart=60000):
"""Get mininum idstart value from /etc/login.defs
"""
config = {}
# match decimal numbers
decimal_re = re.compile(r"^([A-Z][A-Z_]+)\s*([1-9]\d*)")
try:
with open('/etc/login.defs', 'r') as f:
for line in f:
mo = decimal_re.match(line)
if mo is not None:
config[mo.group(1)] = int(mo.group(2))
except OSError:
return default_idstart
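    # Use the larger of UID_MAX and GID_MAX so the returned idstart lies
    # above the locally assigned UID and GID ranges.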
idstart = max(config.get("UID_MAX", 0), config.get("GID_MAX", 0))
if idstart == 0:
idstart = default_idstart
return idstart
def read_cache(dm_password):
"""
Returns a dict of cached answers or empty dict if no cache file exists.
"""
if not os.path.isfile(paths.ROOT_IPA_CACHE):
return {}
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
installutils.decrypt_file(paths.ROOT_IPA_CACHE,
fname,
dm_password,
top_dir)
except Exception:
shutil.rmtree(top_dir)
raise Exception("Decryption of answer cache in %s failed, please "
"check your password." % paths.ROOT_IPA_CACHE)
try:
with open(fname, 'rb') as f:
try:
optdict = pickle.load(f)
except Exception as e:
raise Exception("Parse error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
except IOError as e:
raise Exception("Read error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
finally:
shutil.rmtree(top_dir)
# These are the only ones that may be overridden
try:
del optdict['external_cert_files']
except KeyError:
pass
return optdict
def write_cache(options):
"""
Takes a dict as input and writes a cached file of answers
"""
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
with open(fname, 'wb') as f:
pickle.dump(options, f)
installutils.encrypt_file(fname,
paths.ROOT_IPA_CACHE,
options['dm_password'],
top_dir)
except IOError as e:
raise Exception("Unable to cache command-line options %s" % str(e))
finally:
shutil.rmtree(top_dir)
def read_host_name(host_default):
"""
Prompt user to input FQDN. Does not verify it.
"""
print("Enter the fully qualified domain name of the computer")
print("on which you're setting up server software. Using the form")
print("<hostname>.<domainname>")
print("Example: master.example.com")
print("")
print("")
if host_default == "":
host_default = "master.example.com"
host_name = user_input("Server host name", host_default, allow_empty=False)
print("")
if host_name.endswith('.'):
host_name = host_name[:-1]
return host_name
def read_domain_name(domain_name, unattended):
print("The domain name has been determined based on the host name.")
print("")
if not unattended:
domain_name = str(user_input("Please confirm the domain name",
domain_name))
print("")
return domain_name
def read_realm_name(domain_name, unattended):
print("The kerberos protocol requires a Realm name to be defined.")
print("This is typically the domain name converted to uppercase.")
print("")
if unattended:
return domain_name.upper()
realm_name = str(user_input("Please provide a realm name",
domain_name.upper()))
upper_dom = realm_name.upper()
if upper_dom != realm_name:
print("An upper-case realm name is required.")
if not user_input("Do you want to use " + upper_dom +
" as realm name?", True):
raise ScriptError(
"An upper-case realm name is required. Unable to continue.")
else:
realm_name = upper_dom
print("")
return realm_name
def read_dm_password():
print("Certain directory server operations require an administrative user.")
print("This user is referred to as the Directory Manager and has full "
"access")
print("to the Directory for system management tasks and will be added to "
"the")
print("instance of directory server created for IPA.")
print("The password must be at least 8 characters long.")
print("")
# TODO: provide the option of generating a random password
dm_password = read_password("Directory Manager",
validator=validate_dm_password)
return dm_password
def read_admin_password():
print("The IPA server requires an administrative user, named 'admin'.")
print("This user is a regular system account used for IPA server "
"administration.")
print("")
# TODO: provide the option of generating a random password
admin_password = read_password("IPA admin",
validator=validate_admin_password)
return admin_password
def check_dirsrv(unattended):
(ds_unsecure, ds_secure) = dsinstance.check_ports()
if not ds_unsecure or not ds_secure:
msg = ("IPA requires ports 389 and 636 for the Directory Server.\n"
"These are currently in use:\n")
if not ds_unsecure:
msg += "\t389\n"
if not ds_secure:
msg += "\t636\n"
raise ScriptError(msg)
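# Decorator: on KeyboardInterrupt, stop and remove the partially created
# DS instance; on any other failure (while cleanup is still enabled),
# restore /etc/hosts from the installer's file store.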
def common_cleanup(func):
def decorated(installer):
success = False
try:
func(installer)
success = True
except KeyboardInterrupt:
ds = installer._ds
print("\nCleaning up...")
if ds:
print("Removing configuration for %s instance" % ds.serverid)
ds.stop()
if ds.serverid:
try:
dsinstance.remove_ds_instance(ds.serverid)
except ipautil.CalledProcessError:
logger.error("Failed to remove DS instance. You "
"may need to remove instance data "
"manually")
raise ScriptError()
finally:
if not success and installer._installation_cleanup:
# Do a cautious clean up as we don't know what failed and
# what is the state of the environment
try:
installer._fstore.restore_file(paths.HOSTS)
except Exception:
pass
return decorated
def remove_master_from_managed_topology(api_instance, options):
try:
# we may force the removal
server_del_options = dict(
force=True,
ignore_topology_disconnect=options.ignore_topology_disconnect,
ignore_last_of_role=options.ignore_last_of_role
)
replication.run_server_del_as_cli(
api_instance, api_instance.env.host, **server_del_options)
except errors.ServerRemovalError as e:
raise ScriptError(str(e))
except Exception as e:
# if the master was already deleted we will just get a warning
logger.warning("Failed to delete master: %s", e)
def cleanup_dogtag_server_specific_data():
"""
    There is data in the Dogtag database related to specific servers.
    Some of this data should be left alone, e.g. range assignments.
    Some of this data should be cleaned up; that's what this
    subroutine does.
"""
# remove ACME user
acme_uid = cainstance.CAInstance.acme_uid(api.env.host)
cainstance.CAInstance.delete_user(acme_uid)
@common_cleanup
def install_check(installer):
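    """Validate options and the host environment for a first-master
    install, prompt for any missing passwords, and write the initial
    /etc/ipa/default.conf.
    """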
options = installer
dirsrv_pkcs12_file = installer._dirsrv_pkcs12_file
http_pkcs12_file = installer._http_pkcs12_file
pkinit_pkcs12_file = installer._pkinit_pkcs12_file
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
external_cert_file = installer._external_cert_file
external_ca_file = installer._external_ca_file
http_ca_cert = installer._ca_cert
dirsrv_ca_cert = None
pkinit_ca_cert = None
if not options.skip_mem_check:
installutils.check_available_memory(ca=options.setup_ca)
tasks.check_ipv6_stack_enabled()
tasks.check_selinux_status()
check_ldap_conf()
mask_str = validate_mask()
if mask_str:
print("Unexpected system mask: %s, expected 0022" % mask_str)
if installer.interactive:
if not user_input("Do you want to continue anyway?", True):
raise ScriptError(
"Unexpected system mask: %s" % mask_str)
else:
raise ScriptError("Unexpected system mask: %s" % mask_str)
if options.master_password:
msg = ("WARNING:\noption '-P/--master-password' is deprecated. "
"KDC master password of sufficient strength is autogenerated "
"during IPA server installation and should not be set "
"manually.")
print(textwrap.fill(msg, width=79, replace_whitespace=False))
installer._installation_cleanup = True
print("\nThe log file for this installation can be found in "
"/var/log/ipaserver-install.log")
if (not options.external_ca and not options.external_cert_files and
is_ipa_configured()):
installer._installation_cleanup = False
raise ScriptError(
"IPA server is already configured on this system.\n"
"If you want to reinstall the IPA server, please uninstall "
"it first using 'ipa-server-install --uninstall'.")
if is_ipa_client_configured(on_master=True):
installer._installation_cleanup = False
raise ScriptError(
"IPA client is already configured on this system.\n"
"Please uninstall it before configuring the IPA server, "
"using 'ipa-client-install --uninstall'")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# This will override any settings passed in on the cmdline
if os.path.isfile(paths.ROOT_IPA_CACHE):
if options.dm_password is not None:
dm_password = options.dm_password
else:
dm_password = read_password("Directory Manager", confirm=False)
if dm_password is None:
raise ScriptError("Directory Manager password required")
try:
cache_vars = read_cache(dm_password)
options.__dict__.update(cache_vars)
if cache_vars.get('external_ca', False):
options.external_ca = False
options.interactive = False
except Exception as e:
raise ScriptError("Cannot process the cache file: %s" % str(e))
# We only set up the CA if the PKCS#12 options are not given.
if options.dirsrv_cert_files:
setup_ca = False
else:
setup_ca = True
options.setup_ca = setup_ca
if not setup_ca and options.ca_subject:
raise ScriptError(
"--ca-subject cannot be used with CA-less installation")
if not setup_ca and options.subject_base:
raise ScriptError(
"--subject-base cannot be used with CA-less installation")
if not setup_ca and options.setup_kra:
raise ScriptError(
"--setup-kra cannot be used with CA-less installation")
if setup_ca:
if any(
(
options.token_name is not None,
options.token_library_path is not None,
options.token_password is not None,
options.token_password_file is not None,
)
):
if any(
(
options.token_name is None,
options.token_library_path is None)
):
raise ScriptError(
"Both token name and library path are required."
)
else:
if any(
(
options.token_name is not None,
options.token_library_path is not None,
options.token_password is not None,
options.token_password_file is not None,
)
):
raise ScriptError(
"HSM token options are not valid with CA-less installs."
)
print("======================================="
"=======================================")
print("This program will set up the IPA Server.")
print("Version {}".format(version.VERSION))
print("")
print("This includes:")
if setup_ca:
print(" * Configure a stand-alone CA (dogtag) for certificate "
"management")
if not options.no_ntp:
print(" * Configure the NTP client (chronyd)")
print(" * Create and configure an instance of Directory Server")
print(" * Create and configure a Kerberos Key Distribution Center (KDC)")
print(" * Configure Apache (httpd)")
if options.setup_kra:
print(" * Configure KRA (dogtag) for secret management")
if options.setup_dns:
print(" * Configure DNS (bind)")
print(" * Configure SID generation")
if options.setup_adtrust:
print(" * Configure Samba (smb) and winbind for managing AD trusts")
if not options.no_pkinit:
print(" * Configure the KDC to enable PKINIT")
if options.no_ntp:
print("")
print("Excluded by options:")
print(" * Configure the NTP client (chronyd)")
if installer.interactive:
print("")
print("To accept the default shown in brackets, press the Enter key.")
print("")
if not options.external_cert_files:
# Make sure the 389-ds ports are available
check_dirsrv(not installer.interactive)
if not options.no_ntp:
try:
timeconf.check_timedate_services()
except timeconf.NTPConflictingService as e:
print(
"WARNING: conflicting time&date synchronization service "
"'{}' will be disabled in favor of chronyd\n".format(
e.conflicting_service
)
)
except timeconf.NTPConfigurationError:
pass
if not options.setup_dns and installer.interactive:
if ipautil.user_input("Do you want to configure integrated DNS "
"(BIND)?", False):
dns.package_check(ScriptError)
options.setup_dns = True
print("")
# check bind packages are installed
if options.setup_dns:
# Don't require an external DNS to say who we are if we are
# setting up a local DNS server.
options.no_host_dns = True
    # Check that the hostname is correctly configured: the kldap
    # utilities use the hostname exactly as returned by getaddrinfo to
    # set up some of the standard entries.
if options.host_name:
host_default = options.host_name
else:
host_default = FQDN
if installer.interactive and not options.host_name:
host_name = read_host_name(host_default)
else:
host_name = host_default
try:
verify_fqdn(host_name, options.no_host_dns)
except BadHostError as e:
raise ScriptError(e)
host_name = host_name.lower()
logger.debug("will use host_name: %s\n", host_name)
if not options.domain_name:
domain_name = read_domain_name(host_name[host_name.find(".")+1:],
not installer.interactive)
logger.debug("read domain_name: %s\n", domain_name)
try:
validate_domain_name(domain_name)
except ValueError as e:
raise ScriptError("Invalid domain name: %s" % unicode(e))
else:
domain_name = options.domain_name
domain_name = domain_name.lower()
if host_name.lower() == domain_name:
raise ScriptError("hostname cannot be the same as the domain name")
if not options.realm_name:
realm_name = read_realm_name(domain_name, not installer.interactive)
logger.debug("read realm_name: %s\n", realm_name)
try:
validate_domain_name(realm_name, entity="realm")
except ValueError as e:
raise ScriptError("Invalid realm name: {}".format(unicode(e)))
else:
realm_name = options.realm_name.upper()
if not options.subject_base:
options.subject_base = installutils.default_subject_base(realm_name)
if not options.ca_subject:
options.ca_subject = \
installutils.default_ca_subject_dn(options.subject_base)
if options.http_cert_files:
if options.http_pin is None:
options.http_pin = installutils.read_password(
"Enter Apache Server private key unlock",
confirm=False, validate=False, retry=False)
if options.http_pin is None:
raise ScriptError(
"Apache Server private key unlock password required")
http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
cert_files=options.http_cert_files,
key_password=options.http_pin,
key_nickname=options.http_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
http_pkcs12_info = (http_pkcs12_file.name, http_pin)
if options.dirsrv_cert_files:
if options.dirsrv_pin is None:
options.dirsrv_pin = read_password(
"Enter Directory Server private key unlock",
confirm=False, validate=False, retry=False)
if options.dirsrv_pin is None:
raise ScriptError(
"Directory Server private key unlock password required")
dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
cert_files=options.dirsrv_cert_files,
key_password=options.dirsrv_pin,
key_nickname=options.dirsrv_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, dirsrv_pin)
if options.pkinit_cert_files:
if options.pkinit_pin is None:
options.pkinit_pin = read_password(
"Enter Kerberos KDC private key unlock",
confirm=False, validate=False, retry=False)
if options.pkinit_pin is None:
raise ScriptError(
"Kerberos KDC private key unlock password required")
pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
cert_files=options.pkinit_cert_files,
key_password=options.pkinit_pin,
key_nickname=options.pkinit_cert_name,
ca_cert_files=options.ca_cert_files,
realm_name=realm_name)
pkinit_pkcs12_info = (pkinit_pkcs12_file.name, pkinit_pin)
if (options.http_cert_files and options.dirsrv_cert_files and
http_ca_cert != dirsrv_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and Directory Server SSL "
"certificate are not signed by the same CA certificate")
if (options.http_cert_files and
options.pkinit_cert_files and
http_ca_cert != pkinit_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and PKINIT KDC "
"certificate are not signed by the same CA certificate")
if not options.dm_password:
dm_password = read_dm_password()
if dm_password is None:
raise ScriptError("Directory Manager password required")
else:
dm_password = options.dm_password
if not options.master_password:
master_password = ipa_generate_password()
else:
master_password = options.master_password
if not options.admin_password:
admin_password = read_admin_password()
if admin_password is None:
raise ScriptError("IPA admin password required")
else:
admin_password = options.admin_password
if all(
(
options.token_password is None,
options.token_password_file is None,
options.token_name is not None
)
):
if options.unattended:
raise ScriptError("HSM token password required")
token_password = read_password(
f"HSM token '{options.token_name}'" , confirm=False)
if token_password is None:
raise ScriptError("HSM token password required")
else:
options.token_password = token_password
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
# make sure host name specified by user is used instead of default
host=host_name,
)
if setup_ca:
# we have an IPA-integrated CA
cfg['ca_host'] = host_name
# Create the management framework config file and finalize api
target_fname = paths.IPA_DEFAULT_CONF
ipaconf = IPAChangeConf("IPA Server Install")
ipaconf.setOptionAssignment(" = ")
ipaconf.setSectionNameDelimiters(("[", "]"))
xmlrpc_uri = 'https://{0}/ipa/xml'.format(
ipautil.format_netloc(host_name))
ldapi_uri = ipaldap.realm_to_ldapi_uri(realm_name)
# [global] section
gopts = [
ipaconf.setOption('host', host_name),
ipaconf.setOption('basedn', ipautil.realm_to_suffix(realm_name)),
ipaconf.setOption('realm', realm_name),
ipaconf.setOption('domain', domain_name),
ipaconf.setOption('xmlrpc_uri', xmlrpc_uri),
ipaconf.setOption('ldap_uri', ldapi_uri),
ipaconf.setOption('mode', 'production')
]
if setup_ca:
gopts.extend([
ipaconf.setOption('enable_ra', 'True'),
ipaconf.setOption('ra_plugin', 'dogtag'),
ipaconf.setOption('dogtag_version', '10')
])
else:
gopts.extend([
ipaconf.setOption('enable_ra', 'False'),
ipaconf.setOption('ra_plugin', 'None')
])
opts = [
ipaconf.setSection('global', gopts),
{'name': 'empty', 'type': 'empty'}
]
ipaconf.newConf(target_fname, opts)
# Must be readable for everyone
os.chmod(target_fname, 0o644)
api.bootstrap(**cfg)
api.finalize()
if setup_ca:
ca.install_check(False, None, options)
if options.setup_kra:
kra.install_check(api, None, options)
if options.setup_dns:
dns.install_check(False, api, False, options, host_name)
ip_addresses = dns.ip_addresses
else:
ip_addresses = get_server_ip_address(host_name,
not installer.interactive, False,
options.ip_addresses)
# check addresses here, dns module is doing own check
no_matching_interface_for_ip_address_warning(ip_addresses)
instance_name = "-".join(realm_name.split("."))
dirsrv = services.knownservices.dirsrv
if (options.external_cert_files
and dirsrv.is_installed(instance_name)
and not dirsrv.is_running(instance_name)):
logger.debug('Starting Directory Server')
services.knownservices.dirsrv.start(instance_name)
# Always call adtrust.install_check
# if --setup-adtrust is not specified, only the SID part is executed
adtrust.install_check(False, options, api)
# installer needs to update hosts file when DNS subsystem will be
# installed or custom addresses are used
if options.ip_addresses or options.setup_dns:
installer._update_hosts_file = True
if not options.no_ntp and not options.unattended and not (
options.ntp_servers or options.ntp_pool):
options.ntp_servers, options.ntp_pool = timeconf.get_time_source()
print()
print("The IPA Master Server will be configured with:")
print("Hostname: %s" % host_name)
print("IP address(es): %s" % ", ".join(str(ip) for ip in ip_addresses))
print("Domain name: %s" % domain_name)
print("Realm name: %s" % realm_name)
print()
if setup_ca:
ca.print_ca_configuration(options)
print()
if options.setup_dns:
print("BIND DNS server will be configured to serve IPA domain with:")
print("Forwarders: %s" % (
"No forwarders" if not options.forwarders
else ", ".join([str(ip) for ip in options.forwarders])
))
print('Forward policy: %s' % options.forward_policy)
print("Reverse zone(s): %s" % (
"No reverse zone" if options.no_reverse or not dns.reverse_zones
else ", ".join(str(rz) for rz in dns.reverse_zones)
))
print()
if not options.setup_adtrust:
        # If the domain name and realm do not match, the IPA server will
        # not be able to establish trust with Active Directory. Print a
        # big fat warning.
realm_not_matching_domain = (domain_name.upper() != realm_name)
if realm_not_matching_domain:
print("WARNING: Realm name does not match the domain name.\n"
"You will not be able to establish trusts with Active "
"Directory unless\nthe realm name of the IPA server matches "
"its domain name.\n\n")
if options.ntp_servers or options.ntp_pool:
if options.ntp_servers:
for server in options.ntp_servers:
print("NTP server:\t{}".format(server))
if options.ntp_pool:
print("NTP pool:\t{}".format(options.ntp_pool))
if installer.interactive and not user_input(
"Continue to configure the system with these values?", False):
raise ScriptError("Installation aborted")
options.realm_name = realm_name
options.domain_name = domain_name
options.dm_password = dm_password
options.master_password = master_password
options.admin_password = admin_password
options._host_name_overridden = bool(options.host_name)
options.host_name = host_name
options.ip_addresses = ip_addresses
installer._fstore = fstore
installer._sstore = sstore
installer._dirsrv_pkcs12_file = dirsrv_pkcs12_file
installer._http_pkcs12_file = http_pkcs12_file
installer._pkinit_pkcs12_file = pkinit_pkcs12_file
installer._dirsrv_pkcs12_info = dirsrv_pkcs12_info
installer._http_pkcs12_info = http_pkcs12_info
installer._pkinit_pkcs12_info = pkinit_pkcs12_info
installer._external_cert_file = external_cert_file
installer._external_ca_file = external_ca_file
installer._ca_cert = http_ca_cert
@common_cleanup
def install(installer):
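    """Install and configure the first IPA master using the options
    validated by install_check().
    """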
options = installer
fstore = installer._fstore
sstore = installer._sstore
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
http_ca_cert = installer._ca_cert
realm_name = options.realm_name
domain_name = options.domain_name
dm_password = options.dm_password
master_password = options.master_password
admin_password = options.admin_password
host_name = options.host_name
ip_addresses = options.ip_addresses
setup_ca = options.setup_ca
    # Installation has started. No IPA sysrestore items are restored on
    # failure, so that the root cause can be investigated.
installer._installation_cleanup = False
# Be clear that the installation process is beginning but not done
sstore.backup_state('installation', 'complete', False)
if installer.interactive:
print("")
print("The following operations may take some minutes to complete.")
print("Please wait until the prompt is returned.")
print("")
# set hostname (transient and static) if user instructed us to do so
if options._host_name_overridden:
tasks.backup_hostname(fstore, sstore)
tasks.set_hostname(host_name)
if installer._update_hosts_file:
update_hosts_file(ip_addresses, host_name, fstore)
if tasks.configure_pkcs11_modules(fstore):
print("Disabled p11-kit-proxy")
# Create a directory server instance
if not options.external_cert_files:
        # We have to sync time before certificate handling on the master.
        # Since the chrony configuration moved from the client installer
        # to here, chrony is also unconfigured here, in the uninstall()
        # method invoked by ipa-server-install --uninstall.
if not options.no_ntp and not sync_time(
options.ntp_servers, options.ntp_pool, fstore, sstore):
print("Warning: IPA was unable to sync time with chrony!")
print(" Time synchronization is required for IPA "
"to work correctly")
if options.dirsrv_cert_files:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password, dirsrv_pkcs12_info,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
else:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
else:
api.Backend.ldap2.connect()
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel)
installer._ds = ds
ds.init_info(
realm_name, host_name, domain_name, dm_password,
options.subject_base, options.ca_subject, 1101, 1100, None,
setup_pkinit=not options.no_pkinit)
krb = krbinstance.KrbInstance(fstore)
if not options.external_cert_files:
krb.create_instance(realm_name, host_name, domain_name,
dm_password, master_password,
setup_pkinit=not options.no_pkinit,
pkcs12_info=pkinit_pkcs12_info,
subject_base=options.subject_base)
else:
krb.init_info(realm_name, host_name,
setup_pkinit=not options.no_pkinit,
subject_base=options.subject_base)
custodia = custodiainstance.get_custodia_instance(
options, custodiainstance.CustodiaModes.FIRST_MASTER)
custodia.create_instance()
if setup_ca:
if not options.external_cert_files and options.external_ca:
# stage 1 of external CA installation
options.realm_name = realm_name
options.domain_name = domain_name
options.master_password = master_password
options.dm_password = dm_password
options.admin_password = admin_password
options.host_name = host_name
options.reverse_zones = dns.reverse_zones
cache_vars = {n: options.__dict__[n] for o, n in installer.knobs()
if n in options.__dict__}
write_cache(cache_vars)
ca.install_step_0(False, None, options, custodia=custodia)
else:
# /etc/ipa/ca.crt is created as a side-effect of
# dsinstance::enable_ssl() via export_ca_cert()
if not options.no_pkinit:
x509.write_certificate(http_ca_cert, paths.KDC_CA_BUNDLE_PEM)
else:
with open(paths.KDC_CA_BUNDLE_PEM, 'w'):
pass
os.chmod(paths.KDC_CA_BUNDLE_PEM, 0o444)
x509.write_certificate(http_ca_cert, paths.CA_BUNDLE_PEM)
os.chmod(paths.CA_BUNDLE_PEM, 0o444)
# we now need to enable ssl on the ds
ds.enable_ssl()
if setup_ca:
ca.install_step_1(False, None, options, custodia=custodia)
otpd = otpdinstance.OtpdInstance()
otpd.create_instance('OTPD', host_name,
ipautil.realm_to_suffix(realm_name))
# Create a HTTP instance
http = httpinstance.HTTPInstance(fstore)
if options.http_cert_files:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
pkcs12_info=http_pkcs12_info, subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
else:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
ca.set_subject_base_in_config(options.subject_base)
# configure PKINIT now that all required services are in place
krb.enable_ssl()
# Apply any LDAP updates. Needs to be done after the configuration file
# is created. DS is restarted in the process.
service.print_msg("Applying LDAP updates")
ds.apply_updates()
# Restart krb after configurations have been changed
service.print_msg("Restarting the KDC")
krb.restart()
if options.setup_kra:
kra.install(api, None, options, custodia=custodia)
if options.setup_dns:
dns.install(False, False, options)
# Always call adtrust installer to configure SID generation
# if --setup-adtrust is not specified, only the SID part is executed
adtrust.install(False, options, fstore, api)
# Set the admin user kerberos password
ds.change_admin_password(admin_password)
# Call client install script
service.print_msg("Configuring client side components")
try:
args = [paths.IPA_CLIENT_INSTALL, "--on-master", "--unattended",
"--domain", domain_name, "--server", host_name,
"--realm", realm_name, "--hostname", host_name, "--no-ntp"]
if options.no_dns_sshfp:
args.append("--no-dns-sshfp")
if options.ssh_trust_dns:
args.append("--ssh-trust-dns")
if options.no_ssh:
args.append("--no-ssh")
if options.no_sshd:
args.append("--no-sshd")
if options.mkhomedir:
args.append("--mkhomedir")
if options.subid:
args.append("--subid")
start = time.time()
run(args, redirect_output=True)
dur = time.time() - start
logger.debug("Client install duration: %0.3f", dur,
extra={'timing': ('clientinstall', None, None, dur)})
print()
except Exception:
raise ScriptError("Configuration of client side components failed!")
# Enable configured services and update DNS SRV records
service.enable_services(host_name)
api.Command.dns_update_system_records()
if options.setup_adtrust:
dns_help = adtrust.generate_dns_service_records_help(api)
if dns_help:
for line in dns_help:
service.print_msg(line, sys.stdout)
if not options.setup_dns:
# After DNS and AD trust are configured and services are
# enabled, create a dummy instance to dump DNS configuration.
bind = bindinstance.BindInstance(fstore)
bind.create_file_with_system_records()
# Everything installed properly, activate ipa service.
sstore.delete_state('installation', 'complete')
sstore.backup_state('installation', 'complete', True)
services.knownservices.ipa.enable()
print("======================================="
"=======================================")
print("Setup complete")
print("")
print("Next steps:")
print("\t1. You must make sure these network ports are open:")
print("\t\tTCP Ports:")
print("\t\t * 80, 443: HTTP/HTTPS")
print("\t\t * 389, 636: LDAP/LDAPS")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
print("\t\tUDP Ports:")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
if not options.no_ntp:
print("\t\t * 123: ntp")
print("")
print("\t2. You can now obtain a kerberos ticket using the command: "
"'kinit admin'")
print("\t This ticket will allow you to use the IPA tools (e.g., ipa "
"user-add)")
print("\t and the web user interface.")
if not services.knownservices.chronyd.is_running():
print("\t3. Kerberos requires time synchronization between clients")
print("\t and servers for correct operation. You should consider "
"enabling chronyd.")
print("")
if setup_ca and not options.token_name:
print(("Be sure to back up the CA certificates stored in " +
paths.CACERT_P12))
print("These files are required to create replicas. The password for "
"these")
print("files is the Directory Manager password")
if os.path.isfile(paths.ROOT_IPA_CACHE):
os.remove(paths.ROOT_IPA_CACHE)
@common_cleanup
def uninstall_check(installer):
options = installer
tasks.check_selinux_status()
installer._installation_cleanup = False
if not is_ipa_configured():
print("WARNING:\nIPA server is not configured on this system. "
"If you want to install the\nIPA server, please install "
"it using 'ipa-server-install'.")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
)
# We will need at least api.env, finalize api now. This system is
# already installed, so the configuration file is there.
api.bootstrap(**cfg)
api.finalize()
if installer.interactive:
print("\nThis is a NON REVERSIBLE operation and will delete all data "
"and configuration!\nIt is highly recommended to take a backup of "
"existing data and configuration using ipa-backup utility "
"before proceeding.\n")
if not user_input("Are you sure you want to continue with the "
"uninstall procedure?", False):
raise ScriptError("Aborting uninstall operation.")
kra.uninstall_check(options)
ca.uninstall_check(options)
try:
api.Backend.ldap2.connect(autobind=True)
domain_level = dsinstance.get_domain_level(api)
except Exception:
msg = ("\nWARNING: Failed to connect to Directory Server to find "
"information about replication agreements. Uninstallation "
"will continue despite the possible existing replication "
"agreements.\n\n"
"If this server is the last instance of CA, KRA, or DNSSEC "
"master, uninstallation may result in data loss.\n\n"
)
print(textwrap.fill(msg, width=80, replace_whitespace=False))
if (installer.interactive and not user_input(
"Are you sure you want to continue with the uninstall "
"procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
dns.uninstall_check(options)
ca.uninstall_crl_check(options)
cleanup_dogtag_server_specific_data()
if domain_level == DOMAIN_LEVEL_0:
rm = replication.ReplicationManager(
realm=api.env.realm,
hostname=api.env.host,
dirman_passwd=None,
conn=api.Backend.ldap2
)
agreements = rm.find_ipa_replication_agreements()
if agreements:
other_masters = [a.get('cn')[0][4:] for a in agreements]
msg = (
"\nReplication agreements with the following IPA masters "
"found: %s. Removing any replication agreements before "
"uninstalling the server is strongly recommended. You can "
"remove replication agreements by running the following "
"command on any other IPA master:\n" % ", ".join(
other_masters)
)
cmd = "$ ipa-replica-manage del %s\n" % api.env.host
print(textwrap.fill(msg, width=80, replace_whitespace=False))
print(cmd)
if (installer.interactive and
not user_input("Are you sure you want to continue with"
" the uninstall procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
remove_master_from_managed_topology(api, options)
api.Backend.ldap2.disconnect()
installer._fstore = fstore
installer._sstore = sstore
@common_cleanup
def uninstall(installer):
fstore = installer._fstore
sstore = installer._sstore
rv = 0
# Uninstall the KRA prior to shutting the services down so it
# can un-register with the CA.
kra.uninstall()
# Uninstall the CA prior to shutting the services down so it
# can unregister from the security domain
ca.uninstall()
print("Shutting down all IPA services")
try:
services.knownservices.ipa.stop()
except Exception:
# Fallback to direct ipactl stop only if system command fails
try:
run([paths.IPACTL, "stop"], raiseonerr=False)
except Exception:
pass
restore_time_sync(sstore, fstore)
dns.uninstall()
httpinstance.HTTPInstance(fstore).uninstall()
krbinstance.KrbInstance(fstore).uninstall()
dsinstance.DsInstance(fstore=fstore).uninstall()
adtrustinstance.ADTRUSTInstance(fstore).uninstall()
# realm isn't used, but IPAKEMKeys parses /etc/ipa/default.conf
# otherwise, see https://pagure.io/freeipa/issue/7474 .
custodiainstance.CustodiaInstance(realm='REALM.INVALID').uninstall()
otpdinstance.OtpdInstance().uninstall()
tasks.restore_hostname(fstore, sstore)
tasks.restore_pkcs11_modules(fstore)
fstore.restore_all_files()
try:
os.remove(paths.ROOT_IPA_CACHE)
except Exception:
pass
try:
os.remove(paths.ROOT_IPA_CSR)
except Exception:
pass
# ipa-client-install removes /etc/ipa/default.conf
sstore._load()
timeconf.restore_forced_timeservices(sstore)
# Clean up group_exists (unused since IPA 2.2, not being set since 4.1)
sstore.restore_state("install", "group_exists")
services.knownservices.ipa.disable()
# remove upgrade state file
sysupgrade.remove_upgrade_file()
if fstore.has_files():
logger.error('Some files have not been restored, see '
'%s/sysrestore.index', SYSRESTORE_DIR_PATH)
sstore.delete_state('installation', 'complete')
has_state = False
for module in IPA_MODULES: # from installutils
if sstore.has_state(module):
logger.error('Some installation state for %s has not been '
'restored, see %s/sysrestore.state',
module, SYSRESTORE_DIR_PATH)
has_state = True
rv = 1
if has_state:
logger.error('Some installation state has not been restored.\n'
'This may cause re-installation to fail.\n'
'It should be safe to remove %s/sysrestore.state '
'but it may\n'
'mean your system hasn\'t been restored to its '
'pre-installation state.', SYSRESTORE_DIR_PATH)
else:
# sysrestore.state has no state left, remove it
sysrestore = os.path.join(SYSRESTORE_DIR_PATH, 'sysrestore.state')
ipautil.remove_file(sysrestore)
# Note that this name will be wrong after the first uninstall.
dirname = dsinstance.config_dirname(
ipaldap.realm_to_serverid(api.env.realm))
dirs = [dirname, paths.PKI_TOMCAT_ALIAS_DIR, paths.HTTPD_ALIAS_DIR]
ids = certmonger.check_state(dirs)
if ids:
logger.error('Some certificates may still be tracked by '
'certmonger.\n'
'This will cause re-installation to fail.\n'
'Start the certmonger service and list the '
'certificates being tracked\n'
' # getcert list\n'
'These may be untracked by executing\n'
' # getcert stop-tracking -i <request_id>\n'
'for each id in: %s', ', '.join(ids))
# Remove the cert renewal lock file
try:
os.remove(paths.IPA_RENEWAL_LOCK)
except OSError as e:
if e.errno != errno.ENOENT:
logger.warning("Failed to remove file %s: %s",
paths.IPA_RENEWAL_LOCK, e)
ipautil.remove_file(paths.SVC_LIST_FILE)
ipautil.rmtree('/root/.cache/ipa')
print("Removing IPA client configuration")
try:
result = run([paths.IPA_CLIENT_INSTALL, "--on-master",
"--unattended", "--uninstall"],
raiseonerr=False, redirect_output=True)
if result.returncode not in [0, 2]:
raise RuntimeError("Failed to configure the client")
except Exception:
rv = 1
print("Uninstall of client side components failed!")
sys.exit(rv)
def init(installer):
installer.unattended = not installer.interactive
installer.domainlevel = installer.domain_level
installer._installation_cleanup = True
installer._ds = None
installer._dirsrv_pkcs12_file = None
installer._http_pkcs12_file = None
installer._pkinit_pkcs12_file = None
installer._dirsrv_pkcs12_info = None
installer._http_pkcs12_info = None
installer._pkinit_pkcs12_info = None
installer._external_cert_file = None
installer._external_ca_file = None
installer._ca_cert = None
installer._update_hosts_file = False
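# --- Editor's note (hedged sketch, not FreeIPA code): the @common_cleanup
# --- decorator applied to uninstall_check()/uninstall() above comes from the
# --- installer framework. A minimal illustration of the pattern it implements
# --- -- always run cleanup after a step, even when it raises -- might look
# --- like this; the name and the cleanup action are hypothetical:
import functools

def common_cleanup_sketch(func):
    @functools.wraps(func)
    def decorated(installer):
        try:
            return func(installer)
        finally:
            # only clean up when the step left the flag set (see init() above)
            if getattr(installer, '_installation_cleanup', False):
                print("cleaning up temporary installer state")
    return decorated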
| 50,942 | Python | .py | 1,163 | 34.05589 | 81 | 0.626177 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,792 | update_fix_duplicate_cacrt_in_ldap.py | freeipa_freeipa/ipaserver/install/plugins/update_fix_duplicate_cacrt_in_ldap.py |
# Authors:
# Florence Blanc-Renaud <flo@redhat.com>
#
# Copyright (C) 2017 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import Registry, errors
from ipalib import Updater
from ipalib.install import certstore
from ipapython.dn import DN
from ipapython.certdb import get_ca_nickname
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_fix_duplicate_cacrt_in_ldap(Updater):
"""
When multiple entries exist for the IPA CA cert in LDAP, remove the duplicates.
After this plugin has removed duplicate entries, DS needs to be
restarted. This ensures that the attribute uniqueness plugin is working
and prevents other plugins from adding duplicates.
"""
def execute(self, **options):
# If CA is disabled, no need to check for duplicates of IPA CA
ca_enabled = self.api.Command.ca_is_enabled()['result']
if not ca_enabled:
return False, []
# Look for the IPA CA cert subject
ldap = self.api.Backend.ldap2
cacert_subject = certstore.get_ca_subject(
ldap,
self.api.env.container_ca,
self.api.env.basedn)
cacert_nick = get_ca_nickname(self.api.env.realm)
# Find if there are other certificates with the same subject
# They are duplicates resulting of BZ 1480102
base_dn = DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
self.api.env.basedn)
filter = ldap.combine_filters(
[
# all certificates with CA cert subject
ldap.make_filter({'ipaCertSubject': cacert_subject}),
# except the default certificate
ldap.make_filter({'cn': cacert_nick}, rules=ldap.MATCH_NONE),
],
rules=ldap.MATCH_ALL
)
try:
result, _truncated = ldap.find_entries(
base_dn=base_dn,
filter=filter,
attrs_list=[])
except errors.NotFound:
# No duplicate, we're good
logger.debug("No duplicates for IPA CA in LDAP")
return False, []
logger.debug("Found %d entrie(s) for IPA CA in LDAP", len(result))
for entry in result:
# Remove the duplicate
try:
ldap.delete_entry(entry)
logger.debug("Removed the duplicate %s", entry.dn)
except Exception as e:
logger.warning("Failed to remove the duplicate %s: %s",
entry.dn, e)
return True, []
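# --- Editor's note (hedged illustration): assuming make_filter()/
# --- combine_filters() emit standard RFC 4515 syntax (MATCH_NONE -> NOT,
# --- MATCH_ALL -> AND), the search filter built above is equivalent to this
# --- hand-written string; subject and nickname values are hypothetical:
cacert_subject_example = 'CN=Certificate Authority,O=EXAMPLE.COM'
cacert_nick_example = 'EXAMPLE.COM IPA CA'
dup_filter = '(&(ipaCertSubject=%s)(!(cn=%s)))' % (
    cacert_subject_example, cacert_nick_example)
# i.e. every entry carrying the CA subject except the canonical nickname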
| 3,260 | Python | .py | 78 | 33.666667 | 77 | 0.642902 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,793 | update_referint.py | freeipa_freeipa/ipaserver/install/plugins/update_referint.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib import Registry, errors
from ipalib import Updater
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_referint(Updater):
"""
Update referential integrity configuration to new style
http://directory.fedoraproject.org/docs/389ds/design/ri-plugin-configuration.html
old attr -> new attr
nsslapd-pluginArg0 -> referint-update-delay
nsslapd-pluginArg1 -> referint-logfile
nsslapd-pluginArg2 -> referint-logchanges
nsslapd-pluginArg3..N -> referint-membership-attr [3..N]
Old and new style cannot be mixed, all nsslapd-pluginArg* attrs have to be removed
"""
referint_dn = DN(('cn', 'referential integrity postoperation'),
('cn', 'plugins'), ('cn', 'config'))
def execute(self, **options):
logger.debug("Upgrading referential integrity plugin configuration")
ldap = self.api.Backend.ldap2
try:
entry = ldap.get_entry(self.referint_dn)
except errors.NotFound:
logger.error("Referential integrity configuration not found")
return False, []
referint_membership_attrs = []
logger.debug("Initial value: %s", repr(entry))
# nsslapd-pluginArg0 -> referint-update-delay
update_delay = entry.get('nsslapd-pluginArg0')
if update_delay:
logger.debug("add: referint-update-delay: %s", update_delay)
entry['referint-update-delay'] = update_delay
entry['nsslapd-pluginArg0'] = None
else:
logger.debug("Plugin already uses new style, skipping")
return False, []
# nsslapd-pluginArg1 -> referint-logfile
logfile = entry.get('nsslapd-pluginArg1')
if logfile:
logger.debug("add: referint-logfile: %s", logfile)
entry['referint-logfile'] = logfile
entry['nsslapd-pluginArg1'] = None
# nsslapd-pluginArg2 -> referint-logchanges
logchanges = entry.get('nsslapd-pluginArg2')
if logchanges:
logger.debug("add: referint-logchanges: %s", logchanges)
entry['referint-logchanges'] = logchanges
entry['nsslapd-pluginArg2'] = None
# nsslapd-pluginArg3..N -> referint-membership-attr [3..N]
for key in list(entry):
if key.lower().startswith('nsslapd-pluginarg'):
arg_val = entry.single_value[key]
if arg_val:
referint_membership_attrs.append(arg_val)
entry[key] = None
if referint_membership_attrs:
# entry['referint-membership-attr'] is None, plugin doesn't allow
# mixing old and new style
entry['referint-membership-attr'] = referint_membership_attrs
logger.debug("Final value: %s", repr(entry))
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
logger.debug("No modifications required")
return False, []
return False, []
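# --- Editor's note (hedged sketch): the same rename as above performed on a
# --- plain dict instead of an LDAP entry; all values are hypothetical:
old = {
    'nsslapd-pluginArg0': '0',                         # update delay
    'nsslapd-pluginArg1': '/var/log/dirsrv/referint',  # logfile
    'nsslapd-pluginArg2': '0',                         # logchanges
    'nsslapd-pluginArg3': 'member',
    'nsslapd-pluginArg4': 'manager',
}
new = {
    'referint-update-delay': old.pop('nsslapd-pluginArg0'),
    'referint-logfile': old.pop('nsslapd-pluginArg1'),
    'referint-logchanges': old.pop('nsslapd-pluginArg2'),
    # every remaining nsslapd-pluginArgN becomes a membership attribute
    'referint-membership-attr': sorted(old.values()),
}
assert new['referint-membership-attr'] == ['manager', 'member']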
| 3,185 | Python | .py | 72 | 34.916667 | 85 | 0.630779 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,794 | update_ca_topology.py | freeipa_freeipa/ipaserver/install/plugins/update_ca_topology.py |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
from ipalib import errors
from ipalib import Registry
from ipalib import Updater
from ipapython.dn import DN
from ipaserver.install import cainstance
from ipaserver.install import ldapupdate
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_ca_topology(Updater):
"""
Updates CA topology configuration entries
"""
def execute(self, **options):
ca = cainstance.CAInstance(self.api.env.realm)
if not ca.is_configured():
logger.debug("CA is not configured on this host")
return False, []
ld = ldapupdate.LDAPUpdate(api=self.api)
ld.update([paths.CA_TOPOLOGY_ULDIF])
ldap = self.api.Backend.ldap2
ca_replica_dn = DN(
('cn', 'replica'),
('cn', 'o=ipaca'),
('cn', 'mapping tree'),
('cn', 'config'))
check_interval_attr = 'nsds5replicabinddngroupcheckinterval'
default_check_interval = ['60']
try:
ca_replica_entry = ldap.get_entry(ca_replica_dn)
except errors.NotFound:
pass
else:
if check_interval_attr not in ca_replica_entry:
ca_replica_entry[check_interval_attr] = default_check_interval
ldap.update_entry(ca_replica_entry)
return False, []
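# --- Editor's note (hedged sketch): the interval update above is an
# --- idempotent "set default only if absent" write; stand-alone equivalent:
entry = {}  # hypothetical replica entry
attr = 'nsds5replicabinddngroupcheckinterval'
if attr not in entry:
    entry[attr] = ['60']  # seconds between bind-DN group re-evaluations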
| 1,501 | Python | .py | 43 | 27.674419 | 78 | 0.650485 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,795 | ca_renewal_master.py | freeipa_freeipa/ipaserver/install/plugins/ca_renewal_master.py |
# Authors:
# Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
from ipaserver.install import cainstance
from ipalib import errors
from ipalib import Updater
from ipalib.install import certmonger
from ipalib.plugable import Registry
from ipaplatform.paths import paths
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_ca_renewal_master(Updater):
"""
Set CA renewal master in LDAP.
"""
def execute(self, **options):
ca = cainstance.CAInstance(self.api.env.realm)
if not ca.is_configured():
logger.debug("CA is not configured on this host")
return False, []
ldap = self.api.Backend.ldap2
base_dn = DN(self.api.env.container_masters, self.api.env.basedn)
dn = DN(('cn', 'CA'), ('cn', self.api.env.host), base_dn)
filter = '(&(cn=CA)(ipaConfigString=caRenewalMaster))'
try:
entries = ldap.get_entries(base_dn=base_dn, filter=filter,
attrs_list=[])
except errors.NotFound:
pass
else:
logger.debug("found CA renewal master %s", entries[0].dn[1].value)
master = False
updates = []
for entry in entries:
if entry.dn == dn:
master = True
continue
updates.append({
'dn': entry.dn,
'updates': [
dict(action='remove', attr='ipaConfigString',
value='caRenewalMaster')
],
})
if master:
return False, updates
else:
return False, []
criteria = {
'cert-file': paths.RA_AGENT_PEM,
}
request_id = certmonger.get_request_id(criteria)
if request_id is not None:
logger.debug("found certmonger request for RA cert")
ca_name = certmonger.get_request_value(request_id, 'ca-name')
if ca_name is None:
logger.warning(
"certmonger request for RA cert is missing ca_name, "
"assuming local CA is not a renewal master.")
return False, []
ca_name = ca_name.strip()
if ca_name == 'dogtag-ipa-renew-agent':
pass
elif ca_name == 'dogtag-ipa-retrieve-agent-submit':
return False, []
elif ca_name == 'dogtag-ipa-ca-renew-agent':
return False, []
else:
logger.warning(
"certmonger request for RA cert has unknown ca_name '%s', "
"assuming local CA is not a renewal master", ca_name)
return False, []
else:
logger.debug("certmonger request for RA cert not found")
if not ca.is_crlgen_enabled():
# CA is not a renewal master
return False, []
update = {
'dn': dn,
'updates': [
dict(action='add', attr='ipaConfigString',
value='caRenewalMaster')
],
}
return False, [update]
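# --- Editor's note (hedged restatement): the certmonger ca-name branching
# --- above reduces to a small decision table. The helper names are exactly
# --- the ones the code checks; the verdict strings are illustrative only:
RENEWAL_HELPER_VERDICT = {
    'dogtag-ipa-renew-agent': 'check-crlgen',        # legacy: decide via the
                                                     # CRL-generation check
    'dogtag-ipa-retrieve-agent-submit': 'not-master',
    'dogtag-ipa-ca-renew-agent': 'not-master',       # master tracked in LDAP
}

def renewal_verdict(ca_name):
    # unknown helpers are treated as "not a renewal master", matching the
    # logger.warning branch above
    return RENEWAL_HELPER_VERDICT.get(ca_name, 'not-master')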
| 4,070 | Python | .py | 104 | 28.413462 | 79 | 0.570922 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,796 | update_unhashed_password.py | freeipa_freeipa/ipaserver/install/plugins/update_unhashed_password.py |
# Authors:
# Thierry Bordaz <tbordaz@redhat.com>
#
# Copyright (C) 2019 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import Registry, errors
from ipalib import Updater
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_unhashed_password(Updater):
"""
Configure how DS handles unhashed passwords (nsslapd-unhashed-pw-switch):
log them ('on') only when winsync needs them, otherwise keep them out of
the changelog ('nolog').
"""
def __remove_update(self, update, key, value):
statement = dict(action='remove', attr=key, value=value)
update.setdefault('updates', []).append(statement)
def __add_update(self, update, key, value):
statement = dict(action='add', attr=key, value=value)
update.setdefault('updates', []).append(statement)
def execute(self, **options):
logger.debug("Upgrading unhashed password configuration")
ldap = self.api.Backend.ldap2
base_config = DN(('cn', 'config'))
try:
entry = ldap.get_entry(base_config,
['nsslapd-unhashed-pw-switch'])
except errors.NotFound:
logger.error("Unhashed password configuration not found")
return False, []
config_dn = entry.dn
# default to '' so a missing attribute doesn't crash the .lower() below
toggle = entry.single_value.get("nsslapd-unhashed-pw-switch", "")
if toggle.lower() not in ['off', 'on', 'nolog']:
logger.error("Unhashed password had invalid value '%s'", toggle)
# Check if it exists winsync agreements
searchfilter = '(objectclass=nsDSWindowsReplicationAgreement)'
try:
winsync_agmts, _truncated = ldap.find_entries(
base_dn=base_config,
filter=searchfilter,
attrs_list=[]
)
except errors.NotFound:
logger.debug("Unhashed password this is not a winsync deployment")
winsync_agmts = []
update = {
'dn': config_dn,
'updates': [],
}
if len(winsync_agmts) > 0:
# We are running in a winsync environment
# Log a warning that changelog will contain sensitive data
try:
# Check if the new per-backend changelog exists...
cldb = ldap.get_entry(
DN(('cn', 'changelog'),
('cn', 'userRoot'),
('cn', 'ldbm database'),
('cn', 'plugins'),
('cn', 'config')))
# We have a backend changelog so get the db dir in this case
db_entry = ldap.get_entry(
DN(('cn', 'userRoot'),
('cn', 'ldbm database'),
('cn', 'plugins'),
('cn', 'config')),
['nsslapd-directory'])
cldb = db_entry.single_value.get("nsslapd-directory")
logger.warning("This server is configured for winsync, "
"the changelog files under %s "
"may contain clear text passwords.\n"
"Please ensure that these files can be accessed"
" only by trusted accounts.\n", cldb)
except errors.NotFound:
# Did not find backend changelog, check the global changelog
try:
cldb_e = ldap.get_entry(
DN(('cn', 'changelog5'),
('cn', 'config')),
['nsslapd-changelogdir'])
cldb = cldb_e.single_value.get("nsslapd-changelogdir")
logger.warning("This server is configured for winsync, "
"the changelog files under %s "
"may contain clear text passwords.\n"
"Please ensure that these files can be "
"accessed only by trusted accounts.\n",
cldb)
except errors.NotFound:
logger.warning("This server is configured for winsync, "
"the changelog files may contain "
"clear text passwords.\n"
"Please ensure that these files can be "
"accessed only by trusted accounts.\n")
if toggle.lower() == 'on':
# The current DS configuration already logs the
# unhashed password
updates = []
else:
self.__remove_update(update, 'nsslapd-unhashed-pw-switch',
toggle)
self.__add_update(update, 'nsslapd-unhashed-pw-switch', 'on')
updates = [update]
else:
if toggle.lower() == 'nolog':
updates = []
else:
self.__remove_update(update, 'nsslapd-unhashed-pw-switch',
toggle)
self.__add_update(update, 'nsslapd-unhashed-pw-switch',
'nolog')
updates = [update]
return False, updates
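# --- Editor's note (hedged summary): nsslapd-unhashed-pw-switch accepts
# --- 'on', 'off' and 'nolog'; the plugin above drives it to 'on' when
# --- winsync agreements exist (PassSync needs the clear-text value) and to
# --- 'nolog' otherwise (keep passwords out of the changelogs):
def target_pw_switch(has_winsync_agreements):
    return 'on' if has_winsync_agreements else 'nolog'

assert target_pw_switch(True) == 'on'
assert target_pw_switch(False) == 'nolog'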
| 5,874 | Python | .py | 127 | 31.614173 | 79 | 0.529391 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,797 | update_passsync.py | freeipa_freeipa/ipaserver/install/plugins/update_passsync.py |
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib import Registry, errors
from ipalib import Updater
from ipapython.dn import DN
from ipaserver.install import sysupgrade
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_passync_privilege_check(Updater):
def execute(self, **options):
update_done = sysupgrade.get_upgrade_state('winsync', 'passsync_privilege_updated')
if update_done:
logger.debug("PassSync privilege update pre-check not needed")
return False, []
logger.debug("Check if there is existing PassSync privilege")
passsync_privilege_dn = DN(('cn','PassSync Service'),
self.api.env.container_privilege,
self.api.env.basedn)
ldap = self.api.Backend.ldap2
try:
ldap.get_entry(passsync_privilege_dn, [''])
except errors.NotFound:
logger.debug("PassSync privilege not found, this is a new update")
sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', False)
else:
logger.debug("PassSync privilege found, skip updating PassSync")
sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
return False, []
@register()
class update_passync_privilege_update(Updater):
"""
Add PassSync user as a member of PassSync privilege, if it exists
"""
def execute(self, **options):
update_done = sysupgrade.get_upgrade_state('winsync', 'passsync_privilege_updated')
if update_done:
logger.debug("PassSync privilege update not needed")
return False, []
logger.debug("Add PassSync user as a member of PassSync privilege")
ldap = self.api.Backend.ldap2
passsync_dn = DN(
('uid', 'passsync'),
self.api.env.container_sysaccounts,
self.api.env.basedn
)
passsync_privilege_dn = DN(('cn','PassSync Service'),
self.api.env.container_privilege,
self.api.env.basedn)
try:
ldap.get_entry(passsync_dn, [''])
except errors.NotFound:
logger.debug("PassSync user not found, no update needed")
sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
return False, []
else:
logger.debug("PassSync user found, do update")
update = {'dn': passsync_privilege_dn,
'updates': [
dict(action='add', attr='member', value=passsync_dn),
]
}
sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
return False, [update]
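# --- Editor's note (hedged sketch): the two updaters above coordinate via a
# --- single persisted flag. An in-memory stand-in for the sysupgrade calls
# --- makes the two-phase pattern explicit (the real module persists to disk):
_state = {}

def get_upgrade_state(module, key):
    return _state.get((module, key), False)

def set_upgrade_state(module, key, value):
    _state[(module, key)] = value

# phase 1 (pre-check) records whether work remains ...
set_upgrade_state('winsync', 'passsync_privilege_updated', False)
# ... phase 2 performs it exactly once and flips the flag
if not get_upgrade_state('winsync', 'passsync_privilege_updated'):
    set_upgrade_state('winsync', 'passsync_privilege_updated', True)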
| 2,794 | Python | .py | 66 | 32.969697 | 91 | 0.630996 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,798 | update_ra_cert_store.py | freeipa_freeipa/ipaserver/install/plugins/update_ra_cert_store.py |
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import logging
import os
import tempfile
from ipalib import Registry
from ipalib import Updater
from ipalib.install import certmonger
from ipaplatform.paths import paths
from ipapython.certdb import NSSDatabase
from ipaserver.install import cainstance
logger = logging.getLogger(__name__)
register = Registry()
@register()
class update_ra_cert_store(Updater):
"""
Moves the ipaCert certificate and key from the /etc/httpd/alias NSS
database to the RA_AGENT_PEM and RA_AGENT_KEY files
"""
def execute(self, **options):
ra_nick = 'ipaCert'
ca_enabled = self.api.Command.ca_is_enabled()['result']
if not ca_enabled:
return False, []
try:
certdb = NSSDatabase(nssdir=paths.HTTPD_ALIAS_DIR)
except ValueError as e:
logger.warning("Problem opening NSS database in "
"%s. Skipping check for existing RA "
"agent certificate: %s", paths.HTTPD_ALIAS_DIR, e)
return False, []
if not certdb.has_nickname(ra_nick):
# Nothing to do
return False, []
elif os.path.exists(paths.RA_AGENT_PEM):
# even though the certificate file exists, we will overwrite it
# as it is probably wrong anyway
logger.warning(
"A certificate with the nickname 'ipaCert' exists in "
"the old '%s' NSS database as well as in the new "
"PEM file '%s'",
paths.HTTPD_ALIAS_DIR, paths.RA_AGENT_PEM)
_fd, p12file = tempfile.mkstemp(dir=certdb.secdir)
# no password is necessary as we will be saving it in clear anyway
certdb.export_pkcs12(ra_nick, p12file, pkcs12_passwd='')
# stop tracking the old cert and remove it
certmonger.stop_tracking(paths.HTTPD_ALIAS_DIR, nickname=ra_nick)
certdb.delete_key_and_cert(ra_nick)
if os.path.exists(paths.OLD_KRA_AGENT_PEM):
os.remove(paths.OLD_KRA_AGENT_PEM)
# get the private key and certificate from the file and start
# tracking it in certmonger
ca = cainstance.CAInstance()
ca.import_ra_cert(p12file)
os.remove(p12file)
return False, []
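# --- Editor's note (hedged sketch): roughly, the migration above is
# --- equivalent to this manual procedure; paths and nicknames are
# --- hypothetical, and certmonger re-tracking is elided:
#
#   pk12util -o /tmp/ra.p12 -n ipaCert -d /etc/httpd/alias -W ''   # export
#   openssl pkcs12 -in /tmp/ra.p12 -clcerts -nokeys -out ra.pem    # cert
#   openssl pkcs12 -in /tmp/ra.p12 -nocerts -nodes -out ra.key     # key
#   certutil -F -n ipaCert -d /etc/httpd/alias                     # remove
#
# followed by tracking the new PEM files with certmonger.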
| 2,350 | Python | .py | 58 | 32.051724 | 77 | 0.642951 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |
| 16,799 | update_ldap_server_list.py | freeipa_freeipa/ipaserver/install/plugins/update_ldap_server_list.py |
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
from ipalib import Registry
from ipalib import Updater
from ipalib import errors
from ipapython.dn import DN
register = Registry()
@register()
class update_ldap_server_list(Updater):
"""
Update defaultServerList, an option that helps Solaris
clients discover LDAP server replicas.
"""
def execute(self, **options):
ldap = self.api.Backend.ldap2
dn = DN(('cn', 'default'), ('ou', 'profile'), self.api.env.basedn)
try:
entry = ldap.get_entry(dn)
srvlist = entry.single_value.get('defaultServerList', '')
srvlist = srvlist.split()
if self.api.env.host not in srvlist:
srvlist.append(self.api.env.host)
attr = ' '.join(srvlist)
entry['defaultServerList'] = attr
ldap.update_entry(entry)
except errors.NotFound:
pass
except ldap.TYPE_OR_VALUE_EXISTS:
pass
# no restart, no updates
return False, ()
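# --- Editor's note (hedged illustration): the idempotent space-separated
# --- list update performed above, stand-alone; hostnames are hypothetical:
srvlist = 'ipa1.example.com ipa2.example.com'.split()
host = 'ipa2.example.com'
if host not in srvlist:
    srvlist.append(host)  # only appended when missing
assert ' '.join(srvlist) == 'ipa1.example.com ipa2.example.com'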
| 1,082 | Python | .py | 32 | 26 | 74 | 0.619732 | freeipa/freeipa | 975 | 339 | 31 | GPL-3.0 | 9/5/2024, 5:12:14 PM (Europe/Amsterdam) |