# ============================================================================
# truenas_middleware/src/middlewared/middlewared/plugins/disk.py
# Source: truenas/middleware (LGPL-3.0)
# ============================================================================
import errno
import re
import subprocess
from sqlalchemy.exc import IntegrityError
from middlewared.utils import ProductType, run
from middlewared.schema import accepts, Bool, Datetime, Dict, Int, Patch, Str
from middlewared.service import filterable, private, CallError, CRUDService, ValidationError
import middlewared.sqlalchemy as sa
from middlewared.utils.asyncio_ import asyncio_map
RE_SED_RDLOCK_EN = re.compile(r'(RLKEna = Y|ReadLockEnabled:\s*1)', re.M)
RE_SED_WRLOCK_EN = re.compile(r'(WLKEna = Y|WriteLockEnabled:\s*1)', re.M)
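# These patterns are used below in sed_initial_setup() to check whether the
# locking range is enabled; each matches the two output formats that
# sedutil-cli's --listLockingRange may emit (e.g. 'RLKEna = Y' vs.
# 'ReadLockEnabled: 1'), which is assumed to vary by SED drive family.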
class DiskModel(sa.Model):
__tablename__ = 'storage_disk'
disk_identifier = sa.Column(sa.String(42), primary_key=True)
disk_name = sa.Column(sa.String(120))
disk_subsystem = sa.Column(sa.String(10), default='')
disk_number = sa.Column(sa.Integer(), default=1)
disk_serial = sa.Column(sa.String(30))
disk_lunid = sa.Column(sa.String(30), nullable=True)
disk_size = sa.Column(sa.String(20))
disk_description = sa.Column(sa.String(120))
disk_transfermode = sa.Column(sa.String(120), default="Auto")
disk_hddstandby = sa.Column(sa.String(120), default="Always On")
disk_advpowermgmt = sa.Column(sa.String(120), default="Disabled")
disk_togglesmart = sa.Column(sa.Boolean(), default=True)
disk_smartoptions = sa.Column(sa.String(120))
disk_expiretime = sa.Column(sa.DateTime(), nullable=True)
disk_enclosure_slot = sa.Column(sa.Integer(), nullable=True)
disk_passwd = sa.Column(sa.EncryptedText(), default='')
disk_critical = sa.Column(sa.Integer(), nullable=True, default=None)
disk_difference = sa.Column(sa.Integer(), nullable=True, default=None)
disk_informational = sa.Column(sa.Integer(), nullable=True, default=None)
disk_model = sa.Column(sa.String(200), nullable=True, default=None)
disk_rotationrate = sa.Column(sa.Integer(), nullable=True, default=None)
disk_type = sa.Column(sa.String(20), default='UNKNOWN')
disk_kmip_uid = sa.Column(sa.String(255), nullable=True, default=None)
disk_zfs_guid = sa.Column(sa.String(20), nullable=True)
disk_bus = sa.Column(sa.String(20))
class DiskService(CRUDService):
class Config:
datastore = 'storage.disk'
datastore_prefix = 'disk_'
datastore_extend = 'disk.disk_extend'
datastore_extend_context = 'disk.disk_extend_context'
datastore_primary_key = 'identifier'
datastore_primary_key_type = 'string'
event_register = False
event_send = False
cli_namespace = 'storage.disk'
ENTRY = Dict(
'disk_entry',
Str('identifier', required=True),
Str('name', required=True),
Str('subsystem', required=True),
Int('number', required=True),
Str('serial', required=True),
Str('lunid', required=True, null=True),
Int('size', required=True),
Str('description', required=True),
Str('transfermode', required=True),
Str(
'hddstandby', required=True, enum=[
'ALWAYS ON', '5', '10', '20', '30', '60', '120', '180', '240', '300', '330'
]
),
Bool('togglesmart', required=True),
Str('advpowermgmt', required=True, enum=['DISABLED', '1', '64', '127', '128', '192', '254']),
Str('smartoptions', required=True),
Datetime('expiretime', required=True, null=True),
Int('critical', required=True, null=True),
Int('difference', required=True, null=True),
Int('informational', required=True, null=True),
Str('model', required=True, null=True),
Int('rotationrate', required=True, null=True),
Str('type', required=True, null=True),
Str('zfs_guid', required=True, null=True),
Str('bus', required=True),
Str('devname', required=True),
Dict(
'enclosure',
Int('number'),
Int('slot'),
null=True, required=True
),
Str('pool', null=True, required=True),
Str('passwd', private=True),
Str('kmip_uid', null=True),
Bool('supports_smart', null=True),
)
@filterable
async def query(self, filters, options):
"""
Query disks.
The following extra options are supported:
include_expired: true - will also include expired disks (default: false)
passwords: true - will not hide KMIP password for the disks (default: false)
supports_smart: true - will query if disks support S.M.A.R.T. Only supported if the resulting disk count is
not larger than one; otherwise, an error is raised.
pools: true - will join pool name for each disk (default: false)
"""
filters = filters or []
options = options or {}
if not options.get('extra', {}).get('include_expired', False):
filters += [('expiretime', '=', None)]
return await super().query(filters, options)
@private
async def disk_extend(self, disk, context):
disk.pop('enabled', None)
for key in ['advpowermgmt', 'hddstandby']:
disk[key] = disk[key].upper()
try:
disk['size'] = int(disk['size'])
except ValueError:
disk['size'] = None
disk['devname'] = disk['name']
self._expand_enclosure(disk)
if context['passwords']:
if not disk['passwd']:
disk['passwd'] = context['disks_keys'].get(disk['identifier'], '')
else:
disk.pop('passwd')
disk.pop('kmip_uid')
disk['supports_smart'] = None
if context['supports_smart']:
if await self.middleware.call('truenas.is_ix_hardware') or disk['name'].startswith('nvme'):
disk['supports_smart'] = True
else:
disk_query = await self.middleware.call('disk.smartctl', disk['name'], ['-a', '--json=c'], {'silent': True})
disk['supports_smart'] = disk_query.get('smart_support', {}).get('available', False)
if disk['name'] in context['boot_pool_disks']:
disk['pool'] = context['boot_pool_name']
else:
disk['pool'] = context['zfs_guid_to_pool'].get(disk['zfs_guid'])
return disk
@private
async def disk_extend_context(self, rows, extra):
context = {
'passwords': extra.get('passwords', False),
'supports_smart': extra.get('supports_smart', False),
'disks_keys': {},
'pools': extra.get('pools', False),
'boot_pool_disks': [],
'boot_pool_name': None,
'zfs_guid_to_pool': {},
}
if context['passwords']:
context['disks_keys'] = await self.middleware.call('kmip.retrieve_sed_disks_keys')
if context['supports_smart']:
if len(rows) > 1:
raise CallError('`supports_smart` cannot be queried if disk count is greater than 1')
if context['pools']:
context['boot_pool_disks'] = await self.middleware.call('boot.get_disks')
context['boot_pool_name'] = await self.middleware.call('boot.pool_name')
for pool in await self.middleware.call('zfs.pool.query'):
topology = await self.middleware.call('pool.transform_topology_lightweight', pool['groups'])
for vdev in await self.middleware.call('pool.flatten_topology', topology):
if vdev['type'] == 'DISK':
context['zfs_guid_to_pool'][vdev['guid']] = pool['name']
return context
def _expand_enclosure(self, disk):
if disk['enclosure_slot'] is not None:
disk['enclosure'] = {
'number': disk['enclosure_slot'] // 1000,
'slot': disk['enclosure_slot'] % 1000
}
else:
disk['enclosure'] = None
del disk['enclosure_slot']
def _compress_enclosure(self, disk):
if disk['enclosure'] is not None:
disk['enclosure_slot'] = disk['enclosure']['number'] * 1000 + disk['enclosure']['slot']
else:
disk['enclosure_slot'] = None
del disk['enclosure']
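# Worked example of the packing scheme used by the two helpers above: an
# 'enclosure_slot' of 2005 expands to {'number': 2, 'slot': 5}, since
# 2005 // 1000 == 2 and 2005 % 1000 == 5; _compress_enclosure() re-encodes
# {'number': 2, 'slot': 5} as 2 * 1000 + 5 == 2005. The scheme therefore
# assumes fewer than 1000 slots per enclosure.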
@accepts(
Str('id'),
Patch(
'disk_entry', 'disk_update',
('rm', {'name': 'identifier'}),
('rm', {'name': 'name'}),
('rm', {'name': 'subsystem'}),
('rm', {'name': 'serial'}),
('rm', {'name': 'kmip_uid'}),
('rm', {'name': 'size'}),
('rm', {'name': 'transfermode'}),
('rm', {'name': 'expiretime'}),
('rm', {'name': 'model'}),
('rm', {'name': 'rotationrate'}),
('rm', {'name': 'type'}),
('rm', {'name': 'zfs_guid'}),
('rm', {'name': 'devname'}),
('attr', {'update': True}),
)
)
async def do_update(self, id_, data):
"""
Update disk of `id`.
Extra S.M.A.R.T. options that are not directly supported can be passed via `smartoptions`.
`critical`, `informational` and `difference` are integer values that configure S.M.A.R.T. alerts,
triggered when the disk temperature crosses the assigned threshold for the respective attribute.
If they are set to null, the smartd configuration defaults are used.
An email with log level LOG_CRIT is issued when the disk temperature crosses `critical`.
An email with log level LOG_INFO is issued when the disk temperature crosses `informational`.
If the temperature of a disk changes by `difference` degrees Celsius since the last report, S.M.A.R.T. reports it.
"""
old = await self.middleware.call(
'datastore.query', 'storage.disk', [['identifier', '=', id_]], {
'get': True, 'prefix': self._config.datastore_prefix
}
)
old.pop('enabled', None)
self._expand_enclosure(old)
new = old.copy()
new.update(data)
# prevent breaking the ability to start the smartd service if user
# provides very obvious params that conflict with our own
invalid_smart_flags = ['-a', '-d', '-n', '-W', '-m', '-M', 'exec']
for invalid in invalid_smart_flags:
if invalid in new['smartoptions']:
raise ValidationError('disk.smartoptions', f'"{invalid}" is an invalid extra smart option')
if not new['passwd'] and old['passwd'] != new['passwd']:
# We want to make sure kmip uid is None in this case
if new['kmip_uid']:
self.middleware.create_task(self.middleware.call('kmip.reset_sed_disk_password', id_, new['kmip_uid']))
new['kmip_uid'] = None
for key in ['advpowermgmt', 'hddstandby']:
new[key] = new[key].title()
self._compress_enclosure(new)
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
new,
{'prefix': self._config.datastore_prefix}
)
if any(new[key] != old[key] for key in ['hddstandby', 'advpowermgmt']):
await self.middleware.call('disk.power_management', new['name'])
if any(
new[key] != old[key]
for key in ['togglesmart', 'smartoptions', 'hddstandby', 'critical', 'difference', 'informational']
):
if new['togglesmart']:
await self.middleware.call('disk.toggle_smart_on', new['name'])
else:
await self.middleware.call('disk.toggle_smart_off', new['name'])
await self.middleware.call('disk.update_smartctl_args_for_disks')
await self._service_change('smartd', 'restart')
await self._service_change('snmp', 'restart')
if new['passwd'] and old['passwd'] != new['passwd']:
await self.middleware.call('kmip.sync_sed_keys', [id_])
return await self.query([['identifier', '=', id_]], {'get': True})
@private
async def copy_settings(self, old, new, copy_settings, copy_description):
keys = []
if copy_settings:
keys += [
'togglesmart', 'advpowermgmt', 'hddstandby', 'smartoptions', 'critical', 'difference', 'informational',
]
if copy_description:
keys += ['description']
await self.middleware.call('disk.update', new['identifier'], {k: v for k, v in old.items() if k in keys})
changed = False
for row in await self.middleware.call('datastore.query', 'tasks.smarttest_smarttest_disks', [
['disk_id', '=', old['identifier']],
], {'relationships': False}):
try:
await self.middleware.call('datastore.insert', 'tasks.smarttest_smarttest_disks', {
'smarttest_id': row['smarttest_id'],
'disk_id': new['identifier'],
})
except IntegrityError:
pass
else:
changed = True
if changed:
self.middleware.create_task(self._service_change('smartd', 'restart'))
@private
async def check_clean(self, disk):
return not bool(await self.middleware.call('disk.list_partitions', disk))
@private
async def sed_unlock_all(self, force=False):
# on an HA system, if both controllers manage to send
# SED commands at the same time, then it can cause issues
# where, ultimately, the disks don't get unlocked
if not force: # Do not check the status if we are unlocking from vrrp_event
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'BACKUP':
return
advconfig = await self.middleware.call('system.advanced.config')
disks = await self.middleware.call('disk.query', [], {'extra': {'passwords': True}})
# If no SED password was found we can stop here
if not await self.middleware.call('system.advanced.sed_global_password') and not any(
[d['passwd'] for d in disks]
):
return
result = await asyncio_map(lambda disk: self.sed_unlock(disk['name'], disk, advconfig, True), disks, 16)
locked = list(filter(lambda x: x['locked'] is True, result))
if locked:
disk_names = ', '.join([i['name'] for i in locked])
self.logger.warning('Failed to unlock the following SED disks: %s', disk_names)
raise CallError('Failed to unlock SED disks', errno.EACCES)
return True
@private
async def sed_unlock(self, disk_name, disk=None, advconfig=None, force=False):
# on an HA system, if both controllers manage to send
# SED commands at the same time, then it can cause issues
# where, ultimately, the disks don't get unlocked
if not force: # Do not check the status if we are unlocking from vrrp_event
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'BACKUP':
return
if advconfig is None:
advconfig = await self.middleware.call('system.advanced.config')
devname = f'/dev/{disk_name}'
# We need two flags to tell apart whether the disk was successfully unlocked
locked = None
unlocked = None
password = await self.middleware.call('system.advanced.sed_global_password')
if disk is None:
disk = await self.query([('name', '=', disk_name)], {'extra': {'passwords': True}})
if disk and disk[0]['passwd']:
password = disk[0]['passwd']
elif disk.get('passwd'):
password = disk['passwd']
rv = {'name': disk_name, 'locked': None}
if not password:
# If there is no password no point in continuing
return rv
# Try unlocking TCG OPAL using sedutil
cp = await run('sedutil-cli', '--query', devname, check=False)
if cp.returncode == 0:
output = cp.stdout.decode(errors='ignore')
if 'Locked = Y' in output:
locked = True
cp = await run('sedutil-cli', '--setLockingRange', '0', 'RW', password, devname, check=False)
if cp.returncode == 0:
locked = False
unlocked = True
# If we were able to unlock it, let's set mbrenable to off
cp = await run('sedutil-cli', '--setMBREnable', 'off', password, devname, check=False)
if cp.returncode:
self.logger.error(
'Failed to set MBREnable for %r to "off": %s', devname,
cp.stderr.decode()
)
elif 'Locked = N' in output:
locked = False
# Try ATA Security if the SED was not unlocked and it's not locked by OPAL
if not unlocked and not locked:
locked, unlocked = await self.middleware.call('disk.unlock_ata_security', devname, advconfig, password)
if locked:
self.logger.error(f'Failed to unlock {disk_name}')
rv['locked'] = locked
return rv
@private
async def sed_initial_setup(self, disk_name, password):
"""
NO_SED - Does not support SED
ACCESS_GRANTED - Already setup and `password` is a valid password
LOCKING_DISABLED - Locking range is disabled
SETUP_FAILED - Initial setup call failed
SUCCESS - Setup successfully completed
"""
# on an HA system, if both controllers manage to send
# SED commands at the same time, then it can cause issues
# where, ultimately, the disks don't get unlocked
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'BACKUP':
return
devname = f'/dev/{disk_name}'
cp = await run('sedutil-cli', '--isValidSED', devname, check=False)
if b' SED ' not in cp.stdout:
return 'NO_SED'
cp = await run('sedutil-cli', '--listLockingRange', '0', password, devname, check=False)
if cp.returncode == 0:
output = cp.stdout.decode()
if RE_SED_RDLOCK_EN.search(output) and RE_SED_WRLOCK_EN.search(output):
return 'ACCESS_GRANTED'
else:
return 'LOCKING_DISABLED'
try:
await run('sedutil-cli', '--initialSetup', password, devname)
except subprocess.CalledProcessError as e:
self.logger.debug(f'initialSetup failed for {disk_name}:\n{e.stdout}{e.stderr}')
return 'SETUP_FAILED'
# OPAL 2.0 disks do not enable locking range on setup like Enterprise does
try:
await run('sedutil-cli', '--enableLockingRange', '0', password, devname)
except subprocess.CalledProcessError as e:
self.logger.debug(f'enableLockingRange failed for {disk_name}:\n{e.stdout}{e.stderr}')
return 'SETUP_FAILED'
return 'SUCCESS'
@private
async def configure_power_management(self):
"""
This runs on boot to properly configure all power management options
(Advanced Power Management and IDLE) for all disks.
"""
if await self.middleware.call('system.product_type') != ProductType.SCALE_ENTERPRISE:
for disk in await self.middleware.call('disk.query'):
await self.middleware.call('disk.power_management', disk['name'], disk)
@private
async def power_management(self, dev, disk=None):
"""
Actually sets power management for `dev`.
`disk` is the disk.query entry and is optional, so this can be called with only the disk name.
"""
if not disk:
disk = await self.middleware.call('disk.query', [('name', '=', dev)])
if not disk:
return
disk = disk[0]
return await self.middleware.call('disk.power_management_impl', dev, disk)
async def _event_system_ready(middleware, event_type, args):
middleware.create_task(middleware.call('disk.configure_power_management'))
def setup(middleware):
# Run disk tasks once system is ready (e.g. power management)
middleware.event_subscribe('system.ready', _event_system_ready)
# ============================================================================
# truenas_middleware/src/middlewared/middlewared/plugins/idmap.py
# ============================================================================
import enum
import asyncio
import errno
import datetime
import wbclient
from middlewared.schema import accepts, Bool, Dict, Int, Password, Patch, Ref, Str, LDAP_DN, OROperator
from middlewared.service import CallError, CRUDService, job, private, ValidationErrors, filterable
from middlewared.service_exception import MatchNotFound
from middlewared.utils.directoryservices.constants import SSL
from middlewared.utils.directoryservices.constants import DSType as DirectoryServiceType
from middlewared.plugins.idmap_.idmap_constants import (
BASE_SYNTHETIC_DATASTORE_ID, IDType, SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX, TRUENAS_IDMAP_MAX
)
from middlewared.plugins.idmap_.idmap_winbind import (WBClient, WBCErr)
from middlewared.plugins.idmap_.idmap_sss import SSSClient
from middlewared.plugins.smb_.constants import SMBBuiltin
import middlewared.sqlalchemy as sa
from middlewared.utils import filter_list
from middlewared.utils.sid import (
get_domain_rid,
BASE_RID_GROUP,
BASE_RID_USER,
DomainRid,
)
from middlewared.utils.tdb import (
get_tdb_handle,
TDBDataType,
TDBOptions,
TDBPathType,
)
from middlewared.validators import Range
try:
from pysss_murmur import murmurhash3
except ImportError:
murmurhash3 = None
WINBIND_IDMAP_FILE = '/var/run/samba-lock/gencache.tdb'
WINBIND_IDMAP_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
def clear_winbind_cache():
with get_tdb_handle(WINBIND_IDMAP_FILE, WINBIND_IDMAP_TDB_OPTIONS) as hdl:
return hdl.clear()
"""
See MS-DTYP 2.4.2.4
Most of these groups will never be used on production servers.
We are statically assigning IDs (based on idmap low range)
to cover edge cases where users may have decided to copy data
from a local Windows server share (for example) and preserve
the existing Security Descriptor. We want the mapping to be
consistent so that ZFS replication of TrueNAS server A to
TrueNAS server B will result in same effective permissions
for users and groups with no unexpected elevation of permissions.
Entries may be appended to this list. Ordering is used to determine
the GID assigned to the builtin.
Once a new entry has been appended, the corresponding padding
in smb/_groupmap.py should be decreased
"""
WELL_KNOWN_SIDS = [
{"name": "NULL", "sid": "S-1-0-0", "set": False},
{"name": "EVERYONE", "sid": "S-1-1-0", "set": True},
{"name": "LOCAL", "sid": "S-1-2-0", "set": True},
{"name": "CONSOLE_LOGON", "sid": "S-1-2-1", "set": True},
{"name": "CREATOR_OWNER", "sid": "S-1-3-0", "set": True},
{"name": "CREATOR_GROUP", "sid": "S-1-3-1", "set": True},
{"name": "OWNER_RIGHTS", "sid": "S-1-3-4", "set": True},
{"name": "DIALUP", "sid": "S-1-5-1", "set": True},
{"name": "NETWORK", "sid": "S-1-5-2", "set": True},
{"name": "BATCH", "sid": "S-1-5-3", "set": True},
{"name": "INTERACTIVE", "sid": "S-1-5-4", "set": True},
{"name": "SERVICE", "sid": "S-1-5-6", "set": True},
{"name": "ANONYMOUS", "sid": "S-1-5-7", "set": True},
{"name": "AUTHENTICATED_USERS", "sid": "S-1-5-11", "set": True},
{"name": "TERMINAL_SERVER_USER", "sid": "S-1-5-13", "set": True},
{"name": "REMOTE_AUTHENTICATED_LOGON", "sid": "S-1-5-14", "set": True},
{"name": "LOCAL_SYSTEM", "sid": "S-1-5-18", "set": True},
{"name": "LOCAL_SERVICE", "sid": "S-1-5-19", "set": True},
{"name": "NETWORK_SERVICE", "sid": "S-1-5-20", "set": True},
]
class DSType(enum.Enum):
"""
The below DS_TYPES are defined for use as system domains for idmap backends.
DS_TYPE_NT4 was once defined but has been deprecated and removed. DS_TYPE_DEFAULT_DOMAIN corresponds
with the idmap settings under services->SMB, and is represented by 'idmap domain = *'
in the smb4.conf. The only instance where the idmap backend for the default domain will
not be 'tdb' is when the server is (1) joined to active directory and (2) autorid is enabled.
"""
DS_TYPE_ACTIVEDIRECTORY = 1
DS_TYPE_LDAP = 2
DS_TYPE_NIS = 3
DS_TYPE_FREEIPA = 4
DS_TYPE_DEFAULT_DOMAIN = 5
def choices():
return [x.name for x in DSType]
class IdmapBackend(enum.Enum):
AD = {
'description': 'The AD backend provides a way for TrueNAS to read id '
'mappings from an Active Directory server that uses '
'RFC2307/SFU schema extensions. ',
'parameters': {
'schema_mode': {"required": False, "default": 'RFC2307'},
'unix_primary_group': {"required": False, "default": False},
'unix_nss_info': {"required": False, "default": False},
},
'has_secrets': False,
'services': ['AD'],
}
AUTORID = {
'description': 'Similar to the RID backend, but automatically configures '
'the range to be used for each domain, so that there is no '
'need to specify a specific range for each domain in the forest '
'The only needed configuration is the range of UID/GIDs to use '
'for user/group mappings and an optional size for the ranges.',
'parameters': {
'rangesize': {"required": False, "default": 100000},
'readonly': {"required": False, "default": False},
'ignore_builtin': {"required": False, "default": False},
},
'has_secrets': False,
'services': ['AD'],
}
LDAP = {
'description': 'Stores and retrieves mapping tables in an LDAP directory '
'service. Default for LDAP directory service.',
'parameters': {
'ldap_base_dn': {"required": True, "default": None},
'ldap_user_dn': {"required": True, "default": None},
'ldap_url': {"required": True, "default": None},
'ldap_user_dn_password': {"required": False, "default": None},
'ssl': {"required": False, "default": SSL.NOSSL.value},
'validate_certificates': {"required": False, "default": True},
'readonly': {"required": False, "default": False},
},
'has_secrets': True,
'services': ['AD', 'LDAP'],
}
NSS = {
'description': 'Provides a simple means of ensuring that the SID for a '
'Unix user is reported as the one assigned to the '
'corresponding domain user.',
'parameters': {
'linked_service': {"required": False, "default": "LOCAL_ACCOUNT"},
},
'has_secrets': False,
'services': ['AD', 'LDAP'],
}
RFC2307 = {
'description': 'Looks up IDs in the Active Directory LDAP server '
'or an external (non-AD) LDAP server. IDs must be stored '
'in RFC2307 ldap schema extensions. Other schema extensions '
'such as Services For Unix (SFU20/SFU30) are not supported.',
'parameters': {
'ldap_server': {"required": False, "default": "AD"},
'bind_path_user': {"required": False, "default": None},
'bind_path_group': {"required": False, "default": None},
'user_cn': {"required": False, "default": None},
'cn_realm': {"required": False, "default": None},
'ldap_domain': {"required": False, "default": None},
'ldap_url': {"required": False, "default": None},
'ldap_user_dn': {"required": True, "default": None},
'ldap_user_dn_password': {"required": False, "default": None},
'ldap_realm': {"required": False, "default": None},
'validate_certificates': {"required": False, "default": True},
'ssl': {"required": False, "default": SSL.NOSSL.value},
},
'has_secrets': True,
'services': ['AD', 'LDAP'],
}
RID = {
'description': 'Default for the Active Directory service. Requires '
'an explicit configuration for each domain, using '
'disjoint ranges.',
'parameters': {
'sssd_compat': {"required": False, "default": False},
},
'has_secrets': False,
'services': ['AD'],
}
TDB = {
'description': 'Default backend used to store mapping tables for '
'BUILTIN and well-known SIDs.',
'parameters': {
'readonly': {"required": False, "default": False},
},
'services': ['AD'],
}
def supported_keys(self):
return [str(x) for x in self.value['parameters'].keys()]
def required_keys(self):
ret = []
for k, v in self.value['parameters'].items():
if v['required']:
ret.append(str(k))
return ret
def defaults(self):
ret = {}
for k, v in self.value['parameters'].items():
if v['default'] is not None:
ret.update({k: v['default']})
return ret
def ds_choices():
directory_services = ['AD', 'LDAP']
ret = {ds: [] for ds in directory_services}
for x in IdmapBackend:
for ds in directory_services:
if ds in x.value['services']:
ret[ds].append(x.name)
return ret
def stores_secret(self):
return self.value['has_secrets']
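# Example usage of the helper methods above; the values follow directly from
# the 'parameters' dicts defined on each member:
# >>> IdmapBackend.RID.supported_keys()
# ['sssd_compat']
# >>> IdmapBackend.LDAP.required_keys()
# ['ldap_base_dn', 'ldap_user_dn', 'ldap_url']
# >>> IdmapBackend.AUTORID.defaults()
# {'rangesize': 100000, 'readonly': False, 'ignore_builtin': False}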
class IdmapDomainModel(sa.Model):
__tablename__ = 'directoryservice_idmap_domain'
id = sa.Column(sa.Integer(), primary_key=True)
idmap_domain_name = sa.Column(sa.String(120), unique=True)
idmap_domain_dns_domain_name = sa.Column(sa.String(255), nullable=True, unique=True)
idmap_domain_range_low = sa.Column(sa.Integer())
idmap_domain_range_high = sa.Column(sa.Integer())
idmap_domain_idmap_backend = sa.Column(sa.String(120), default='rid')
idmap_domain_options = sa.Column(sa.JSON(dict))
idmap_domain_certificate_id = sa.Column(sa.ForeignKey('system_certificate.id'), index=True, nullable=True)
class IdmapDomainService(CRUDService):
ENTRY = Patch(
'idmap_domain_create', 'idmap_domain_entry',
('add', Int('id')),
)
class Config:
datastore = 'directoryservice.idmap_domain'
datastore_prefix = 'idmap_domain_'
namespace = 'idmap'
datastore_extend = 'idmap.idmap_extend'
cli_namespace = 'directory_service.idmap'
role_prefix = 'DIRECTORY_SERVICE'
def __wbclient_ctx(self, retry=True):
"""
Wrapper around setting up a temporary winbindd client context.
If winbindd is stopped, try once to start it; if that fails,
present the reason to the caller.
"""
try:
return WBClient()
except wbclient.WBCError as e:
if not retry or e.error_code != wbclient.WBC_ERR_WINBIND_NOT_AVAILABLE:
raise e
if not self.middleware.call_sync('systemdataset.sysdataset_path'):
raise CallError(
'Unexpected filesystem mounted in the system dataset path. '
'This may indicate a failure to initialize the system dataset '
'and may be resolved by reviewing and fixing errors in the system '
'dataset configuration.', errno.EAGAIN
)
self.middleware.call_sync('service.start', 'idmap', {'silent': False})
return self.__wbclient_ctx(False)
@private
async def idmap_extend(self, data):
if data.get('idmap_backend'):
data['idmap_backend'] = data['idmap_backend'].upper()
opt_enums = ['ssl', 'linked_service']
if data.get('options'):
for i in opt_enums:
if data['options'].get(i):
data['options'][i] = data['options'][i].upper()
return data
@private
async def idmap_compress(self, data):
opt_enums = ['ssl', 'linked_service']
if data.get('options'):
for i in opt_enums:
if data['options'].get(i):
data['options'][i] = data['options'][i].lower()
data['idmap_backend'] = data['idmap_backend'].lower()
return data
@private
async def get_next_idmap_range(self):
"""
Increment the next high range by 100,000,000 ids. This number has
to accommodate the highest available rid value for a domain.
Configured idmap ranges _must_ not overlap.
"""
domains = await self.query()
sorted_idmaps = sorted(domains, key=lambda domain: domain['range_high'])
low_range = sorted_idmaps[-1]['range_high'] + 1
high_range = sorted_idmaps[-1]['range_high'] + 100000000
return (low_range, high_range)
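# Worked example: if the highest configured range_high across all idmap
# domains is 200000000, the next domain is offered
# (200000001, 300000000), i.e. low = high + 1 and high = high + 100,000,000.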
@private
async def snapshot_samba4_dataset(self):
sysdataset = (await self.middleware.call('systemdataset.config'))['basename']
ts = str(datetime.datetime.now(datetime.timezone.utc).timestamp())[:10]
await self.middleware.call('zfs.snapshot.create', {'dataset': f'{sysdataset}/samba4',
'name': f'wbc-{ts}'})
@private
@filterable
def known_domains(self, query_filters, query_options):
try:
entries = [entry.domain_info() for entry in WBClient().all_domains()]
except wbclient.WBCError as e:
match e.error_code:
case wbclient.WBC_ERR_INVALID_RESPONSE:
# Our idmap domain is not AD and so this is not expected to succeed
return []
case wbclient.WBC_ERR_WINBIND_NOT_AVAILABLE:
# The winbindd process is stopped; this may be a hot code path. Skip.
return []
case _:
raise
return filter_list(entries, query_filters, query_options)
@private
@filterable
def online_status(self, query_filters, query_options):
try:
all_info = self.known_domains()
except wbclient.WBCError as e:
raise CallError(str(e), WBCErr[e.error_code], e.error_code)
entries = [{
'domain': dom_info['netbios_domain'],
'online': dom_info['online']
} for dom_info in all_info]
return filter_list(entries, query_filters, query_options)
@private
def domain_info(self, domain):
if domain == 'DS_TYPE_ACTIVEDIRECTORY':
return WBClient().domain_info()
elif domain == 'DS_TYPE_DEFAULT_DOMAIN':
return WBClient().domain_info('BUILTIN')
elif domain == 'DS_TYPE_LDAP':
return None
return WBClient().domain_info(domain)
@private
def parse_domain_info(self, sid):
if sid.startswith((SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX)):
return {'domain': 'LOCAL', 'domain_sid': None, 'online': True, 'activedirectory': False}
domain_info = self.known_domains([['sid', '=', sid.rsplit('-', 1)[0]]])
if not domain_info:
return {'domain': 'UNKNOWN', 'domain_sid': None, 'online': False, 'activedirectory': False}
return {
'domain': domain_info[0]['netbios_domain'],
'domain_sid': domain_info[0]['sid'],
'online': domain_info[0]['online'],
'activedirectory': 'ACTIVE_DIRECTORY' in domain_info[0]['domain_flags']['parsed']
}
@private
async def get_sssd_low_range(self, domain, sssd_config=None, seed=0xdeadbeef):
"""
This is a best-effort attempt at SSSD compatibility. It will allocate the low
range for the initial slice in the SSSD environment. The SSSD allocation algorithm
is non-deterministic. Domain SID string is converted to a 32-bit hashed value
using murmurhash3 algorithm.
The modulus of this value with the total number of available slices is used to
pick the slice. This slice number is then used to calculate the low range for
RID 0. With the default settings in SSSD this will be deterministic as long as
the domain has less than 200,000 RIDs.
"""
sid = (await self.middleware.call('idmap.domain_info', domain))['sid']
sssd_config = {} if not sssd_config else sssd_config
range_size = sssd_config.get('range_size', 200000)
range_low = sssd_config.get('range_low', 10001)
range_max = sssd_config.get('range_max', 2000200000)
max_slices = (range_max - range_low) // range_size
data = sid.encode()
hash_ = murmurhash3(data, len(data), seed)
return (hash_ % max_slices) * range_size + range_size
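# Worked example with the SSSD defaults above:
# max_slices = (2000200000 - 10001) // 200000 == 10000.
# If murmurhash3(sid) % 10000 selects slice 7, the method returns
# 7 * 200000 + 200000 == 1600000 as the low range for RID 0.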
@accepts(roles=['DIRECTORY_SERVICE_WRITE'])
@job(lock='clear_idmap_cache', lock_queue_size=1)
async def clear_idmap_cache(self, job):
"""
Stop samba, clear winbind's gencache.tdb file, start samba, and flush samba's cache.
This should be performed after finalizing idmap changes.
"""
smb_started = await self.middleware.call('service.started', 'cifs')
await self.middleware.call('service.stop', 'idmap')
try:
await self.middleware.run_in_thread(clear_winbind_cache)
except FileNotFoundError:
self.logger.debug("Failed to remove winbindd_cache.tdb. File not found.")
except Exception:
self.logger.debug("Failed to remove winbindd_cache.tdb.", exc_info=True)
await self.middleware.call('idmap.gencache.flush')
await self.middleware.call('service.start', 'idmap')
if smb_started:
await self.middleware.call('service.restart', 'cifs')
@private
async def may_enable_trusted_domains(self):
domains = await self.query([['name', '!=', 'DS_TYPE_DEFAULT_DOMAIN'], ['name', '!=', 'DS_TYPE_LDAP']])
primary = filter_list(domains, [['name', '=', 'DS_TYPE_ACTIVEDIRECTORY']], {'get': True})
if primary['idmap_backend'] == IdmapBackend.AUTORID.name or len(domains) > 1:
return True
return False
@accepts(roles=['DIRECTORY_SERVICE_READ'])
async def backend_options(self):
"""
This returns full information about idmap backend options. Not all
`options` are valid for every backend.
"""
return {x.name: x.value for x in IdmapBackend}
@accepts(roles=['DIRECTORY_SERVICE_READ'])
async def backend_choices(self):
"""
Returns array of valid idmap backend choices per directory service.
"""
return IdmapBackend.ds_choices()
@private
async def validate(self, schema_name, data, verrors):
if data['name'] == DSType.DS_TYPE_LDAP.name:
if data['idmap_backend'] not in (await self.backend_choices())['LDAP']:
verrors.add(f'{schema_name}.idmap_backend',
f'idmap backend [{data["idmap_backend"]}] is not appropriate '
f'for the system domain type {data["name"]}')
elif data['name'] == DSType.DS_TYPE_DEFAULT_DOMAIN.name:
if data['idmap_backend'] != 'TDB':
verrors.add(f'{schema_name}.idmap_backend',
'TDB is the only supported idmap backend for DS_TYPE_DEFAULT_DOMAIN.')
if data['range_high'] < data['range_low']:
"""
If we don't exit at this point further range() operations will raise an IndexError.
"""
verrors.add(f'{schema_name}.range_low', 'Idmap high range must be greater than idmap low range')
return
if data.get('certificate') and not await self.middleware.call(
'certificate.query', [['id', '=', data['certificate']]]
):
verrors.add(f'{schema_name}.certificate', 'Please specify a valid certificate.')
configured_domains = await self.query()
ds = await self.middleware.call("directoryservices.status")
match ds['status']:
case 'HEALTHY' | 'JOINING':
if ds['type'] == 'ACTIVEDIRECTORY':
ldap_enabled = False
ad_enabled = True
else:
# IPA domain or LDAP
ldap_enabled = True
ad_enabled = False
case _:
ldap_enabled = False
ad_enabled = False
new_range = range(data['range_low'], data['range_high'])
idmap_backend = data.get('idmap_backend')
for i in configured_domains:
# Do not generate validation error comparing to oneself.
if i['id'] == data.get('id', -1):
continue
if i['name'] == data['name']:
verrors.add(f'{schema_name}.name', 'Name must be unique.')
if data.get('dns_domain_name') and data['dns_domain_name'] == i['dns_domain_name']:
verrors.add(f'{schema_name}.dns_domain_name', 'Name must be unique.')
# Do not generate validation errors for overlapping with a disabled DS.
if not ldap_enabled and i['name'] == 'DS_TYPE_LDAP':
continue
if not ad_enabled and i['name'] == 'DS_TYPE_ACTIVEDIRECTORY':
continue
# Idmap settings under Services->SMB are ignored when autorid is enabled.
if idmap_backend == IdmapBackend.AUTORID.name and i['name'] == 'DS_TYPE_DEFAULT_DOMAIN':
continue
# Overlap between ranges defined for 'ad' backend are permitted.
if idmap_backend == IdmapBackend.AD.name and i['idmap_backend'] == IdmapBackend.AD.name:
continue
existing_range = range(i['range_low'], i['range_high'])
if range(max(existing_range[0], new_range[0]), min(existing_range[-1], new_range[-1]) + 1):
verrors.add(f'{schema_name}.range_low',
f'new idmap range [{data["range_low"]}-{data["range_high"]}] '
'conflicts with existing range for domain '
f'[{i["name"]}], range: [{i["range_low"]}-{i["range_high"]}].')
@private
async def validate_options(self, schema_name, data, verrors, check=['MISSING', 'EXTRA']):
supported_keys = set(IdmapBackend[data['idmap_backend']].supported_keys())
required_keys = set(IdmapBackend[data['idmap_backend']].required_keys())
provided_keys = set([str(x) for x in data['options'].keys()])
missing_keys = required_keys - provided_keys
extra_keys = provided_keys - supported_keys
if 'MISSING' in check:
for k in missing_keys:
verrors.add(f'{schema_name}.options.{k}',
f'[{k}] is a required parameter for the [{data["idmap_backend"]}] idmap backend.')
if 'EXTRA' in check:
for k in extra_keys:
verrors.add(f'{schema_name}.options.{k}',
f'[{k}] is not a valid parameter for the [{data["idmap_backend"]}] idmap backend.')
@private
async def prune_keys(self, data):
supported_keys = set(IdmapBackend[data['idmap_backend']].supported_keys())
provided_keys = set([str(x) for x in data['options'].keys()])
for k in (provided_keys - supported_keys):
data['options'].pop(k)
@private
async def idmap_conf_to_client_config(self, data):
options = data['options'].copy()
if data['idmap_backend'] not in ['LDAP', 'RFC2307']:
raise CallError(f'{data["idmap_backend"]}: invalid idmap backend')
if data['idmap_backend'] == 'LDAP':
uri = options["ldap_url"]
basedn = options["ldap_base_dn"]
else:
if data['options']['ldap_server'] == 'AD':
uri = options["ldap_domain"]
else:
uri = options["ldap_url"]
basedn = options["bind_path_user"]
credentials = {
"binddn": options["ldap_user_dn"],
"bindpw": options["ldap_user_dn_password"],
}
security = {
"ssl": options["ssl"],
"sasl": "SEAL",
"validate_certificates": options["validate_certificates"],
}
return {
"uri_list": [f'{"ldaps://" if security["ssl"] == "ON" else "ldap://"}{uri}'],
"basedn": basedn,
"bind_type": "PLAIN",
"credentials": credentials,
"security": security,
}
@filterable
async def query(self, filters, options):
extra = options.get("extra", {})
more_info = extra.get("additional_information", [])
ret = await super().query()
if 'DOMAIN_INFO' in more_info:
for entry in ret:
try:
domain_info = await self.middleware.call('idmap.domain_info', entry['name'])
except wbclient.WBCError as e:
if e.error_code != wbclient.WBC_ERR_DOMAIN_NOT_FOUND:
self.logger.debug(
"Failed to retrieve domain info for domain %s: %s",
entry['name'], e
)
domain_info = None
entry.update({'domain_info': domain_info})
return filter_list(ret, filters, options)
@accepts(Dict(
'idmap_domain_create',
Str('name', required=True),
Str('dns_domain_name', null=True),
Int('range_low', required=True, validators=[Range(min_=1000, max_=TRUENAS_IDMAP_MAX)]),
Int('range_high', required=True, validators=[Range(min_=1000, max_=TRUENAS_IDMAP_MAX)]),
Str('idmap_backend', required=True, enum=[x.name for x in IdmapBackend]),
Int('certificate', null=True),
OROperator(
Dict(
'idmap_ad_options',
Ref('nss_info_ad', 'schema_mode'),
Bool('unix_primary_group', default=False),
Bool('unix_nss_info', default=False),
),
Dict(
'idmap_autorid_options',
Int('rangesize', default=100000, validators=[Range(min_=10000, max_=1000000000)]),
Bool('readonly', default=False),
Bool('ignore_builtin', default=False),
),
Dict(
'idmap_ldap_options',
LDAP_DN('ldap_base_dn'),
LDAP_DN('ldap_user_dn'),
Password('ldap_user_dn_password'),
Str('ldap_url'),
Bool('readonly', default=False),
Ref('ldap_ssl_choice', 'ssl'),
Bool('validate_certificates', default=True),
),
Dict(
'idmap_nss_options',
Str('linked_service', default='LOCAL_ACCOUNT', enum=['LOCAL_ACCOUNT', 'LDAP']),
),
Dict(
'idmap_rfc2307_options',
Str('ldap_server', required=True, enum=['AD', 'STANDALONE']),
Bool('ldap_realm', default=False),
LDAP_DN('bind_path_user'),
LDAP_DN('bind_path_group'),
Bool('user_cn', default=False),
Str('cn_realm'),
Str('ldap_domain'),
Str('ldap_url'),
LDAP_DN('ldap_user_dn'),
Password('ldap_user_dn_password'),
Ref('ldap_ssl_choice', 'ssl'),
Bool('validate_certificates', default=True),
),
Dict(
'idmap_rid_options',
Bool('sssd_compat', default=False),
),
Dict(
'idmap_tdb_options',
),
default={},
name='options',
title='idmap_options',
),
register=True
))
async def do_create(self, data):
"""
Create a new IDMAP domain. These domains must be unique. This table
will be automatically populated after joining an Active Directory domain
if "allow trusted domains" is set to True in the AD service configuration.
There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
respectively.
`name` the pre-windows 2000 domain name.
`dns_domain_name` DNS name of the domain.
`idmap_backend` provides a plugin interface for Winbind to use varying
backends to store SID/uid/gid mapping tables. The correct setting
depends on the environment in which the NAS is deployed.
`range_low` and `range_high` specify the UID and GID range for which this backend is authoritative.
`certificate_id` references the certificate ID of the SSL certificate to use for certificate-based
authentication to a remote LDAP server. This parameter is not supported for all idmap backends as some
backends will generate SID to ID mappings algorithmically without causing network traffic.
`options` are additional parameters that are backend-dependent:
`AD` idmap backend options:
`unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.
`unix_nss_info` if True winbind will retrieve the login shell and home directory from the LDAP attributes.
If False or if the AD LDAP entry lacks the SFU attributes the smb4.conf parameters `template shell` and `template homedir` are used.
`schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
This can be either the RFC2307 schema support included in Windows 2003 R2 or the Service for Unix (SFU) schema.
For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
controlled by the unix_primary_group option.
`AUTORID` idmap backend options:
`readonly` sets the module to read-only mode. No new ranges will be allocated and new mappings
will not be created in the idmap pool.
`ignore_builtin` ignores mapping requests for the BUILTIN domain.
`LDAP` idmap backend options:
`ldap_base_dn` defines the directory base suffix to use for SID/uid/gid mapping entries.
`ldap_user_dn` defines the user DN to be used for authentication.
`ldap_url` specifies the LDAP server to use for SID/uid/gid map entries.
`ssl` specifies whether to encrypt the LDAP transport for the idmap backend.
`NSS` idmap backend options:
`linked_service` specifies the auxiliary directory service ID provider.
`RFC2307` idmap backend options:
`domain` specifies the domain for which the idmap backend is being created. Numeric id, short-form
domain name, or long-form DNS domain name of the domain may be specified. Entry must be entered as
it appears in `idmap.domain`.
`ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
by the Active Directory Domain (ad) or a stand-alone LDAP server.
`bind_path_user` specifies the search base where user objects can be found in the LDAP server.
`bind_path_group` specifies the search base where group objects can be found in the LDAP server.
`user_cn` query cn attribute instead of uid attribute for the user name in LDAP.
`cn_realm` append @realm to cn for groups (and users if user_cn is set) in LDAP queries.
`ldap_domain` when using the LDAP server in the Active Directory server, this allows one to
specify the domain where to access the Active Directory server. This allows using trust relationships
while keeping all RFC 2307 records in one place. This parameter is optional, the default is to access
the AD server in the current domain to query LDAP records.
`ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.
`ldap_user_dn` defines the user DN to be used for authentication.
`ldap_user_dn_password` is the password to be used for LDAP authentication.
`ldap_realm` defines the realm to use in the user and group names. This is only required when using cn_realm together with
a stand-alone ldap server.
`RID` backend options:
`sssd_compat` generate idmap low range based on same algorithm that SSSD uses by default.
"""
verrors = ValidationErrors()
if 'options' not in data:
data['options'] = {}
old = await self.query()
if data['name'] in [x['name'] for x in old]:
verrors.add('idmap_domain_create.name', 'Domain names must be unique.')
if data['options'].get('sssd_compat'):
status = (await self.middleware.call('directoryservices.status'))['status']
if status != 'HEALTHY':
verrors.add('idmap_domain_create.options',
'AD service must be enabled and started to '
'generate an SSSD-compatible id range')
verrors.check()
data['range_low'] = await self.get_sssd_low_range(data['name'])
data['range_high'] = data['range_low'] + 100000000
await self.validate('idmap_domain_create', data, verrors)
await self.validate_options('idmap_domain_create', data, verrors)
if data.get('certificate_id') and not data['options'].get('ssl'):
verrors.add('idmap_domain_create.certificate_id',
f'The {data["idmap_backend"]} idmap backend does not '
'generate LDAP traffic. Certificates do not apply.')
verrors.check()
if data['options'].get('ldap_user_dn_password'):
try:
DSType[data["name"]]
domain = (await self.middleware.call("smb.config"))['workgroup']
except KeyError:
domain = data["name"]
client_conf = await self.idmap_conf_to_client_config(data)
await self.middleware.call(
'ldapclient.validate_credentials',
client_conf
)
secret = data['options'].pop('ldap_user_dn_password')
await self.middleware.call(
'directoryservices.secrets.set_ldap_idmap_secret',
domain, data['options']['ldap_user_dn'], secret
)
await self.middleware.call('directoryservices.secrets.backup')
final_options = IdmapBackend[data['idmap_backend']].defaults()
final_options.update(data['options'])
data['options'] = final_options
id_ = await self.middleware.call(
'datastore.insert', self._config.datastore,
data, {'prefix': self._config.datastore_prefix}
)
out = await self.query([('id', '=', id_)], {'get': True})
await self.middleware.call('etc.generate', 'smb')
await self.middleware.call('service.restart', 'idmap')
return out
async def do_update(self, id_, data):
"""
Update a domain by id.
"""
old = await self.query([('id', '=', id_)], {'get': True})
new = old.copy()
new.update(data)
if data.get('idmap_backend') and data['idmap_backend'] != old['idmap_backend']:
"""
Remove options from previous backend because they are almost certainly
not valid for the new backend.
"""
new['options'] = data.get('options', {})
else:
new['options'] = old['options'].copy() | data.get('options', {})
tmp = data.copy()
verrors = ValidationErrors()
if old['name'] in [x.name for x in DSType] and old['name'] != new['name']:
verrors.add('idmap_domain_update.name',
f'Changing name of default domain {old["name"]} is not permitted')
if new['options'].get('sssd_compat') and not old['options'].get('sssd_compat'):
ds_state = await self.middleware.call('directoryservices.get_state')
if ds_state['activedirectory'] != 'HEALTHY':
verrors.add('idmap_domain_update.options',
'AD service must be enabled and started to '
'generate an SSSD-compatible id range')
verrors.check()
new['range_low'] = await self.get_sssd_low_range(new['name'])
new['range_high'] = new['range_low'] + 100000000
if new['idmap_backend'] == 'AUTORID' and new['name'] != 'DS_TYPE_ACTIVEDIRECTORY':
verrors.add("idmap_domain_update.idmap_backend",
"AUTORID is only permitted for the default idmap backend for "
"the active directory directory service (DS_TYPE_ACTIVEDIRECTORY).")
await self.validate('idmap_domain_update', new, verrors)
await self.validate_options('idmap_domain_update', new, verrors, ['MISSING'])
tmp['idmap_backend'] = new['idmap_backend']
if data.get('options'):
await self.validate_options('idmap_domain_update', tmp, verrors, ['EXTRA'])
if data.get('certificate_id') and not data['options'].get('ssl'):
verrors.add('idmap_domain_update.certificate_id',
f'The {new["idmap_backend"]} idmap backend does not '
'generate LDAP traffic. Certificates do not apply.')
verrors.check()
await self.prune_keys(new)
final_options = IdmapBackend[new['idmap_backend']].defaults() | new['options'].copy()
new['options'] = final_options
if new['options'].get('ldap_user_dn_password'):
try:
DSType[new["name"]]
domain = (await self.middleware.call("smb.config"))['workgroup']
except KeyError:
domain = new["name"]
client_conf = await self.idmap_conf_to_client_config(new)
await self.middleware.call(
'ldapclient.validate_credentials',
client_conf
)
secret = new['options'].pop('ldap_user_dn_password')
await self.middleware.call("directoryservices.secrets.set_ldap_idmap_secret",
domain, new['options']['ldap_user_dn'], secret)
await self.middleware.call("directoryservices.secrets.backup")
await self.middleware.call(
'datastore.update', self._config.datastore,
new['id'], new, {'prefix': self._config.datastore_prefix}
)
out = await self.query([('id', '=', id_)], {'get': True})
await self.middleware.call('etc.generate', 'smb')
cache_job = await self.middleware.call('idmap.clear_idmap_cache')
await cache_job.wait()
return out
async def do_delete(self, id_):
"""
Delete a domain by id. Deletion of default system domains is not permitted.
"""
entry = await self.get_instance(id_)
if entry['name'] in DSType.choices():
raise CallError(f'Deleting system idmap domain [{entry["name"]}] is not permitted.', errno.EPERM)
ret = await self.middleware.call('datastore.delete', self._config.datastore, id_)
await self.middleware.call('etc.generate', 'smb')
return ret
@private
def convert_sids(self, sidlist):
"""
Internal bulk conversion method for Windows-style SIDs to Unix IDs (uid or gid).
This ends up being a de-facto wrapper around wbcCtxSidsToUnixIds from
libwbclient (single winbindd request), and so it is the preferred
method of batch conversion.
"""
if not sidlist:
raise CallError("List of SIDS to convert must contain at least one entry")
try:
client = self.__wbclient_ctx()
except wbclient.WBCError as e:
raise CallError(str(e), WBCErr[e.error_code], e.error_code)
mapped = {}
unmapped = {}
to_check = []
server_sid = self.middleware.call_sync('smb.local_server_sid')
netbiosname = self.middleware.call_sync('smb.config')['netbiosname']
for sid in sidlist:
try:
entry = self.__local_sid_to_entry(server_sid, netbiosname, sid, client.separator)
except (KeyError, ValidationErrors):
# This is a Unix SID or a local SID, but account doesn't exist
unmapped.update({sid: sid})
continue
if entry:
mapped[sid] = entry
continue
to_check.append(sid)
# First try to retrieve SIDs via SSSD, since SSSD and
# winbind are both running when we are joined to an IPA
# domain. The former provides authoritative SID<->XID resolution
# for IPA accounts; the latter is authoritative for local accounts.
if self.middleware.call_sync('directoryservices.status')['type'] == DirectoryServiceType.IPA.value:
if to_check:
sss_ctx = SSSClient()
results = sss_ctx.sids_to_idmap_entries(to_check)
mapped |= results['mapped']
to_check = list(results['unmapped'].keys())
if to_check:
try:
results = client.sids_to_idmap_entries(to_check)
except wbclient.WBCError as e:
raise CallError(str(e), WBCErr[e.error_code], e.error_code)
mapped |= results['mapped']
unmapped |= results['unmapped']
return {'mapped': mapped, 'unmapped': unmapped}
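# Shape of the return value as assembled above: 'mapped' is keyed by SID and
# contains entries of the form {'name': ..., 'id': ..., 'id_type': ..., 'sid': ...}
# (from the local fast path, SSSD, or winbind), while the local fallback path
# records unmapped SIDs as {<sid>: <sid>}.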
@private
def convert_unixids(self, id_list):
"""
Internal bulk conversion method for Unix IDs (uid or gid) to Windows-style
SIDs. This ends up being a de-facto wrapper around wbcCtxUnixIdsToSids
from libwbclient (single winbindd request), and so it is the preferred
method of batch conversion.
"""
output = {'mapped': {}, 'unmapped': {}}
if not id_list:
return output
if self.middleware.call_sync('directoryservices.status')['type'] == DirectoryServiceType.IPA.value:
try:
dom_info = self.middleware.call_sync('directoryservices.connection.ipa_get_smb_domain_info')
except Exception:
dom_info = None
if dom_info:
sss_ctx = SSSClient()
results = sss_ctx.users_and_groups_to_idmap_entries(id_list)
if not results['unmapped']:
# short-circuit
return results
output['mapped'] = results['mapped']
id_list = []
for entry in results['unmapped'].keys():
id_type, xid = entry.split(':')
xid = int(xid)
if dom_info['range_id_min'] <= xid <= dom_info['range_id_max']:
# ID is provided by SSSD but does not have a SID allocated
# do not include in list to look up via winbind since
# we do not want to introduce potential for hanging for
# the winbind request timeout.
continue
id_list.append({
'id_type': 'USER' if id_type == 'UID' else 'GROUP',
'id': int(xid)
})
if id_list:
try:
client = self.__wbclient_ctx()
results = client.users_and_groups_to_idmap_entries(id_list)
except wbclient.WBCError as e:
raise CallError(str(e), WBCErr[e.error_code], e.error_code)
output['mapped'] |= results['mapped']
output['unmapped'] = results['unmapped']
return output
def __unixsid_to_entry(self, sid, separator):
if not sid.startswith((SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX)):
return None
if sid.startswith(SID_LOCAL_USER_PREFIX):
uid = int(sid[len(SID_LOCAL_USER_PREFIX):])
u = self.middleware.call_sync('user.get_user_obj', {'uid': uid})
return {
'name': f'Unix User{separator}{u["pw_name"]}',
'id': uid,
'id_type': IDType.USER.name,
'sid': sid
}
gid = int(sid[len(SID_LOCAL_GROUP_PREFIX):])
g = self.middleware.call_sync('group.get_group_obj', {'gid': gid})
return {
'name': f'Unix Group{separator}{g["gr_name"]}',
'id': gid,
'id_type': IDType.GROUP.name,
'sid': sid
}
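# Hedged example, assuming the conventional Samba Unix SID prefixes
# ('S-1-22-1-' for users, 'S-1-22-2-' for groups) behind the imported
# constants: 'S-1-22-1-3000' resolves via user.get_user_obj to uid 3000
# and is reported as 'Unix User<separator><pw_name>'.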
def __local_sid_to_entry(self, server_sid, netbiosname, sid, separator):
"""
Attempt to resolve SID to an ID entry without querying winbind or
SSSD for it. This should be possible for local user accounts.
"""
if (entry := self.__unixsid_to_entry(sid, separator)) is not None:
return entry
if not sid.startswith(server_sid):
return None
rid = get_domain_rid(sid)
if rid == DomainRid.ADMINS:
return {
'name': f'{netbiosname}{separator}{SMBBuiltin.ADMINISTRATORS.nt_name}',
'id': SMBBuiltin.ADMINISTRATORS.rid,
'id_type': IDType.GROUP.name,
'sid': sid,
}
elif rid == DomainRid.GUESTS:
return {
'name': f'{netbiosname}{separator}{SMBBuiltin.GUESTS.nt_name}',
'id': SMBBuiltin.GUESTS.rid,
'id_type': IDType.GROUP.name,
'sid': sid,
}
elif rid > BASE_RID_GROUP:
id_type = IDType.GROUP.name
method = 'group.get_instance'
xid_key = 'gid'
name_key = 'name'
db_id = rid - BASE_RID_GROUP
elif rid > BASE_RID_USER:
id_type = IDType.USER.name
method = 'user.get_instance'
xid_key = 'uid'
name_key = 'username'
db_id = rid - BASE_RID_USER
else:
# Log an error message and fall through to winbind or sssd to resolve it
self.logger.warning('%s: unexpected local SID value', sid)
return None
entry = self.middleware.call_sync(method, db_id)
return {
'name': f'{netbiosname}{separator}{entry[name_key]}',
'id': entry[xid_key],
'id_type': id_type,
'sid': sid
}
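# Hedged example of the RID arithmetic above, with hypothetical values for
# the imported BASE_RID_USER constant: if BASE_RID_USER were 20000, a local
# SID ending in RID 20042 would resolve through user.get_instance(42),
# since db_id = rid - BASE_RID_USER.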
@private
@filterable
async def builtins(self, filters, options):
out = []
idmap_backend = await self.middleware.call("smb.getparm", "idmap config * : backend", "GLOBAL")
if idmap_backend != "tdb":
"""
idmap_autorid and potentially other allocating idmap backends may be used for
the default domain.
"""
return []
idmap_range = await self.middleware.call("smb.getparm", "idmap config * : range", "GLOBAL")
low_range = int(idmap_range.split("-")[0].strip())
for idx, entry in enumerate(WELL_KNOWN_SIDS):
finalized_entry = entry.copy()
finalized_entry.update({
'id': idx,
'gid': low_range + 3 + idx
})
out.append(finalized_entry)
return filter_list(out, filters, options)
@private
async def id_to_name(self, xid, id_type):
"""
Helper method to retrieve the name for the specified uid or gid. This method
passes through user.query or group.query rather than user.get_user_obj or
group.get_group_obj because an explicit request for a uid / gid will trigger
a directory service cache insertion if it does not already exist. This allows
the cache to be lazily filled if enumeration for directory services is disabled.
"""
idtype = IDType[id_type]
idmap_timeout = 5.0
match idtype:
# IDType.BOTH is a possible return value from nss_winbind / nss_sss
# and is a special case where the idmap backend maps a SID
# to both a user and a group. For most practical purposes it
# can be treated internally as a group.
case IDType.GROUP | IDType.BOTH:
method = 'group.query'
filters = [['gid', '=', xid]]
key = 'group'
case IDType.USER:
method = 'user.query'
filters = [['uid', '=', xid]]
key = 'username'
case _:
raise CallError(f"Unsupported id_type: [{idtype.name}]")
try:
ret = await asyncio.wait_for(
self.middleware.create_task(self.middleware.call(method, filters, {'get': True, 'order_by': [key]})),
timeout=idmap_timeout
)
name = ret[key]
except asyncio.TimeoutError:
self.logger.debug(
"timeout encountered while trying to convert %s id %d "
"to name. This may indicate significant networking issue.",
id_type.lower(), xid
)
name = None
except MatchNotFound:
name = None
return name
@private
async def has_id_type_both(self, xid):
"""
Check whether the xid comes from a domain whose idmap backend returns
ID_TYPE_BOTH. In this case the xid is both a user and a group, with
special behavior in the OS.
"""
domains = await self.query()
for d in domains:
if d['name'] == 'DS_TYPE_LDAP':
continue
if xid >= d['range_low'] and xid <= d['range_high']:
                # currently, in the AD case, these are the only two supported
                # backends that provide this special ID type
return d['idmap_backend'] in ['AUTORID', 'RID']
return False
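    # Worked example (hedged, illustrative values): for a domain entry with
    # idmap_backend == 'RID' and range_low/range_high of 100000001/200000000,
    # has_id_type_both(100000005) returns True, meaning that xid is usable as
    # both a uid and a gid.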
@private
async def synthetic_user(self, passwd, sid):
match passwd['source']:
case 'LOCAL':
# local user, should be retrieved via user.query
return None
case 'ACTIVEDIRECTORY':
id_type_both = await self.has_id_type_both(passwd['pw_uid'])
case _:
id_type_both = False
return {
'id': BASE_SYNTHETIC_DATASTORE_ID + passwd['pw_uid'],
'uid': passwd['pw_uid'],
'username': passwd['pw_name'],
'unixhash': None,
'smbhash': None,
'group': {},
'home': passwd['pw_dir'],
'shell': passwd['pw_shell'],
'full_name': passwd['pw_gecos'],
'builtin': False,
'email': None,
'password_disabled': False,
'locked': False,
'sudo_commands': [],
'sudo_commands_nopasswd': [],
'attributes': {},
'groups': [],
'sshpubkey': None,
'local': False,
'id_type_both': id_type_both,
'roles': [],
'api_keys': [],
'two_factor_auth_configured': False,
'immutable': True,
'smb': True,
'sid': sid
}
@private
async def synthetic_group(self, grp, sid):
match grp['source']:
case 'LOCAL':
# local group, should be retrieved via group.query
return None
case 'ACTIVEDIRECTORY':
id_type_both = await self.has_id_type_both(grp['gr_gid'])
case _:
id_type_both = False
return {
'id': BASE_SYNTHETIC_DATASTORE_ID + grp['gr_gid'],
'gid': grp['gr_gid'],
'name': grp['gr_name'],
'group': grp['gr_name'],
'builtin': False,
'sudo_commands': [],
'sudo_commands_nopasswd': [],
'users': [],
'local': False,
'id_type_both': id_type_both,
'roles': [],
'smb': True,
'sid': sid
}


# ---- truenas_middleware/src/middlewared/middlewared/plugins/sysdataset.py ----

import errno
import json
import os
import shutil
import subprocess
import threading
import uuid
from contextlib import contextmanager, suppress
from pathlib import Path
import middlewared.sqlalchemy as sa
from middlewared.plugins.boot import BOOT_POOL_NAME_VALID
from middlewared.plugins.system_dataset.hierarchy import get_system_dataset_spec
from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
from middlewared.schema import accepts, Bool, Dict, Int, returns, Str
from middlewared.service import CallError, ConfigService, ValidationErrors, job, private
from middlewared.service_exception import InstanceNotFound
from middlewared.utils import filter_list, MIDDLEWARE_RUN_DIR
from middlewared.utils.directoryservices.constants import DSStatus, DSType
from middlewared.utils.size import format_size
from middlewared.utils.tdb import close_sysdataset_tdb_handles
class SystemDatasetModel(sa.Model):
__tablename__ = 'system_systemdataset'
id = sa.Column(sa.Integer(), primary_key=True)
sys_pool = sa.Column(sa.String(1024))
sys_uuid = sa.Column(sa.String(32))
sys_uuid_b = sa.Column(sa.String(32), nullable=True)
class SystemDatasetService(ConfigService):
class Config:
datastore = 'system.systemdataset'
datastore_extend = 'systemdataset.config_extend'
datastore_prefix = 'sys_'
cli_namespace = 'system.system_dataset'
ENTRY = Dict(
'systemdataset_entry',
Int('id', required=True),
Str('pool', required=True),
Bool('pool_set', required=True),
Str('uuid', required=True),
Str('basename', required=True),
Str('path', required=True, null=True),
)
force_pool = None
sysdataset_release_lock = threading.Lock()
@private
def sysdataset_path(self, expected_datasetname=None):
"""
This function returns either None or SYSDATASET_PATH,
and is called potentially quite frequently (once per ZFS event
or pool.dataset.query, etc).
Best case scenario we have one cache lookup and one statvfs() call.
Worst case, a mount_info lookup is added to the mix.
`None` indicates that there was an issue with filesystem mounted
at SYSDATASET_PATH. Typically this could indicate a failed migration
of system dataset or problem importing expected pool for system dataset.
        The heuristic for detecting a wrong path is to first check the result we
        cached the last time we checked the path. If the dataset name matches, then
        perform a statvfs() on SYSDATASET_PATH to verify that the FSID matches.
        If we lack a cache entry, then look up SYSDATASET_PATH in mountinfo
        and make sure the two match. If they don't, None is returned.
        If the mountinfo and expected value match, cache the fsid and dataset name.
"""
if expected_datasetname is None:
db_pool = self.middleware.call_sync(
'datastore.config',
'system.systemdataset'
)['sys_pool']
pool = self.force_pool or db_pool or self.middleware.call_sync('boot.pool_name')
ds_name = f'{pool}/.system'
else:
ds_name = expected_datasetname
try:
cached_entry = self.middleware.call_sync('cache.get', 'SYSDATASET_PATH')
except KeyError:
cached_entry = None
try:
fsid = os.statvfs(SYSDATASET_PATH).f_fsid
except FileNotFoundError:
# SYSDATASET_PATH may not exist on first boot. Do not log.
return None
except OSError:
self.logger.warning('Failed to stat sysdataset fd', exc_info=True)
return None
if cached_entry and cached_entry['dataset'] == ds_name:
if fsid == cached_entry['fsid']:
return SYSDATASET_PATH
mntinfo = self.middleware.call_sync(
'filesystem.mount_info',
[['mountpoint', '=', SYSDATASET_PATH]]
)
if not mntinfo:
self.logger.warning('%s: mountpoint not found', SYSDATASET_PATH)
return None
if mntinfo[0]['mount_source'] != ds_name:
self.logger.warning('Unexpected dataset mounted at %s, %r present, but %r expected. fsid: %d',
SYSDATASET_PATH, mntinfo[0]['mount_source'], ds_name, fsid)
return None
self.middleware.call_sync('cache.put', 'SYSDATASET_PATH', {'dataset': ds_name, 'fsid': fsid})
return SYSDATASET_PATH
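    # A minimal standalone sketch of the FSID check used above, assuming a
    # literal path rather than the SYSDATASET_PATH constant:
    #
    #     import os
    #     fsid = os.statvfs('/var/db/system').f_fsid
    #     # Two statvfs() calls on the same mounted filesystem return the same
    #     # f_fsid, so a cached (dataset, fsid) pair can cheaply validate that
    #     # the expected dataset is still what is mounted at the path.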
@private
async def config_extend(self, config):
# Treat empty system dataset pool as boot pool
config['pool_set'] = bool(config['pool'])
config['pool'] = self.force_pool or config['pool'] or await self.middleware.call('boot.pool_name')
config['basename'] = f'{config["pool"]}/.system'
# Make `uuid` point to the uuid of current node
uuid_key = 'uuid'
if await self.middleware.call('failover.node') == 'B':
uuid_key = 'uuid_b'
config['uuid'] = config['uuid_b']
del config['uuid_b']
if not config['uuid']:
config['uuid'] = uuid.uuid4().hex
await self.middleware.call(
'datastore.update', 'system.systemdataset', config['id'], {uuid_key: config['uuid']}, {'prefix': 'sys_'}
)
config['path'] = await self.middleware.run_in_thread(self.sysdataset_path, config['basename'])
return config
@private
async def ensure_standby_uuid(self):
remote_uuid_key = 'uuid_b'
if await self.middleware.call('failover.node') == 'B':
remote_uuid_key = 'uuid'
local_config = await self.middleware.call('datastore.config', 'system.systemdataset', {'prefix': 'sys_'})
if local_config[remote_uuid_key]:
self.logger.debug('We already know the standby controller system dataset UUID')
return
remote_config = await self.middleware.call(
'failover.call_remote', 'datastore.config', ['system.systemdataset', {'prefix': 'sys_'}],
)
if not remote_config[remote_uuid_key]:
self.logger.warning('Standby controller does not yet have the system dataset UUID')
return
self.logger.info(f'Setting {remote_uuid_key}={remote_config[remote_uuid_key]!r}')
await self.middleware.call(
'datastore.update',
'system.systemdataset',
local_config['id'],
{remote_uuid_key: remote_config[remote_uuid_key]},
{'prefix': 'sys_'},
)
@private
async def is_boot_pool(self):
pool = (await self.config())['pool']
if not pool:
raise CallError('System dataset pool is not set. This may prevent '
'system services from functioning properly.')
return pool in BOOT_POOL_NAME_VALID
@accepts(Bool('include_current_pool', default=True))
@returns(Dict('systemdataset_pool_choices', additional_attrs=True))
async def pool_choices(self, include_current_pool):
"""
Retrieve pool choices which can be used for configuring system dataset.
"""
boot_pool = await self.middleware.call('boot.pool_name')
current_pool = (await self.config())['pool']
valid_pools = await self.query_pools_names_for_system_dataset()
pools = [boot_pool]
if include_current_pool:
pools.append(current_pool)
pools.extend(valid_pools)
return {
p: p for p in sorted(set(pools))
}
@private
async def _post_setup_service_restart(self):
await self.middleware.call('smb.setup_directories')
# The following should be backgrounded since they may be quite
# long-running.
await self.middleware.call('smb.configure', False)
@accepts(Dict(
'sysdataset_update',
Str('pool', null=True),
Str('pool_exclude', null=True),
update=True
))
@job(lock='sysdataset_update')
async def do_update(self, job, data):
"""
Update System Dataset Service Configuration.
`pool` is the name of a valid pool configured in the system which will be used to host the system dataset.
`pool_exclude` can be specified to make sure that we don't place the system dataset on that pool if `pool`
is not provided.
"""
data.setdefault('pool_exclude', None)
config = await self.config()
new = config.copy()
new.update(data)
verrors = ValidationErrors()
if new['pool'] != config['pool']:
system_ready = await self.middleware.call('system.ready')
ds = await self.middleware.call('directoryservices.status')
if system_ready and ds['type'] == DSType.AD.value and ds['status'] == DSStatus.HEALTHY.name:
verrors.add(
'sysdataset_update.pool',
'System dataset location may not be moved while the Active Directory service is enabled.',
errno.EPERM
)
if new['pool']:
if error := await self.destination_pool_error(new['pool']):
verrors.add('sysdataset_update.pool', error)
if new['pool']:
if new['pool'] not in await self.pool_choices(False):
verrors.add(
'sysdataset_update.pool',
'The system dataset cannot be placed on this pool.'
)
else:
for pool in await self.query_pools_names_for_system_dataset(data['pool_exclude']):
if await self.destination_pool_error(pool):
continue
new['pool'] = pool
break
else:
# If a data pool could not be found, reset it to blank
# Which will eventually mean its back to boot pool (temporarily)
new['pool'] = ''
verrors.check()
update_dict = {k: v for k, v in new.items() if k in ['pool']}
await self.middleware.call(
'datastore.update',
'system.systemdataset',
config['id'],
update_dict,
{'prefix': 'sys_'}
)
new = await self.config()
if config['pool'] != new['pool']:
await self.middleware.call('systemdataset.migrate', config['pool'], new['pool'])
await self.middleware.call('systemdataset.setup', data['pool_exclude'])
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'MASTER':
try:
await self.middleware.call(
'failover.call_remote', 'system.reboot', ['Failover system dataset change'],
)
except Exception as e:
self.logger.debug('Failed to reboot standby storage controller after system dataset change: %s', e)
return await self.config()
@private
async def destination_pool_error(self, new_pool):
config = await self.config()
try:
existing_dataset = await self.middleware.call('zfs.dataset.get_instance', config['basename'])
except InstanceNotFound:
return
used = existing_dataset['properties']['used']['parsed']
try:
new_dataset = await self.middleware.call('zfs.dataset.get_instance', new_pool)
except InstanceNotFound:
return f'Dataset {new_pool} does not exist'
available = new_dataset['properties']['available']['parsed']
        # 1.1 is a safety margin because the same files won't take exactly the same
        # amount of space on a different pool
used = int(used * 1.1)
if available < used:
return (
f'Insufficient disk space available on {new_pool} ({format_size(available)}). '
f'Need {format_size(used)}'
)
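    # Worked example (hedged, illustrative numbers): if the existing system
    # dataset uses 1 GiB, the check above requires int(1 GiB * 1.1), roughly
    # 1.1 GiB, of available space on the destination pool before allowing a move.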
@accepts(Str('exclude_pool', default=None, null=True))
@private
def setup(self, exclude_pool):
self.middleware.call_hook_sync('sysdataset.setup', data={'in_progress': True})
try:
return self.setup_impl(exclude_pool)
finally:
self.middleware.call_hook_sync('sysdataset.setup', data={'in_progress': False})
@private
def setup_impl(self, exclude_pool):
self.force_pool = None
config = self.middleware.call_sync('systemdataset.config')
boot_pool = self.middleware.call_sync('boot.pool_name')
# If the system dataset is configured in a data pool we need to make sure it exists.
# In case it does not we need to use another one.
filters = [('name', '=', config['pool'])]
if config['pool'] != boot_pool and not self.middleware.call_sync('pool.query', filters):
self.logger.debug('Pool %r does not exist, moving system dataset to another pool', config['pool'])
job = self.middleware.call_sync('systemdataset.update', {'pool': None, 'pool_exclude': exclude_pool})
job.wait_sync()
if job.error:
raise CallError(job.error)
return
# If we dont have a pool configured in the database try to find the first data pool
# to put it on.
if not config['pool_set']:
if pool := self.query_pool_for_system_dataset(exclude_pool):
self.logger.debug('Sysdataset pool was not set, moving it to first available pool %r', pool['name'])
job = self.middleware.call_sync('systemdataset.update', {'pool': pool['name']})
job.wait_sync()
if job.error:
raise CallError(job.error)
self.middleware.call_sync('systemdataset._post_setup_service_restart')
return
mntinfo = self.middleware.call_sync('filesystem.mount_info')
if config['pool'] != boot_pool:
if not any(filter_list(mntinfo, [['mount_source', '=', config['pool']]])):
ds = self.middleware.call_sync('zfs.dataset.query', [['id', '=', config['basename']]])
if not ds:
# Pool is not mounted (e.g. HA node B), temporary set up system dataset on the boot pool
msg = 'Root dataset for pool %r is not available, and dataset %r does not exist, '
msg += 'temporarily setting up system dataset on boot pool'
self.logger.debug(msg, config['pool'], config['basename'])
self.force_pool = boot_pool
config = self.middleware.call_sync('systemdataset.config')
elif ds[0]['encrypted'] and ds[0]['locked'] and ds[0]['key_format']['value'] != 'PASSPHRASE':
self.logger.debug(
'Root dataset for pool %r is not available, temporarily setting up system dataset on boot pool',
config['pool'],
)
self.force_pool = boot_pool
config = self.middleware.call_sync('systemdataset.config')
else:
self.logger.debug('Root dataset for pool %r is not available, but system dataset may be manually '
'mounted. Proceeding with normal setup.', config['pool'])
mounted_pool = mounted = None
sysds_mntinfo = filter_list(mntinfo, [['mountpoint', '=', '/var/db/system']])
if sysds_mntinfo:
mounted_pool = sysds_mntinfo[0]['mount_source'].split('/')[0]
if mounted_pool and mounted_pool.split('/')[0] != config['pool']:
self.logger.debug('Abandoning dataset on %r in favor of %r', mounted_pool, config['pool'])
with self.release_system_dataset():
self.__umount(mounted_pool, config['uuid'])
self.middleware.call_sync('systemdataset.setup_datasets', config['pool'], config['uuid'])
mounted = self.__mount(config['pool'], config['uuid'])
else:
self.middleware.call_sync('systemdataset.setup_datasets', config['pool'], config['uuid'])
# refresh our mountinfo in case it changed
mntinfo = self.middleware.call_sync('filesystem.mount_info')
sysds_mntinfo = filter_list(mntinfo, [['mountpoint', "=", SYSDATASET_PATH]])
if not os.path.isdir(SYSDATASET_PATH) and os.path.exists(SYSDATASET_PATH):
os.unlink(SYSDATASET_PATH)
os.makedirs(SYSDATASET_PATH, mode=0o755, exist_ok=True)
ds_mntinfo = filter_list(mntinfo, [['mount_source', '=', config['basename']]])
if ds_mntinfo:
acl_enabled = 'POSIXACL' in ds_mntinfo[0]['super_opts'] or 'NFSV4ACL' in ds_mntinfo[0]['super_opts']
else:
ds = self.middleware.call_sync('zfs.dataset.query', [('id', '=', config['basename'])])
acl_enabled = ds and ds[0]['properties']['acltype']['value'] != 'off'
if acl_enabled:
self.middleware.call_sync(
'zfs.dataset.update', config['basename'], {'properties': {'acltype': {'value': 'off'}}}
)
if mounted is None:
mounted = self.__mount(config['pool'], config['uuid'])
corepath = f'{SYSDATASET_PATH}/cores'
if os.path.exists(corepath):
if self.middleware.call_sync('keyvalue.get', 'run_migration', False):
try:
cores = Path(corepath)
for corefile in cores.iterdir():
corefile.unlink()
except Exception:
self.logger.warning("Failed to clear old core files.", exc_info=True)
subprocess.run(['umount', '/var/lib/systemd/coredump'], check=False)
os.makedirs('/var/lib/systemd/coredump', exist_ok=True)
subprocess.run(['mount', '--bind', corepath, '/var/lib/systemd/coredump'])
if mounted:
self.middleware.call_sync('systemdataset._post_setup_service_restart')
return self.middleware.call_sync('systemdataset.config')
@private
def query_pool_for_system_dataset(self, exclude_pool):
for p in self.middleware.call_sync('zfs.pool.query_imported_fast').values():
if exclude_pool and p['name'] == exclude_pool:
continue
ds = self.middleware.call_sync(
'pool.dataset.query',
[['id', '=', p['name']]],
{'extra': {'retrieve_children': False}}
)
if not ds:
continue
if not ds[0]['encrypted'] or not ds[0]['locked'] or ds[0]['key_format']['value'] == 'PASSPHRASE':
return p
@private
async def query_pools_names_for_system_dataset(self, exclude_pool=None):
"""
Pools with passphrase-locked root level datasets are permitted as system
dataset targets. This is because ZFS encryption is at the dataset level
        rather than the pool level, and we use a legacy mount for the system dataset.
Key format is only exposed via libzfs and so reading mountinfo here is
insufficient.
"""
pools = []
for p in (await self.middleware.call('zfs.pool.query_imported_fast')).values():
if exclude_pool and p['name'] == exclude_pool:
continue
ds = await self.middleware.call(
'pool.dataset.query',
[['id', '=', p['name']]],
{'extra': {'retrieve_children': False}}
)
if not ds:
continue
if not ds[0]['encrypted'] or not ds[0]['locked'] or ds[0]['key_format']['value'] == 'PASSPHRASE':
pools.append(p['name'])
return pools
@private
async def setup_datasets(self, pool, uuid):
"""
Make sure system datasets for `pool` exist and have the right mountpoint property
"""
boot_pool = await self.middleware.call('boot.pool_name')
root_dataset_is_passphrase_encrypted = (
pool != boot_pool and
(await self.middleware.call('pool.dataset.get_instance', pool))['key_format']['value'] == 'PASSPHRASE'
)
datasets = {i['name']: i for i in get_system_dataset_spec(pool, uuid)}
datasets_prop = {
i['id']: i['properties']
for i in await self.middleware.call('zfs.dataset.query', [('id', 'in', list(datasets))])
}
for dataset, config in datasets.items():
props = config['props']
# Disable encryption for pools with passphrase-encrypted root datasets so that system dataset could be
# automatically mounted on system boot.
if root_dataset_is_passphrase_encrypted:
props['encryption'] = 'off'
is_cores_ds = dataset.endswith('/cores')
if is_cores_ds:
props['quota'] = '1G'
if dataset not in datasets_prop:
await self.middleware.call('zfs.dataset.create', {
'name': dataset,
'properties': props,
})
elif is_cores_ds and datasets_prop[dataset]['used']['parsed'] >= 1024 ** 3:
try:
await self.middleware.call('zfs.dataset.delete', dataset, {'force': True, 'recursive': True})
await self.middleware.call('zfs.dataset.create', {
'name': dataset,
'properties': props,
})
except Exception:
self.logger.warning("Failed to replace dataset [%s].", dataset, exc_info=True)
else:
update_props_dict = {
k: {'value': v} for k, v in props.items()
if datasets_prop[dataset][k]['value'] != v
}
if update_props_dict:
await self.middleware.call(
'zfs.dataset.update',
dataset,
{'properties': update_props_dict},
)
try:
await self.middleware.run_in_thread(self.__create_relevant_paths, config.get('create_paths', []))
except Exception:
self.logger.error('Failed to create relevant paths for %r', dataset, exc_info=True)
def __create_relevant_paths(self, create_paths):
for create_path_config in create_paths:
os.makedirs(create_path_config['path'], exist_ok=True)
cpath_stat = os.stat(create_path_config['path'])
if all(create_path_config[k] for k in ('uid', 'gid')) and (
cpath_stat.st_uid != create_path_config['uid'] or cpath_stat.st_gid != create_path_config['gid']
):
os.chown(create_path_config['path'], create_path_config['uid'], create_path_config['gid'])
def __mount(self, pool, uuid, path=SYSDATASET_PATH):
"""
Mount group of datasets associated with our system dataset.
`path` will be either SYSDATASET_PATH or temp dir in the middlewared
rundir. The latter occurs when migrating dataset between pools.
"""
mounted = False
for ds_config in get_system_dataset_spec(pool, uuid):
dataset, name = ds_config['name'], os.path.basename(ds_config['name'])
mountpoint = ds_config.get('mountpoint', f'{SYSDATASET_PATH}/{name}').replace(SYSDATASET_PATH, path)
if os.path.ismount(mountpoint):
continue
with suppress(FileExistsError):
os.mkdir(mountpoint)
subprocess.run(['mount', '-t', 'zfs', dataset, mountpoint], check=True)
chown_config = ds_config['chown_config']
mode_perms = chown_config.pop('mode')
mountpoint_stat = os.stat(mountpoint)
if mountpoint_stat.st_uid != chown_config['uid'] or mountpoint_stat.st_gid != chown_config['gid']:
os.chown(mountpoint, **chown_config)
if (mountpoint_stat.st_mode & 0o777) != mode_perms:
os.chmod(mountpoint, mode_perms)
mounted = True
if path == SYSDATASET_PATH:
self.__post_mount_actions(ds_config['name'], ds_config.get('post_mount_actions', []))
if mounted and path == SYSDATASET_PATH:
fsid = os.statvfs(SYSDATASET_PATH).f_fsid
self.middleware.call_sync('cache.put', 'SYSDATASET_PATH', {'dataset': f'{path}/.system', 'fsid': fsid})
return mounted
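    # The per-dataset mount performed above is equivalent to the hedged shell
    # sketch below (dataset and mountpoint names are illustrative):
    #
    #     mount -t zfs tank/.system/example /var/db/system/example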
def __post_mount_actions(self, ds_name, actions):
for action in actions:
try:
self.middleware.call_sync(action['method'], *action.get('args', []))
except Exception:
self.logger.error(
'Failed to run post mount action %r endpoint for %r dataset',
action['method'], ds_name, exc_info=True,
)
else:
self.logger.info(
'Successfully ran post mount action %r endpoint for %r dataset', action['method'], ds_name
)
def __umount(self, pool, uuid, retry=True):
"""
Umount the group of datasets associated with the system dataset.
When migrating between system datasets, `pool` will be filesystem
mounted in middleware rundir for one of the umount calls.
This is why mount info is checked before manipulating sysdataset_path.
"""
current = self.middleware.call_sync('filesystem.mount_info', [['mountpoint', '=', SYSDATASET_PATH]])
if current and current[0]['mount_source'].split('/')[0] == pool:
try:
self.middleware.call_sync('cache.pop', 'SYSDATASET_PATH')
except KeyError:
pass
if not (mntinfo := self.middleware.call_sync('filesystem.mount_info', [['mount_source', '=', f'{pool}/.system']])):
# Pool's system dataset not mounted
return
mp = mntinfo[0]['mountpoint']
if retry:
flags = '-f' if not self.middleware.call_sync('failover.licensed') else '-l'
else:
# We're doing a retry and have logged a warning message pointing fingers
# at offending processes so that a dev can hopefully fix it later on.
flags = '-lf'
try:
subprocess.run(['umount', flags, '--recursive', mp], check=True, capture_output=True)
except subprocess.CalledProcessError as e:
stderr = e.stderr.decode()
            if 'no mount point specified' in stderr:
                # nothing is mounted at the path, so treat it as already unmounted
                return
error = f'Unable to umount {mp}: {stderr}'
if 'target is busy' in stderr:
# error message is of format "umount: <mountpoint>: target is busy"
ds_mp = stderr.split(':')[1].strip()
processes = self.middleware.call_sync('pool.dataset.processes_using_paths', [ds_mp], True, True)
if retry:
self.logger.warning("The following processes are using %s: %s",
ds_mp, json.dumps(processes, indent=2))
return self.__umount(pool, uuid, False)
error += f'\nThe following processes are using {ds_mp!r}: ' + json.dumps(processes, indent=2)
raise CallError(error) from None
@private
def migrate(self, _from, _to):
"""
Migrate system dataset to a new pool. If it is moving from
an existing pool, then the new datasets are mounted in
        the middleware rundir temporarily so that data can be
rsynced from the old pool.
"""
config = self.middleware.call_sync('systemdataset.config')
os.makedirs(SYSDATASET_PATH, mode=0o755, exist_ok=True)
self.middleware.call_sync('systemdataset.setup_datasets', _to, config['uuid'])
if _from:
            path = f'{MIDDLEWARE_RUN_DIR}/system.new'
            if not os.path.exists(path):
                os.mkdir(path)
else:
# Make sure we clean up any previous attempts
subprocess.run(['umount', '-R', path], check=False)
else:
path = SYSDATASET_PATH
self.__mount(_to, config['uuid'], path=path)
# context manager handles service stop / restart
with self.release_system_dataset():
if _from:
cp = subprocess.run(
['rsync', '-az', f'{SYSDATASET_PATH}/', f'{MIDDLEWARE_RUN_DIR}/system.new'],
check=False,
capture_output=True
)
if cp.returncode == 0:
# Let's make sure that we don't have coredump directory mounted
subprocess.run(['umount', '/var/lib/systemd/coredump'], check=False)
self.__umount(_from, config['uuid'])
self.__umount(_to, config['uuid'])
self.__mount(_to, config['uuid'], SYSDATASET_PATH)
proc = subprocess.Popen(f'zfs list -H -o name {_from}/.system|xargs zfs destroy -r', shell=True)
proc.communicate()
os.rmdir(f'{MIDDLEWARE_RUN_DIR}/system.new')
else:
raise CallError(f'Failed to rsync from {SYSDATASET_PATH}: {cp.stderr.decode()}')
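    # The migration above, as a hedged step-by-step sketch (pool names and the
    # shell forms are illustrative):
    #
    #     1. create the .system datasets on the destination pool
    #     2. mount them at $MIDDLEWARE_RUN_DIR/system.new
    #     3. rsync -az /var/db/system/ $MIDDLEWARE_RUN_DIR/system.new
    #     4. unmount both the old datasets and the temporary mounts
    #     5. remount the destination datasets at /var/db/system
    #     6. zfs destroy -r <old_pool>/.system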
@contextmanager
@private
def release_system_dataset(self):
"""
This context manager is used to toggle system-dataset dependent services and
tasks for cases where the dataset is unmounted / remounted.
The operations are performed under a lock because systemdataset.update() and
systemdataset.setup() both can lead to this being called, and we don't want
simultaneous releases of system dataset.
"""
with self.sysdataset_release_lock:
# TODO: Review these services because /var/log no longer sits on
# the system dataset so any service that could potentially open
# a file descriptor underneath /var/log will no longer need to be
# stopped/restarted to allow the system dataset to migrate
restart = ['netdata']
if self.middleware.call_sync('service.started', 'nfs'):
restart.append('nfs')
if self.middleware.call_sync('service.started', 'cifs'):
restart.insert(0, 'cifs')
if self.middleware.call_sync('service.started', 'open-vm-tools'):
restart.append('open-vm-tools')
if self.middleware.call_sync('service.started', 'idmap'):
restart.append('idmap')
if self.middleware.call_sync('service.started', 'nmbd'):
restart.append('nmbd')
if self.middleware.call_sync('service.started', 'wsdd'):
restart.append('wsdd')
try:
for i in restart:
self.middleware.call_sync('service.stop', i)
close_sysdataset_tdb_handles()
yield
finally:
restart.reverse()
for i in restart:
self.middleware.call_sync('service.start', i)
@private
def get_system_dataset_spec(self, pool, uid):
return get_system_dataset_spec(pool, uid)
async def pool_post_create(middleware, pool):
if (await middleware.call('systemdataset.config'))['pool'] == await middleware.call('boot.pool_name'):
await middleware.call('systemdataset.setup')
async def pool_post_import(middleware, pool):
"""
On pool import we may need to reconfigure system dataset.
"""
await middleware.call('systemdataset.setup')
async def pool_pre_export(middleware, pool, options, job):
sysds = await middleware.call('systemdataset.config')
if sysds['pool'] == pool:
job.set_progress(40, 'Reconfiguring system dataset')
sysds_job = await middleware.call('systemdataset.update', {
'pool': None, 'pool_exclude': pool,
})
await sysds_job.wait()
if sysds_job.error:
            raise CallError(f'This pool contains the system dataset, but its reconfiguration failed: {sysds_job.error}')
async def setup(middleware):
def setup_paths():
os.makedirs(SYSDATASET_PATH, mode=0o755, exist_ok=True)
if not os.path.exists('/var/cache/nscd') or not os.path.islink('/var/cache/nscd'):
if os.path.exists('/var/cache/nscd'):
shutil.rmtree('/var/cache/nscd')
os.makedirs('/var/run/nscd/cache', exist_ok=True)
if not os.path.islink('/var/cache/nscd'):
os.symlink('/var/run/nscd/cache', '/var/cache/nscd')
middleware.register_hook('pool.post_create', pool_post_create)
# Reconfigure system dataset first thing after we import a pool.
middleware.register_hook('pool.post_import', pool_post_import, order=-10000)
middleware.register_hook('pool.pre_export', pool_pre_export, order=40, raise_error=True)
try:
await middleware.run_in_thread(setup_paths)
except Exception:
middleware.logger.error('Error moving cache away from boot pool', exc_info=True)


# ---- truenas_middleware/src/middlewared/middlewared/plugins/support.py ----

import asyncio
import errno
import json
import shutil
import tempfile
import time
import aiohttp
import async_timeout
import requests
from middlewared.pipe import Pipes
from middlewared.plugins.system.utils import DEBUG_MAX_SIZE
from middlewared.schema import accepts, Bool, Dict, Int, List, Password, returns, Str
from middlewared.service import CallError, ConfigService, job, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.network import INTERNET_TIMEOUT
from middlewared.validators import Email
from middlewared.utils import PRODUCT
ADDRESS = 'support-proxy.ixsystems.com'
async def post(url, data, timeout=INTERNET_TIMEOUT):
try:
async with async_timeout.timeout(timeout):
async with aiohttp.ClientSession(
raise_for_status=True, trust_env=True,
) as session:
req = await session.post(url, headers={"Content-Type": "application/json"}, data=data)
except asyncio.TimeoutError:
raise CallError('Connection timed out', errno.ETIMEDOUT)
except aiohttp.ClientResponseError as e:
raise CallError(f'Invalid proxy server response: {e}', errno.EBADMSG)
try:
return await req.json()
except aiohttp.client_exceptions.ContentTypeError:
raise CallError('Invalid proxy server response', errno.EBADMSG)
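# Hedged usage sketch of the helper above (the query payload is hypothetical,
# but the endpoint path matches the one used by similar_issues below):
#
#     data = await post(f'https://{ADDRESS}/freenas/api/v1.0/similar_issues',
#                       data=json.dumps({'query': 'zpool degraded'}))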
class SupportModel(sa.Model):
__tablename__ = 'system_support'
id = sa.Column(sa.Integer(), primary_key=True)
enabled = sa.Column(sa.Boolean(), nullable=True, default=True)
name = sa.Column(sa.String(200))
title = sa.Column(sa.String(200))
email = sa.Column(sa.String(200))
phone = sa.Column(sa.String(200))
secondary_name = sa.Column(sa.String(200))
secondary_title = sa.Column(sa.String(200))
secondary_email = sa.Column(sa.String(200))
secondary_phone = sa.Column(sa.String(200))
class SupportService(ConfigService):
class Config:
datastore = 'system.support'
cli_namespace = 'system.support'
role_prefix = 'SUPPORT'
ENTRY = Dict(
'support_entry',
Bool('enabled', null=True, required=True),
Str('name', required=True),
Str('title', required=True),
Str('email', required=True),
Str('phone', required=True),
Str('secondary_name', required=True),
Str('secondary_title', required=True),
Str('secondary_email', required=True),
Str('secondary_phone', required=True),
Int('id', required=True),
)
async def do_update(self, data):
"""
Update Proactive Support settings.
"""
config_data = await self.config()
config_data.update(data)
verrors = ValidationErrors()
if config_data['enabled']:
for key in ['name', 'title', 'email', 'phone']:
for prefix in ['', 'secondary_']:
field = prefix + key
if not config_data[field]:
verrors.add(f'support_update.{field}', 'This field is required')
verrors.check()
await self.middleware.call(
'datastore.update',
self._config.datastore,
config_data['id'],
config_data,
)
return await self.config()
@accepts(roles=['SUPPORT_READ'])
@returns(Bool('proactive_support_is_available'))
async def is_available(self):
"""
Returns whether Proactive Support is available for this product type and current license.
"""
if await self.middleware.call('system.vendor.name'):
return False
if not await self.middleware.call('system.is_enterprise'):
return False
license_ = await self.middleware.call('system.license')
if license_ is None:
return False
return license_['contract_type'] in ['SILVER', 'GOLD']
@accepts(roles=['SUPPORT_READ'])
@returns(Bool('proactive_support_is_available_and_enabled'))
async def is_available_and_enabled(self):
"""
Returns whether Proactive Support is available and enabled.
"""
return await self.is_available() and (await self.config())['enabled']
@accepts(roles=['SUPPORT_READ'])
@returns(List('support_fields', items=[List('support_field', items=[Str('field')])]))
async def fields(self):
"""
Returns list of pairs of field names and field titles for Proactive Support.
"""
return [
['name', 'Contact Name'],
['title', 'Contact Title'],
['email', 'Contact E-mail'],
['phone', 'Contact Phone'],
['secondary_name', 'Secondary Contact Name'],
['secondary_title', 'Secondary Contact Title'],
['secondary_email', 'Secondary Contact E-mail'],
['secondary_phone', 'Secondary Contact Phone'],
]
@accepts(Str('query'), roles=['SUPPORT_READ'])
@returns(List('similar_issues', items=[Dict(
'similar_issue',
Str('url'),
Str('summary'),
additional_attrs=True,
)]))
async def similar_issues(self, query):
await self.middleware.call('network.general.will_perform_activity', 'support')
data = await post(
f'https://{ADDRESS}/freenas/api/v1.0/similar_issues',
data=json.dumps({
'query': query,
}),
)
if 'error' in data:
raise CallError(data['message'], errno.EINVAL)
return data
@accepts(Dict(
'new_ticket',
Str('title', required=True, max_length=200),
Str('body', required=True, max_length=20000),
Str('category'),
Bool('attach_debug', default=False),
Password('token'),
Str('type', enum=['BUG', 'FEATURE']),
Str('criticality'),
Str('environment', max_length=None),
Str('phone'),
Str('name'),
Str('email', validators=[Email()]),
List('cc', items=[Str('email', validators=[Email()])])
), roles=['SUPPORT_WRITE', 'READONLY_ADMIN'])
@returns(Dict(
'new_ticket_response',
Int('ticket', null=True),
Str('url', null=True),
Bool('has_debug'),
register=True
))
@job()
async def new_ticket(self, job, data):
"""
Creates a new ticket for support.
This is done using the support proxy API.
For TrueNAS SCALE it will be created on JIRA and for TrueNAS SCALE Enterprise on Salesforce.
For SCALE `criticality`, `environment`, `phone`, `name` and `email` attributes are not required.
For SCALE Enterprise `token` and `type` attributes are not required.
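
        .. examples(websocket)::

          A hedged example request; every field value below is hypothetical.

          :::javascript
          {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "support.new_ticket",
              "params": [{
                  "title": "Example ticket",
                  "body": "Ticket body text",
                  "attach_debug": false,
                  "token": "hypothetical-api-token",
                  "type": "BUG"
              }]
          }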
"""
vendor = await self.middleware.call('system.vendor.name')
if vendor:
raise CallError(f'Support is not available for this product ({vendor})', errno.EINVAL)
await self.middleware.call('network.general.will_perform_activity', 'support')
job.set_progress(1, 'Gathering data')
sw_name = 'freenas' if not await self.middleware.call('system.is_enterprise') else 'truenas'
if sw_name == 'freenas':
required_attrs = ('type', 'token')
else:
required_attrs = ('category', 'phone', 'name', 'email', 'criticality', 'environment')
data['serial'] = (await self.middleware.call('system.dmidecode_info'))['system-serial-number']
license_ = await self.middleware.call('system.license')
if license_:
data['company'] = license_['customer_name']
else:
data['company'] = 'Unknown'
for i in required_attrs:
if i not in data:
raise CallError(f'{i} is required', errno.EINVAL)
data['version'] = f'{PRODUCT}-{await self.middleware.call("system.version_short")}'
debug = data.pop('attach_debug')
type_ = data.get('type')
if type_:
data['type'] = type_.lower()
job.set_progress(20, 'Submitting ticket')
result = await post(
f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket',
data=json.dumps(data),
)
if result['error']:
raise CallError(result['message'], errno.EINVAL)
ticket = result.get('ticketnum')
url = result.get('message')
if not ticket:
            raise CallError('New ticket number was not returned', errno.EINVAL)
job.set_progress(50, f'Ticket created: {ticket}', extra={'ticket': ticket})
has_debug = False
if debug:
job.set_progress(60, 'Generating debug file')
debug_job = await self.middleware.call(
'system.debug', pipes=Pipes(output=self.middleware.pipe()),
)
if await self.middleware.call('failover.licensed'):
debug_name = 'debug-{}.tar'.format(time.strftime('%Y%m%d%H%M%S'))
else:
debug_name = 'debug-{}-{}.txz'.format(
(await self.middleware.call('system.hostname')).split('.')[0],
time.strftime('%Y%m%d%H%M%S'),
)
with tempfile.NamedTemporaryFile("w+b") as f:
def copy1():
nonlocal has_debug
try:
rbytes = 0
while True:
r = debug_job.pipes.output.r.read(1048576)
if r == b'':
break
rbytes += len(r)
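                            # DEBUG_MAX_SIZE is in MiB; abort (has_debug stays False) if exceeded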
if rbytes > DEBUG_MAX_SIZE * 1048576:
return
f.write(r)
f.seek(0)
has_debug = True
finally:
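                        # drain any remaining debug output so the pipe writer can finish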
debug_job.pipes.output.r.read()
await self.middleware.run_in_thread(copy1)
await debug_job.wait()
if has_debug:
job.set_progress(80, 'Attaching debug file')
t = {
'ticket': ticket,
'filename': debug_name,
}
if 'token' in data:
t['token'] = data['token']
tjob = await self.middleware.call(
'support.attach_ticket', t, pipes=Pipes(input_=self.middleware.pipe()),
)
def copy2():
try:
shutil.copyfileobj(f, tjob.pipes.input.w)
finally:
tjob.pipes.input.w.close()
await self.middleware.run_in_thread(copy2)
await tjob.wait()
else:
job.set_progress(100)
return {
'ticket': ticket,
'url': url,
'has_debug': has_debug,
}
@accepts(Dict(
'attach_ticket',
Int('ticket', required=True),
Str('filename', required=True, max_length=None),
Password('token'),
), roles=['SUPPORT_WRITE', 'READONLY_ADMIN'])
@returns()
@job(pipes=["input"])
def attach_ticket(self, job, data):
"""
        Method to attach a file to an existing ticket.
"""
self.middleware.call_sync('network.general.will_perform_activity', 'support')
sw_name = 'freenas' if not self.middleware.call_sync('system.is_enterprise') else 'truenas'
data['ticketnum'] = data.pop('ticket')
filename = data.pop('filename')
try:
r = requests.post(
f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket/attachment',
data=data,
timeout=300,
files={'file': (filename, job.pipes.input.r)},
)
except requests.ConnectionError as e:
raise CallError(f'Connection error {e}', errno.EBADF)
except requests.Timeout:
            raise CallError('Connection timed out', errno.ETIMEDOUT)
try:
data = r.json()
except ValueError:
self.logger.debug(f'Failed to decode ticket attachment response: {r.text}')
raise CallError('Invalid proxy server response', errno.EBADMSG)
if data['error']:
raise CallError(data['message'], errno.EINVAL)
@accepts(roles=['SUPPORT_READ'])
@returns(Int())
async def attach_ticket_max_size(self):
"""
Returns maximum uploaded file size for `support.attach_ticket`
"""
return DEBUG_MAX_SIZE
async def setup(middleware):
await middleware.call('network.general.register_activity', 'support', 'Support')


# ---- truenas_middleware/src/middlewared/middlewared/plugins/vmware.py ----

from collections import defaultdict
from datetime import datetime
import errno
import socket
import ssl
import uuid
from middlewared.async_validators import resolve_hostname
from middlewared.schema import accepts, Any, Bool, Dict, Int, Str, Password, Patch
from middlewared.service import CallError, CRUDService, job, private, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.time_utils import utc_now
from pyVim import connect, task as VimTask
from pyVmomi import vim, vmodl
NFS_VOLUME_TYPES = ('NFS', 'NFS41')
class VMWareModel(sa.Model):
__tablename__ = 'storage_vmwareplugin'
id = sa.Column(sa.Integer(), primary_key=True)
hostname = sa.Column(sa.String(200))
username = sa.Column(sa.String(200))
password = sa.Column(sa.EncryptedText())
filesystem = sa.Column(sa.String(200))
datastore = sa.Column(sa.String(200))
state = sa.Column(sa.JSON())
class VMWareService(CRUDService):
class Config:
datastore = 'storage.vmwareplugin'
cli_namespace = 'storage.vmware'
ENTRY = Patch(
"vmware_create",
"vmware_entry",
("add", Int("id")),
("add", Dict("state", additional_attrs=True)),
)
@private
async def validate_data(self, data, schema_name):
verrors = ValidationErrors()
await resolve_hostname(self.middleware, verrors, f'{schema_name}.hostname', data['hostname'])
if data['filesystem'] not in (await self.middleware.call('pool.filesystem_choices')):
verrors.add(
f'{schema_name}.filesystem',
'Invalid ZFS filesystem'
)
datastore = data.get('datastore')
try:
ds = await self.middleware.run_in_thread(
self.get_datastores,
{
'hostname': data.get('hostname'),
'username': data.get('username'),
'password': data.get('password'),
}
)
if data.get('datastore') not in ds:
verrors.add(
f'{schema_name}.datastore',
f'Datastore "{datastore}" not found on the server'
)
except Exception as e:
verrors.add(
f'{schema_name}.datastore',
'Failed to connect: ' + str(e)
)
verrors.check()
@accepts(
Dict(
'vmware_create',
Str('datastore', required=True),
Str('filesystem', required=True),
Str('hostname', required=True),
Password('password', required=True),
Str('username', required=True),
register=True
)
)
async def do_create(self, data):
"""
Create VMWare snapshot.
`hostname` is a valid IP address / hostname of a VMWare host. When clustering, this is the vCenter server for
the cluster.
`username` and `password` are the credentials used to authorize access to the VMWare host.
`datastore` is a valid datastore name which exists on the VMWare host.
"""
await self.middleware.call('vmware.validate_data', data, 'vmware_create')
data['id'] = await self.middleware.call(
'datastore.insert',
self._config.datastore,
{**data, 'state': {'state': 'PENDING'}},
)
return await self.get_instance(data['id'])
@accepts(
Int('id', required=True),
Patch('vmware_create', 'vmware_update', ('attr', {'update': True}))
)
async def do_update(self, id_, data):
"""
Update VMWare snapshot of `id`.
"""
old = await self.get_instance(id_)
old.pop('state')
new = old.copy()
new.update(data)
await self.middleware.call('vmware.validate_data', new, 'vmware_update')
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
{**new, 'state': {'state': 'PENDING'}},
)
await self.middleware.call("alert.oneshot_delete", "VMWareLoginFailed", old["hostname"])
return await self.get_instance(id_)
@accepts(
Int('id')
)
async def do_delete(self, id_):
"""
Delete VMWare snapshot of `id`.
"""
vmsnapobj = await self.get_instance(id_)
response = await self.middleware.call(
'datastore.delete',
self._config.datastore,
id_
)
await self.middleware.call("alert.oneshot_delete", "VMWareLoginFailed", vmsnapobj["hostname"])
return response
@accepts(Dict(
'vmware-creds',
Str('hostname', required=True),
Str('username', required=True),
Str('password', private=True, required=True),
), roles=['READONLY_ADMIN'])
def get_datastores(self, data):
"""
Get datastores from VMWare.
"""
return sorted(list(self.__get_datastores(data).keys()))
@accepts(Dict(
'vmware-creds',
Str('hostname', required=True),
Str('username', required=True),
Str('password', private=True, required=True),
))
def match_datastores_with_datasets(self, data):
"""
Requests datastores from vCenter server and tries to match them with local filesystems.
Returns a list of datastores, a list of local filesystems and guessed relationship between them.
.. examples(websocket)::
:::javascript
{
"id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
"msg": "method",
"method": "vmware.match_datastores_with_datasets",
"params": [{"hostname": "10.215.7.104", "username": "root", "password": "password"}]
}
returns
{
"datastores": [
{
"name": "10.215.7.102",
"description": "NFS mount '/mnt/tank' on 10.215.7.102",
"filesystems": ["tank"]
},
{
"name": "datastore1",
"description": "mpx.vmhba0:C0:T0:L0",
"filesystems": []
},
{
"name": "zvol",
"description": "iSCSI extent naa.6589cfc000000b3f0a891a2c4e187594",
"filesystems": ["tank/vol"]
}
],
"filesystems": [
{
"type": "FILESYSTEM",
"name": "tank",
"description": "NFS mount '/mnt/tank' on 10.215.7.102"
},
{
"type": "VOLUME",
"name": "tank/vol",
"description": "iSCSI extent naa.6589cfc000000b3f0a891a2c4e187594"
}
]
}
"""
datastores = []
for k, v in self.__get_datastores(data).items():
if v["type"] in NFS_VOLUME_TYPES:
description = f"NFS mount {v['remote_path']!r} on {' or '.join(v['remote_hostnames'])}"
matches = [f"{hostname}:{v['remote_path']}" for hostname in v["remote_hostnames"]]
elif v["type"] == "VMFS":
description = (
f"iSCSI extent {', '.join(v['extent'])}"
if any(extent.startswith("naa.") for extent in v["extent"])
else ", ".join(v["extent"])
)
matches = v["extent"]
else:
self.logger.debug("Unknown volume type %r", v["type"])
continue
datastores.append({
"name": k,
"description": description,
"matches": matches,
})
ip_addresses = sum([
[alias["address"] for alias in interface["state"]["aliases"] if alias["type"] in ["INET", "INET6"]]
for interface in self.middleware.call_sync("interface.query")
], [])
iscsi_extents = defaultdict(list)
for extent in self.middleware.call_sync("iscsi.extent.query", [], {"select": ["path", "naa"]}):
if extent["path"].startswith("zvol/"):
zvol = extent["path"][len("zvol/"):]
iscsi_extents[zvol].append(f"naa.{extent['naa'][2:]}")
filesystems = []
zpools = [v["name"] for k, v in self.middleware.call_sync("zfs.pool.query_imported_fast").items()]
options = {"extra": {"retrieve_properties": False}}
for fs in self.middleware.call_sync("pool.dataset.query", [("pool", "in", zpools)], options):
if fs["type"] == "FILESYSTEM":
filesystems.append({
"type": "FILESYSTEM",
"name": fs["name"],
"description": f"NFS mount {fs['mountpoint']!r} on {' or '.join(ip_addresses)}",
"matches": [f"{ip_address}:{fs['mountpoint']}" for ip_address in ip_addresses],
})
if fs["type"] == "VOLUME":
filesystems.append({
"type": "VOLUME",
"name": fs["name"],
"description": (
f"iSCSI extent {', '.join(iscsi_extents[fs['name']])}"
if iscsi_extents[fs["name"]]
else "Not shared via iSCSI"
),
"matches": iscsi_extents[fs["name"]],
})
for datastore in datastores:
datastore["filesystems"] = [filesystem["name"] for filesystem in filesystems
if set(filesystem["matches"]) & set(datastore["matches"])]
datastore.pop("matches")
for filesystem in filesystems:
filesystem.pop("matches")
return {
"datastores": sorted(datastores, key=lambda datastore: datastore["name"]),
"filesystems": sorted(filesystems, key=lambda filesystem: filesystem["name"]),
}
def __get_datastores(self, data):
self.middleware.call_sync('network.general.will_perform_activity', 'vmware')
try:
server_instance = self.connect(data)
except (vim.fault.InvalidLogin, vim.fault.NoPermission, vim.fault.RestrictedVersion) as e:
raise CallError(e.msg, errno.EPERM)
except vmodl.RuntimeFault as e:
raise CallError(e.msg)
except (socket.gaierror, socket.error, OSError) as e:
raise CallError(str(e), e.errno)
content = server_instance.RetrieveContent()
objview = content.viewManager.CreateContainerView(
content.rootFolder, [vim.HostSystem], True
)
esxi_hosts = objview.view
objview.Destroy()
datastores = {}
for esxi_host in esxi_hosts:
storage_system = esxi_host.configManager.storageSystem
if storage_system.fileSystemVolumeInfo is None:
continue
for host_mount_info in storage_system.fileSystemVolumeInfo.mountInfo:
if host_mount_info.volume.type == 'VMFS':
datastores[host_mount_info.volume.name] = {
'type': host_mount_info.volume.type,
'uuid': host_mount_info.volume.uuid,
'capacity': host_mount_info.volume.capacity,
'vmfs_version': host_mount_info.volume.version,
'extent': [
partition.diskName
for partition in host_mount_info.volume.extent
],
'local': host_mount_info.volume.local,
'ssd': host_mount_info.volume.ssd
}
elif host_mount_info.volume.type in NFS_VOLUME_TYPES:
datastores[host_mount_info.volume.name] = {
'type': host_mount_info.volume.type,
'capacity': host_mount_info.volume.capacity,
'remote_host': host_mount_info.volume.remoteHost,
'remote_path': host_mount_info.volume.remotePath,
'remote_hostnames': host_mount_info.volume.remoteHostNames,
'username': host_mount_info.volume.userName,
}
elif host_mount_info.volume.type in ('other', 'OTHER', 'VFFS'):
                    # Ignore the VFFS type, it does not store VMs.
                    # Ignore the 'other' type, it does not seem to be meaningful.
pass
else:
self.logger.debug(f'Unknown volume type "{host_mount_info.volume.type}": {host_mount_info.volume}')
continue
connect.Disconnect(server_instance)
return datastores
@accepts(Str('dataset'), Bool('recursive'), roles=['READONLY_ADMIN'])
def dataset_has_vms(self, dataset, recursive):
"""
Returns "true" if `dataset` is configured with a VMWare snapshot
"""
return len(self._dataset_get_vms(dataset, recursive)) > 0
def _dataset_get_vms(self, dataset, recursive):
f = ["filesystem", "=", dataset]
if recursive:
f = [
"OR", [
f,
["filesystem", "^", dataset + "/"],
],
]
return self.middleware.call_sync("vmware.query", [f])
@private
def snapshot_begin(self, dataset, recursive):
qs = self._dataset_get_vms(dataset, recursive)
return self.snapshot_proceed(dataset, qs)
@private
def snapshot_proceed(self, dataset, qs):
self.middleware.call_sync('network.general.will_perform_activity', 'vmware')
# Generate a unique snapshot name that won't collide with anything that exists on the VMWare side.
vmsnapname = str(uuid.uuid4())
# Generate a helpful description that is visible on the VMWare side. Since we
# are going to be creating VMWare snaps, if one gets left dangling this will
# help determine where it came from.
vmsnapdescription = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} TrueNAS Created Snapshot"
# We keep track of snapshots per VMWare "task" because we are going to iterate
# over all the VMWare tasks for a given ZFS filesystem, do all the VMWare snapshotting
# then take the ZFS snapshot, then iterate again over all the VMWare "tasks" and undo
# all the snaps we created in the first place.
vmsnapobjs = []
for vmsnapobj in qs:
# Data structures that will be used to keep track of VMs that are snapped,
            # as well as VMs we tried to snap and failed, and VMs we realized we couldn't
# snapshot.
snapvms = []
snapvmfails = []
snapvmskips = []
try:
si = self.connect(vmsnapobj)
content = si.RetrieveContent()
except Exception as e:
self.logger.warning("VMware login to %s failed", vmsnapobj["hostname"], exc_info=True)
self.alert_vmware_login_failed(vmsnapobj, e)
continue
# There's no point to even consider VMs that are paused or powered off.
vm_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
for vm in vm_view.view:
if vm.summary.runtime.powerState != "poweredOn":
continue
if self._doesVMDependOnDataStore(vm, vmsnapobj["datastore"]):
try:
if self._canSnapshotVM(vm):
if not self._findVMSnapshotByName(vm, vmsnapname):
# have we already created a snapshot of the VM for this volume
# iteration? can happen if the VM uses two datasets (a and b)
# where both datasets are mapped to the same ZFS volume in TrueNAS.
VimTask.WaitForTask(vm.CreateSnapshot_Task(
name=vmsnapname,
description=vmsnapdescription,
memory=False, quiesce=True,
))
else:
self.logger.debug("Not creating snapshot %s for VM %s because it "
"already exists", vmsnapname, vm)
else:
# TODO:
# we can try to shutdown the VM, if the user provided us an ok to do
# so (might need a new list property in obj to know which VMs are
# fine to shutdown and a UI to specify such exceptions)
# otherwise can skip VM snap and then make a crash-consistent zfs
# snapshot for this VM
self.logger.info("Can't snapshot VM %s that depends on "
"datastore %s and filesystem %s. "
"Possibly using PT devices. Skipping.",
vm.name, vmsnapobj["datastore"], dataset)
snapvmskips.append([vm.config.uuid, vm.name])
except Exception as e:
self.logger.warning("Snapshot of VM %s failed", vm.name, exc_info=True)
self.middleware.call_sync("alert.oneshot_create", "VMWareSnapshotCreateFailed", {
"hostname": vmsnapobj["hostname"],
"vm": vm.name,
"snapshot": vmsnapname,
"error": self._vmware_exception_message(e),
})
snapvmfails.append([vm.config.uuid, vm.name])
snapvms.append(vm.config.uuid)
self.disconnect(si)
vmsnapobjs.append({
"vmsnapobj": vmsnapobj,
"snapvms": snapvms,
"snapvmfails": snapvmfails,
"snapvmskips": snapvmskips,
})
# At this point we've completed snapshotting VMs.
if not vmsnapobjs:
return None
return {
"vmsnapname": vmsnapname,
"vmsnapobjs": vmsnapobjs,
"vmsynced": vmsnapobjs and all(len(vmsnapobj["snapvms"]) > 0 and len(vmsnapobj["snapvmfails"]) == 0
for vmsnapobj in vmsnapobjs)
}
@private
def snapshot_end(self, context):
self.middleware.call_sync('network.general.will_perform_activity', 'vmware')
vmsnapname = context["vmsnapname"]
for elem in context["vmsnapobjs"]:
vmsnapobj = elem["vmsnapobj"]
try:
si = self.connect(vmsnapobj)
self.delete_vmware_login_failed_alert(vmsnapobj)
except Exception as e:
self.logger.warning("VMware login failed to %s", vmsnapobj["hostname"])
self.alert_vmware_login_failed(vmsnapobj, e)
for vm_uuid in elem["snapvms"]:
self.middleware.call_sync("vmware.defer_deleting_snapshot", vmsnapobj, vm_uuid, vmsnapname)
continue
# vm is an object, so we'll dereference that object anywhere it's user facing.
for vm_uuid in elem["snapvms"]:
for vm in self.find_vms_by_uuid(si, vm_uuid):
if [vm_uuid, vm.name] not in elem["snapvmfails"] and [vm_uuid, vm.name] not in elem["snapvmskips"]:
try:
self.delete_snapshot(vm, vmsnapname)
except Exception as e:
self.logger.debug(
"Exception removing snapshot %s on %s", vmsnapname, vm.name, exc_info=True
)
self.middleware.call_sync("alert.oneshot_create", "VMWareSnapshotDeleteFailed", {
"hostname": vmsnapobj["hostname"],
"vm": vm.name,
"snapshot": vmsnapname,
"error": self._vmware_exception_message(e),
})
self.middleware.call_sync("vmware.defer_deleting_snapshot", vmsnapobj, vm_uuid, vmsnapname)
self.disconnect(si)
@private
def periodic_snapshot_task_begin(self, task_id):
task = self.middleware.call_sync("pool.snapshottask.query",
[["id", "=", task_id]],
{"get": True})
# If there's a VMWare Plugin object for this filesystem
# snapshot the VMs before taking the ZFS snapshot.
# Once we've taken the ZFS snapshot we're going to log back in
# to VMWare and destroy all the VMWare snapshots we created.
# We do this because having VMWare snapshots in existence impacts
# the performance of your VMs.
qs = self._dataset_get_vms(task["dataset"], task["recursive"])
if qs:
return {
"dataset": task["dataset"],
"qs": qs,
}
@private
@accepts(Any("context", private=True))
@job()
def periodic_snapshot_task_proceed(self, job, context):
return self.snapshot_proceed(context["dataset"], context["qs"])
@private
@accepts(Any("context", private=True))
@job()
def periodic_snapshot_task_end(self, job, context):
return self.snapshot_end(context)
@private
def connect(self, vmsnapobj):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
si = connect.SmartConnect(host=vmsnapobj["hostname"], user=vmsnapobj["username"],
pwd=vmsnapobj["password"], sslContext=ssl_context)
return si
@private
def disconnect(self, si):
connect.Disconnect(si)
@private
def find_vms_by_uuid(self, si, vm_uuid):
return si.content.searchIndex.FindAllByUuid(None, vm_uuid, True)
@private
def delete_snapshot(self, vm, vmsnapname):
snap = self._findVMSnapshotByName(vm, vmsnapname)
if snap:
VimTask.WaitForTask(snap.RemoveSnapshot_Task(True))
# Check if a VM is using a certain datastore
def _doesVMDependOnDataStore(self, vm, dataStore):
try:
# simple case, VM config data is on a datastore.
# not sure how critical it is to snapshot the store that has config data, but best to do so
for i in vm.datastore:
if i.info.name.startswith(dataStore):
return True
# check if VM has disks on the data store
# we check both "diskDescriptor" and "diskExtent" types of files
for device in vm.config.hardware.device:
if device.backing is None:
continue
if hasattr(device.backing, 'fileName'):
if device.backing.datastore is None:
continue
if device.backing.datastore.info.name == dataStore:
return True
except Exception:
self.logger.debug('Exception in doesVMDependOnDataStore', exc_info=True)
return False
# check if VMware can snapshot a VM
def _canSnapshotVM(self, vm):
try:
# check for PCI pass-through devices
for device in vm.config.hardware.device:
if isinstance(device, vim.VirtualPCIPassthrough):
return False
            # consider supporting more cases of VMs that can't be snapshotted
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1006392
except Exception:
self.logger.debug('Exception in canSnapshotVM', exc_info=True)
return True
def _findVMSnapshotByName(self, vm, snapshotName):
try:
if vm.snapshot is None:
return None
for tree in vm.snapshot.rootSnapshotList:
result = self._findVMSnapshotByNameInTree(tree, snapshotName)
if result:
return result
except Exception:
self.logger.debug('Exception in _findVMSnapshotByName', exc_info=True)
return None
def _findVMSnapshotByNameInTree(self, tree, snapshotName):
if tree.name == snapshotName:
return tree.snapshot
for i in tree.childSnapshotList:
if i.name == snapshotName:
return i.snapshot
if hasattr(i, "childSnapshotList"):
result = self._findVMSnapshotByNameInTree(i, snapshotName)
if result:
return result
return None
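    # Hedged illustration of the recursive walk above: for a snapshot tree such
    # as base -> [nightly -> [<uuid-name>], manual], searching for '<uuid-name>'
    # descends each childSnapshotList in turn and returns the matching vim
    # snapshot object, or None when the name is absent.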
def _vmware_exception_message(self, e):
if hasattr(e, "msg"):
return e.msg
else:
return str(e)
@private
def alert_vmware_login_failed(self, vmsnapobj, e):
error = self._vmware_exception_message(e)
self.middleware.call_sync("alert.oneshot_create", "VMWareLoginFailed", {
"hostname": vmsnapobj["hostname"],
"error": error,
})
self.set_vmsnapobj_state(vmsnapobj, {
"state": "ERROR",
"error": error,
})
@private
def delete_vmware_login_failed_alert(self, vmsnapobj):
self.middleware.call_sync("alert.oneshot_delete", "VMWareLoginFailed", vmsnapobj["hostname"])
self.set_vmsnapobj_state(vmsnapobj, {
"state": "SUCCESS",
})
@private
def set_vmsnapobj_state(self, vmsnapobj, state):
for vmware in self.middleware.call_sync(
"datastore.query",
"storage.vmwareplugin",
[["hostname", "=", vmsnapobj["hostname"]],
["username", "=", vmsnapobj["username"]]],
):
if vmware["password"] == vmsnapobj["password"]: # These need to be decoded to be compared
self.set_vmsnapobj_state_by_id(vmware["id"], state)
@private
def set_vmsnapobj_state_by_id(self, id_, state):
self.middleware.call_sync("datastore.update", "storage.vmwareplugin", id_, {
"state": {
**state,
"datetime": utc_now(),
},
})
async def setup(middleware):
await middleware.call('network.general.register_activity', 'vmware', 'VMware Snapshots')
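
# Illustrative sketch (not part of the original module): the recursive walk
# performed by _findVMSnapshotByNameInTree, shown against a minimal stand-in
# for pyVmomi's snapshot tree. SimpleNamespace replaces the vim objects; the
# names and values are made-up for demonstration only.
from types import SimpleNamespace

def _example_find_snapshot_in_tree(tree, snapshot_name):
    # Depth-first search: check the current node, then descend into children.
    if tree.name == snapshot_name:
        return tree.snapshot
    for child in getattr(tree, 'childSnapshotList', []):
        found = _example_find_snapshot_in_tree(child, snapshot_name)
        if found:
            return found
    return None

def _example_find_snapshot_demo():
    tree = SimpleNamespace(
        name='base', snapshot='snap-base',
        childSnapshotList=[
            SimpleNamespace(name='daily', snapshot='snap-daily', childSnapshotList=[]),
        ],
    )
    assert _example_find_snapshot_in_tree(tree, 'daily') == 'snap-daily'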
# File: truenas_middleware/src/middlewared/middlewared/plugins/rsync.py
# Copyright 2017 iXsystems, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################
import asyncio
import asyncssh
import contextlib
import enum
import glob
import os
import pathlib
import shlex
import tempfile
from middlewared.common.attachment import LockableFSAttachmentDelegate
from middlewared.plugins.rsync_.utils import get_host_key_file_contents_from_ssh_credentials
from middlewared.schema import accepts, Bool, Cron, Dict, Str, Int, List, Patch, returns
from middlewared.validators import Range
from middlewared.service import (
CallError, ValidationErrors, job, item_method, private, TaskPathService,
)
import middlewared.sqlalchemy as sa
from middlewared.utils import run
from middlewared.utils.user_context import run_command_with_user_context
from middlewared.utils.service.task_state import TaskStateMixin
RSYNC_PATH_LIMIT = 1023
class RsyncReturnCode(enum.Enum):
# from rsync's "errcode.h"
OK = 0
SYNTAX = 1 # syntax or usage error
PROTOCOL = 2 # protocol incompatibility
FILESELECT = 3 # errors selecting input/output files, dirs
UNSUPPORTED = 4 # requested action not supported
STARTCLIENT = 5 # error starting client-server protocol
SOCKETIO = 10 # error in socket IO
FILEIO = 11 # error in file IO
STREAMIO = 12 # error in rsync protocol data stream
MESSAGEIO = 13 # errors with program diagnostics
IPC = 14 # error in IPC code
CRASHED = 15 # sibling crashed
TERMINATED = 16 # sibling terminated abnormally
SIGNAL1 = 19 # status returned when sent SIGUSR1
SIGNAL = 20 # status returned when sent SIGINT, SIGTERM, SIGHUP
WAITCHILD = 21 # some error returned by waitpid()
MALLOC = 22 # error allocating core memory buffers
PARTIAL = 23 # partial transfer
VANISHED = 24 # file(s) vanished on sender side
DEL_LIMIT = 25 # skipped some deletes due to --max-delete
TIMEOUT = 30 # timeout in data send/receive
CONTIMEOUT = 35 # timeout waiting for daemon connection
@classmethod
def nonfatals(cls):
return tuple([rc.value for rc in [
cls.OK,
cls.VANISHED,
cls.DEL_LIMIT
]])
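
# Illustrative helper (not part of the original module): classifying an rsync
# exit status with the enum above; run() below applies the same ValueError
# fallback for unknown codes.
def _example_describe_rc(returncode: int) -> str:
    try:
        name = RsyncReturnCode(returncode).name
    except ValueError:
        name = 'UNKNOWN'
    severity = 'non-fatal' if returncode in RsyncReturnCode.nonfatals() else 'fatal'
    # _example_describe_rc(24) == 'VANISHED (non-fatal)'
    return f'{name} ({severity})'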
class RsyncTaskModel(sa.Model):
__tablename__ = 'tasks_rsync'
id = sa.Column(sa.Integer(), primary_key=True)
rsync_path = sa.Column(sa.String(255))
rsync_remotehost = sa.Column(sa.String(120), nullable=True)
rsync_remoteport = sa.Column(sa.SmallInteger(), nullable=True)
rsync_remotemodule = sa.Column(sa.String(120), nullable=True)
rsync_ssh_credentials_id = sa.Column(sa.ForeignKey('system_keychaincredential.id'), index=True, nullable=True)
rsync_desc = sa.Column(sa.String(120))
rsync_minute = sa.Column(sa.String(100), default="00")
rsync_hour = sa.Column(sa.String(100), default="*")
rsync_daymonth = sa.Column(sa.String(100), default="*")
rsync_month = sa.Column(sa.String(100), default='*')
rsync_dayweek = sa.Column(sa.String(100), default="*")
rsync_user = sa.Column(sa.String(60))
rsync_recursive = sa.Column(sa.Boolean(), default=True)
rsync_times = sa.Column(sa.Boolean(), default=True)
rsync_compress = sa.Column(sa.Boolean(), default=True)
rsync_archive = sa.Column(sa.Boolean(), default=False)
rsync_delete = sa.Column(sa.Boolean(), default=False)
rsync_quiet = sa.Column(sa.Boolean(), default=False)
rsync_preserveperm = sa.Column(sa.Boolean(), default=False)
rsync_preserveattr = sa.Column(sa.Boolean(), default=False)
rsync_extra = sa.Column(sa.Text())
rsync_enabled = sa.Column(sa.Boolean(), default=True)
rsync_mode = sa.Column(sa.String(20), default='module')
rsync_remotepath = sa.Column(sa.String(255))
rsync_direction = sa.Column(sa.String(10), default='PUSH')
rsync_delayupdates = sa.Column(sa.Boolean(), default=True)
rsync_job = sa.Column(sa.JSON(None))
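
# Illustrative (not part of the original module): commandline() below joins the
# final rsync invocation into a single shell string, so it relies on
# shlex.quote to keep paths with spaces intact. The sample paths are made up.
def _example_quote_path():
    assert shlex.quote('/mnt/tank/my data') == "'/mnt/tank/my data'"
    assert shlex.quote('/mnt/tank/plain') == '/mnt/tank/plain'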
class RsyncTaskService(TaskPathService, TaskStateMixin):
share_task_type = 'Rsync'
task_state_methods = ['rsynctask.run']
class Config:
datastore = 'tasks.rsync'
datastore_prefix = 'rsync_'
datastore_extend = 'rsynctask.rsync_task_extend'
datastore_extend_context = 'rsynctask.rsync_task_extend_context'
cli_namespace = 'task.rsync'
ENTRY = Patch(
'rsync_task_create', 'rsync_task_entry',
('rm', {'name': 'ssh_credentials'}),
('rm', {'name': 'validate_rpath'}),
('rm', {'name': 'ssh_keyscan'}),
('add', Int('id')),
('add', Dict('ssh_credentials', null=True, additional_attrs=True)),
('add', Bool('locked')),
('add', Dict('job', null=True, additional_attrs=True)),
)
@private
async def rsync_task_extend(self, data, context):
try:
data['extra'] = shlex.split(data['extra'].replace('"', r'"\"').replace("'", r'"\"'))
except ValueError:
            # Handle the case where `extra` was misconfigured by an older version.
            # Going forward, create/update verify the value splits cleanly with shlex.
data['extra'] = data['extra'].split()
for field in ('mode', 'direction'):
data[field] = data[field].upper()
Cron.convert_db_format_to_schedule(data)
if job := await self.get_task_state_job(context['task_state'], data['id']):
data['job'] = job
return data
@private
async def rsync_task_extend_context(self, rows, extra):
return {
'task_state': await self.get_task_state_context(),
}
@private
async def validate_rsync_task(self, data, schema):
verrors = ValidationErrors()
# Windows users can have spaces in their usernames
# http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
username = data.get('user')
if ' ' in username:
verrors.add(f'{schema}.user', 'User names cannot have spaces')
raise verrors
user = None
with contextlib.suppress(KeyError):
user = await self.middleware.call('user.get_user_obj', {'username': username})
if not user:
verrors.add(f'{schema}.user', f'Provided user "{username}" does not exist')
raise verrors
await self.validate_path_field(data, schema, verrors)
data['extra'] = ' '.join(data['extra'])
try:
shlex.split(data['extra'].replace('"', r'"\"').replace("'", r'"\"'))
except ValueError as e:
verrors.add(f'{schema}.extra', f'Please specify valid value: {e}')
if data['mode'] == 'MODULE':
if not data['remotehost']:
verrors.add(f'{schema}.remotehost', 'This field is required')
if not data['remotemodule']:
verrors.add(f'{schema}.remotemodule', 'This field is required')
if data['mode'] == 'SSH':
connect_kwargs = None
if data['ssh_credentials']:
try:
ssh_credentials = await self.middleware.call(
'keychaincredential.get_of_type',
data['ssh_credentials'],
'SSH_CREDENTIALS',
)
except CallError as e:
verrors.add(f'{schema}.ssh_credentials', e.errmsg)
else:
ssh_keypair = await self.middleware.call(
'keychaincredential.get_of_type',
ssh_credentials['attributes']['private_key'],
'SSH_KEY_PAIR',
)
connect_kwargs = {
"host": ssh_credentials['attributes']['host'],
"port": ssh_credentials['attributes']['port'],
'username': ssh_credentials['attributes']['username'],
'client_keys': [asyncssh.import_private_key(ssh_keypair['attributes']['private_key'])],
'known_hosts': asyncssh.SSHKnownHosts(get_host_key_file_contents_from_ssh_credentials(
ssh_credentials['attributes'],
))
}
else:
if not data['remotehost']:
verrors.add(f'{schema}.remotehost', 'This field is required')
if not data['remoteport']:
verrors.add(f'{schema}.remoteport', 'This field is required')
search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
exclude_from_search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*pub')
key_files = set(glob.glob(search)) - set(glob.glob(exclude_from_search))
if not key_files:
verrors.add(
f'{schema}.user',
'In order to use rsync over SSH you need a user'
' with a private key (DSA/ECDSA/RSA) set up in home dir.'
)
else:
for file in set(key_files):
                        # this file holds a private key and its permissions should be 600
if os.stat(file).st_mode & 0o077 != 0:
verrors.add(
f'{schema}.user',
f'Permissions {str(oct(os.stat(file).st_mode & 0o777))[2:]} for {file} are too open. '
f'Please correct them by running chmod 600 {file}'
)
key_files.discard(file)
if key_files:
if '@' in data['remotehost']:
remote_username, remote_host = data['remotehost'].rsplit('@', 1)
else:
remote_username = username
remote_host = data['remotehost']
connect_kwargs = {
'host': remote_host,
'port': data['remoteport'],
'username': remote_username,
'client_keys': key_files,
}
remote_path = data.get('remotepath')
if not remote_path:
verrors.add(f'{schema}.remotepath', 'This field is required')
if data['enabled'] and connect_kwargs:
ssh_dir_path = pathlib.Path(os.path.join(user['pw_dir'], '.ssh'))
known_hosts_path = pathlib.Path(os.path.join(ssh_dir_path, 'known_hosts'))
if 'known_hosts' not in connect_kwargs:
try:
try:
known_hosts_text = await self.middleware.run_in_thread(known_hosts_path.read_text)
except FileNotFoundError:
known_hosts_text = ''
known_hosts = asyncssh.SSHKnownHosts(known_hosts_text)
except Exception as e:
verrors.add(
f'{schema}.remotehost',
f'Failed to load {known_hosts_path}: {e}',
)
else:
if data['ssh_keyscan']:
if not known_hosts.match(connect_kwargs['host'], '', None)[0]:
if known_hosts_text and not known_hosts_text.endswith("\n"):
known_hosts_text += '\n'
known_hosts_text += (await run(
['ssh-keyscan', '-p', str(connect_kwargs['port']), connect_kwargs['host']],
encoding='utf-8',
errors='ignore',
)).stdout
# If for whatever reason the dir does not exist, let's create it
# An example of this is when we run rsync tests we nuke the directory
def handle_ssh_dir():
with contextlib.suppress(FileExistsError):
ssh_dir_path.mkdir(0o700)
os.chown(ssh_dir_path.absolute(), user['pw_uid'], user['pw_gid'])
known_hosts_path.write_text(known_hosts_text)
os.chown(known_hosts_path.absolute(), user['pw_uid'], user['pw_gid'])
await self.middleware.run_in_thread(handle_ssh_dir)
known_hosts = asyncssh.SSHKnownHosts(known_hosts_text)
if not verrors:
connect_kwargs['known_hosts'] = known_hosts
if data['validate_rpath']:
try:
async with await asyncssh.connect(
**connect_kwargs,
options=asyncssh.SSHClientConnectionOptions(connect_timeout=5),
) as conn:
await conn.run(f'test -d {shlex.quote(remote_path)}', check=True)
except asyncio.TimeoutError:
verrors.add(
f'{schema}.remotehost',
'SSH timeout occurred. Remote path cannot be validated.'
)
except OSError as e:
if e.errno == 113:
verrors.add(
f'{schema}.remotehost',
f'Connection to the remote host {connect_kwargs["host"]} on port '
f'{connect_kwargs["port"]} failed.'
)
else:
verrors.add(
f'{schema}.remotehost',
e.__str__()
)
except asyncssh.HostKeyNotVerifiable as e:
verrors.add(
f'{schema}.remotehost',
f'Failed to verify remote host key: {e.reason}',
CallError.ESSLCERTVERIFICATIONERROR,
)
except asyncssh.DisconnectError as e:
verrors.add(
f'{schema}.remotehost',
f'Disconnect Error [error code {e.code}: {e.reason}] was generated when trying to '
f'communicate with remote host {connect_kwargs["host"]} and remote user '
f'{connect_kwargs["username"]}.'
)
except asyncssh.ProcessError as e:
if e.code == '1':
verrors.add(
f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
'Either create one yourself on the remote machine or uncheck the '
'validate_rpath field'
)
else:
verrors.add(
f'{schema}.remotepath',
f'Connection to Remote Host was successful but failed to verify '
f'Remote Path. {e.__str__()}'
)
except asyncssh.Error as e:
if e.__class__.__name__ in e.__str__():
exception_reason = e.__str__()
else:
exception_reason = e.__class__.__name__ + ' ' + e.__str__()
verrors.add(
f'{schema}.remotepath',
f'Remote Path could not be validated. An exception was raised. {exception_reason}'
)
else:
if not known_hosts.match(connect_kwargs['host'], '', None)[0]:
verrors.add(
f'{schema}.remotehost',
f'Host key not found in {known_hosts_path}',
CallError.ESSLCERTVERIFICATIONERROR,
)
data.pop('validate_rpath', None)
data.pop('ssh_keyscan', None)
# Keeping compatibility with legacy UI
for field in ('mode', 'direction'):
data[field] = data[field].lower()
return verrors, data
@accepts(Dict(
'rsync_task_create',
Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
Str('user', required=True),
Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
Str('remotehost', null=True, default=None),
Int('remoteport', null=True, default=None),
Str('remotemodule', null=True, default=None),
Int('ssh_credentials', null=True, default=None),
Str('remotepath'),
Bool('validate_rpath', default=True),
Bool('ssh_keyscan', default=False),
Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
Str('desc'),
Cron(
'schedule',
defaults={'minute': '00'},
),
Bool('recursive'),
Bool('times'),
Bool('compress'),
Bool('archive'),
Bool('delete'),
Bool('quiet'),
Bool('preserveperm'),
Bool('preserveattr'),
Bool('delayupdates'),
List('extra', items=[Str('extra')]),
Bool('enabled', default=True),
register=True,
))
async def do_create(self, data):
"""
Create a Rsync Task.
See the comment in Rsyncmod about `path` length limits.
        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        "username@remote_host" format should be used.
`mode` represents different operating mechanisms for Rsync i.e Rsync Module mode / Rsync SSH mode.
In SSH mode, if `ssh_credentials` (a keychain credential of `SSH_CREDENTIALS` type) is specified then it is used
to connect to the remote host. If it is not specified, then keys in `user`'s .ssh directory are used.
`remotehost` and `remoteport` are not used in this case.
`remotemodule` is the name of remote module, this attribute should be specified when `mode` is set to MODULE.
`remotepath` specifies the path on the remote system.
        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.
`ssh_keyscan` will automatically add remote host key to user's known_hosts file.
`direction` specifies if data should be PULLED or PUSHED from the remote system.
`compress` when set reduces the size of the data which is to be transmitted.
`archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
and special files.
`delete` when set deletes files in the destination directory which do not exist in the source directory.
`preserveperm` when set preserves original file permissions.
.. examples(websocket)::
Create a Rsync Task which pulls data from a remote system every 5 minutes.
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "rsynctask.create",
"params": [{
"enabled": true,
"schedule": {
"minute": "5",
"hour": "*",
"dom": "*",
"month": "*",
"dow": "*"
},
"desc": "Test rsync task",
"user": "root",
"mode": "MODULE",
"remotehost": "root@192.168.0.10",
"compress": true,
"archive": true,
"direction": "PULL",
"path": "/mnt/vol1/rsync_dataset",
"remotemodule": "remote_module1"
}]
}
"""
verrors, data = await self.validate_rsync_task(data, 'rsync_task_create')
verrors.check()
Cron.convert_schedule_to_db_format(data)
data['id'] = await self.middleware.call(
'datastore.insert',
self._config.datastore,
data,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('service.restart', 'cron')
return await self.get_instance(data['id'])
@accepts(
Int('id', validators=[Range(min_=1)]),
Patch('rsync_task_create', 'rsync_task_update', ('attr', {'update': True}))
)
async def do_update(self, id_, data):
"""
Update Rsync Task of `id`.
"""
data.setdefault('validate_rpath', True)
data.setdefault('ssh_keyscan', False)
old = await self.query(filters=[('id', '=', id_)], options={'get': True})
old.pop(self.locked_field)
old.pop('job')
new = old.copy()
if new['ssh_credentials']:
new['ssh_credentials'] = new['ssh_credentials']['id']
new.update(data)
verrors, new = await self.validate_rsync_task(new, 'rsync_task_update')
verrors.check()
Cron.convert_schedule_to_db_format(new)
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
new,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('service.restart', 'cron')
return await self.get_instance(id_)
async def do_delete(self, id_):
"""
Delete Rsync Task of `id`.
"""
res = await self.middleware.call('datastore.delete', self._config.datastore, id_)
await self.middleware.call('service.restart', 'cron')
return res
@private
@contextlib.contextmanager
def commandline(self, id_):
"""
Helper method to generate the rsync command avoiding code duplication.
"""
rsync = self.middleware.call_sync('rsynctask.get_instance', id_)
path = shlex.quote(rsync['path'])
with contextlib.ExitStack() as exit_stack:
line = ['rsync']
for name, flag in (
('archive', '-a'),
('compress', '-zz'),
('delayupdates', '--delay-updates'),
('delete', '--delete-delay'),
('preserveattr', '-X'),
('preserveperm', '-p'),
('recursive', '-r'),
('times', '-t'),
):
if rsync[name]:
line.append(flag)
if rsync['extra']:
line.append(' '.join(rsync['extra']))
if not rsync['ssh_credentials']:
# Do not use username if one is specified in host field
# See #5096 for more details
if '@' in rsync['remotehost']:
remote = rsync['remotehost']
else:
remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'
if rsync['mode'] == 'MODULE':
module_args = [path, f'rsync://{remote}/"{rsync["remotemodule"]}"']
if rsync['direction'] != 'PUSH':
module_args.reverse()
line += module_args
else:
if rsync['ssh_credentials']:
credentials = rsync['ssh_credentials']['attributes']
key_pair = self.middleware.call_sync(
'keychaincredential.get_of_type',
credentials['private_key'],
'SSH_KEY_PAIR',
)
remote = f'"{credentials["username"]}"@{credentials["host"]}'
port = credentials['port']
user = self.middleware.call_sync('user.get_user_obj', {'username': rsync['user']})
private_key_file = exit_stack.enter_context(tempfile.NamedTemporaryFile('w'))
os.fchmod(private_key_file.fileno(), 0o600)
os.fchown(private_key_file.fileno(), user['pw_uid'], user['pw_gid'])
private_key_file.write(key_pair['attributes']['private_key'])
private_key_file.flush()
host_key_file = exit_stack.enter_context(tempfile.NamedTemporaryFile('w'))
os.fchmod(host_key_file.fileno(), 0o600)
os.fchown(host_key_file.fileno(), user['pw_uid'], user['pw_gid'])
host_key_file.write(get_host_key_file_contents_from_ssh_credentials(credentials))
host_key_file.flush()
extra_args = f'-i {private_key_file.name} -o UserKnownHostsFile={host_key_file.name}'
else:
port = rsync['remoteport']
extra_args = ''
remote_username, remote_host = remote.rsplit('@', 1)
if ':' in remote_host:
remote_host = f'[{remote_host}]'
remote = f'{remote_username}@{remote_host}'
line += [
'-e',
f'"ssh -p {port} -o BatchMode=yes -o StrictHostKeyChecking=yes {extra_args}"'
]
path_args = [path, f'{remote}:{shlex.quote(rsync["remotepath"])}']
if rsync['direction'] != 'PUSH':
path_args.reverse()
line += path_args
if rsync['quiet']:
line += ['>', '/dev/null', '2>&1']
yield ' '.join(line)
@item_method
@accepts(Int('id'))
@returns()
@job(lock=lambda args: args[-1], lock_queue_size=1, logs=True)
def run(self, job, id_):
"""
Job to run rsync task of `id`.
Output is saved to job log excerpt (not syslog).
"""
self.middleware.call_sync('network.general.will_perform_activity', 'rsync')
rsync = self.middleware.call_sync('rsynctask.get_instance', id_)
if rsync['locked']:
self.middleware.call_sync('rsynctask.generate_locked_alert', id_)
return
with self.commandline(id_) as commandline:
cp = run_command_with_user_context(
commandline, rsync['user'], output=False, callback=lambda v: job.logs_fd.write(v),
)
for klass in ('RsyncSuccess', 'RsyncFailed') if not rsync['quiet'] else ():
self.middleware.call_sync('alert.oneshot_delete', klass, rsync['id'])
if cp.returncode not in RsyncReturnCode.nonfatals():
err = None
if cp.returncode == RsyncReturnCode.STREAMIO and rsync['compress']:
err = (
"rsync command with compression enabled failed with STREAMIO error. "
"This may indicate that remote server lacks support for the new-style "
"compression used by TrueNAS."
)
if not rsync['quiet']:
self.middleware.call_sync('alert.oneshot_create', 'RsyncFailed', {
'id': rsync['id'],
'direction': rsync['direction'],
'path': rsync['path'],
})
if err:
msg = f'{err} Check logs for further information'
else:
try:
rc_name = RsyncReturnCode(cp.returncode).name
except ValueError:
rc_name = 'UNKNOWN'
msg = (
f'rsync command returned {cp.returncode} - {rc_name}. '
'Check logs for further information.'
)
raise CallError(msg)
elif not rsync['quiet']:
self.middleware.call_sync('alert.oneshot_create', 'RsyncSuccess', {
'id': rsync['id'],
'direction': rsync['direction'],
'path': rsync['path'],
})
class RsyncFSAttachmentDelegate(LockableFSAttachmentDelegate):
name = 'rsync'
title = 'Rsync Task'
service_class = RsyncTaskService
resource_name = 'path'
async def restart_reload_services(self, attachments):
await self.middleware.call('service.restart', 'cron')
async def setup(middleware):
await middleware.call('pool.dataset.register_attachment_delegate', RsyncFSAttachmentDelegate(middleware))
await middleware.call('network.general.register_activity', 'rsync', 'Rsync')
await middleware.call('rsynctask.persist_task_state_on_job_complete')
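
# Illustrative sketch (not part of the original module): the private-key
# permission test applied by validate_rsync_task, standalone. 'key_file' is a
# hypothetical path to a key in the rsync user's ~/.ssh directory.
def _example_key_permissions_ok(key_file: str) -> bool:
    # Any group/other permission bit (mask 0o077) makes the key too open for
    # ssh; the validator asks the user to chmod 600 such files.
    return os.stat(key_file).st_mode & 0o077 == 0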
# File: truenas_middleware/src/middlewared/middlewared/plugins/nfs.py
import enum
import errno
import ipaddress
import itertools
import os
import shutil
from middlewared.common.listen import SystemServiceListenMultipleDelegate
from middlewared.schema import accepts, Bool, Dict, Dir, Int, IPAddr, List, Patch, returns, Str
from middlewared.async_validators import check_path_resides_within_volume, validate_port
from middlewared.validators import Match, NotMatch, Port, Range, IpAddress
from middlewared.service import private, SharingService, SystemServiceService
from middlewared.service import CallError, ValidationError, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.asyncio_ import asyncio_map
from middlewared.plugins.nfs_.utils import get_domain, leftmost_has_wildcards, get_wildcard_domain
from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
NFS_RDMA_DEFAULT_PORT = 20049
# Support the nfsv4recoverydir procfs entry. This may deprecate.
NFSV4_RECOVERY_DIR_PROCFS_PATH = '/proc/fs/nfsd/nfsv4recoverydir'
class NFSServicePathInfo(enum.Enum):
# nfs conf sections that use STATEDIR: exportd, mountd, statd
STATEDIR = (os.path.join(SYSDATASET_PATH, 'nfs'), 0o755, True, {'uid': 0, 'gid': 0})
CLDDIR = (os.path.join(SYSDATASET_PATH, 'nfs', 'nfsdcld'), 0o700, True, {'uid': 0, 'gid': 0})
CLDTRKDIR = (os.path.join(SYSDATASET_PATH, 'nfs', 'nfsdcltrack'), 0o700, True, {'uid': 0, 'gid': 0})
# Fix up the uid and gid in setup_directories
SMDIR = (os.path.join(SYSDATASET_PATH, 'nfs', 'sm'), 0o755, True, {'uid': 'statd', 'gid': 'nogroup'})
SMBAKDIR = (os.path.join(SYSDATASET_PATH, 'nfs', 'sm.bak'), 0o755, True, {'uid': 'statd', 'gid': 'nogroup'})
V4RECOVERYDIR = (os.path.join(SYSDATASET_PATH, 'nfs', 'v4recovery'), 0o755, True, {'uid': 0, 'gid': 0})
def path(self):
return self.value[0]
def mode(self):
return self.value[1]
def is_dir(self):
return self.value[2]
def owner(self):
return self.value[3]
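
# Illustrative (not part of the original module): each member above packs
# (path, mode, is_dir, owner) into a tuple that the accessor methods unpack.
# The assertions assume SYSDATASET_PATH is '/var/db/system'.
def _example_statedir_layout():
    member = NFSServicePathInfo.STATEDIR
    assert member.path() == '/var/db/system/nfs'
    assert member.mode() == 0o755
    assert member.is_dir() is True
    assert member.owner() == {'uid': 0, 'gid': 0}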
class NFSProtocol(str, enum.Enum):
NFSv3 = 'NFSV3'
NFSv4 = 'NFSV4'
def choices():
return [x.value for x in NFSProtocol]
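
# Illustrative (not part of the original module): because NFSProtocol mixes in
# str, members compare equal to their plain-string values, which is what lets
# checks like `NFSProtocol.NFSv4 in config["protocols"]` work against the JSON
# list of strings stored in the database.
def _example_protocol_membership():
    configured = ['NFSV3', 'NFSV4']  # as stored in nfs_srv_protocols
    assert NFSProtocol.NFSv4 in configured
    assert NFSProtocol.NFSv3 == 'NFSV3'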
class NFSModel(sa.Model):
__tablename__ = 'services_nfs'
id = sa.Column(sa.Integer(), primary_key=True)
nfs_srv_servers = sa.Column(sa.Integer(), nullable=True)
nfs_srv_allow_nonroot = sa.Column(sa.Boolean(), default=False)
nfs_srv_protocols = sa.Column(sa.JSON(list), default=[NFSProtocol.NFSv3, NFSProtocol.NFSv4])
nfs_srv_v4_krb = sa.Column(sa.Boolean(), default=False)
nfs_srv_bindip = sa.Column(sa.MultiSelectField())
nfs_srv_mountd_port = sa.Column(sa.SmallInteger(), nullable=True)
nfs_srv_rpcstatd_port = sa.Column(sa.SmallInteger(), nullable=True)
nfs_srv_rpclockd_port = sa.Column(sa.SmallInteger(), nullable=True)
nfs_srv_16 = sa.Column(sa.Boolean(), default=False)
nfs_srv_mountd_log = sa.Column(sa.Boolean(), default=True)
nfs_srv_statd_lockd_log = sa.Column(sa.Boolean(), default=False)
nfs_srv_v4_domain = sa.Column(sa.String(120))
nfs_srv_v4_owner_major = sa.Column(sa.String(1023), default='')
nfs_srv_rdma = sa.Column(sa.Boolean(), default=False)
class NFSService(SystemServiceService):
class Config:
service = "nfs"
service_verb = "restart"
datastore = "services.nfs"
datastore_prefix = "nfs_srv_"
datastore_extend = 'nfs.nfs_extend'
cli_namespace = "service.nfs"
role_prefix = "SHARING_NFS"
ENTRY = Dict(
'nfs_entry',
Int('id', required=True),
Int('servers', null=True, validators=[Range(min_=1, max_=256)], required=True),
Bool('allow_nonroot', required=True),
List('protocols', items=[Str('protocol', enum=NFSProtocol.choices())], required=True),
Bool('v4_krb', required=True),
Str('v4_domain', required=True),
List('bindip', items=[IPAddr('ip')], required=True),
Int('mountd_port', null=True, validators=[Port(exclude=[NFS_RDMA_DEFAULT_PORT])], required=True),
Int('rpcstatd_port', null=True, validators=[Port(exclude=[NFS_RDMA_DEFAULT_PORT])], required=True),
Int('rpclockd_port', null=True, validators=[Port(exclude=[NFS_RDMA_DEFAULT_PORT])], required=True),
Bool('mountd_log', required=True),
Bool('statd_lockd_log', required=True),
Bool('v4_krb_enabled', required=True),
Bool('userd_manage_gids', required=True),
Bool('keytab_has_nfs_spn', required=True),
Bool('managed_nfsd', default=True),
Bool('rdma', default=False),
)
@private
def name_to_id_conversion(self, name, name_type='user'):
''' Convert built-in user or group name to associated UID or GID '''
if any((not isinstance(name, str), isinstance(name, int))):
# it's not a string (NoneType, float, w/e) or it's an int
# so there is nothing to do
return name
if name_type == 'user':
method = 'user.get_builtin_user_id'
elif name_type == 'group':
method = 'group.get_builtin_group_id'
else:
self.logger.error('Unexpected name_type (%r)', name_type)
return name
try:
return self.middleware.call_sync(method, name)
except Exception as e:
if hasattr(e, 'errno') and e.errno == errno.ENOENT:
self.logger.error('Failed to resolve builtin %s %r', name_type, name)
else:
self.logger.error('Unexpected error resolving builtin %s %r', name_type, name, exc_info=True)
return name
@private
def update_procfs_v4recoverydir(self):
'''
The proc file /proc/fs/nfsd/nfsv4recoverydir is part of the legacy NFS client management.
        Its usefulness is debatable, and by default it reports a path that TrueNAS does not use.
        While this entry exists, TrueNAS will attempt to keep it consistent with the actual path.
NOTE: NFS will function correctly even if this is reporting an inconsistent value.
'''
procfs_path = NFSV4_RECOVERY_DIR_PROCFS_PATH
try:
with open(procfs_path, 'r+') as fp:
fp.write(f'{NFSServicePathInfo.V4RECOVERYDIR.path()}\n')
except FileNotFoundError:
# This usually happens after a reboot
self.logger.info("%r: Missing or has been removed", procfs_path)
except Exception as e:
# errno=EBUSY usually happens on a system dataset move
if e.errno != errno.EBUSY:
self.logger.info("Unable to update %r: %r", procfs_path, str(e))
else:
self.logger.debug("%r: updated with %r", procfs_path, NFSServicePathInfo.V4RECOVERYDIR.path())
@private
def setup_directories(self):
'''
We are moving the NFS state directory from /var/lib/nfs to
the system dataset: /var/db/system/nfs.
When setup_directories is called /var/db/system/nfs is expected to exist.
If STATEDIR is empty, then this might be an initialization
and there might be current info in /var/lib/nfs.
        We always make sure the expected directories are present.
'''
# Initialize the system dataset NFS state directory
state_dir = NFSServicePathInfo.STATEDIR.path()
try:
shutil.copytree('/var/lib/nfs', state_dir)
except FileExistsError:
# destination file/dir already exists so ignore error
pass
except Exception:
self.logger.error('Unexpected error initializing %r', state_dir, exc_info=True)
# Make sure we have the necessary directories
for i in NFSServicePathInfo:
uid = self.name_to_id_conversion(i.owner()['uid'], name_type='user')
gid = self.name_to_id_conversion(i.owner()['gid'], name_type='group')
path = i.path()
if i.is_dir():
os.makedirs(path, exist_ok=True)
try:
os.chmod(path, i.mode())
os.chown(path, uid, gid)
except Exception:
self.logger.error('Unexpected failure initializing %r', path, exc_info=True)
@private
async def nfs_extend(self, nfs):
keytab_has_nfs = await self.middleware.call("kerberos.keytab.has_nfs_principal")
nfs["v4_krb_enabled"] = (nfs["v4_krb"] or keytab_has_nfs)
nfs["userd_manage_gids"] = nfs.pop("16")
nfs["v4_owner_major"] = nfs.pop("v4_owner_major")
nfs["keytab_has_nfs_spn"] = keytab_has_nfs
# 'None' indicates we are to dynamically manage the number of nfsd
if nfs['servers'] is None:
nfs['managed_nfsd'] = True
cpu_info = await self.middleware.call("system.cpu_info")
# Default calculation:
# Number of nfsd == number of cores, but not zero or greater than 32
nfs['servers'] = min(max(cpu_info['core_count'], 1), 32)
else:
nfs['managed_nfsd'] = False
return nfs
@private
async def nfs_compress(self, nfs):
nfs.pop('managed_nfsd')
nfs.pop("v4_krb_enabled")
nfs.pop("keytab_has_nfs_spn")
nfs["16"] = nfs.pop("userd_manage_gids")
return nfs
@accepts()
@returns(Dict(additional_attrs=True))
async def bindip_choices(self):
"""
Returns ip choices for NFS service to use
"""
return {
d['address']: d['address'] for d in await self.middleware.call(
'interface.ip_in_use', {'static': True}
)
}
@private
async def bindip(self, config):
bindip = [addr for addr in config['bindip'] if addr not in ['0.0.0.0', '::']]
if bindip:
found = False
for iface in await self.middleware.call('interface.query'):
for alias in iface['state']['aliases']:
if alias['address'] in bindip:
found = True
break
if found:
break
else:
found = True
if found:
await self.middleware.call('alert.oneshot_delete', 'NFSBindAddress', None)
return bindip
else:
if await self.middleware.call('cache.has_key', 'interfaces_are_set_up'):
await self.middleware.call('alert.oneshot_create', 'NFSBindAddress', None)
return []
@accepts(
Patch(
'nfs_entry', 'nfs_update',
('rm', {'name': 'id'}),
('rm', {'name': 'v4_krb_enabled'}),
('rm', {'name': 'keytab_has_nfs_spn'}),
('rm', {'name': 'managed_nfsd'}),
('attr', {'update': True}),
),
audit='Update NFS configuration',
)
async def do_update(self, data):
"""
Update NFS Service Configuration.
`servers` - Represents number of servers to create.
By default, the number of nfsd is determined by the capabilities of the system.
To specify the number of nfsd, set a value between 1 and 256.
'Unset' the field to return to default.
This field will always report the number of nfsd to start.
INPUT: 1 .. 256 or 'unset'
where unset will enable the automatic determination
                and 1 .. 256 will set the number of nfsd
Default: Number of nfsd is automatically determined and will be no less
than 1 and no more than 32
The number of mountd will be 1/4 the number of reported nfsd.
`allow_nonroot` - If 'enabled' it allows non-root mount requests to be served.
INPUT: enable/disable (True/False)
Default: disabled
`bindip` - Limit the server IP addresses available for NFS
By default, NFS will listen on all IP addresses that are active on the server.
To specify the server interface or a set of interfaces provide a list of IP's.
If the field is unset/empty, NFS listens on all available server addresses.
INPUT: list of IP addresses available configured on the server
Default: Use all available addresses (empty list)
`protocols` - enable/disable NFSv3, NFSv4
            Both can be enabled, or either NFSv3 or NFSv4 can be enabled by itself. At least one must be enabled.
Note: The 'showmount' command is available only if NFSv3 is enabled.
INPUT: Select NFSv3 or NFSv4 or NFSv3,NFSv4
Default: NFSv3,NFSv4
`v4_krb` - Force Kerberos authentication on NFS shares
            If enabled, NFS shares will fail if the Kerberos ticket is unavailable
INPUT: enable/disable
Default: disabled
`v4_domain` - Specify a DNS domain (NFSv4 only)
If set, the value will be used to override the default DNS domain name for NFSv4.
Specifies the 'Domain' idmapd.conf setting.
INPUT: a string
Default: unset, i.e. an empty string.
`mountd_port` - mountd port binding
The value set specifies the port mountd(8) binds to.
INPUT: unset or an integer between 1 .. 65535
Default: unset
`rpcstatd_port` - statd port binding
The value set specifies the port rpc.statd(8) binds to.
INPUT: unset or an integer between 1 .. 65535
Default: unset
`rpclockd_port` - lockd port binding
            The value set specifies the port the NFS lock manager (lockd) binds to.
INPUT: unset or an integer between 1 .. 65535
Default: unset
`rdma` - Enable/Disable NFS over RDMA support
Available on supported platforms and requires an installed and RDMA capable NIC.
            NFS over RDMA uses port 20049.
INPUT: Enable/Disable
Default: Disable
.. examples(websocket)::
Update NFS Service Configuration to listen on 192.168.0.10 and use NFSv4
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "pool.resilver.update",
"params": [{
"bindip": [
"192.168.0.10"
],
"protocols": ["NFSV3", "NFSV4"]
}]
}
"""
if 'protocols' in data:
if not data['protocols']:
raise ValidationError(
'nfs_update.protocols',
'Must specify at least one value ("NFSV3", "NFSV4") in the "protocols" list.'
)
old = await self.config()
# Fixup old 'servers' entry before processing changes
if old['managed_nfsd']:
old['servers'] = None
new = old.copy()
new.update(data)
verrors = ValidationErrors()
keytab_has_nfs = await self.middleware.call("kerberos.keytab.has_nfs_principal")
new_v4_krb_enabled = new["v4_krb"] or keytab_has_nfs
for k in ['mountd_port', 'rpcstatd_port', 'rpclockd_port']:
for bindip in (new['bindip'] or ['0.0.0.0']):
verrors.extend(await validate_port(self.middleware, f'nfs_update.{k}', new[k], 'nfs', bindip))
if await self.middleware.call("failover.licensed") and NFSProtocol.NFSv4 in new["protocols"] and new_v4_krb_enabled:
gc = await self.middleware.call("datastore.config", "network.globalconfiguration")
if not gc["gc_hostname_virtual"] or not gc["gc_domain"]:
verrors.add(
"nfs_update.v4",
"Enabling kerberos authentication on TrueNAS HA requires setting the virtual hostname and "
"domain"
)
bindip_choices = await self.bindip_choices()
for i, bindip in enumerate(new['bindip']):
if bindip not in bindip_choices:
verrors.add(f'nfs_update.bindip.{i}', 'Please provide a valid ip address')
if NFSProtocol.NFSv4 in new["protocols"] and new_v4_krb_enabled:
"""
In environments with kerberized NFSv4 enabled, we need to tell winbindd to not prefix
usernames with the short form of the AD domain. Directly update the db and regenerate
the smb.conf to avoid having a service disruption due to restarting the samba server.
"""
ad_config = await self.middleware.call('activedirectory.config')
if ad_config['enable'] and not ad_config['use_default_domain']:
await self.middleware.call(
'datastore.update', 'directoryservice.activedirectory', ad_config['id'],
{'use_default_domain': True}, {'prefix': 'ad_'}
)
await self.middleware.call('etc.generate', 'smb')
await self.middleware.call('service.reload', 'idmap')
if NFSProtocol.NFSv4 not in new["protocols"] and new["v4_domain"]:
verrors.add("nfs_update.v4_domain", "This option does not apply to NFSv3")
if new["rdma"]:
available_rdma_services = await self.middleware.call('rdma.capable_services')
if "NFS" not in available_rdma_services:
verrors.add(
"nfs_update.rdma",
"This platform cannot support NFS over RDMA or is missing an RDMA capable NIC."
)
verrors.check()
await self.nfs_compress(new)
await self._update_service(old, new, "restart")
if old['mountd_log'] != new['mountd_log']:
await self.middleware.call('service.reload', 'syslogd')
return await self.config()
class NFSShareModel(sa.Model):
__tablename__ = 'sharing_nfs_share'
id = sa.Column(sa.Integer(), primary_key=True)
nfs_path = sa.Column(sa.Text())
nfs_aliases = sa.Column(sa.JSON(list))
nfs_comment = sa.Column(sa.String(120))
nfs_network = sa.Column(sa.Text())
nfs_hosts = sa.Column(sa.Text())
nfs_ro = sa.Column(sa.Boolean(), default=False)
nfs_maproot_user = sa.Column(sa.String(120), nullable=True, default='')
nfs_maproot_group = sa.Column(sa.String(120), nullable=True, default='')
nfs_mapall_user = sa.Column(sa.String(120), nullable=True, default='')
nfs_mapall_group = sa.Column(sa.String(120), nullable=True, default='')
nfs_security = sa.Column(sa.MultiSelectField())
nfs_enabled = sa.Column(sa.Boolean(), default=True)
class SharingNFSService(SharingService):
share_task_type = 'NFS'
class Config:
namespace = "sharing.nfs"
datastore = "sharing.nfs_share"
datastore_prefix = "nfs_"
datastore_extend = "sharing.nfs.extend"
cli_namespace = "sharing.nfs"
role_prefix = "SHARING_NFS"
ENTRY = Patch(
'sharingnfs_create', 'sharing_nfs_entry',
('add', Int('id')),
('add', Bool('locked')),
register=True,
)
@accepts(
Dict(
"sharingnfs_create",
Dir("path", required=True),
List("aliases", items=[Str("path", validators=[Match(r"^/.*")])]),
Str("comment", default=""),
List("networks", items=[IPAddr("network", network=True)], unique=True),
List(
"hosts", items=[Str("host", validators=[NotMatch(
r'.*[\s"]', explanation='Name cannot contain spaces or quotes')]
)],
unique=True
),
Bool("ro", default=False),
Str("maproot_user", required=False, default=None, null=True),
Str("maproot_group", required=False, default=None, null=True),
Str("mapall_user", required=False, default=None, null=True),
Str("mapall_group", required=False, default=None, null=True),
List(
"security",
items=[Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])],
),
Bool("enabled", default=True),
register=True,
strict=True,
),
audit='NFS share create', audit_extended=lambda data: data["path"]
)
async def do_create(self, data):
"""
Create a NFS Share.
`path` local path to be exported.
`aliases` IGNORED, for now.
`networks` is a list of authorized networks that are allowed to access the share having format
"network/mask" CIDR notation. If empty, all networks are allowed.
`hosts` is a list of IP's/hostnames which are allowed to access the share. If empty, all IP's/hostnames are
allowed.
"""
verrors = ValidationErrors()
await self.validate(data, "sharingnfs_create", verrors)
verrors.check()
await self.compress(data)
data["id"] = await self.middleware.call(
"datastore.insert", self._config.datastore, data,
{
"prefix": self._config.datastore_prefix
},
)
await self.extend(data)
await self._service_change("nfs", "reload")
return await self.get_instance(data["id"])
@accepts(
Int("id"),
Patch(
"sharingnfs_create",
"sharingnfs_update",
("attr", {"update": True})
),
audit='NFS share update',
audit_callback=True,
)
async def do_update(self, audit_callback, id_, data):
"""
Update NFS Share of `id`.
"""
verrors = ValidationErrors()
old = await self.get_instance(id_)
audit_callback(old['path'])
new = old.copy()
new.update(data)
await self.validate(new, "sharingnfs_update", verrors, old=old)
verrors.check()
await self.compress(new)
await self.middleware.call(
"datastore.update", self._config.datastore, id_, new,
{
"prefix": self._config.datastore_prefix
}
)
await self._service_change("nfs", "reload")
return await self.get_instance(id_)
@accepts(
Int("id"),
audit='NFS share delete',
audit_callback=True,
)
@returns()
async def do_delete(self, audit_callback, id_):
"""
Delete NFS Share of `id`.
"""
nfs_share = await self.get_instance(id_)
audit_callback(nfs_share['path'])
await self.middleware.call("datastore.delete", self._config.datastore, id_)
await self._service_change("nfs", "reload")
@private
async def validate(self, data, schema_name, verrors, old=None):
"""
Perform advanced validation that does not get trapped by the schema checks
* Path must reside within a user volume
* Networks and users: Mostly follow overlap rules from exports man page.
The order of precedence for match is:
single host, IP networks, wildcards, netgroups, anonymous
Rule from exports man page:
If a client matches more than one of the specifications above, then
the first match from the above list order takes precedence - regardless
of the order they appear on the export line. However, if a client matches
more than one of the same type of specification (e.g. two netgroups), then
the first match from the order they appear on the export line takes precedence.
Notes:
- Those rules apply to a 'single' entry in the exports.
Our rules:
- Host cannot be specified more than once for the same share.
- Networks cannot overlap on the same share.
"""
if len(data["aliases"]):
data['aliases'] = []
# This feature was originally intended to be provided by nfs-ganesha
# since we no longer have ganesha, planning will need to be made about
# how to implement for kernel NFS server. One candidate is using bind mounts,
# but this will require careful design and testing. For now we will keep it disabled.
"""
if len(data["aliases"]) != len(data["paths"]):
verrors.add(
f"{schema_name}.aliases",
"This field should be either empty of have the same number of elements as paths",
)
"""
# need to make sure that the nfs share is within the zpool mountpoint
await check_path_resides_within_volume(
verrors, self.middleware, f'{schema_name}.path', data['path'],
)
filters = []
if old:
filters.append(["id", "!=", old["id"]])
other_shares = await self.middleware.call(
"sharing.nfs.query", filters, {"extra": {"retrieve_locked_info": False}}
)
dns_cache = await self.resolve_hostnames(
sum([share["hosts"] for share in other_shares], []) + data['hosts']
)
self.validate_share_networks(data['networks'], dns_cache, schema_name, verrors)
# Stop here if the input generated errors for the user to fix
verrors.check()
await self.validate_hosts_and_networks(
other_shares, data, schema_name, verrors, dns_cache
)
# Stop here if the input generated errors for the user to fix
verrors.check()
# Confirm the share will not collide with an existing share
await self.validate_share_path(other_shares, data, schema_name, verrors)
# Stop here if the input generated errors for the user to fix
verrors.check()
for k in ["maproot", "mapall"]:
map_user = data[f"{k}_user"]
map_group = data[f"{k}_group"]
if not map_user and not map_group:
pass
elif not map_user and map_group:
verrors.add(f"{schema_name}.{k}_user", "This field is required when map group is specified")
else:
try:
await self.middleware.call('user.get_user_obj', {'username': map_user})
except KeyError:
verrors.add(f"{schema_name}.{k}_user", f"User not found: {map_user}")
if map_group:
try:
await self.middleware.call('group.get_group_obj', {'groupname': map_group})
except KeyError:
verrors.add(f"{schema_name}.{k}_group", f"Group not found: {map_group}")
if data["maproot_user"] and data["mapall_user"]:
verrors.add(f"{schema_name}.mapall_user", "maproot_user disqualifies mapall_user")
v4_sec = list(filter(lambda sec: sec != "SYS", data.get("security", [])))
if v4_sec:
nfs_config = await self.middleware.call("nfs.config")
if NFSProtocol.NFSv4 not in nfs_config["protocols"]:
verrors.add(
f"{schema_name}.security",
f"The following security flavor(s) require NFSv4 to be enabled: {','.join(v4_sec)}."
)
@private
def validate_share_networks(self, networks, dns_cache, schema_name, verrors):
"""
The network field is strictly limited to CIDR formats:
The input validator should enforce the CIDR format and a single address per entry.
This validation is limited to:
* Collisions with resolved hostnames
* Overlapping subnets
"""
dns_cache_values = list(dns_cache.values())
for IPaddr in networks:
IPinterface = ipaddress.ip_interface(IPaddr)
if str(IPinterface.ip) in dns_cache_values:
key = next(key for key, value in dns_cache.items() if value == str(IPinterface.ip))
verrors.add(
f"{schema_name}.networks",
f"ERROR - Resolved hostname to duplicate address: host '{key}' resolves to '{IPaddr}'"
)
overlaps = self.test_for_overlapped_networks(networks)
if overlaps:
verrors.add(
f"{schema_name}.networks",
f"ERROR - Overlapped subnets: {overlaps}"
)
@private
def test_for_overlapped_networks(self, networks, this_network=None):
"""
INPUT: networks a list of ip_networks
this_network optional ip_network to test against networks
if this_network is None, then check networks list for overlaps
else check this_network for overlaps with entries in networks
We set strict to False to allow entries like: 1.2.3.4/24
"""
overlaps = []
if this_network is not None:
this_network = ipaddress.ip_network(this_network, strict=False)
for that_network in networks:
that_network = ipaddress.ip_network(that_network, strict=False)
if this_network.overlaps(that_network):
overlaps.append((this_network, that_network))
else:
for n1, n2 in itertools.combinations(networks, 2):
# Check for overlapped networks
ipn1 = ipaddress.ip_network(n1, strict=False)
ipn2 = ipaddress.ip_network(n2, strict=False)
if ipn1.overlaps(ipn2):
overlaps.append((n1, n2))
return overlaps if overlaps else None
@private
async def resolve_hostnames(self, hostnames):
hostnames = list(set(hostnames))
async def resolve(hostname):
try:
try:
# If this is an IP address, just return it
ip = IpAddress()
ip(hostname)
return hostname
except ValueError:
# Not an IP address, should be a name
if domain := get_wildcard_domain(hostname):
hostname = domain
if leftmost_has_wildcards(hostname):
# We know this will not resolve
return None
else:
try:
dns_addresses = [x['address'] for x in await self.middleware.call('dnsclient.forward_lookup', {
'names': [hostname]
})]
# We might get both IPv4 and IPv6 addresses, the caller expects a single response
return dns_addresses[0]
except Exception as e:
self.logger.warning("Unable to resolve host %r: %r", hostname, e)
return None
except Exception as e:
self.logger.warning("Unable to resolve or invalid host %r: %r", hostname, e)
return None
resolved_hostnames = await asyncio_map(resolve, hostnames, 8)
return dict(zip(hostnames, resolved_hostnames))
@private
async def validate_hosts_and_networks(self, other_shares, data, schema_name, verrors, dns_cache):
"""
We attempt to prevent share situation where the same host is provided access to a
share but with potentially different permissions.
This module does checks that encompass both hosts and networks.
"""
tgt_realpath = (await self.middleware.call('filesystem.stat', data['path']))['realpath']
used_networks = set()
used_hosts = set() # host names without an entry in the cache
for share in other_shares:
try:
shr_realpath = (await self.middleware.call('filesystem.stat', share['path']))['realpath']
except CallError as e:
if e.errno != errno.ENOENT:
raise
# Allow for locked filesystems
shr_realpath = share['path']
if tgt_realpath == shr_realpath:
for host in share["hosts"]:
host_ip = dns_cache.get(host)
if host_ip is None:
used_hosts.add(host)
continue
if host.startswith('@'):
continue
try:
network = ipaddress.ip_network(host_ip, strict=False)
except Exception:
self.logger.warning("Got invalid host %r", host)
continue
else:
used_hosts.add(str(network))
for network in share["networks"]:
try:
network = ipaddress.ip_network(network, strict=False)
except Exception:
self.logger.warning("Got invalid network %r", network)
continue
else:
used_networks.add(network)
if not share["hosts"] and not share["networks"]:
used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
used_networks.add(ipaddress.ip_network("::/0"))
for network in set(data["networks"]):
network = ipaddress.ip_network(network, strict=False)
# Look for exact match. This also works for IPv6
if network in used_networks:
verrors.add(
f"{schema_name}.networks",
f"ERROR - Another NFS share already exports {data['path']} for network {network}"
)
# Look for subnet or supernet overlaps
# Works for IPv4 and IPv6, but ignores mixed tests
overlaps = self.test_for_overlapped_networks(used_networks, network)
if overlaps:
# Each overlap entry is a tuple: 'this' is overlapped by 'that'
# There may well be more than one entry, but it's more clear to present only one.
verrors.add(
f"{schema_name}.networks",
f"ERROR - This or another NFS share exports {data['path']} to {str(overlaps[0][1])} "
f"and overlaps network {network}"
)
used_networks.add(network)
for host in set(data["hosts"]):
# check for duplicate 'hosts' in other shares
# netgroups are valid, but limited to same duplicate restrictions
if host in used_hosts:
verrors.add(
f"{schema_name}.hosts",
f"ERROR - Another NFS share already exports {data['path']} for host {str(host)}"
)
continue
if host.startswith('@'):
continue
# wildcarded names without a 'domain' are valid
if leftmost_has_wildcards(host) and get_domain(host) is None:
continue
# Everything else should be resolvable
host_ip = dns_cache[host]
if host_ip is None:
verrors.add(
f"{schema_name}.hosts",
f"Unable to resolve host '{host}'"
)
continue
# Nothing more to check with wildcard names
if leftmost_has_wildcards(host):
continue
@private
async def validate_share_path(self, other_shares, data, schema_name, verrors):
"""
A share path centric test. Checks new share path against existing.
This function checks for common conditions.
"""
# We test other shares that are sharing the same path
tgt_stat = await self.middleware.call('filesystem.stat', data["path"])
# Sanity check: no symlinks
if tgt_stat['type'] == "SYMLINK":
verrors.add(
f"{schema_name}.path",
f"Symbolic links are not allowed: {data['path']}."
)
tgt_realpath = tgt_stat['realpath']
for share in other_shares:
try:
shr_realpath = (await self.middleware.call('filesystem.stat', share['path']))['realpath']
except CallError as e:
if e.errno != errno.ENOENT:
raise
# Allow for locked filesystems
shr_realpath = share['path']
if tgt_realpath == shr_realpath:
# Test hosts
                # An empty 'hosts' list == '*' == 'everybody'. Workaround: remove '*' as a host entry
datahosts = [host for host in data["hosts"] if host != "*"]
sharehosts = [host for host in share["hosts"] if host != "*"]
commonHosts = set(datahosts) & set(sharehosts)
commonNetworks = set(data["networks"]) & set(share["networks"])
if bool(commonHosts) | bool(commonNetworks):
reason = "'everybody', i.e. '*'"
other_share_desc = "Another share with the same path"
if commonHosts:
desc = other_share_desc
reason = str(commonHosts)
else:
desc = other_share_desc
reason = str(commonNetworks)
verrors.add(
f"{schema_name}.path",
f"ERROR - Export conflict. {desc} exports {share['path']} for {reason}"
)
# Found an export of the same path to the same 'hosts'. Report it.
break
@private
async def extend(self, data):
data["networks"] = data.pop("network").split()
data["hosts"] = data["hosts"].split()
data["security"] = [s.upper() for s in data["security"]]
return data
@private
async def compress(self, data):
data["network"] = " ".join(data.pop("networks"))
data["hosts"] = " ".join(data["hosts"])
data["security"] = [s.lower() for s in data["security"]]
data.pop(self.locked_field, None)
return data
async def pool_post_import(middleware, pool):
"""
Makes sure to reload NFS if a pool is imported and there are shares configured for it.
"""
if pool is None:
middleware.create_task(middleware.call('etc.generate', 'nfsd'))
return
path = f'/mnt/{pool["name"]}'
for share in await middleware.call('sharing.nfs.query', [], {'select': ['path']}):
if share['path'].startswith(path):
middleware.create_task(middleware.call('service.reload', 'nfs'))
break
async def setup(middleware):
await middleware.call(
'interface.register_listen_delegate',
SystemServiceListenMultipleDelegate(middleware, 'nfs', 'bindip'),
)
middleware.register_hook('pool.post_import', pool_post_import, sync=True)
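
# Illustrative sketch (not part of the original module): the subnet overlap
# test used by test_for_overlapped_networks, reduced to stdlib ipaddress.
# strict=False tolerates host bits in entries such as '1.2.3.4/24'.
def _example_networks_overlap(n1: str, n2: str) -> bool:
    return ipaddress.ip_network(n1, strict=False).overlaps(
        ipaddress.ip_network(n2, strict=False)
    )

def _example_networks_overlap_demo():
    assert _example_networks_overlap('192.168.0.0/24', '192.168.0.128/25')
    assert not _example_networks_overlap('192.168.0.0/24', '192.168.1.0/24')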
# File: truenas_middleware/src/middlewared/middlewared/plugins/dns_client.py
import asyncio
from dns.asyncresolver import Resolver
from io import StringIO
from middlewared.service import private, Service, ValidationError
from middlewared.schema import accepts, returns, IPAddr, Dict, Int, List, Str, Ref, OROperator
from middlewared.utils import filter_list
class DNSClient(Service):
class Config:
private = True
@private
async def get_resolver(self, options):
if options['nameservers']:
mem_resolvconf = StringIO()
for n in options['nameservers']:
mem_resolvconf.write(f"nameserver {n}\n")
mem_resolvconf.seek(0)
r = Resolver(mem_resolvconf)
else:
r = Resolver()
r.timeout = options['timeout']
return r
@private
async def resolve_name(self, name, rdtype, options):
r = await self.get_resolver(options)
if rdtype == 'PTR':
ans = await r.resolve_address(
name,
lifetime=options['lifetime']
)
else:
ans = await r.resolve(
name, rdtype,
lifetime=options['lifetime']
)
return ans
@accepts(Dict(
'lookup_data',
List('names', items=[Str('name')], required=True),
List(
'record_types',
items=[Str('record_type', default='A', enum=['A', 'AAAA', 'SRV', 'CNAME'])],
default=['A', 'AAAA']
),
Dict(
'dns_client_options',
List('nameservers', items=[IPAddr("ip")], default=[]),
Int('lifetime', default=12),
Int('timeout', default=4),
Str('raise_error', default='HOST_FAILURE', enum=['NEVER', 'ANY_FAILURE', 'HOST_FAILURE', 'ALL_FAILURE']),
register=True
),
Ref('query-filters'),
Ref('query-options'),
))
@returns(OROperator(
List(
'rdata_list_srv',
items=[
Dict(
Str('name'),
Int('priority'),
Int('weight'),
Int('port'),
Str('class'),
Str('type'),
Int('ttl'),
Str('target'),
)
],
),
List(
'rdata_list_cname',
items=[
Dict(
Str('name'),
Str('class'),
Str('type'),
Int('ttl'),
Str('target'),
)
],
),
List(
'rdata_list',
items=[
Dict(
Str('name'),
Str('class'),
Str('type'),
Int('ttl'),
IPAddr('address'),
)
],
),
name='record_list',
))
async def forward_lookup(self, data):
"""
Rules: We can combine 'A' and 'AAAA', but 'SRV' and 'CNAME' must be singular.
NB1: By default record_types is ['A', 'AAAA'] and if selected will return both 'A' and 'AAAA' records
for hosts that support both.
NB2: By default raise_error is 'HOST_FAILURE', i.e. raise exception if all tests for a name fail
NB3: With raise_error as 'NEVER' all results are returned and resolve attempts that
generate an exception are returned as an empty list
"""
single_rtypes = ['CNAME', 'SRV']
output = []
options = data['dns_client_options']
if (len(data['record_types']) > 1) and (set(single_rtypes) & set(data['record_types'])):
raise ValidationError(
                'dnsclient.forward_lookup',
f'{single_rtypes} cannot be combined with other rtypes in the same request'
)
results = await asyncio.gather(*[
self.resolve_name(h, rtype, options) for h in data['names'] for rtype in data['record_types']
], return_exceptions=True)
failures = []
failuresPerHost = {}
for (h, rtype), ans in zip([(h, rtype) for h in data['names'] for rtype in data['record_types']], results):
if isinstance(ans, Exception):
failures.append(ans)
failuresPerHost[h] = failuresPerHost.setdefault(h, [])
failuresPerHost[h].append(ans)
else:
ttl = ans.response.answer[0].ttl
name = ans.response.answer[0].name.to_text()
# 'SRV' and 'CNAME' are special
if rtype == 'SRV':
entries = [{
"name": name,
"priority": i.priority,
"weight": i.weight,
"port": i.port,
"class": i.rdclass.name,
"type": i.rdtype.name,
"ttl": ttl,
"target": i.target.to_text()
} for i in ans.response.answer[0].items if i.rdtype.name == rtype]
elif rtype == 'CNAME':
entries = [{
"name": name,
"class": i.rdclass.name,
"type": i.rdtype.name,
"ttl": ttl,
"target": i.target.to_text(),
} for i in ans.response.answer[0].items if i.rdtype.name == rtype]
else: # The remaining options are 'A' and/or 'AAAA'
entries = [{
"name": name,
"class": i.rdclass.name,
"type": i.rdtype.name,
"ttl": ttl,
"address": i.address,
} for i in ans.rrset.items if i.rdtype.name == rtype]
output.extend(entries)
# NEVER - squash all failures
# HOST - raise if all tests for a name fail (default case)
# ANY - raise on any failure
# ALL - raise if all tests for all 'names' fail
if failures:
if options['raise_error'] == 'HOST_FAILURE':
                for h in data['names']:
                    fph = len(failures_per_host.get(h, []))
                    if fph == len(data['record_types']):
                        raise failures_per_host[h][0]
elif options['raise_error'] == 'ANY_FAILURE':
raise failures[0]
elif options['raise_error'] == 'ALL_FAILURE':
if len(data['names']) * len(data['record_types']) == len(failures):
raise failures[0]
return filter_list(output, data['query-filters'], data['query-options'])
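    # A minimal usage sketch (hypothetical names and nameserver, not taken from
    # the source): resolving SRV records for one name through the middleware:
    #
    #     await middleware.call('dnsclient.forward_lookup', {
    #         'names': ['_ldap._tcp.example.com'],
    #         'record_types': ['SRV'],
    #         'dns_client_options': {'nameservers': ['192.168.0.1']},
    #     })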
@accepts(Dict(
'lookup_data',
List("addresses", items=[IPAddr("address")], required=True),
Ref('dns_client_options'),
Ref('query-filters'),
Ref('query-options'),
))
@returns(List(
'rdata_list',
items=[
Dict(
Str('name'),
Str('class'),
Str('type'),
Int('ttl'),
Str('target'),
)
]
))
async def reverse_lookup(self, data):
output = []
options = data['dns_client_options']
results = await asyncio.gather(*[
self.resolve_name(i, 'PTR', options) for i in data['addresses']
])
for ans in results:
ttl = ans.response.answer[0].ttl
name = ans.response.answer[0].name.to_text()
entries = [{
"name": name,
"class": i.rdclass.name,
"type": i.rdtype.name,
"ttl": ttl,
"target": i.target.to_text(),
} for i in ans.response.answer[0].items]
output.extend(entries)
return filter_list(output, data['query-filters'], data['query-options'])
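    # Likewise for reverse lookups (hypothetical address): PTR resolution is
    # delegated to dns.asyncresolver's resolve_address via resolve_name above:
    #
    #     await middleware.call('dnsclient.reverse_lookup', {'addresses': ['8.8.8.8']})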

# --- truenas_middleware/src/middlewared/middlewared/plugins/update.py ---
from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import job, private, CallError, Service, pass_app
import middlewared.sqlalchemy as sa
from middlewared.plugins.update_.utils import UPLOAD_LOCATION
from middlewared.utils import PRODUCT
from middlewared.utils.time_utils import utc_now
import enum
import errno
import os
import shutil
import subprocess
import textwrap
import pathlib
SYSTEM_UPGRADE_REBOOT_REASON = 'System upgrade'
def parse_train_name(name):
split = (name + '-').split('-')
version = split[2]
branch = split[3]
return [version, branch]
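# A worked example (train name is illustrative): 'TrueNAS-SCALE-24.04-STABLE'
# splits into ['TrueNAS', 'SCALE', '24.04', 'STABLE', ''], so
# parse_train_name('TrueNAS-SCALE-24.04-STABLE') == ['24.04', 'STABLE'].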
class CompareTrainsResult(enum.Enum):
MAJOR_DOWNGRADE = "MAJOR_DOWNGRADE"
MAJOR_UPGRADE = "MAJOR_UPGRADE"
NIGHTLY_DOWNGRADE = "NIGHTLY_DOWNGRADE"
NIGHTLY_UPGRADE = "NIGHTLY_UPGRADE"
BAD_UPGRADES = {
CompareTrainsResult.NIGHTLY_DOWNGRADE: textwrap.dedent("""\
You're not allowed to change away from the nightly train, it is considered a downgrade.
If you have an existing boot environment that uses that train, boot into it in order to upgrade
that train.
"""),
CompareTrainsResult.MAJOR_DOWNGRADE: textwrap.dedent("""\
Downgrading TrueNAS installation is not supported.
If you have an existing boot environment that uses that train, boot into it in order to upgrade
that train.
"""),
}
def compare_trains(t1, t2):
v1 = parse_train_name(t1)
v2 = parse_train_name(t2)
branch1 = v1[1].lower()
branch2 = v2[1].lower()
if branch1 != branch2:
if branch2 == "nightlies":
return CompareTrainsResult.NIGHTLY_UPGRADE
elif branch1 == "nightlies":
return CompareTrainsResult.NIGHTLY_DOWNGRADE
if v1[0] != v2[0]:
if v1[0] > v2[0]:
return CompareTrainsResult.MAJOR_DOWNGRADE
else:
return CompareTrainsResult.MAJOR_UPGRADE
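# Illustrative comparisons (hypothetical train names, not an exhaustive matrix):
#
#     compare_trains('TrueNAS-SCALE-24.04-STABLE', 'TrueNAS-SCALE-25.04-STABLE')
#     # -> CompareTrainsResult.MAJOR_UPGRADE
#     compare_trains('TrueNAS-SCALE-24.04-STABLE', 'TrueNAS-SCALE-24.04-Nightlies')
#     # -> CompareTrainsResult.NIGHTLY_UPGRADE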
class UpdateModel(sa.Model):
__tablename__ = 'system_update'
id = sa.Column(sa.Integer(), primary_key=True)
upd_autocheck = sa.Column(sa.Boolean(), default=True)
upd_train = sa.Column(sa.String(50))
class UpdateService(Service):
class Config:
cli_namespace = 'system.update'
@accepts(roles=['READONLY_ADMIN'])
async def get_auto_download(self):
"""
Returns if update auto-download is enabled.
"""
return (await self.middleware.call('datastore.config', 'system.update'))['upd_autocheck']
@accepts(Bool('autocheck'))
async def set_auto_download(self, autocheck):
"""
Sets if update auto-download is enabled.
"""
config = await self.middleware.call('datastore.config', 'system.update')
await self.middleware.call('datastore.update', 'system.update', config['id'], {'upd_autocheck': autocheck})
await self.middleware.call('service.restart', 'cron')
@accepts(roles=['READONLY_ADMIN'])
def get_trains(self):
"""
Returns available trains dict and the currently configured train as well as the
train of currently booted environment.
"""
self.middleware.call_sync('network.general.will_perform_activity', 'update')
data = self.middleware.call_sync('datastore.config', 'system.update')
trains_data = self.middleware.call_sync('update.get_trains_data')
current_train = trains_data['current_train']
trains = trains_data['trains']
selected = None
for name, train in list(trains.items()):
try:
result = compare_trains(current_train, name)
except Exception:
self.logger.warning(
"Failed to compare trains %r and %r", current_train, name, exc_info=True
)
continue
else:
if result in BAD_UPGRADES:
trains.pop(name)
continue
if not selected and data['upd_train'] == name:
selected = data['upd_train']
if name in trains_data['trains_redirection']:
trains.pop(name)
continue
if not data['upd_train'] or not selected:
selected = current_train
if selected in trains_data['trains_redirection']:
selected = trains_data['trains_redirection'][selected]
return {
'trains': trains,
'current': current_train,
'selected': selected,
}
@accepts(Str('train', empty=False))
def set_train(self, train):
"""
Set an update train to be used by default in updates.
"""
return self.__set_train(train)
def __set_train(self, train, trains=None):
"""
Wrapper so we don't call get_trains twice on update method.
"""
if trains is None:
trains = self.get_trains()
if train != trains['selected']:
if train not in trains['trains']:
raise CallError('Invalid train name.', errno.ENOENT)
try:
result = compare_trains(trains['current'], train)
except Exception:
self.logger.warning(
"Failed to compare trains %r and %r", trains['current'], train, exc_info=True
)
else:
if result in BAD_UPGRADES:
raise CallError(BAD_UPGRADES[result])
data = self.middleware.call_sync('datastore.config', 'system.update')
if data['upd_train'] != train:
self.middleware.call_sync('datastore.update', 'system.update', data['id'], {
'upd_train': train
})
return True
@accepts(Dict(
'update-check-available',
Str('train', required=False),
required=False,
), roles=['READONLY_ADMIN'])
def check_available(self, attrs):
"""
Checks if there is an update available from update server.
status:
- REBOOT_REQUIRED: an update has already been applied
- AVAILABLE: an update is available
- UNAVAILABLE: no update available
- HA_UNAVAILABLE: HA is non-functional
.. examples(websocket)::
Check available update using default train:
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "update.check_available"
}
"""
try:
applied = self.middleware.call_sync('cache.get', 'update.applied')
except KeyError:
applied = False
if applied is True:
return {'status': 'REBOOT_REQUIRED'}
if self.middleware.call_sync('failover.licensed'):
# First, let's make sure HA is functional
if self.middleware.call_sync('failover.disabled.reasons'):
return {'status': 'HA_UNAVAILABLE'}
            # If it's HA and the standby is running an old version, we assume
            # a legacy upgrade and check for updates on the standby.
try:
self.middleware.call_sync(
'failover.call_remote', 'failover.upgrade_version',
)
except CallError as e:
if e.errno != CallError.ENOMETHOD:
raise
return self.middleware.call_sync(
'failover.call_remote', 'update.check_available', [attrs],
)
trains = self.middleware.call_sync('update.get_trains')
train = (attrs or {}).get('train')
if not train:
train = trains['selected']
elif train not in trains['trains']:
raise CallError('Invalid train name.', errno.ENOENT)
return self.middleware.call_sync('update.check_train', train)
@accepts(Str('path', null=True, default=None), roles=['READONLY_ADMIN'])
async def get_pending(self, path):
"""
Gets a list of packages already downloaded and ready to be applied.
Each entry of the lists consists of type of operation and name of it, e.g.
{
"operation": "upgrade",
"name": "baseos-11.0 -> baseos-11.1"
}
"""
if path is None:
path = await self.middleware.call('update.get_update_location')
return await self.middleware.call('update.get_pending_in_path', path)
@accepts(Dict(
'update',
Str('dataset_name', null=True, default=None),
Bool('resume', default=False),
Str('train', null=True, default=None),
Bool('reboot', default=False),
))
@job(lock='update')
@pass_app(rest=True)
async def update(self, app, job, attrs):
"""
        Downloads (if not already in cache) and applies an update.
`resume` should be set to `true` if a previous call to this method returned a `CallError` with `errno=EAGAIN`
meaning that an upgrade can be performed with a warning and that warning is accepted. In that case, update
process will be continued using an already downloaded file without performing any extra checks.
"""
location = await self.middleware.call('update.get_update_location')
if attrs['resume']:
options = {'raise_warnings': False}
else:
options = {}
trains = await self.middleware.call('update.get_trains')
if attrs['train']:
await self.middleware.run_in_thread(self.__set_train, attrs['train'], trains)
train = attrs['train']
else:
train = trains['selected']
update = await self.middleware.call('update.download_update', job, train, location, 50)
if not update:
raise CallError('No update available')
await self.middleware.call('update.install', job, os.path.join(location, 'update.sqsh'), options)
await self.middleware.call('cache.put', 'update.applied', True)
await self.middleware.call_hook('update.post_update')
if attrs['reboot']:
await self.middleware.call('system.reboot', SYSTEM_UPGRADE_REBOOT_REASON, {'delay': 10}, app=app)
return True
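    # A sketch of the resume flow described in the docstring (illustrative
    # only; in practice these calls return jobs that must be awaited): a first
    # attempt that fails with errno.EAGAIN can be retried with resume=True to
    # accept the warning and reuse the already-downloaded file:
    #
    #     try:
    #         await middleware.call('update.update', {})
    #     except CallError as e:
    #         if e.errno == errno.EAGAIN:
    #             await middleware.call('update.update', {'resume': True})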
@accepts()
@job(lock='updatedownload')
def download(self, job):
"""
Download updates using selected train.
"""
train = self.middleware.call_sync('update.get_trains')['selected']
location = self.middleware.call_sync('update.get_update_location')
return self.middleware.call_sync('update.download_update', job, train, location, 100)
@private
async def download_update(self, *args):
await self.middleware.call('network.general.will_perform_activity', 'update')
return await self.middleware.call('update.download_impl', *args)
@accepts(
Str('path'),
Dict(
'options',
Str('dataset_name', null=True, default=None),
Bool('resume', default=False),
Bool('cleanup', default=True),
)
)
@job(lock='update')
def manual(self, job, path, options):
"""
Update the system using a manual update file.
`path` must be the absolute path to the update file.
`options.resume` should be set to `true` if a previous call to this method returned a `CallError` with
`errno=EAGAIN` meaning that an upgrade can be performed with a warning and that warning is accepted.
If `options.cleanup` is set to `false` then the manual update file won't be removed on update success and
newly created BE won't be removed on update failure (useful for debugging purposes).
"""
if options.pop('resume'):
options['raise_warnings'] = False
update_file = pathlib.Path(path)
# make sure absolute path was given
if not update_file.is_absolute():
raise CallError('Absolute path must be provided.', errno.ENOENT)
# make sure file exists
if not update_file.exists():
raise CallError('File does not exist.', errno.ENOENT)
unlink_file = True
try:
try:
self.middleware.call_sync(
'update.install', job, str(update_file.absolute()), options,
)
except Exception as e:
if isinstance(e, CallError):
if e.errno == errno.EAGAIN:
unlink_file = False
raise
else:
self.logger.debug('Applying manual update failed', exc_info=True)
raise CallError(str(e), errno.EFAULT)
job.set_progress(95, 'Cleaning up')
finally:
if options['cleanup']:
if unlink_file:
if os.path.exists(path):
os.unlink(path)
if path.startswith(UPLOAD_LOCATION):
self.middleware.call_sync('update.destroy_upload_location')
self.middleware.call_hook_sync('update.post_update')
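    # Usage sketch for a manual update (hypothetical path; the file must exist
    # at an absolute path on the system):
    #
    #     await middleware.call('update.manual', '/mnt/tank/update.sqsh', {'cleanup': True})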
@private
def file_impl(self, job, options):
if options['resume']:
update_options = {'raise_warnings': False}
else:
update_options = {}
dest = options['destination']
if not dest:
if not options['resume']:
try:
self.middleware.call_sync('update.create_upload_location')
except Exception as e:
raise CallError(str(e))
dest = UPLOAD_LOCATION
elif not dest.startswith('/mnt/'):
raise CallError(f'Destination: {dest!r} must reside within a pool')
if not os.path.isdir(dest):
raise CallError(f'Destination: {dest!r} is not a directory')
destfile = os.path.join(dest, 'manualupdate.sqsh')
unlink_destfile = True
try:
if options['resume']:
if not os.path.exists(destfile):
raise CallError('There is no uploaded file to resume')
else:
job.check_pipe('input')
job.set_progress(10, 'Writing uploaded file to disk')
with open(destfile, 'wb') as f:
shutil.copyfileobj(job.pipes.input.r, f, 1048576)
try:
self.middleware.call_sync('update.install', job, destfile, update_options)
except CallError as e:
if e.errno == errno.EAGAIN:
unlink_destfile = False
raise
job.set_progress(95, 'Cleaning up')
finally:
if unlink_destfile:
if os.path.exists(destfile):
os.unlink(destfile)
if dest == UPLOAD_LOCATION:
self.middleware.call_sync('update.destroy_upload_location')
@accepts(Dict(
'updatefile',
Bool('resume', default=False),
Str('destination', null=True, default=None),
))
@job(lock='update')
async def file(self, job, options):
"""
Updates the system using the uploaded .tar file.
`resume` should be set to `true` if a previous call to this method returned a `CallError` with `errno=EAGAIN`
meaning that an upgrade can be performed with a warning and that warning is accepted. In that case, re-uploading
the file is not necessary.
Use null `destination` to create a temporary location.
"""
await self.middleware.run_in_thread(self.file_impl, job, options)
await self.middleware.call_hook('update.post_update')
job.set_progress(100, 'Update completed')
@private
async def get_update_location(self):
syspath = (await self.middleware.call('systemdataset.config'))['path']
if syspath:
path = f'{syspath}/update'
else:
path = UPLOAD_LOCATION
os.makedirs(path, exist_ok=True)
return path
@private
def take_systemdataset_samba4_snapshot(self):
basename = self.middleware.call_sync('systemdataset.config')['basename']
if basename is None:
self.logger.warning('System dataset is not available, not taking snapshot')
return
dataset = f'{basename}/samba4'
proc = subprocess.run(['zfs', 'list', '-t', 'snapshot', '-H', '-o', 'name', '-s', 'name', '-d', '1', dataset],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8', errors='ignore')
if proc.returncode != 0:
self.logger.warning('Unable to list dataset %s snapshots: %s', dataset, proc.stderr)
return
snapshots = [s.split('@')[1] for s in proc.stdout.strip().splitlines()]
for snapshot in [s for s in snapshots if s.startswith('update--')][:-4]:
self.logger.info('Deleting dataset %s snapshot %s', dataset, snapshot)
subprocess.run(['zfs', 'destroy', f'{dataset}@{snapshot}'])
current_version = self.middleware.call_sync('system.version_short')
snapshot = f'update--{utc_now().strftime("%Y-%m-%d-%H-%M")}--{PRODUCT}-{current_version}'
subprocess.run(['zfs', 'snapshot', f'{dataset}@{snapshot}'])
async def post_update_hook(middleware):
is_ha = await middleware.call('failover.licensed')
if not is_ha or await middleware.call('failover.status') != 'BACKUP':
await middleware.call('update.take_systemdataset_samba4_snapshot')
async def setup(middleware):
await middleware.call('network.general.register_activity', 'update', 'Update')
middleware.register_hook('update.post_update', post_update_hook, sync=True)

# --- truenas_middleware/src/middlewared/middlewared/plugins/smart.py ---
import asyncio
from datetime import datetime, timedelta, timezone
import functools
import re
import time
import json
from typing import Any
from humanize import ordinal
from middlewared.common.smart.smartctl import SMARTCTL_POWERMODES
from middlewared.plugins.smart_.schedule import SMARTD_SCHEDULE_PIECES, smartd_schedule_piece_values
from middlewared.schema import accepts, Bool, Cron, Datetime, Dict, Int, Float, List, Patch, returns, Str
from middlewared.service import (
CRUDService, filterable, filterable_returns, filter_list, job, private, SystemServiceService, ValidationErrors
)
from middlewared.service_exception import CallError
import middlewared.sqlalchemy as sa
from middlewared.utils.asyncio_ import asyncio_map
from middlewared.utils.time_utils import utc_now
from middlewared.api.current import (
AtaSelfTest, NvmeSelfTest, ScsiSelfTest
)
RE_TIME = re.compile(r'test will complete after ([a-z]{3} [a-z]{3} [0-9 ]+ \d\d:\d\d:\d\d \d{4})', re.IGNORECASE)
RE_TIME_SCSIPRINT_EXTENDED = re.compile(r'Please wait (\d+) minutes for test to complete')
async def annotate_disk_smart_tests(middleware, tests_filter, disk):
if disk["disk"] is None:
return
output = await middleware.call("disk.smartctl", disk["disk"], ["-a", "--json=c"], {"silent": True})
if output is None:
return
data = json.loads(output)
tests = parse_smart_selftest_results(data) or []
current_test = parse_current_smart_selftest(data)
return dict(tests=filter_list(tests, tests_filter), current_test=current_test, **disk)
def parse_smart_selftest_results(data) -> list[dict[str, Any]] | None:
tests = []
# ataprint.cpp
if "ata_smart_self_test_log" in data:
current_power_on_hours = data["power_on_time"]["hours"]
if "table" in data["ata_smart_self_test_log"]["standard"]: # If there are no tests, there is no table
for index, entry in enumerate(data["ata_smart_self_test_log"]["standard"]["table"]):
# remaining_percent is in the dict only if the test is in progress (status value & 0x0f)
if remaining := entry["status"]["value"] & 0x0f:
remaining = entry["status"]["remaining_percent"] / 100
test = AtaSelfTest(
num=index,
description=entry["type"]["string"],
status=entry["status"]["string"],
status_verbose=entry["status"]["string"],
remaining=remaining,
lifetime=entry["lifetime_hours"],
lba_of_first_error=entry.get("lba"), # only included if there is an error
                    power_on_hours_ago=current_power_on_hours - entry["lifetime_hours"]
)
if test.status_verbose == "Completed without error":
test.status = "SUCCESS"
elif test.status_verbose == "Self-test routine in progress":
test.status = "RUNNING"
elif test.status_verbose in ["Aborted by host", "Interrupted (host reset)"]:
                    test.status = "ABORTED"
else:
test.status = "FAILED"
tests.append(test.dict())
return tests
# nvmeprint.cpp
if "nvme_self_test_log" in data:
current_power_on_hours = data["power_on_time"]["hours"]
if "table" in data["nvme_self_test_log"]:
for index, entry in enumerate(data["nvme_self_test_log"]["table"]):
if lba := entry.get("lba"):
lba = entry["lba"]["value"]
test = NvmeSelfTest(
num=index,
description=entry["self_test_code"]["string"],
status=entry["self_test_result"]["string"],
status_verbose=entry["self_test_result"]["string"],
power_on_hours=entry["power_on_hours"],
failing_lba=lba,
nsid=entry.get("nsid"),
seg=entry.get("segment"),
sct=entry.get("status_code_type") or 0x0,
code=entry.get("status_code") or 0x0,
power_on_hours_ago=current_power_on_hours - entry["power_on_hours"]
)
if test.status_verbose == "Completed without error":
test.status = "SUCCESS"
elif test.status_verbose.startswith("Aborted:"):
test.status = "ABORTED"
else:
test.status = "FAILED"
tests.append(test.dict())
return tests
# scsiprint.cpp
# this JSON has numbered keys as an index, there's a reason it's not called a "smart" test
if "scsi_self_test_0" in data: # 0 is most recent test
current_power_on_hours = data["power_on_time"]["hours"]
for index in range(0, 20): # only 20 tests can ever return
test_key = f"scsi_self_test_{index}"
if test_key not in data:
break
entry = data[test_key]
if segment := entry.get("failed_segment"):
segment = entry["failed_segment"]["value"]
if lba := entry.get("lba_first_failure"):
lba = entry["lba_first_failure"]["value"]
lifetime = current_power_on_hours
if not entry.get("self_test_in_progress"):
lifetime = entry["power_on_time"]["hours"]
test = ScsiSelfTest(
num=index,
description=entry["code"]["string"],
status=entry["result"]["string"],
status_verbose=entry["result"]["string"], #will be replaced
segment_number=segment,
lifetime=lifetime,
lba_of_first_error=lba,
power_on_hours_ago=current_power_on_hours - lifetime
)
if test.status_verbose == "Completed":
test.status = "SUCCESS"
elif test.status_verbose == "Self test in progress ...":
test.status = "RUNNING"
elif test.status_verbose.startswith("Aborted"):
test.status = "ABORTED"
else:
test.status = "FAILED"
tests.append(test.dict())
return tests
def parse_current_smart_selftest(data):
# ata
if "ata_smart_self_test_log" in data:
if tests := data["ata_smart_self_test_log"]["standard"].get("table"):
if remaining := tests[0]["status"].get("remaining_percent"):
return {"progress": 100 - remaining}
# nvme
if "nvme_self_test_log" in data:
if remaining := data["nvme_self_test_log"].get("current_self_test_completion_percent"):
return {"progress": remaining}
# scsi gives no progress
if "self_test_in_progress" in data:
return {"progress": 0}
def smart_test_disks_intersect(existing_test, new_test, disk_choices):
if existing_test['all_disks']:
return (
'type',
f'There already is an all-disks {existing_test["type"]} test',
)
elif new_test['all_disks'] and (used_disks := [
disk_choices[disk]
for disk in existing_test['disks']
if disk in disk_choices
]):
return (
'type',
f'The following disks already have {existing_test["type"]} test: {", ".join(used_disks)}'
)
elif (used_disks := [
disk_choices[disk]
for disk in set(new_test['disks']) & set(existing_test['disks'])
if disk in disk_choices
]):
return (
'disks',
f'The following disks already have {existing_test["type"]} test: {", ".join(used_disks)}'
)
def smart_test_schedules_intersect_at(a, b):
intersections = []
for piece in SMARTD_SCHEDULE_PIECES:
a_values = set(smartd_schedule_piece_values(a[piece.key], piece.min, piece.max, piece.enum, piece.map))
b_values = set(smartd_schedule_piece_values(b[piece.key], piece.min, piece.max, piece.enum, piece.map))
intersection = a_values & b_values
if not intersection:
return
first_intersection = sorted(intersection)[0]
if piece.key == "hour":
intersections.append(f"{first_intersection:02d}:00")
continue
if len(intersection) == piece.max - piece.min + 1:
continue
if piece.key == "dom":
if intersections:
intersections.append(ordinal(first_intersection))
else:
intersections.append(f"Day {ordinal(first_intersection)} of every month")
continue
intersections.append({v: k for k, v in piece.enum.items()}[first_intersection].title())
if intersections:
return ", ".join(intersections)
class SmartTestModel(sa.Model):
__tablename__ = 'tasks_smarttest'
id = sa.Column(sa.Integer(), primary_key=True)
smarttest_type = sa.Column(sa.String(2))
smarttest_desc = sa.Column(sa.String(120))
smarttest_hour = sa.Column(sa.String(100))
smarttest_daymonth = sa.Column(sa.String(100))
smarttest_month = sa.Column(sa.String(100))
smarttest_dayweek = sa.Column(sa.String(100))
smarttest_all_disks = sa.Column(sa.Boolean(), default=False)
smarttest_disks = sa.relationship('DiskModel', secondary=lambda: SmartTestDiskModel.__table__)
class SmartTestDiskModel(sa.Model):
__tablename__ = 'tasks_smarttest_smarttest_disks'
__table_args__ = (
sa.Index('tasks_smarttest_smarttest_disks_smarttest_id__disk_id', 'smarttest_id', 'disk_id', unique=True),
)
id = sa.Column(sa.Integer(), primary_key=True)
smarttest_id = sa.Column(sa.Integer(), sa.ForeignKey('tasks_smarttest.id', ondelete='CASCADE'))
disk_id = sa.Column(sa.String(100), sa.ForeignKey('storage_disk.disk_identifier', ondelete='CASCADE'))
class SMARTTestService(CRUDService):
class Config:
datastore = 'tasks.smarttest'
datastore_extend = 'smart.test.smart_test_extend'
datastore_prefix = 'smarttest_'
namespace = 'smart.test'
cli_namespace = 'task.smart_test'
ENTRY = Patch(
'smart_task_create', 'smart_task_entry',
('add', Int('id')),
)
@private
async def smart_test_extend(self, data):
disks = data.pop('disks')
data['disks'] = [disk['disk_identifier'] for disk in disks]
test_type = {
'L': 'LONG',
'S': 'SHORT',
'C': 'CONVEYANCE',
'O': 'OFFLINE',
}
data['type'] = test_type[data.pop('type')]
Cron.convert_db_format_to_schedule(data)
return data
async def _validate(self, data, id_=None):
verrors = ValidationErrors()
disk_choices = await self.disk_choices()
other_tests = await self.query([('id', '!=', id_)] if id_ is not None else [])
if not data['disks'] and not data['all_disks']:
verrors.add('disks', 'This field is required')
for i, disk in enumerate(data['disks']):
if disk not in disk_choices:
verrors.add(f'disks.{i}', 'Invalid disk')
for test in other_tests:
if test['type'] == data['type']:
if error := smart_test_disks_intersect(test, data, disk_choices):
verrors.add(*error)
break
# "As soon as a match is found, the test will be started and no additional matches will be sought for that
# device and that polling cycle." (from man smartd.conf).
# So if two tests are scheduled to run at the same time, only one will run.
for test in other_tests:
if smart_test_disks_intersect(test, data, disk_choices):
if intersect_at := smart_test_schedules_intersect_at(test['schedule'], data['schedule']):
verrors.add('data.schedule', f'A {test["type"]} test already runs at {intersect_at}')
break
return verrors
@accepts(Str('disk'), roles=['REPORTING_READ'])
async def query_for_disk(self, disk_name):
"""
Query S.M.A.R.T. tests for the specified disk name.
"""
disk = await self.middleware.call('disk.query', [['name', '=', disk_name]], {'get': True})
return [
test
for test in await self.query()
if test['all_disks'] or disk['identifier'] in test['disks']
]
@accepts(Bool('full_disk', default=False))
async def disk_choices(self, full_disk):
"""
Returns disk choices for S.M.A.R.T. test.
`full_disk` will return full disk objects instead of just names.
"""
return {
disk['identifier']: disk if full_disk else disk['name']
for disk in await self.middleware.call('disk.query', [['name', '!^', 'pmem']])
if await self.middleware.call('disk.smartctl_args', disk['name']) is not None
}
@accepts(
Dict(
'smart_task_create',
Cron(
'schedule',
exclude=['minute']
),
Str('desc'),
Bool('all_disks', default=False),
List('disks', items=[Str('disk')]),
Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
register=True
)
)
async def do_create(self, data):
"""
Create a SMART Test Task.
`disks` is a list of valid disks which should be monitored in this task.
`type` is specified to represent the type of SMART test to be executed.
`all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.
.. examples(websocket)::
Create a SMART Test Task which executes after every 30 minutes.
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "smart.test.create",
"params": [{
"schedule": {
"minute": "30",
"hour": "*",
"dom": "*",
"month": "*",
"dow": "*"
},
"all_disks": true,
"type": "OFFLINE",
"disks": []
}]
}
"""
verrors = ValidationErrors()
verrors.add_child('smart_test_create', await self._validate(data))
verrors.check()
data['type'] = data.pop('type')[0]
Cron.convert_schedule_to_db_format(data)
data['id'] = await self.middleware.call(
'datastore.insert',
self._config.datastore,
data,
{'prefix': self._config.datastore_prefix}
)
self.middleware.create_task(self._service_change('smartd', 'restart'))
return await self.get_instance(data['id'])
async def do_update(self, id_, data):
"""
Update SMART Test Task of `id`.
"""
old = await self.query(filters=[('id', '=', id_)], options={'get': True})
new = old.copy()
new.update(data)
verrors = ValidationErrors()
verrors.add_child('smart_test_update', await self._validate(new, id_))
verrors.check()
new['type'] = new.pop('type')[0]
Cron.convert_schedule_to_db_format(new)
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
new,
{'prefix': self._config.datastore_prefix}
)
self.middleware.create_task(self._service_change('smartd', 'restart'))
return await self.get_instance(id_)
async def do_delete(self, id_):
"""
Delete SMART Test Task of `id`.
"""
response = await self.middleware.call(
'datastore.delete',
self._config.datastore,
id_
)
self.middleware.create_task(self._service_change('smartd', 'restart'))
return response
@accepts(
List(
'disks', items=[
Dict(
'disk_run',
Str('identifier', required=True),
Str('mode', enum=['FOREGROUND', 'BACKGROUND'], default='BACKGROUND'),
Str('type', enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'], required=True),
)
]
)
)
@returns(List('smart_manual_test', items=[Dict(
'smart_manual_test_disk_response',
Str('disk', required=True),
Str('identifier', required=True),
Str('error', required=True, null=True),
Datetime('expected_result_time'),
Int('job'),
)]))
async def manual_test(self, disks):
"""
Run manual SMART tests for `disks`.
        `type` indicates what type of SMART test will be run and must be specified.
"""
verrors = ValidationErrors()
test_disks_list = []
if not disks:
verrors.add(
'disks',
'Please specify at least one disk.'
)
else:
supported_disks = await self.middleware.call('smart.test.disk_choices', True)
devices = await self.middleware.call('device.get_disks')
valid_disks = [
disk['identifier']
for disk in await self.middleware.call('disk.query', [
('identifier', 'in', [disk['identifier'] for disk in disks])
], {'force_sql_filters': True})
]
for index, disk in enumerate(disks):
if current_disk := supported_disks.get(disk['identifier']):
test_disks_list.append({
'disk': current_disk['name'],
**disk
})
else:
if disk['identifier'] in valid_disks:
verrors.add(
f'disks.{index}.identifier',
f'{disk["identifier"]} does not support S.M.A.R.T test.'
)
else:
verrors.add(
f'disks.{index}.identifier',
f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
)
continue
if current_disk['name'] is None:
verrors.add(
f'disks.{index}.identifier',
f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
)
device = devices.get(current_disk['name'])
if not device:
verrors.add(
f'disks.{index}.identifier',
f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
)
verrors.check()
return await asyncio_map(self.__manual_test, test_disks_list, 16)
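    # Usage sketch (hypothetical disk identifier): start a background SHORT
    # test on one disk; each response entry carries an optional error, the
    # expected completion time and a job id that tracks progress:
    #
    #     await middleware.call('smart.test.manual_test', [
    #         {'identifier': '{serial}WD-XXXXXXXX', 'type': 'SHORT'},
    #     ])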
async def __manual_test(self, disk):
output = {'error': None}
args = ['-t', disk['type'].lower()]
if disk['mode'] == 'FOREGROUND':
args.extend(['-C'])
try:
result = await self.middleware.call('disk.smartctl', disk['disk'], args)
except CallError as e:
output['error'] = e.errmsg
else:
expected_result_time = None
time_details = re.findall(RE_TIME, result)
if time_details:
try:
expected_result_time = datetime.strptime(time_details[0].strip(), '%a %b %d %H:%M:%S %Y')
except Exception as e:
self.logger.error('Unable to parse expected_result_time: %r', e)
else:
expected_result_time = expected_result_time.astimezone(timezone.utc).replace(tzinfo=None)
elif time_details := re.search(RE_TIME_SCSIPRINT_EXTENDED, result):
expected_result_time = utc_now() + timedelta(minutes=int(time_details.group(1)))
elif 'Self-test has begun' in result:
# nvmeprint.cpp does not print expected result time
expected_result_time = utc_now() + timedelta(minutes=1)
elif 'Self Test has begun' in result:
# scsiprint.cpp does not always print expected result time
expected_result_time = utc_now() + timedelta(minutes=1)
if expected_result_time:
output['expected_result_time'] = expected_result_time
output['job'] = (
await self.middleware.call('smart.test.wait', disk, expected_result_time)
).id
else:
output['error'] = result
return {
'disk': disk['disk'],
'identifier': disk['identifier'],
**output
}
@filterable(roles=['REPORTING_READ'])
@filterable_returns(Dict(
'disk_smart_test_result',
Str('disk', required=True),
List('tests', items=[Dict(
'test_result',
Int('num', required=True),
Str('description', required=True),
Str('status', required=True),
Str('status_verbose', required=True),
Int('segment_number', null=True),
Float('remaining'),
Int('lifetime', null=True, required=True),
Str('lba_of_first_error', null=True, required=True),
)]),
Dict(
'current_test',
Int('progress', required=True),
null=True,
),
additional_attrs=True,
))
async def results(self, filters, options):
"""
Get disk(s) S.M.A.R.T. test(s) results.
`options.extra.tests_filter` is an optional filter for tests results.
.. examples(websocket)::
Get all disks tests results
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "smart.test.results",
"params": []
}
returns
:::javascript
[
# ATA disk
{
"disk": "sda",
"tests": [
{
"num": 1,
"description": "Short offline",
"status": "SUCCESS",
"status_verbose": "Completed without error",
"remaining": 0.0,
"lifetime": 16590,
"lba_of_first_error": None,
}
]
},
# NVME disk
{
"disk": "nvme0n1",
"tests: [
{
"num": 0,
"description": "Short",
"status": "SUCCESS",
"status_verbose": "Completed without error",
"power_on_hours": 18636,
"failing_lba": None,
"nsid": None,
"seg": None,
"sct": "0x0",
"code": "0x00",
},
]
},
# SCSI disk
{
"disk": "sdb",
"tests": [
{
"num": 1,
"description": "Background long",
"status": "FAILED",
"status_verbose": "Completed, segment failed",
"segment_number": None,
"lifetime": 3943,
"lba_of_first_error": None,
}
]
},
]
Get specific disk test results
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "smart.test.results",
"params": [
[["disk", "=", "ada0"]],
{"get": true}
]
}
returns
:::javascript
{
"disk": "ada0",
"tests": [
{
"num": 1,
"description": "Short offline",
"status": "SUCCESS",
"status_verbose": "Completed without error",
"remaining": 0.0,
"lifetime": 16590,
"lba_of_first_error": None,
}
]
}
"""
get = options.pop("get", False)
tests_filter = options["extra"].pop("tests_filter", [])
disks = filter_list(
[dict(disk, disk=disk["name"]) for disk in (await self.disk_choices(True)).values()],
filters,
options,
)
return filter_list(
list(filter(
None,
await asyncio_map(functools.partial(annotate_disk_smart_tests, self.middleware, tests_filter),
disks,
16)
)),
[],
{"get": get},
)
@private
@job(abortable=True)
async def wait(self, job, disk, expected_result_time):
try:
start = utc_now()
if expected_result_time < start:
raise CallError(f'Invalid expected_result_time {expected_result_time.isoformat()}')
start_monotime = time.monotonic()
end_monotime = start_monotime + (expected_result_time - start).total_seconds()
await self.middleware.call('smart.test.set_test_data', disk['disk'], {
'start_monotime': start_monotime,
'end_monotime': end_monotime,
})
async for _, data in await self.middleware.event_source_manager.iterate('smart.test.progress', disk['disk']):
if data['fields']['progress'] is None:
return
job.set_progress(data['fields']['progress'])
except asyncio.CancelledError:
await self.middleware.call('smart.test.abort', disk['disk'])
raise
@accepts(Str('disk'))
@returns()
async def abort(self, disk):
"""
Abort non-captive S.M.A.R.T. tests for disk.
"""
await self.middleware.call("disk.smartctl", disk, ["-X"], {"silent": True})
class SmartModel(sa.Model):
__tablename__ = 'services_smart'
id = sa.Column(sa.Integer(), primary_key=True)
smart_interval = sa.Column(sa.Integer(), default=30)
smart_powermode = sa.Column(sa.String(60), default="never")
smart_difference = sa.Column(sa.Integer(), default=0)
smart_informational = sa.Column(sa.Integer(), default=0)
smart_critical = sa.Column(sa.Integer(), default=0)
class SmartService(SystemServiceService):
class Config:
datastore = "services.smart"
service = "smartd"
service_verb_sync = False
datastore_extend = "smart.smart_extend"
datastore_prefix = "smart_"
cli_namespace = "service.smart"
ENTRY = Dict(
'smart_entry',
Int('interval', required=True),
Int('id', required=True),
Str('powermode', required=True, enum=SMARTCTL_POWERMODES),
Int('difference', required=True),
Int('informational', required=True),
Int('critical', required=True),
)
@private
async def smart_extend(self, smart):
smart["powermode"] = smart["powermode"].upper()
return smart
async def do_update(self, data):
"""
Update SMART Service Configuration.
`interval` is an integer value in minutes which defines how often smartd activates to check if any tests
are configured to run.
`critical`, `informational` and `difference` are integer values on which alerts for SMART are configured if
        the disk's temperature crosses the assigned threshold for each respective attribute. They default to 0 which
indicates they are disabled.
"""
old = await self.config()
new = old.copy()
new.update(data)
new["powermode"] = new["powermode"].lower()
verb = "reload"
if any(old[k] != new[k] for k in ["interval"]):
verb = "restart"
await self._update_service(old, new, verb)
if new["powermode"] != old["powermode"]:
await self._service_change("snmp", "restart")
return await self.config()

# --- truenas_middleware/src/middlewared/middlewared/plugins/tunables.py ---
import contextlib
import errno
import os
import subprocess
from middlewared.schema import accepts, Bool, Dict, Int, Patch, returns, Str, ValidationErrors
from middlewared.service import CRUDService, job, private
import middlewared.sqlalchemy as sa
from middlewared.utils import run
class TunableModel(sa.Model):
__tablename__ = 'system_tunable'
id = sa.Column(sa.Integer(), primary_key=True)
tun_type = sa.Column(sa.String(20))
tun_var = sa.Column(sa.String(128), unique=True)
tun_value = sa.Column(sa.String(512))
tun_orig_value = sa.Column(sa.String(512))
tun_comment = sa.Column(sa.String(100))
tun_enabled = sa.Column(sa.Boolean(), default=True)
TUNABLE_TYPES = ['SYSCTL', 'UDEV', 'ZFS']
def zfs_parameter_path(name):
return f'/sys/module/zfs/parameters/{name}'
def zfs_parameter_value(name):
with open(zfs_parameter_path(name)) as f:
return f.read().strip()
class TunableService(CRUDService):
class Config:
datastore = 'system.tunable'
datastore_prefix = 'tun_'
cli_namespace = 'system.tunable'
SYSCTLS = set()
ENTRY = Patch(
'tunable_create', 'tunable_entry',
('add', Int('id')),
('add', Str('orig_value')),
)
@private
def get_sysctls(self):
if not TunableService.SYSCTLS:
tunables = subprocess.run(['sysctl', '-aN'], stdout=subprocess.PIPE)
for tunable in filter(lambda x: x, tunables.stdout.decode().split('\n')):
TunableService.SYSCTLS.add(tunable)
return TunableService.SYSCTLS
@private
def get_sysctl(self, var):
with open(f'/proc/sys/{var.replace(".", "/")}', 'r') as f:
return f.read().strip()
@private
def set_sysctl(self, var, value):
path = f'/proc/sys/{var.replace(".", "/")}'
with contextlib.suppress(FileNotFoundError, PermissionError):
with open(path, 'w') as f:
f.write(value)
@private
def reset_sysctl(self, tunable):
self.set_sysctl(tunable['var'], tunable['orig_value'])
@private
def set_zfs_parameter(self, name, value):
path = zfs_parameter_path(name)
with contextlib.suppress(FileNotFoundError, PermissionError):
with open(path, 'w') as f:
f.write(value)
@private
def reset_zfs_parameter(self, tunable):
self.set_zfs_parameter(tunable['var'], tunable['orig_value'])
@private
async def handle_tunable_change(self, tunable):
if tunable['type'] == 'UDEV':
await self.middleware.call('etc.generate', 'udev')
await run(['udevadm', 'control', '-R'])
@accepts()
@returns(Dict('tunable_type_choices', *[Str(k, enum=[k]) for k in TUNABLE_TYPES]))
async def tunable_type_choices(self):
"""
Retrieve the supported tunable types that can be changed.
"""
return {k: k for k in TUNABLE_TYPES}
@accepts(Dict(
'tunable_create',
Str('type', enum=TUNABLE_TYPES, default='SYSCTL', required=True),
Str('var', required=True),
Str('value', required=True),
Str('comment', default=''),
Bool('enabled', default=True),
Bool('update_initramfs', default=True),
register=True
), audit='Tunable create')
@job(lock='tunable_crud')
async def do_create(self, job, data):
"""
Create a tunable.
If `type` is `SYSCTL` then `var` is a sysctl name (e.g. `kernel.watchdog`) and `value` is its corresponding
value (e.g. `0`).
If `type` is `UDEV` then `var` is an udev rules file name (e.g. `10-disable-usb`, `.rules` suffix will be
appended automatically) and `value` is its contents (e.g. `BUS=="usb", OPTIONS+="ignore_device"`).
If `type` is `ZFS` then `var` is a ZFS kernel module parameter name (e.g. `zfs_dirty_data_max_max`) and `value`
is its value (e.g. `783091712`).
If `update_initramfs` is `false` then initramfs will not be updated after creating a ZFS tunable and you will
need to run `system boot update_initramfs` manually.
"""
update_initramfs = data.pop('update_initramfs')
verrors = ValidationErrors()
if await self.middleware.call('tunable.query', [('var', '=', data['var'])]):
verrors.add('tunable_create.var', f'Tunable {data["var"]!r} already exists in database.', errno.EEXIST)
if data['type'] == 'SYSCTL':
if data['var'] not in await self.middleware.call('tunable.get_sysctls'):
verrors.add('tunable_create.var', f'Sysctl {data["var"]!r} does not exist in kernel.', errno.ENOENT)
if data['type'] == 'UDEV' and 'truenas' in data['var']:
verrors.add(
'tunable_create.var',
'Udev rules with `truenas` in their name are not allowed.',
errno.EPERM,
)
if data['type'] == 'ZFS':
if not await self.middleware.run_in_thread(os.path.exists, zfs_parameter_path(data['var'])):
verrors.add(
'tunable_create.var',
f'ZFS module does not accept {data["var"]!r} parameter.',
errno.ENOENT
)
verrors.check()
data['orig_value'] = ''
if data['type'] == 'SYSCTL':
data['orig_value'] = await self.middleware.call('tunable.get_sysctl', data['var'])
if data['type'] == 'ZFS':
data['orig_value'] = await self.middleware.run_in_thread(zfs_parameter_value, data['var'])
id_ = await self.middleware.call(
'datastore.insert', self._config.datastore, data, {'prefix': self._config.datastore_prefix}
)
if data['type'] == 'SYSCTL':
if data['enabled']:
await self.middleware.call('etc.generate', 'sysctl')
await self.middleware.call('tunable.set_sysctl', data['var'], data['value'])
elif data['type'] == 'ZFS':
if data['enabled']:
await self.middleware.call('tunable.set_zfs_parameter', data['var'], data['value'])
if update_initramfs:
await self.middleware.call('boot.update_initramfs')
else:
await self.handle_tunable_change(data)
return await self.get_instance(id_)
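    # Sketch of creating a sysctl tunable (variable and value taken from the
    # docstring example above; tunable.create runs as a job):
    #
    #     await middleware.call('tunable.create', {
    #         'type': 'SYSCTL',
    #         'var': 'kernel.watchdog',
    #         'value': '0',
    #     })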
@accepts(
Int('id', required=True),
Patch(
'tunable_create',
'tunable_update',
('rm', {'name': 'type'}),
('rm', {'name': 'var'}),
('attr', {'update': True}),
),
audit='Tunable update'
)
@job(lock='tunable_crud')
async def do_update(self, job, id_, data):
"""
Update Tunable of `id`.
"""
old = await self.get_instance(id_)
update_initramfs = data.pop('update_initramfs', True)
new = old.copy()
new.update(data)
if old == new:
# nothing updated so return early
return old
await self.middleware.call(
'datastore.update', self._config.datastore, id_, new, {'prefix': self._config.datastore_prefix}
)
if new['type'] == 'SYSCTL':
await self.middleware.call('etc.generate', 'sysctl')
if new['enabled']:
await self.middleware.call('tunable.set_sysctl', new['var'], new['value'])
else:
await self.middleware.call('tunable.reset_sysctl', new)
elif new['type'] == 'ZFS':
if new['enabled']:
await self.middleware.call('tunable.set_zfs_parameter', new['var'], new['value'])
else:
await self.middleware.call('tunable.reset_zfs_parameter', new)
if update_initramfs:
await self.middleware.call('boot.update_initramfs')
else:
await self.handle_tunable_change(new)
return await self.get_instance(id_)
@accepts(Int('id', required=True), audit='Tunable delete')
@job(lock='tunable_crud')
async def do_delete(self, job, id_):
"""
Delete Tunable of `id`.
"""
entry = await self.get_instance(id_)
await self.middleware.call('datastore.delete', self._config.datastore, entry['id'])
if entry['type'] == 'SYSCTL':
await self.middleware.call('etc.generate', 'sysctl')
await self.middleware.call('tunable.reset_sysctl', entry)
elif entry['type'] == 'ZFS':
await self.middleware.call('tunable.reset_zfs_parameter', entry)
await self.middleware.call('boot.update_initramfs')
else:
await self.handle_tunable_change(entry)

# --- truenas_middleware/src/middlewared/middlewared/plugins/post_install.py ---
import json
import os
import jsonschema
from middlewared.service import Service
PATH = "/data/post_install.json"
SCHEMA = {
"type": "object",
"additionalProperties": False,
"properties": {
"network_interfaces": {
"type": "array",
"items": {
"type": "object",
"required": ["name"],
"additionalProperties": False,
"properties": {
"name": {"type": "string"},
"aliases": {
"type": "array",
"items": {
"type": "object",
"required": ["type", "address", "netmask"],
"additionalProperties": False,
"properties": {
"type": {"type": "string"},
"address": {"type": "string"},
"netmask": {"type": "integer"},
},
},
},
"ipv4_dhcp": {"type": "boolean"},
"ipv6_auto": {"type": "boolean"},
},
},
},
},
}
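# A minimal document that validates against SCHEMA (all values illustrative;
# the alias "type" value "INET" is an assumption, not taken from this file):
#
#     {
#         "network_interfaces": [
#             {
#                 "name": "eno1",
#                 "ipv4_dhcp": false,
#                 "aliases": [
#                     {"type": "INET", "address": "192.168.0.2", "netmask": 24}
#                 ]
#             }
#         ]
#     }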
class PostInstallService(Service):
class Config:
private = True
def process(self):
if os.path.exists(PATH):
try:
with open(PATH) as f:
data = json.load(f)
jsonschema.validate(data, SCHEMA)
self.process_data(data)
finally:
os.unlink(PATH)
def process_data(self, data):
for interface in data.get("network_interfaces", []):
try:
self.middleware.call_sync("interface.update", interface["name"], {
"aliases": interface.get("aliases", []),
"ipv4_dhcp": interface.get("ipv4_dhcp", False),
"ipv6_auto": interface.get("ipv6_auto", False),
})
except Exception as e:
self.logger.warning("Error configuring interface %r from post_install: %r", interface["name"], e)
self.middleware.call_sync("interface.checkin")
async def setup(middleware):
try:
await middleware.call("postinstall.process")
except Exception:
middleware.logger.error("Error processing post_install file", exc_info=True)

# --- truenas_middleware/src/middlewared/middlewared/plugins/ups.py ---
import csv
import functools
import glob
import io
import os
import re
import syslog
from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, returns, Str
from middlewared.service import private, SystemServiceService, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils import run
from middlewared.validators import Range, Port
RE_DRIVER_CHOICE = re.compile(r'(\S+)\s+(\S+=\S+)?\s*(?:\((.+)\))?$')
RE_TEST_IN_PROGRESS = re.compile(r'ups.test.result:\s*TestInProgress')
RE_UPS_STATUS = re.compile(r'ups.status: (.*)')
UPS_POWERDOWN_FLAG_FILE = '/etc/killpower'
class UPSModel(sa.Model):
__tablename__ = 'services_ups'
id = sa.Column(sa.Integer(), primary_key=True)
ups_mode = sa.Column(sa.String(6), default='master')
ups_identifier = sa.Column(sa.String(120), default='ups')
ups_remotehost = sa.Column(sa.String(50))
ups_remoteport = sa.Column(sa.Integer(), default=3493)
ups_driver = sa.Column(sa.String(120))
ups_port = sa.Column(sa.String(120))
ups_options = sa.Column(sa.Text())
ups_optionsupsd = sa.Column(sa.Text())
ups_description = sa.Column(sa.String(120))
ups_shutdown = sa.Column(sa.String(120), default='batt')
ups_shutdowntimer = sa.Column(sa.Integer(), default=30)
ups_monuser = sa.Column(sa.String(50), default='upsmon')
ups_monpwd = sa.Column(sa.EncryptedText(), default='fixmepass')
ups_extrausers = sa.Column(sa.Text())
ups_rmonitor = sa.Column(sa.Boolean(), default=False)
ups_powerdown = sa.Column(sa.Boolean(), default=False)
ups_nocommwarntime = sa.Column(sa.Integer(), nullable=True)
ups_hostsync = sa.Column(sa.Integer(), default=15)
ups_shutdowncmd = sa.Column(sa.String(255), nullable=True)
@functools.cache
def drivers_available():
return set(os.listdir('/lib/nut'))
class UPSService(SystemServiceService):
ENTRY = Dict(
'ups_entry',
Bool('powerdown', required=True),
Bool('rmonitor', required=True),
Int('id', required=True),
Int('nocommwarntime', null=True, required=True),
Int('remoteport', validators=[Port()], required=True),
Int('shutdowntimer', required=True),
Int('hostsync', validators=[Range(min_=0)], required=True),
Str('description', required=True),
Str('driver', required=True),
Str('extrausers', max_length=None, required=True),
Str('identifier', empty=False, required=True),
Str('mode', enum=['MASTER', 'SLAVE'], required=True),
Str('monpwd', empty=False, required=True),
Str('monuser', empty=False, required=True),
Str('options', max_length=None, required=True),
Str('optionsupsd', max_length=None, required=True),
Str('port', required=True),
Str('remotehost', required=True),
Str('shutdown', enum=['LOWBATT', 'BATT'], required=True),
Str('shutdowncmd', null=True, required=True),
Str('complete_identifier', required=True),
)
LOGGED_ERRORS = []
class Config:
datastore = 'services.ups'
datastore_prefix = 'ups_'
datastore_extend = 'ups.ups_config_extend'
service = 'ups'
service_verb = 'restart'
cli_namespace = 'service.ups'
@private
async def ups_config_extend(self, data):
data['mode'] = data['mode'].upper()
data['shutdown'] = data['shutdown'].upper()
host = 'localhost' if data['mode'] == 'MASTER' else data['remotehost']
data['complete_identifier'] = f'{data["identifier"]}@{host}:{data["remoteport"]}'
return data
@accepts()
@returns(List(items=[Str('port_choice')]))
async def port_choices(self):
adv_config = await self.middleware.call('system.advanced.config')
ports = [
os.path.join('/dev', port['name'])
for port in await self.middleware.call('device.get_serials')
if not adv_config['serialconsole'] or adv_config['serialport'] != port['name']
]
ports.extend(glob.glob('/dev/uhid*'))
ports.append('auto')
return ports
@private
def normalize_driver_string(self, driver_str):
driver = driver_str.split('$')[0]
driver = driver.split('(')[0] # "blazer_usb (USB ID 0665:5161)"
driver = driver.split(' or ')[0] # "blazer_ser or blazer_usb"
driver = driver.replace(' ', '\n\t') # "genericups upstype=16"
return f'driver = {driver}'
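    # Worked examples for the cases noted in the comments above:
    #
    #     normalize_driver_string('blazer_ser or blazer_usb')
    #     # -> 'driver = blazer_ser'
    #     normalize_driver_string('genericups upstype=16')
    #     # -> 'driver = genericups\n\tupstype=16'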
@accepts()
@returns(Dict(additional_attrs=True, example={'blazer_ser$CPM-800': 'WinPower ups 2 CPM-800 (blazer_ser)'}))
def driver_choices(self):
"""
Returns choices of UPS drivers supported by the system.
"""
ups_choices = {}
driver_list = '/usr/share/nut/driver.list'
if os.path.exists(driver_list):
with open(driver_list, 'r') as f:
d = f.read()
r = io.StringIO()
for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
r.write(line.strip() + '\n')
r.seek(0)
reader = csv.reader(r, delimiter=' ', quotechar='"')
for row in reader:
if len(row) == 0 or row[0].startswith('#'):
continue
if row[-2] == '#':
last = -3
else:
last = -1
driver_str = row[last]
driver_options = ''
driver_annotation = ''
# We want to match following strings
# genericups upstype=1
# powerman-pdu (experimental)
m = RE_DRIVER_CHOICE.match(driver_str)
if m:
driver_str = m.group(1)
driver_options = m.group(2) or ''
driver_annotation = m.group(3) or ''
for driver in driver_str.split(' or '): # can be "blazer_ser or blazer_usb"
driver = driver.strip()
if driver not in drivers_available():
continue
key = '$'.join([driver + (f' {driver_options}' if driver_options else ''), row[3]])
val = f'{ups_choices[key]} / ' if key in ups_choices else ''
ups_choices[key] = val + '%s (%s)' % (
' '.join(filter(None, row[0:last])),
', '.join(filter(None, [driver, driver_annotation]))
)
return ups_choices
@private
async def validate_data(self, data, schema):
verrors = ValidationErrors()
driver = data.get('driver')
if driver:
if driver not in (await self.middleware.call('ups.driver_choices')).keys():
verrors.add(
f'{schema}.driver',
'Driver selected does not match local machine\'s driver list'
)
port = data['port']
if port:
adv_config = await self.middleware.call('system.advanced.config')
serial_port = os.path.join('/dev', adv_config['serialport'])
if adv_config['serialconsole'] and serial_port == port:
verrors.add(
f'{schema}.port',
                    'UPS port must be different than the port specified for '
                    'the serial console in system advanced settings'
)
identifier = data['identifier']
if identifier:
if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
verrors.add(
f'{schema}.identifier',
'Use alphanumeric characters, ".", "-" and "_"'
)
for field in ['monpwd', 'monuser']:
if re.search(r'[ #]', data[field], re.I):
verrors.add(f'{schema}.{field}', 'Spaces or number signs are not allowed.')
mode = data.get('mode')
if mode == 'MASTER':
for field in filter(
lambda f: not data[f],
['port', 'driver']
):
verrors.add(
f'{schema}.{field}',
'This field is required'
)
else:
if not data.get('remotehost'):
verrors.add(
f'{schema}.remotehost',
'This field is required'
)
data['mode'] = data['mode'].lower()
data['shutdown'] = data['shutdown'].lower()
verrors.check()
return data
@accepts(
Patch(
'ups_entry', 'ups_update',
('rm', {'name': 'id'}),
('rm', {'name': 'complete_identifier'}),
('edit', {'name': 'monpwd', 'method': lambda x: setattr(x, 'empty', False)}),
('attr', {'update': True}),
),
)
async def do_update(self, data):
"""
Update UPS Service Configuration.
`powerdown` when enabled, sets UPS to power off after shutting down the system.
`nocommwarntime` is a value in seconds which makes UPS Service wait the specified seconds before alerting that
the Service cannot reach configured UPS.
`shutdowntimer` is a value in seconds which tells the Service to wait specified seconds for the UPS before
initiating a shutdown. This only applies when `shutdown` is set to "BATT".
`shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".
"""
config = await self.config()
config.pop('complete_identifier')
old_config = config.copy()
config.update(data)
config = await self.validate_data(config, 'ups_update')
old_config['mode'] = old_config['mode'].lower()
old_config['shutdown'] = old_config['shutdown'].lower()
if len(set(old_config.items()) ^ set(config.items())) > 0:
if config['identifier'] != old_config['identifier']:
await self.dismiss_alerts()
await self._update_service(old_config, config)
return await self.config()
@private
async def alerts_mapping(self):
return {
'LOWBATT': 'UPSBatteryLow',
'COMMBAD': 'UPSCommbad',
'COMMOK': 'UPSCommok',
'ONBATT': 'UPSOnBattery',
'ONLINE': 'UPSOnline',
'REPLBATT': 'UPSReplbatt'
}
@private
async def dismiss_alerts(self):
alerts = list((await self.alerts_mapping()).values())
await self.middleware.call('alert.oneshot_delete', alerts)
@private
@accepts(
Str('notify_type')
)
async def upssched_event(self, notify_type):
config = await self.config()
upsc_identifier = config['complete_identifier']
cp = await run('upsc', upsc_identifier, check=False)
if cp.returncode:
stats_output = ''
stderr = cp.stderr.decode(errors='ignore')
if stderr not in self.LOGGED_ERRORS:
self.LOGGED_ERRORS.append(stderr)
self.logger.error('Failed to retrieve ups information: %s', stderr)
else:
stats_output = cp.stdout.decode()
if RE_TEST_IN_PROGRESS.search(stats_output):
self.logger.debug('Self test is in progress and %r notify event should be ignored', notify_type)
return
if notify_type.lower() == 'shutdown':
# Before we start FSD with upsmon, lets ensure that ups is not ONLINE (OL).
# There are cases where battery/charger issues can result in ups.status being "OL LB" at the
# same time. This will ensure that we don't initiate a shutdown if ups is OL.
ups_status = RE_UPS_STATUS.findall(stats_output)
if ups_status and 'ol' in ups_status[0].lower():
self.middleware.logger.debug(
f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
f'{config["identifier"]} is ONLINE (OL).'
)
else:
# if we shutdown the active node while the passive is still online
# then we're just going to cause a failover event. Shut the passive down
# first and then shut the active node down
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'MASTER':
syslog.syslog(syslog.LOG_NOTICE, 'upssched-cmd "issuing shutdown" for passive node')
try:
await self.middleware.call('failover.call_remote', 'ups.upssched_event', ['shutdown'])
except Exception as e:
syslog.syslog(syslog.LOG_ERR, f'failed shutting down passive node with error {e}')
syslog.syslog(syslog.LOG_NOTICE, 'upssched-cmd "issuing shutdown"')
await run('upsmon', '-c', 'fsd', check=False)
elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the following format:
            # NOTIFY-EVENT, e.g. NOTIFY-LOWBATT
notify_type = notify_type.split('-')[-1]
# We would like to send alerts for the following events
alert_mapping = await self.alerts_mapping()
await self.dismiss_alerts()
if notify_type in alert_mapping:
                # Send the user the notification event and details
                # We send the email in the following format (including line breaks)
# UPS Statistics: 'ups'
#
# Statistics recovered:
#
# 1) Battery charge (percent)
# battery.charge: 5
#
# 2) Remaining battery level when UPS switches to LB (percent)
# battery.charge.low: 10
#
# 3) Battery runtime (seconds)
# battery.runtime: 1860
#
# 4) Remaining battery runtime when UPS switches to LB (seconds)
# battery.runtime.low: 900
body = f'<br><br>UPS Statistics: {config["identifier"]!r}<br><br>'
                # Let's gather the following stats
data_points = {
'battery.charge': 'Battery charge (percent)',
'battery.charge.low': 'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
'battery.charge.status': 'Battery charge status',
'battery.runtime': 'Battery runtime (seconds)',
'battery.runtime.low': 'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
'battery.runtime.restart': 'Minimum battery runtime (seconds) to allow UPS restart after power-off',
}
stats_output = (
await run('upsc', upsc_identifier, check=False)
).stdout
recovered_stats = re.findall(
fr'({"|".join(data_points)}): (.*)',
'' if not stats_output else stats_output.decode()
)
if recovered_stats:
body += 'Statistics recovered:<br><br>'
# recovered_stats is expected to be a list in this format
# [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
for index, stat in enumerate(recovered_stats):
body += f'{index + 1}) {data_points[stat[0]]}<br> ' \
f' {stat[0]}: {stat[1]}<br><br>'
else:
body += 'Statistics could not be recovered<br>'
await self.middleware.call(
'alert.oneshot_create', alert_mapping[notify_type], {'ups': config['identifier'], 'body': body}
)
else:
self.middleware.logger.debug(f'Unrecognized UPS notification event: {notify_type}')
async def setup(middleware):
    # Let's delete all UPS-related alerts when starting middlewared, ensuring we don't have any leftovers
await middleware.call('ups.dismiss_alerts')
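# Illustrative sketch (not part of the original module): how the stats-recovery
# regex in `upssched_event` behaves against sample `upsc` output. The sample
# output lines below are hypothetical.
def _example_recover_ups_stats():
    import re
    data_points = {
        'battery.charge': 'Battery charge (percent)',
        'battery.runtime': 'Battery runtime (seconds)',
    }
    stats_output = 'battery.charge: 95\nbattery.runtime: 1860\nups.status: OL\n'
    # Same pattern construction as in `upssched_event` above.
    recovered = re.findall(fr'({"|".join(data_points)}): (.*)', stats_output)
    assert recovered == [('battery.charge', '95'), ('battery.runtime', '1860')]
    return recovered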
| 16,430 | Python | .py | 343 | 35.653061 | 120 | 0.56114 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,513 | truenas.py | truenas_middleware/src/middlewared/middlewared/plugins/truenas.py | import errno
import os
from ixhardware import TRUENAS_UNKNOWN, get_chassis_hardware
from middlewared.plugins.truecommand.enums import Status as TrueCommandStatus
from middlewared.schema import accepts, Bool, Patch, returns, Str
from middlewared.service import cli_private, job, no_auth_required, private, Service
from middlewared.utils.functools_ import cache
EULA_FILE = '/usr/local/share/truenas/eula.html'
EULA_PENDING_PATH = "/data/truenas-eula-pending"
class TrueNASService(Service):
class Config:
cli_namespace = 'system.truenas'
@no_auth_required
@accepts()
@returns(Bool())
async def managed_by_truecommand(self):
"""
Returns whether TrueNAS is being managed by TrueCommand or not.
        This endpoint requires no authentication because the UI uses it, before the user has logged in, to
        determine whether the system is being managed by TrueCommand.
"""
return TrueCommandStatus(
(await self.middleware.call('truecommand.config'))['status']
) == TrueCommandStatus.CONNECTED
@accepts()
@returns(Str('system_chassis_hardware'))
@cli_private
@cache
async def get_chassis_hardware(self):
"""
Returns what type of hardware this is, detected from dmidecode.
"""
dmi = await self.middleware.call('system.dmidecode_info_internal')
return get_chassis_hardware(dmi)
@accepts(roles=['READONLY_ADMIN'])
@returns(Bool('is_ix_hardware'))
async def is_ix_hardware(self):
"""
        Return a boolean value indicating whether this is hardware that iXsystems sells.
"""
return await self.get_chassis_hardware() != TRUENAS_UNKNOWN
@accepts(roles=['READONLY_ADMIN'])
@returns(Str('eula', max_length=None, null=True))
@cli_private
def get_eula(self):
"""
Returns the TrueNAS End-User License Agreement (EULA).
"""
try:
with open(EULA_FILE, 'r', encoding='utf8') as f:
return f.read()
except FileNotFoundError:
pass
@accepts(roles=['READONLY_ADMIN'])
@returns(Bool('system_eula_accepted'))
@cli_private
def is_eula_accepted(self):
"""
Returns whether the EULA is accepted or not.
"""
return not os.path.exists(EULA_PENDING_PATH)
@accepts()
@returns()
def accept_eula(self):
"""
Accept TrueNAS EULA.
"""
try:
os.unlink(EULA_PENDING_PATH)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@private
def unaccept_eula(self):
with open(EULA_PENDING_PATH, "w") as f:
os.fchmod(f.fileno(), 0o600)
@accepts(roles=['READONLY_ADMIN'])
@returns(Bool('is_production_system'))
async def is_production(self):
"""
        Returns whether the system is marked as production.
"""
return await self.middleware.call('keyvalue.get', 'truenas:production', False)
@accepts(Bool('production'), Bool('attach_debug', default=False))
@returns(Patch(
'new_ticket_response', 'set_production',
('attr', {'null': True}),
))
@job()
async def set_production(self, job, production, attach_debug):
"""
Sets system production state and optionally sends initial debug.
"""
was_production = await self.is_production()
await self.middleware.call('keyvalue.set', 'truenas:production', production)
if not was_production and production:
serial = (await self.middleware.call('system.dmidecode_info'))["system-serial-number"]
return await job.wrap(await self.middleware.call('support.new_ticket', {
"title": f"System has been just put into production ({serial})",
"body": "This system has been just put into production",
"attach_debug": attach_debug,
"category": "Installation/Setup",
"criticality": "Inquiry",
"environment": "Production",
"name": "Automatic Alert",
"email": "auto-support@ixsystems.com",
"phone": "-",
}))
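# Illustrative sketch (not part of the original module): the EULA state above is
# just a flag file -- its presence means "pending", unlinking it means "accepted".
# The same pattern, demonstrated against a hypothetical temporary path instead of
# EULA_PENDING_PATH:
def _example_flag_file_pattern(path='/tmp/eula-pending-example'):
    with open(path, 'w') as f:           # unaccept: create the flag
        os.fchmod(f.fileno(), 0o600)
    accepted = not os.path.exists(path)  # is_eula_accepted() analogue -> False
    try:
        os.unlink(path)                  # accept: remove the flag
    except OSError as e:
        if e.errno != errno.ENOENT:      # an already-absent flag is fine
            raise
    return accepted, not os.path.exists(path)  # -> (False, True)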
| 4,219 | Python | .py | 108 | 30.759259 | 111 | 0.628236 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,514 | alert.py | truenas_middleware/src/middlewared/middlewared/plugins/alert.py | from dataclasses import dataclass
from collections import defaultdict, namedtuple
import copy
from datetime import datetime, timezone
import errno
from itertools import zip_longest
import os
import textwrap
import time
import uuid
import html2text
from truenas_api_client import ReserveFDException
from middlewared.alert.base import (
AlertCategory,
alert_category_names,
AlertClass,
OneShotAlertClass,
SimpleOneShotAlertClass,
DismissableAlertClass,
AlertLevel,
Alert,
AlertSource,
ThreadedAlertSource,
ThreadedAlertService,
ProThreadedAlertService,
)
from middlewared.alert.base import UnavailableException, AlertService as _AlertService
from middlewared.api import api_method
from middlewared.api.current import (
AlertDismissArgs, AlertDismissResult, AlertListArgs, AlertListResult, AlertListCategoriesArgs,
AlertListCategoriesResult, AlertListPoliciesArgs, AlertListPoliciesResult, AlertRestoreArgs, AlertRestoreResult,
AlertOneshotCreateArgs, AlertOneshotCreateResult, AlertOneshotDeleteArgs, AlertOneshotDeleteResult,
AlertServiceCreateArgs, AlertServiceCreateResult, AlertServiceUpdateArgs, AlertServiceUpdateResult,
AlertServiceDeleteArgs, AlertServiceDeleteResult, AlertServiceTestArgs, AlertServiceTestResult,
AlertServiceEntry,
)
from middlewared.schema import Bool, Dict, Int, Str
from middlewared.service import (
ConfigService, CRUDService, Service, ValidationErrors,
job, periodic, private,
)
from middlewared.service_exception import CallError
import middlewared.sqlalchemy as sa
from middlewared.validators import validate_schema
from middlewared.utils import bisect
from middlewared.utils.plugins import load_modules, load_classes
from middlewared.utils.python import get_middlewared_dir
from middlewared.utils.time_utils import utc_now
from middlewared.plugins.failover_.remote import NETWORK_ERRORS
POLICIES = ["IMMEDIATELY", "HOURLY", "DAILY", "NEVER"]
DEFAULT_POLICY = "IMMEDIATELY"
ALERT_SOURCES = {}
ALERT_SERVICES_FACTORIES = {}
SEND_ALERTS_ON_READY = False
AlertSourceLock = namedtuple("AlertSourceLock", ["source_name", "expires_at"])
@dataclass(slots=True, frozen=True, kw_only=True)
class AlertFailoverInfo:
this_node: str
other_node: str
run_on_backup_node: bool
run_failover_related: bool
class AlertModel(sa.Model):
__tablename__ = 'system_alert'
id = sa.Column(sa.Integer(), primary_key=True)
node = sa.Column(sa.String(100))
source = sa.Column(sa.Text())
key = sa.Column(sa.Text())
datetime = sa.Column(sa.DateTime())
last_occurrence = sa.Column(sa.DateTime())
text = sa.Column(sa.Text())
args = sa.Column(sa.JSON(None))
dismissed = sa.Column(sa.Boolean())
uuid = sa.Column(sa.Text())
klass = sa.Column(sa.Text())
class AlertSourceRunFailedAlertClass(AlertClass):
category = AlertCategory.SYSTEM
level = AlertLevel.CRITICAL
title = "Alert Check Failed"
text = "Failed to check for alert %(source_name)s: %(traceback)s"
exclude_from_list = True
class AlertSourceRunFailedOnBackupNodeAlertClass(AlertClass):
category = AlertCategory.SYSTEM
level = AlertLevel.CRITICAL
title = "Alert Check Failed (Standby Controller)"
text = "Failed to check for alert %(source_name)s on standby controller: %(traceback)s"
exclude_from_list = True
class AutomaticAlertFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
category = AlertCategory.SYSTEM
level = AlertLevel.WARNING
title = "Failed to Notify iXsystems About Alert"
text = textwrap.dedent("""\
Creating an automatic alert for iXsystems about system %(serial)s failed: %(error)s.
Please contact iXsystems Support: https://www.ixsystems.com/support/
Alert:
%(alert)s
""")
exclude_from_list = True
deleted_automatically = False
class TestAlertClass(AlertClass):
category = AlertCategory.SYSTEM
level = AlertLevel.CRITICAL
title = "Test alert"
exclude_from_list = True
class AlertPolicy:
def __init__(self, key=lambda now: now):
self.key = key
self.last_key_value = None
self.last_key_value_alerts = {}
def receive_alerts(self, now, alerts):
alerts = {alert.uuid: alert for alert in alerts}
gone_alerts = []
new_alerts = []
key = self.key(now)
if key != self.last_key_value:
gone_alerts = [alert for alert in self.last_key_value_alerts.values() if alert.uuid not in alerts]
new_alerts = [alert for alert in alerts.values() if alert.uuid not in self.last_key_value_alerts]
self.last_key_value = key
self.last_key_value_alerts = alerts
return gone_alerts, new_alerts
def delete_alert(self, alert):
self.last_key_value_alerts.pop(alert.uuid, None)
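# Illustrative sketch (not part of the original module): how AlertPolicy batches
# gone/new alerts per key period. SimpleNamespace objects stand in for Alert
# instances; only the `uuid` attribute matters here.
def _example_alert_policy():
    from datetime import datetime as dt
    from types import SimpleNamespace
    hourly = AlertPolicy(lambda d: (d.date(), d.hour))
    a1, a2 = SimpleNamespace(uuid="a1"), SimpleNamespace(uuid="a2")
    # First observation in the 10:00 hour: everything is new.
    assert hourly.receive_alerts(dt(2024, 1, 1, 10, 0), [a1]) == ([], [a1])
    # Same hour: changes are suppressed until the key rolls over.
    assert hourly.receive_alerts(dt(2024, 1, 1, 10, 30), [a2]) == ([], [])
    # Next hour: a1 is reported gone and a2 new.
    assert hourly.receive_alerts(dt(2024, 1, 1, 11, 0), [a2]) == ([a1], [a2])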
def get_alert_level(alert, classes):
return AlertLevel[classes.get(alert.klass.name, {}).get("level", alert.klass.level.name)]
def get_alert_policy(alert, classes):
return classes.get(alert.klass.name, {}).get("policy", DEFAULT_POLICY)
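# Illustrative sketch (not part of the original module): how per-class overrides
# from `alertclasses.config` feed the two helpers above. The alert stand-in only
# needs `klass.name` and `klass.level`; "UPSOnBattery" is used as an example.
def _example_alert_overrides():
    from types import SimpleNamespace
    alert = SimpleNamespace(klass=SimpleNamespace(name="UPSOnBattery", level=AlertLevel.WARNING))
    classes = {"UPSOnBattery": {"level": "CRITICAL", "policy": "HOURLY"}}
    assert get_alert_level(alert, classes) is AlertLevel.CRITICAL
    assert get_alert_policy(alert, classes) == "HOURLY"
    # Without an override, the class defaults apply.
    assert get_alert_level(alert, {}) is AlertLevel.WARNING
    assert get_alert_policy(alert, {}) == DEFAULT_POLICY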
class AlertSerializer:
def __init__(self, middleware):
self.middleware = middleware
self.initialized = False
self.product_type = None
self.classes = None
self.nodes = None
async def serialize(self, alert):
await self._ensure_initialized()
return dict(
alert.__dict__,
id=alert.uuid,
node=self.nodes[alert.node],
klass=alert.klass.name,
level=self.classes.get(alert.klass.name, {}).get("level", alert.klass.level.name),
formatted=alert.formatted,
one_shot=issubclass(alert.klass, OneShotAlertClass) and not alert.klass.deleted_automatically
)
async def get_alert_class(self, alert):
await self._ensure_initialized()
return self.classes.get(alert.klass.name, {})
async def should_show_alert(self, alert):
await self._ensure_initialized()
if self.product_type not in alert.klass.products:
return False
if (await self.get_alert_class(alert)).get("policy") == "NEVER":
return False
return True
async def _ensure_initialized(self):
if not self.initialized:
self.product_type = await self.middleware.call("alert.product_type")
self.classes = (await self.middleware.call("alertclasses.config"))["classes"]
self.nodes = await self.middleware.call("alert.node_map")
self.initialized = True
class AlertService(Service):
alert_sources_errors = set()
class Config:
cli_namespace = "system.alert"
def __init__(self, middleware):
super().__init__(middleware)
self.blocked_sources = defaultdict(set)
self.sources_locks = {}
self.blocked_failover_alerts_until = 0
self.sources_run_times = defaultdict(lambda: {
"last": [],
"max": 0,
"total_count": 0,
"total_time": 0,
})
@private
def load_impl(self):
for module in load_modules(os.path.join(get_middlewared_dir(), "alert", "source")):
for cls in load_classes(module, AlertSource, (ThreadedAlertSource,)):
source = cls(self.middleware)
if source.name in ALERT_SOURCES:
raise RuntimeError(f"Alert source {source.name} is already registered")
ALERT_SOURCES[source.name] = source
for module in load_modules(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir, "alert", "service")
):
for cls in load_classes(module, _AlertService, (ThreadedAlertService, ProThreadedAlertService)):
ALERT_SERVICES_FACTORIES[cls.name()] = cls
@private
async def load(self):
await self.middleware.run_in_thread(self.load_impl)
@private
async def initialize(self, load=True):
is_enterprise = await self.middleware.call("system.is_enterprise")
self.node = "A"
if is_enterprise:
if await self.middleware.call("failover.node") == "B":
self.node = "B"
self.alerts = []
if load:
alerts_uuids = set()
alerts_by_classes = defaultdict(list)
for alert in await self.middleware.call("datastore.query", "system.alert"):
del alert["id"]
if alert["source"] and alert["source"] not in ALERT_SOURCES:
self.logger.info("Alert source %r is no longer present", alert["source"])
continue
try:
alert["klass"] = AlertClass.class_by_name[alert["klass"]]
except KeyError:
self.logger.info("Alert class %r is no longer present", alert["klass"])
continue
alert["_uuid"] = alert.pop("uuid")
alert["_source"] = alert.pop("source")
alert["_key"] = alert.pop("key")
alert["_text"] = alert.pop("text")
alert = Alert(**alert)
if alert.uuid not in alerts_uuids:
alerts_uuids.add(alert.uuid)
alerts_by_classes[alert.klass.__name__].append(alert)
for alerts in alerts_by_classes.values():
if isinstance(alerts[0].klass, OneShotAlertClass):
alerts = await alerts[0].klass.load(alerts)
self.alerts.extend(alerts)
else:
await self.flush_alerts()
self.alert_source_last_run = defaultdict(lambda: datetime.min)
self.policies = {
"IMMEDIATELY": AlertPolicy(),
"HOURLY": AlertPolicy(lambda d: (d.date(), d.hour)),
"DAILY": AlertPolicy(lambda d: (d.date())),
"NEVER": AlertPolicy(lambda d: None),
}
for policy in self.policies.values():
policy.receive_alerts(utc_now(), self.alerts)
@private
async def terminate(self):
await self.flush_alerts()
@api_method(AlertListPoliciesArgs, AlertListPoliciesResult, roles=['ALERT_LIST_READ'])
async def list_policies(self):
"""
List all alert policies which indicate the frequency of the alerts.
"""
return POLICIES
@api_method(AlertListCategoriesArgs, AlertListCategoriesResult, roles=['ALERT_LIST_READ'])
async def list_categories(self):
"""
List all types of alerts which the system can issue.
"""
product_type = await self.middleware.call("alert.product_type")
classes = [alert_class for alert_class in AlertClass.classes
if product_type in alert_class.products and not alert_class.exclude_from_list]
return [
{
"id": alert_category.name,
"title": alert_category_names[alert_category],
"classes": sorted(
[
{
"id": alert_class.name,
"title": alert_class.title,
"level": alert_class.level.name,
"proactive_support": alert_class.proactive_support,
}
for alert_class in classes
if alert_class.category == alert_category
],
key=lambda klass: klass["title"]
)
}
for alert_category in AlertCategory
if any(alert_class.category == alert_category for alert_class in classes)
]
@api_method(AlertListArgs, AlertListResult, roles=['ALERT_LIST_READ'])
async def list(self):
"""
        List all alerts currently in the system, including active and dismissed ones.
"""
as_ = AlertSerializer(self.middleware)
classes = (await self.middleware.call("alertclasses.config"))["classes"]
return [
await as_.serialize(alert)
for alert in sorted(
self.alerts,
key=lambda alert: (
-get_alert_level(alert, classes).value,
alert.klass.title,
alert.datetime,
),
)
if await as_.should_show_alert(alert)
]
@private
async def node_map(self):
nodes = {
'A': 'Controller A',
'B': 'Controller B',
}
if await self.middleware.call('failover.licensed'):
node = await self.middleware.call('failover.node')
status = await self.middleware.call('failover.status')
if status == 'MASTER':
if node == 'A':
nodes = {
'A': 'Active Controller (A)',
'B': 'Standby Controller (B)',
}
else:
nodes = {
'A': 'Standby Controller (A)',
'B': 'Active Controller (B)',
}
else:
nodes[node] = f'{status.title()} Controller ({node})'
return nodes
def __alert_by_uuid(self, uuid):
try:
return [a for a in self.alerts if a.uuid == uuid][0]
except IndexError:
return None
@api_method(AlertDismissArgs, AlertDismissResult)
async def dismiss(self, uuid):
"""
Dismiss `id` alert.
"""
alert = self.__alert_by_uuid(uuid)
if alert is None:
return
if issubclass(alert.klass, DismissableAlertClass):
related_alerts, unrelated_alerts = bisect(lambda a: (a.node, a.klass) == (alert.node, alert.klass),
self.alerts)
left_alerts = await alert.klass(self.middleware).dismiss(related_alerts, alert)
for deleted_alert in related_alerts:
if deleted_alert not in left_alerts:
self._delete_on_dismiss(deleted_alert)
elif issubclass(alert.klass, OneShotAlertClass) and not alert.klass.deleted_automatically:
self._delete_on_dismiss(alert)
else:
alert.dismissed = True
await self._send_alert_changed_event(alert)
def _delete_on_dismiss(self, alert):
try:
self.alerts.remove(alert)
removed = True
except ValueError:
removed = False
for policy in self.policies.values():
policy.delete_alert(alert)
if removed:
self._send_alert_deleted_event(alert)
@api_method(AlertRestoreArgs, AlertRestoreResult)
async def restore(self, uuid):
"""
Restore `id` alert which had been dismissed.
"""
alert = self.__alert_by_uuid(uuid)
if alert is None:
return
alert.dismissed = False
await self._send_alert_changed_event(alert)
async def _send_alert_changed_event(self, alert):
as_ = AlertSerializer(self.middleware)
if await as_.should_show_alert(alert):
self.middleware.send_event("alert.list", "CHANGED", id=alert.uuid, fields=await as_.serialize(alert))
def _send_alert_deleted_event(self, alert):
self.middleware.send_event("alert.list", "REMOVED", id=alert.uuid)
@periodic(60)
@private
@job(lock="process_alerts", transient=True, lock_queue_size=1)
async def process_alerts(self, job):
if not await self.__should_run_or_send_alerts():
return
valid_alerts = copy.deepcopy(self.alerts)
await self.__run_alerts()
self.__expire_alerts()
if not await self.__should_run_or_send_alerts():
self.alerts = valid_alerts
return
await self.middleware.call("alert.send_alerts")
@private
@job(lock="process_alerts", transient=True)
async def send_alerts(self, job):
global SEND_ALERTS_ON_READY
if await self.middleware.call("system.state") != "READY":
SEND_ALERTS_ON_READY = True
return
product_type = await self.middleware.call("alert.product_type")
classes = (await self.middleware.call("alertclasses.config"))["classes"]
now = utc_now()
for policy_name, policy in self.policies.items():
gone_alerts, new_alerts = policy.receive_alerts(now, self.alerts)
for alert_service_desc in await self.middleware.call("datastore.query", "system.alertservice",
[["enabled", "=", True]]):
service_level = AlertLevel[alert_service_desc["level"]]
service_alerts = [
alert for alert in self.alerts
if (
product_type in alert.klass.products and
get_alert_level(alert, classes).value >= service_level.value and
get_alert_policy(alert, classes) != "NEVER"
)
]
service_gone_alerts = [
alert for alert in gone_alerts
if (
product_type in alert.klass.products and
get_alert_level(alert, classes).value >= service_level.value and
get_alert_policy(alert, classes) == policy_name
)
]
service_new_alerts = [
alert for alert in new_alerts
if (
product_type in alert.klass.products and
get_alert_level(alert, classes).value >= service_level.value and
get_alert_policy(alert, classes) == policy_name
)
]
for gone_alert in list(service_gone_alerts):
for new_alert in service_new_alerts:
if gone_alert.klass == new_alert.klass and gone_alert.key == new_alert.key:
service_gone_alerts.remove(gone_alert)
service_new_alerts.remove(new_alert)
break
if not service_gone_alerts and not service_new_alerts:
continue
factory = ALERT_SERVICES_FACTORIES.get(alert_service_desc["type"])
if factory is None:
self.logger.error("Alert service %r does not exist", alert_service_desc["type"])
continue
try:
alert_service = factory(self.middleware, alert_service_desc["attributes"])
except Exception:
self.logger.error("Error creating alert service %r with parameters=%r",
alert_service_desc["type"], alert_service_desc["attributes"], exc_info=True)
continue
alerts = [alert for alert in service_alerts if not alert.dismissed]
service_gone_alerts = [alert for alert in service_gone_alerts if not alert.dismissed]
service_new_alerts = [alert for alert in service_new_alerts if not alert.dismissed]
if alerts or service_gone_alerts or service_new_alerts:
try:
await alert_service.send(alerts, service_gone_alerts, service_new_alerts)
except Exception:
self.logger.error("Error in alert service %r", alert_service_desc["type"], exc_info=True)
if policy_name == "IMMEDIATELY":
as_ = AlertSerializer(self.middleware)
for alert in gone_alerts:
if await as_.should_show_alert(alert):
self._send_alert_deleted_event(alert)
for alert in new_alerts:
if await as_.should_show_alert(alert):
self.middleware.send_event(
"alert.list", "ADDED", id=alert.uuid, fields=await as_.serialize(alert),
)
for alert in new_alerts:
if alert.mail:
await self.middleware.call("mail.send", alert.mail)
if await self.middleware.call("system.is_enterprise"):
gone_proactive_support_alerts = [
alert
for alert in gone_alerts
if (
alert.klass.proactive_support and
(await as_.get_alert_class(alert)).get("proactive_support", True) and
alert.klass.proactive_support_notify_gone
)
]
new_proactive_support_alerts = [
alert
for alert in new_alerts
if (
alert.klass.proactive_support and
(await as_.get_alert_class(alert)).get("proactive_support", True)
)
]
if gone_proactive_support_alerts or new_proactive_support_alerts:
if await self.middleware.call("support.is_available_and_enabled"):
support = await self.middleware.call("support.config")
msg = []
if gone_proactive_support_alerts:
msg.append("The following alerts were cleared:")
msg += [f"* {html2text.html2text(alert.formatted)}"
for alert in gone_proactive_support_alerts]
if new_proactive_support_alerts:
msg.append("The following new alerts appeared:")
msg += [f"* {html2text.html2text(alert.formatted)}"
for alert in new_proactive_support_alerts]
serial = (await self.middleware.call("system.dmidecode_info"))["system-serial-number"]
for name, verbose_name in await self.middleware.call("support.fields"):
value = support[name]
if value:
msg += ["", "{}: {}".format(verbose_name, value)]
msg = "\n".join(msg)
job = await self.middleware.call("support.new_ticket", {
"title": "Automatic alert (%s)" % serial,
"body": msg,
"attach_debug": False,
"category": "Hardware",
"criticality": "Loss of Functionality",
"environment": "Production",
"name": "Automatic Alert",
"email": "auto-support@ixsystems.com",
"phone": "-",
})
await job.wait()
if job.error:
await self.middleware.call("alert.oneshot_create", "AutomaticAlertFailed",
{"serial": serial, "alert": msg, "error": str(job.error)})
def __uuid(self):
return str(uuid.uuid4())
async def __should_run_or_send_alerts(self):
if await self.middleware.call('system.state') != 'READY':
return False
if await self.middleware.call('failover.licensed'):
status = await self.middleware.call('failover.status')
if status == 'BACKUP' or await self.middleware.call('failover.in_progress'):
return False
return True
async def __get_failover_info(self):
this_node, other_node = "A", "B"
run_on_backup_node = run_failover_related = False
run_failover_related = await self.middleware.call("failover.licensed")
if run_failover_related:
if await self.middleware.call("failover.node") != "A":
this_node, other_node = "B", "A"
run_failover_related = time.monotonic() > self.blocked_failover_alerts_until
if run_failover_related:
try:
args = ([], {"connect_timeout": 2})
rem_ver = await self.middleware.call("failover.call_remote", "system.version", *args)
rem_state = await self.middleware.call("failover.call_remote", "system.state", *args)
rem_fstat = await self.middleware.call("failover.call_remote", "failover.status", *args)
except Exception:
pass
else:
run_on_backup_node = all((
await self.middleware.call("system.version") == rem_ver,
rem_state == "READY",
rem_fstat == "BACKUP",
))
return AlertFailoverInfo(
this_node=this_node,
other_node=other_node,
run_on_backup_node=run_on_backup_node,
run_failover_related=run_failover_related
)
async def __handle_locked_alert_source(self, name, this_node, other_node):
this_node_alerts, other_node_alerts = [], []
locked = self.blocked_sources[name]
if locked:
self.logger.debug("Not running alert source %r because it is blocked", name)
for i in filter(lambda x: x.source == name, self.alerts):
if i.node == this_node:
this_node_alerts.append(i)
elif i.node == other_node:
other_node_alerts.append(i)
return this_node_alerts, other_node_alerts, locked
async def __run_other_node_alert_source(self, name):
keys = ("args", "datetime", "last_occurrence", "dismissed", "mail",)
other_node_alerts = []
try:
try:
for alert in await self.middleware.call("failover.call_remote", "alert.run_source", [name]):
other_node_alerts.append(
Alert(**dict(
{k: v for k, v in alert.items() if k in keys},
klass=AlertClass.class_by_name[alert["klass"]],
_source=alert["source"],
_key=alert["key"]
))
)
except CallError as e:
if e.errno not in NETWORK_ERRORS + (CallError.EALERTCHECKERUNAVAILABLE,):
raise
except ReserveFDException:
self.logger.debug('Failed to reserve a privileged port')
except Exception as e:
other_node_alerts = [Alert(
AlertSourceRunFailedOnBackupNodeAlertClass,
args={"source_name": name, "traceback": str(e)},
_source=name
)]
return other_node_alerts
async def __run_alerts(self):
product_type = await self.middleware.call("alert.product_type")
fi = await self.__get_failover_info()
for k, source_lock in list(self.sources_locks.items()):
if source_lock.expires_at <= time.monotonic():
await self.unblock_source(k)
for alert_source in ALERT_SOURCES.values():
if product_type not in alert_source.products:
continue
if alert_source.failover_related and not fi.run_failover_related:
continue
if not alert_source.schedule.should_run(utc_now(), self.alert_source_last_run[alert_source.name]):
continue
self.alert_source_last_run[alert_source.name] = utc_now()
this_node_alerts, other_node_alerts, locked = await self.__handle_locked_alert_source(
alert_source.name, fi.this_node, fi.other_node
)
if not locked:
self.logger.trace("Running alert source: %r", alert_source.name)
try:
this_node_alerts = await self.__run_source(alert_source.name)
except UnavailableException:
pass
if fi.run_on_backup_node and alert_source.run_on_backup_node:
other_node_alerts = await self.__run_other_node_alert_source(alert_source.name)
for talert, oalert in zip_longest(this_node_alerts, other_node_alerts, fillvalue=None):
if talert is not None:
talert.node = fi.this_node
self.__handle_alert(talert)
if oalert is not None:
oalert.node = fi.other_node
self.__handle_alert(oalert)
self.alerts = (
[a for a in self.alerts if a.source != alert_source.name] + this_node_alerts + other_node_alerts
)
def __handle_alert(self, alert):
try:
existing_alert = [
a for a in self.alerts
if (a.node, a.source, a.klass, a.key) == (alert.node, alert.source, alert.klass, alert.key)
][0]
except IndexError:
existing_alert = None
if existing_alert is None:
alert.uuid = self.__uuid()
else:
alert.uuid = existing_alert.uuid
if existing_alert is None:
alert.datetime = alert.datetime or utc_now()
if alert.datetime.tzinfo is not None:
alert.datetime = alert.datetime.astimezone(timezone.utc).replace(tzinfo=None)
else:
alert.datetime = existing_alert.datetime
alert.last_occurrence = utc_now()
if existing_alert is None:
alert.dismissed = False
else:
alert.dismissed = existing_alert.dismissed
def __expire_alerts(self):
self.alerts = list(filter(lambda alert: not self.__should_expire_alert(alert), self.alerts))
def __should_expire_alert(self, alert):
if issubclass(alert.klass, OneShotAlertClass):
if alert.klass.expires_after is not None:
return alert.last_occurrence < utc_now() - alert.klass.expires_after
return False
@private
async def sources_stats(self):
return {
k: {"avg": v["total_time"] / v["total_count"] if v["total_count"] != 0 else 0, **v}
for k, v in sorted(self.sources_run_times.items(), key=lambda t: t[0])
}
@private
async def run_source(self, source_name):
try:
return [dict(alert.__dict__, klass=alert.klass.name)
for alert in await self.__run_source(source_name)]
except UnavailableException:
raise CallError("This alert checker is unavailable", CallError.EALERTCHECKERUNAVAILABLE)
@private
async def block_source(self, source_name, timeout=3600):
if source_name not in ALERT_SOURCES:
raise CallError("Invalid alert source")
lock = str(uuid.uuid4())
self.blocked_sources[source_name].add(lock)
self.sources_locks[lock] = AlertSourceLock(source_name, time.monotonic() + timeout)
return lock
@private
async def unblock_source(self, lock):
source_lock = self.sources_locks.pop(lock, None)
if source_lock:
self.blocked_sources[source_lock.source_name].remove(lock)
@private
async def block_failover_alerts(self):
        # These values come from support's observations of how long an M-series boot can take.
self.blocked_failover_alerts_until = time.monotonic() + 900
async def __run_source(self, source_name):
alert_source = ALERT_SOURCES[source_name]
start = time.monotonic()
try:
alerts = (await alert_source.check()) or []
except UnavailableException:
raise
except Exception as e:
if source_name not in self.alert_sources_errors:
self.logger.error("Error checking for alert %r", alert_source.name, exc_info=True)
self.alert_sources_errors.add(source_name)
alerts = [
Alert(AlertSourceRunFailedAlertClass,
args={
"source_name": alert_source.name,
"traceback": str(e),
})
]
else:
self.alert_sources_errors.discard(source_name)
if not isinstance(alerts, list):
alerts = [alerts]
finally:
run_time = time.monotonic() - start
source_stat = self.sources_run_times[source_name]
source_stat["last"] = source_stat["last"][-9:] + [run_time]
source_stat["max"] = max(source_stat["max"], run_time)
source_stat["total_count"] += 1
source_stat["total_time"] += run_time
keys = set()
unique_alerts = []
for alert in alerts:
if alert.key in keys:
continue
keys.add(alert.key)
unique_alerts.append(alert)
alerts = unique_alerts
for alert in alerts:
alert.source = source_name
return alerts
@periodic(3600, run_on_start=False)
@private
async def flush_alerts(self):
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'BACKUP':
return
await self.middleware.call("datastore.delete", "system.alert", [])
for alert in self.alerts:
d = alert.__dict__.copy()
d["klass"] = d["klass"].name
del d["mail"]
await self.middleware.call("datastore.insert", "system.alert", d)
@api_method(AlertOneshotCreateArgs, AlertOneshotCreateResult, private=True)
@job(lock="process_alerts", transient=True)
async def oneshot_create(self, job, klass, args):
"""
Creates a one-shot alert of specified `klass`, passing `args` to `klass.create` method.
Normal alert creation logic will be applied, so if you create an alert with the same `key` as an already
existing alert, no duplicate alert will be created.
:param klass: one-shot alert class name (without the `AlertClass` suffix).
:param args: `args` that will be passed to `klass.create` method.
"""
try:
klass = AlertClass.class_by_name[klass]
except KeyError:
raise CallError(f"Invalid alert class: {klass!r}")
if not issubclass(klass, OneShotAlertClass):
raise CallError(f"Alert class {klass!r} is not a one-shot alert class")
alert = await klass(self.middleware).create(args)
if alert is None:
return
alert.source = ""
alert.klass = alert.klass
alert.node = self.node
self.__handle_alert(alert)
self.alerts = [a for a in self.alerts if a.uuid != alert.uuid] + [alert]
await self.middleware.call("alert.send_alerts")
@api_method(AlertOneshotDeleteArgs, AlertOneshotDeleteResult, private=True)
@job(lock="process_alerts", transient=True)
async def oneshot_delete(self, job, klass, query):
"""
Deletes one-shot alerts of specified `klass` or klasses, passing `query`
to `klass.delete` method.
It's not an error if no alerts matching delete `query` exist.
:param klass: either one-shot alert class name (without the `AlertClass` suffix), or list thereof.
:param query: `query` that will be passed to `klass.delete` method.
"""
if isinstance(klass, list):
klasses = klass
else:
klasses = [klass]
deleted = False
for klassname in klasses:
try:
klass = AlertClass.class_by_name[klassname]
except KeyError:
raise CallError(f"Invalid alert source: {klassname!r}")
if not issubclass(klass, OneShotAlertClass):
raise CallError(f"Alert class {klassname!r} is not a one-shot alert source")
related_alerts, unrelated_alerts = bisect(lambda a: (a.node, a.klass) == (self.node, klass),
self.alerts)
left_alerts = await klass(self.middleware).delete(related_alerts, query)
for deleted_alert in related_alerts:
if deleted_alert not in left_alerts:
self.alerts.remove(deleted_alert)
deleted = True
if deleted:
# We need to flush alerts to the database immediately after deleting oneshot alerts.
            # Some oneshot alerts can only be deleted programmatically (e.g. cloud sync oneshot alerts are deleted
            # when deleting a cloud sync task). If we delete a cloud sync task and then reboot the system abruptly,
            # the alerts won't be flushed to the database, and on next boot an alert for the nonexistent cloud sync
            # task will appear, and it won't be deletable.
await self.middleware.call("alert.flush_alerts")
await self.middleware.call("alert.send_alerts")
@private
def alert_source_clear_run(self, name):
alert_source = ALERT_SOURCES.get(name)
if not alert_source:
raise CallError(f"Alert source {name!r} not found.", errno.ENOENT)
self.alert_source_last_run[alert_source.name] = datetime.min
@private
async def product_type(self):
return await self.middleware.call("system.product_type")
class AlertServiceModel(sa.Model):
__tablename__ = 'system_alertservice'
id = sa.Column(sa.Integer(), primary_key=True)
name = sa.Column(sa.String(120))
type = sa.Column(sa.String(20))
attributes = sa.Column(sa.JSON())
enabled = sa.Column(sa.Boolean())
level = sa.Column(sa.String(20))
class AlertServiceService(CRUDService):
class Config:
datastore = "system.alertservice"
datastore_extend = "alertservice._extend"
datastore_order_by = ["name"]
cli_namespace = "system.alert.service"
entry = AlertServiceEntry
@private
async def _extend(self, service):
try:
service["type__title"] = ALERT_SERVICES_FACTORIES[service["type"]].title
except KeyError:
service["type__title"] = "<Unknown>"
return service
@private
async def _compress(self, service):
service.pop("type__title")
return service
@private
async def _validate(self, service, schema_name):
verrors = ValidationErrors()
factory = ALERT_SERVICES_FACTORIES.get(service["type"])
if factory is None:
verrors.add(f"{schema_name}.type", "This field has invalid value")
raise verrors
levels = AlertLevel.__members__
if service["level"] not in levels:
verrors.add(f"{schema_name}.level", f"Level must be one of {list(levels)}")
raise verrors
verrors.add_child(f"{schema_name}.attributes",
validate_schema(list(factory.schema.attrs.values()), service["attributes"]))
verrors.check()
@api_method(AlertServiceCreateArgs, AlertServiceCreateResult)
async def do_create(self, data):
"""
Create an Alert Service of specified `type`.
If `enabled`, it sends alerts to the configured `type` of Alert Service.
.. examples(websocket)::
Create an Alert Service of Mail `type`
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "alertservice.create",
"params": [{
"name": "Test Email Alert",
"enabled": true,
"type": "Mail",
"attributes": {
"email": "dev@ixsystems.com"
},
"settings": {
"VolumeVersion": "HOURLY"
}
}]
}
"""
await self._validate(data, "alert_service_create")
data["id"] = await self.middleware.call("datastore.insert", self._config.datastore, data)
await self._extend(data)
return await self.get_instance(data["id"])
@api_method(AlertServiceUpdateArgs, AlertServiceUpdateResult)
async def do_update(self, id_, data):
"""
Update Alert Service of `id`.
"""
old = await self.middleware.call("datastore.query", self._config.datastore, [("id", "=", id_)],
{"extend": self._config.datastore_extend,
"get": True})
new = old.copy()
new.update(data)
await self._validate(new, "alert_service_update")
await self._compress(new)
await self.middleware.call("datastore.update", self._config.datastore, id_, new)
return await self.get_instance(id_)
@api_method(AlertServiceDeleteArgs, AlertServiceDeleteResult)
async def do_delete(self, id_):
"""
Delete Alert Service of `id`.
"""
return await self.middleware.call("datastore.delete", self._config.datastore, id_)
@api_method(AlertServiceTestArgs, AlertServiceTestResult)
async def test(self, data):
"""
Send a test alert using `type` of Alert Service.
.. examples(websocket)::
Send a test alert using Alert Service of Mail `type`.
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "alertservice.test",
"params": [{
"name": "Test Email Alert",
"enabled": true,
"type": "Mail",
"attributes": {
"email": "dev@ixsystems.com"
},
"settings": {}
}]
}
"""
await self._validate(data, "alert_service_test")
factory = ALERT_SERVICES_FACTORIES.get(data["type"])
if factory is None:
self.logger.error("Alert service %r does not exist", data["type"])
return False
try:
alert_service = factory(self.middleware, data["attributes"])
except Exception:
self.logger.error("Error creating alert service %r with parameters=%r",
data["type"], data["attributes"], exc_info=True)
return False
master_node = "A"
if await self.middleware.call("failover.licensed"):
master_node = await self.middleware.call("failover.node")
test_alert = Alert(
TestAlertClass,
node=master_node,
datetime=utc_now(),
last_occurrence=utc_now(),
_uuid=str(uuid.uuid4()),
)
try:
await alert_service.send([test_alert], [], [test_alert])
except Exception:
self.logger.error("Error in alert service %r", data["type"], exc_info=True)
return False
return True
class AlertClassesModel(sa.Model):
__tablename__ = 'system_alertclasses'
id = sa.Column(sa.Integer(), primary_key=True)
classes = sa.Column(sa.JSON())
class AlertClassesService(ConfigService):
class Config:
datastore = "system.alertclasses"
cli_namespace = "system.alert.class"
ENTRY = Dict(
"alertclasses_entry",
Int("id"),
Dict("classes", additional_attrs=True),
)
async def do_update(self, data):
"""
Update default Alert settings.
.. examples(rest)::
Set ClassName's level to LEVEL and policy to POLICY. Reset settings for other alert classes.
{
"classes": {
"ClassName": {
"level": "LEVEL",
"policy": "POLICY",
}
}
}
"""
old = await self.config()
new = old.copy()
new.update(data)
verrors = ValidationErrors()
for k, v in new["classes"].items():
if k not in AlertClass.class_by_name:
verrors.add(f"alert_class_update.classes.{k}", "This alert class does not exist")
verrors.add_child(
f"alert_class_update.classes.{k}",
validate_schema([
Str("level", enum=list(AlertLevel.__members__)),
Str("policy", enum=POLICIES),
Bool("proactive_support"),
], v),
)
if "proactive_support" in v and not AlertClass.class_by_name[k].proactive_support:
verrors.add(
f"alert_class_update.classes.{k}.proactive_support",
"Proactive support is not supported by this alert class",
)
verrors.check()
await self.middleware.call("datastore.update", self._config.datastore, old["id"], new)
return await self.config()
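# Illustrative sketch (not part of the original module): a concrete payload for
# the `alertclasses.update` docstring above, using "UPSOnBattery" as an example
# class name.
async def _example_alertclasses_update(middleware):
    return await middleware.call("alertclasses.update", {
        "classes": {
            "UPSOnBattery": {
                "level": "CRITICAL",      # any AlertLevel member name
                "policy": "IMMEDIATELY",  # one of POLICIES
                # "proactive_support" may only be set for classes that
                # actually support proactive support.
            }
        }
    })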
async def _event_system(middleware, event_type, args):
if SEND_ALERTS_ON_READY:
await middleware.call("alert.send_alerts")
async def setup(middleware):
middleware.event_register("alert.list", "Sent on alert changes.", roles=["ALERT_LIST_READ"])
await middleware.call("alert.load")
await middleware.call("alert.initialize")
middleware.event_subscribe("system.ready", _event_system)
| 46,090 | Python | .py | 996 | 33.319277 | 117 | 0.573844 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,515 | mail.py | truenas_middleware/src/middlewared/middlewared/plugins/mail.py | from middlewared.schema import accepts, Bool, Dict, Int, List, Password, Patch, Ref, returns, Str
from middlewared.service import CallError, ConfigService, ValidationErrors, job, periodic, private
import middlewared.sqlalchemy as sa
from middlewared.utils import ProductName, BRAND
from middlewared.utils.mako import get_template
from middlewared.validators import Email
from collections import deque
from datetime import datetime, timedelta
from email.header import Header
from email.message import Message
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import html2text
from threading import Lock
import base64
import errno
import html
import json
import os
import smtplib
import syslog
class DenyNetworkActivity(Exception):
pass
class QueueItem(object):
def __init__(self, message):
self.attempts = 0
self.message = message
class MailQueue(object):
MAX_ATTEMPTS = 3
MAX_QUEUE_LIMIT = 20
def __init__(self):
self.queue = deque(maxlen=self.MAX_QUEUE_LIMIT)
self.lock = Lock()
def append(self, message):
self.queue.append(QueueItem(message))
def __enter__(self):
self.lock.acquire()
return self
def __exit__(self, typ, value, traceback):
self.lock.release()
if typ is not None:
raise
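# Illustrative sketch (not part of the original module): MailQueue is a
# lock-guarded bounded deque -- entering the context takes the lock, and the
# deque silently drops its oldest item once MAX_QUEUE_LIMIT is exceeded.
def _example_mail_queue():
    mq = MailQueue()
    with mq as q:
        for i in range(q.MAX_QUEUE_LIMIT + 5):
            q.append(f"message-{i}")
        # Only the newest MAX_QUEUE_LIMIT items survive.
        assert len(q.queue) == q.MAX_QUEUE_LIMIT
        assert q.queue[0].message == "message-5"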
class MailModel(sa.Model):
__tablename__ = 'system_email'
id = sa.Column(sa.Integer(), primary_key=True)
em_fromemail = sa.Column(sa.String(120), default='')
em_outgoingserver = sa.Column(sa.String(120))
em_port = sa.Column(sa.Integer(), default=25)
em_security = sa.Column(sa.String(120), default="plain")
em_smtp = sa.Column(sa.Boolean())
em_user = sa.Column(sa.String(120), nullable=True)
em_pass = sa.Column(sa.EncryptedText(), nullable=True)
em_fromname = sa.Column(sa.String(120), default='')
em_oauth = sa.Column(sa.JSON(dict, encrypted=True), nullable=True)
class MailService(ConfigService):
mail_queue = MailQueue()
oauth_access_token = None
oauth_access_token_expires_at = None
class Config:
datastore = 'system.email'
datastore_prefix = 'em_'
datastore_extend = 'mail.mail_extend'
cli_namespace = 'system.mail'
ENTRY = Dict(
'mail_entry',
Str('fromemail', validators=[Email(empty=True)], required=True),
Str('fromname', required=True),
Str('outgoingserver', required=True),
Int('port', required=True),
Str('security', enum=['PLAIN', 'SSL', 'TLS'], required=True),
Bool('smtp', required=True),
Str('user', null=True, required=True),
Password('pass', null=True, required=True),
Dict(
'oauth',
Str('client_id'),
Str('client_secret'),
Password('refresh_token'),
null=True,
private=True,
required=True,
),
Int('id', required=True),
)
@private
async def mail_extend(self, cfg):
if cfg['security']:
cfg['security'] = cfg['security'].upper()
return cfg
@accepts(
Patch(
'mail_entry', 'mail_update',
('rm', {'name': 'id'}),
(
'replace', Dict(
'oauth',
Str('client_id', required=True),
Str('client_secret', required=True),
Password('refresh_token', required=True),
null=True,
private=True,
)
),
('attr', {'update': True}),
register=True
)
)
async def do_update(self, data):
"""
Update Mail Service Configuration.
        `fromemail` is the sending address which the mail server will use for sending emails.
        `outgoingserver` is the hostname or IP address of the SMTP server used for sending email.
        `security` is the type of encryption desired.
        `smtp` is a boolean value which, when set, indicates that SMTP authentication has been enabled and that
        `user`/`pass` are now required attributes.
"""
config = await self.config()
new = config.copy()
new.update(data)
new['security'] = new['security'].lower() # Django Model compatibility
verrors = ValidationErrors()
if new['smtp'] and new['user'] == '':
verrors.add(
'mail_update.user',
'This field is required when SMTP authentication is enabled',
)
if new['oauth']:
if new['fromemail']:
verrors.add('mail_update.fromemail', 'This field cannot be used with GMail')
if new['fromname']:
verrors.add('mail_update.fromname', 'This field cannot be used with GMail')
else:
if not new['fromemail']:
verrors.add('mail_update.fromemail', 'This field is required')
self.__password_verify(new['pass'], 'mail_update.pass', verrors)
verrors.check()
await self.middleware.call('datastore.update', 'system.email', config['id'], new, {'prefix': 'em_'})
await self.middleware.call('mail.gmail_initialize')
return await self.config()
def __password_verify(self, password, schema, verrors=None):
if verrors is None:
verrors = ValidationErrors()
if not password:
return verrors
# FIXME: smtplib does not support non-ascii password yet
# https://github.com/python/cpython/pull/8938
try:
password.encode('ascii')
except UnicodeEncodeError:
verrors.add(
schema,
'Only plain text characters (7-bit ASCII) are allowed in passwords. '
'UTF or composed characters are not allowed.'
)
return verrors
@accepts(Dict(
'mail_message',
Str('subject', required=True),
Str('text', max_length=None),
Str('html', null=True, max_length=None),
List('to', items=[Str('email')]),
List('cc', items=[Str('email')]),
Int('interval', null=True),
Str('channel', null=True),
Int('timeout', default=300),
Bool('attachments', default=False),
Bool('queue', default=True),
Dict('extra_headers', additional_attrs=True),
register=True,
), Ref('mail_update'))
@returns(Bool('successfully_sent'))
@job(pipes=['input'], check_pipes=False)
def send(self, job, message, config):
"""
Sends mail using configured mail settings.
        `text` will be formatted to HTML using Markdown and rendered using the default e-mail template.
        You can supply your own HTML using `html`. If `html` is null, no HTML MIME part will be added to the e-mail.
        If `attachments` is true, a list composed of dicts with the following structure is required
        via HTTP upload:
        - headers (list of dicts, each with):
          - name (str)
          - value (str)
          - params (dict)
        - content (str)
[
{
"headers": [
{
"name": "Content-Transfer-Encoding",
"value": "base64"
},
{
"name": "Content-Type",
"value": "application/octet-stream",
"params": {
"name": "test.txt"
}
}
],
"content": "dGVzdAo="
}
]
"""
gc = self.middleware.call_sync('datastore.config', 'network.globalconfiguration')
hostname = f'{gc["gc_hostname"]}.{gc["gc_domain"]}'
message['subject'] = f'{ProductName.PRODUCT_NAME} {hostname}: {message["subject"]}'
add_html = True
if 'html' in message and message['html'] is None:
message.pop('html')
add_html = False
if 'text' not in message:
if 'html' not in message:
verrors = ValidationErrors()
verrors.add('mail_message.text', 'Text is required when HTML is not set')
verrors.check()
message['text'] = html2text.html2text(message['html'])
if add_html and 'html' not in message:
template = get_template('assets/templates/mail.html')
message['html'] = template.render(body=html.escape(message['text']).replace('\n', '<br>\n'))
return self.send_raw(job, message, config)
@accepts(Ref('mail_message'), Ref('mail_update'))
@job(pipes=['input'], check_pipes=False)
@private
def send_raw(self, job, message, config):
config = dict(self.middleware.call_sync('mail.config'), **config)
from_addr = self._from_addr(config)
interval = message.get('interval')
if interval is None:
interval = timedelta()
else:
interval = timedelta(seconds=interval)
if interval > timedelta():
channelfile = f'/tmp/.msg.{message.get("channel") or BRAND.lower()}'
last_update = datetime.now() - interval
try:
last_update = datetime.fromtimestamp(os.stat(channelfile).st_mtime)
except OSError:
pass
timediff = datetime.now() - last_update
if (timediff >= interval) or (timediff < timedelta()):
# Make sure mtime is modified
# We could use os.utime but this is simpler!
with open(channelfile, 'w') as f:
f.write('!')
else:
raise CallError('This message was already sent in the given interval')
verrors = self.__password_verify(config['pass'], 'mail_update.pass')
verrors.check()
to = message.get('to')
if not to:
to = self.middleware.call_sync('mail.local_administrators_emails')
if not to:
raise CallError('None of the local administrators has an e-mail address configured')
if message.get('attachments'):
job.check_pipe("input")
def read_json():
f = job.pipes.input.r
data = b''
i = 0
while True:
read = f.read(1048576) # 1MiB
if read == b'':
break
data += read
i += 1
if i > 50:
raise ValueError('Attachments bigger than 50MB not allowed yet')
if data == b'':
return None
return json.loads(data)
attachments = read_json()
else:
attachments = None
if 'html' in message or attachments:
msg = MIMEMultipart()
msg.preamble = 'This is a multi-part message in MIME format.'
if 'html' in message:
msg2 = MIMEMultipart('alternative')
msg2.attach(MIMEText(message['text'], 'plain', _charset='utf-8'))
msg2.attach(MIMEText(message['html'], 'html', _charset='utf-8'))
msg.attach(msg2)
if attachments:
for attachment in attachments:
m = Message()
m.set_payload(attachment['content'])
for header in attachment.get('headers'):
m.add_header(header['name'], header['value'], **(header.get('params') or {}))
msg.attach(m)
else:
msg = MIMEText(message['text'], _charset='utf-8')
msg['Subject'] = message['subject']
msg['From'] = from_addr
msg['To'] = ', '.join(to)
if message.get('cc'):
msg['Cc'] = ', '.join(message.get('cc'))
msg['Date'] = formatdate()
local_hostname = self.middleware.call_sync('system.hostname')
msg['Message-ID'] = make_msgid(base64.urlsafe_b64encode(os.urandom(3)).decode("ascii"))
extra_headers = message.get('extra_headers') or {}
for key, val in list(extra_headers.items()):
            # We already have "Content-Type: multipart/mixed", and setting "Content-Type: text/plain" like some
            # scripts do will break the Python e-mail module.
if key.lower() == "content-type":
continue
if key in msg:
msg.replace_header(key, val)
else:
msg[key] = val
syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
try:
if config['oauth']:
self.middleware.call_sync('mail.gmail_send', msg, config)
else:
server = self._get_smtp_server(config, message['timeout'], local_hostname=local_hostname)
# NOTE: Don't do this.
#
# If smtplib.SMTP* tells you to run connect() first, it's because the
# mailserver it tried connecting to via the outgoing server argument
# was unreachable and it tried to connect to 'localhost' and barfed.
# This is because FreeNAS doesn't run a full MTA.
# else:
# server.connect()
headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
server.sendmail(from_addr.encode(), to, msg.as_string())
server.quit()
except DenyNetworkActivity:
self.logger.warning('Sending email denied')
return False
except Exception as e:
# Don't spam syslog with these messages. They should only end up in the
# test-email pane.
# We are only interested in ValueError, not subclasses.
if e.__class__ is ValueError:
raise CallError(str(e))
syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
if isinstance(e, smtplib.SMTPAuthenticationError):
raise CallError(
f'Authentication error ({e.smtp_code}): {e.smtp_error}', errno.EPERM
)
self.logger.warning('Failed to send email', exc_info=True)
if message['queue']:
with self.mail_queue as mq:
mq.append(msg)
raise CallError(f'Failed to send email: {e}')
return True
def _get_smtp_server(self, config, timeout=300, local_hostname=None):
try:
self.middleware.call_sync('network.general.will_perform_activity', 'mail')
except CallError:
raise DenyNetworkActivity()
if local_hostname is None:
local_hostname = self.middleware.call_sync('system.hostname')
if not config['outgoingserver'] or not config['port']:
# See NOTE below.
raise ValueError('you must provide an outgoing mailserver and mail'
' server port when sending mail')
if config['security'] == 'SSL':
server = smtplib.SMTP_SSL(
config['outgoingserver'],
config['port'],
timeout=timeout,
local_hostname=local_hostname)
else:
server = smtplib.SMTP(
config['outgoingserver'],
config['port'],
timeout=timeout,
local_hostname=local_hostname)
if config['security'] == 'TLS':
server.starttls()
if config['smtp']:
server.login(config['user'], config['pass'])
return server
@periodic(600, run_on_start=False)
@private
def send_mail_queue(self):
with self.mail_queue as mq:
for queue in list(mq.queue):
try:
config = self.middleware.call_sync('mail.config')
if config['oauth']:
self.middleware.call_sync('mail.gmail_send', queue.message, config)
else:
server = self._get_smtp_server(config)
                    # Update the `From` address from the currently used config because, if the SMTP user changes,
                    # already queued messages might not be sent due to a (553, b'Relaying disallowed as xxx') error
queue.message['From'] = self._from_addr(config)
server.sendmail(queue.message['From'].encode(),
queue.message['To'].split(', '),
queue.message.as_string())
server.quit()
except DenyNetworkActivity:
# no reason to queue up email since network activity was
# explicitly denied by end-user
mq.queue.remove(queue)
except Exception:
self.logger.debug('Sending message from queue failed', exc_info=True)
queue.attempts += 1
if queue.attempts >= mq.MAX_ATTEMPTS:
mq.queue.remove(queue)
else:
mq.queue.remove(queue)
def _from_addr(self, config):
if config['fromname']:
from_addr = Header(config['fromname'], 'utf-8')
try:
config['fromemail'].encode('ascii')
except UnicodeEncodeError:
from_addr.append(f'<{config["fromemail"]}>', 'utf-8')
else:
from_addr.append(f'<{config["fromemail"]}>', 'ascii')
else:
try:
config['fromemail'].encode('ascii')
except UnicodeEncodeError:
from_addr = Header(config['fromemail'], 'utf-8')
else:
from_addr = Header(config['fromemail'], 'ascii')
return from_addr
@private
async def local_administrators_emails(self):
return list(set(user["email"] for user in await self.middleware.call("user.query", [
["roles", "rin", "FULL_ADMIN"],
["local", "=", True],
["email", "!=", None]
])))
@private
async def local_administrator_email(self):
emails = await self.local_administrators_emails()
if emails:
return sorted(emails)[0]
else:
return None
async def setup(middleware):
await middleware.call('network.general.register_activity', 'mail', 'Mail')
| 18,573 | Python | .py | 437 | 30.620137 | 118 | 0.558047 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,516 | builtin_administrator.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/builtin_administrator.py | from middlewared.api import api_method
from middlewared.api.current import GroupHasPasswordEnabledUserArgs, GroupHasPasswordEnabledUserResult
from middlewared.plugins.account import unixhash_is_valid
from middlewared.service import filter_list, Service, private
class GroupService(Service):
@api_method(GroupHasPasswordEnabledUserArgs, GroupHasPasswordEnabledUserResult)
async def has_password_enabled_user(self, gids, exclude_user_ids):
"""
        Checks whether at least one local user with a password is a member of any of the `gids`.
"""
return len(await self.get_password_enabled_users(gids, exclude_user_ids)) > 0
@private
async def get_password_enabled_users(self, gids, exclude_user_ids, groups=None):
if groups is None:
groups = await self.middleware.call("group.query")
result = []
result_user_ids = set()
groups = filter_list(groups, [["gid", "in", gids]])
for membership in await self.middleware.call(
"datastore.query",
"account.bsdgroupmembership",
[
["group", "in", [g["id"] for g in groups]],
["user", "nin", set(exclude_user_ids)],
],
{"prefix": "bsdgrpmember_"}
):
if membership["user"]["id"] in result_user_ids:
continue
if membership["user"]["bsdusr_locked"]:
continue
if membership["user"]["bsdusr_password_disabled"]:
continue
if not unixhash_is_valid(membership["user"]["bsdusr_unixhash"]):
continue
result.append({k.removeprefix("bsdusr_"): v for k, v in membership["user"].items()})
result_user_ids.add(membership["user"]["id"])
return result
| 1,821 | Python | .py | 38 | 37.578947 | 102 | 0.625847 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,517 | sync_builtin.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/sync_builtin.py | from collections import defaultdict
import itertools
from middlewared.service import private, Service
def read_file(path):
with open(path) as f:
return list(filter(None, f.read().strip().split("\n")))
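# Illustrative sketch (not part of the original module): the shape of the lines
# read_file() yields from /conf/base/etc/group and how sync_builtin() unpacks
# them below. The sample line is hypothetical.
def _example_parse_group_line(line="operator:x:37:root,admin"):
    name, _, gid, members = line.split(":", 3)
    # -> ("operator", 37, ["root", "admin"])
    return name, int(gid), list(filter(None, members.split(",")))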
class UserService(Service):
@private
def sync_builtin(self):
smb_builtins = [
'builtin_administrators',
'builtin_users',
'builtin_guests',
]
remove_groups = {
group["group"]: group
for group in self.middleware.call_sync(
"datastore.query",
"account.bsdgroups",
[("builtin", "=", True)],
{"prefix": "bsdgrp_"},
)
}
remove_users = {
user["username"]: user
for user in self.middleware.call_sync(
"datastore.query",
"account.bsdusers",
[("builtin", "=", True)],
{"prefix": "bsdusr_"},
)
}
non_builtin_groups = {
group["group"]: group
for group in self.middleware.call_sync(
"datastore.query",
"account.bsdgroups",
[("builtin", "=", False)],
{"prefix": "bsdgrp_"},
)
}
non_builtin_users = {
user["username"]: user
for user in self.middleware.call_sync(
"datastore.query",
"account.bsdusers",
[("builtin", "=", False)],
{"prefix": "bsdusr_"},
)
}
group_file = read_file("/conf/base/etc/group")
passwd_file = read_file("/conf/base/etc/passwd")
# Insert new groups or update GID for existing groups
groups_members = defaultdict(set)
for name, _, gid, members in map(lambda s: s.split(":", 3), group_file):
gid = int(gid)
if name in non_builtin_groups:
for i in itertools.count(1):
new_name = f"{name}_{i}"
if new_name not in non_builtin_groups:
break
self.logger.info(
"Renaming non-builtin group %r to %r as builtin group with that name should exist",
name, new_name,
)
self.middleware.call_sync(
"datastore.update",
"account.bsdgroups",
non_builtin_groups[name]["id"],
{
"group": new_name,
},
{"prefix": "bsdgrp_"},
)
existing_group = remove_groups.pop(name, None)
if existing_group is not None:
if existing_group["gid"] != gid:
self.logger.info("Changing group %r GID from %r to %r", existing_group["group"],
existing_group["gid"], gid)
self.middleware.call_sync(
"datastore.update",
"account.bsdgroups",
existing_group["id"],
{
"gid": gid,
},
{"prefix": "bsdgrp_"},
)
else:
self.logger.info("Creating new group %r", name)
existing_group = {
"gid": gid,
"group": name,
"builtin": True,
"smb": True if name in smb_builtins else False,
"sudo_commands": [],
"sudo_commands_nopasswd": [],
}
existing_group["id"] = self.middleware.call_sync(
"datastore.insert",
"account.bsdgroups",
existing_group,
{"prefix": "bsdgrp_"},
)
for username in list(filter(None, members.split(","))):
groups_members[username].add(existing_group["id"])
# Remove gone groups
remove_groups = list(remove_groups.values())
if remove_groups:
self.logger.info("Removing groups %r", [group["group"] for group in remove_groups])
remove_group_ids = [group["id"] for group in remove_groups]
nogroup_id = self.middleware.call_sync(
"datastore.query",
"account.bsdgroups",
[("group", "=", "nogroup")],
{
"get": True,
"prefix": "bsdgrp_",
},
)["id"]
for user in self.middleware.call_sync(
"datastore.query",
"account.bsdusers",
[
("group_id", "in", remove_group_ids),
],
{"prefix": "bsdusr_"},
):
self.middleware.call_sync(
"datastore.update",
"account.bsdusers",
user["id"],
{
"group_id": nogroup_id,
},
{"prefix": "bsdusr_"},
)
self.middleware.call_sync(
"datastore.delete",
"account.bsdgroups",
[("id", "in", remove_group_ids)],
)
# Insert new users or update UID/group for existing users
for name, _, uid, gid, gecos, home, shell in map(lambda s: s.split(":", 6), passwd_file):
uid = int(uid)
gid = int(gid)
if name in non_builtin_users:
for i in itertools.count(1):
new_name = f"{name}_{i}"
if new_name not in non_builtin_users:
break
self.logger.info(
"Renaming non-builtin user %r to %r as builtin user with that name should exist",
name, new_name,
)
self.middleware.call_sync(
"datastore.update",
"account.bsdusers",
non_builtin_users[name]["id"],
{
"username": new_name,
},
{"prefix": "bsdusr_"},
)
group = self.middleware.call_sync(
"datastore.query",
"account.bsdgroups",
[("gid", "=", gid)],
{
"get": True,
"prefix": "bsdgrp_",
},
)
existing_user = remove_users.pop(name, None)
if existing_user is not None:
# Reload updated GID
existing_user = self.middleware.call_sync(
"datastore.query",
"account.bsdusers",
[
("id", "=", existing_user["id"]),
],
{
"get": True,
"prefix": "bsdusr_",
},
)
update = {}
if existing_user["uid"] != uid:
self.logger.info("Changing user %r UID from %r to %r", existing_user["username"],
existing_user["uid"], uid)
update["uid"] = uid
if existing_user["group"]["bsdgrp_gid"] != gid:
self.logger.info("Changing user %r group from %r to %r", existing_user["username"],
existing_user["group"]["bsdgrp_group"], group["group"])
update["group"] = group["id"]
if existing_user["home"] != home:
update["home"] = home
if update:
self.middleware.call_sync(
"datastore.update",
"account.bsdusers",
existing_user["id"],
update,
{"prefix": "bsdusr_"},
)
else:
self.logger.info("Creating new user %r", name)
existing_user = {
"uid": uid,
"username": name,
"home": home,
"shell": shell,
"full_name": gecos.split(",")[0],
"builtin": True,
"group": group["id"],
"smb": False,
"sudo_commands": [],
"sudo_commands_nopasswd": [],
}
existing_user["id"] = self.middleware.call_sync(
"datastore.insert",
"account.bsdusers",
existing_user,
{"prefix": "bsdusr_"},
)
self.middleware.call_sync(
"datastore.insert", "account.twofactor_user_auth", {
'secret': None,
'user': existing_user["id"],
}
)
for group_id in groups_members[name]:
if not self.middleware.call_sync(
"datastore.query",
"account.bsdgroupmembership",
[
("group", "=", group_id),
("user", "=", existing_user["id"]),
],
{"prefix": "bsdgrpmember_"},
):
self.logger.info("Adding user %r to group %r", name, group_id)
self.middleware.call_sync(
"datastore.insert",
"account.bsdgroupmembership",
{
"group": group_id,
"user": existing_user["id"]
},
{"prefix": "bsdgrpmember_"},
)
# Remove gone users
remove_users = list(remove_users.values())
if remove_users:
self.logger.info("Removing users %r", [user["username"] for user in remove_users])
remove_user_ids = [user["id"] for user in remove_users]
self.middleware.call_sync(
"datastore.delete",
"account.bsdusers",
[("id", "in", remove_user_ids)],
)
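# Standalone sketch of the /etc/group parsing used by sync_builtin() above:
# each line is name:password:gid:member,member,... and splitting with
# maxsplit=3 keeps the member list intact. Sample lines are made up.
if __name__ == '__main__':
    sample_group_file = ['wheel:x:0:root,admin', 'nogroup:x:65534:']
    for name, _, gid, members in (line.split(':', 3) for line in sample_group_file):
        print(name, int(gid), list(filter(None, members.split(','))))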
| 10,524 | Python | .py | 262 | 22.381679 | 103 | 0.407549 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,518 | privilege_roles.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/privilege_roles.py | from middlewared.role import ROLES
from middlewared.service import Service, filterable, filterable_returns, filter_list, no_authz_required
from middlewared.schema import Bool, Dict, List, Str
class PrivilegeService(Service):
class Config:
namespace = "privilege"
cli_namespace = "auth.privilege"
@no_authz_required
@filterable
@filterable_returns(Dict(
"role",
Str("name"),
Str("title"),
List("includes", items=[Str("name")]),
Bool("builtin")
))
async def roles(self, filters, options):
"""
Get all available roles.
Each entry contains the following keys:
`name` - the internal name of the role
`includes` - list of other roles that this role includes. When a user is
granted this role, they will also receive permissions granted by all
the included roles.
`builtin` - role exists for internal backend purposes for access
control.
"""
roles = [
{
"name": name,
"title": name,
"includes": role.includes,
"builtin": role.builtin,
}
for name, role in ROLES.items()
]
return filter_list(roles, filters, options)
| 1,299 | Python | .py | 37 | 26.189189 | 103 | 0.60415 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,519 | constants.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/constants.py | ADMIN_UID = 950
ADMIN_GID = 950
SKEL_PATH = '/etc/skel/' # TODO evaluate whether this is still needed
# TrueNAS historically used /nonexistent as the default home directory for new
# users. The nonexistent directory has caused problems when
# 1) an admin chooses to create it from shell
# 2) PAM checks for home directory existence
# And so this default has been deprecated in favor of using /var/empty
# which is an empty and immutable directory.
LEGACY_DEFAULT_HOME_PATH = '/nonexistent'
DEFAULT_HOME_PATH = '/var/empty'
DEFAULT_HOME_PATHS = (DEFAULT_HOME_PATH, LEGACY_DEFAULT_HOME_PATH)
MIDDLEWARE_PAM_SERVICE = '/etc/pam.d/middleware'
MIDDLEWARE_PAM_API_KEY_SERVICE = '/etc/pam.d/middleware-api-key'
| 706 | Python | .py | 14 | 49.357143 | 78 | 0.780029 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,520 | 2fa.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/2fa.py | import errno
import pyotp
from middlewared.api import api_method
from middlewared.api.current import *
from middlewared.service import CallError, no_authz_required, pass_app, private, Service
from middlewared.utils import ProductName
from middlewared.utils.privilege import app_credential_full_admin_or_user
class UserService(Service):
@private
async def provisioning_uri_internal(self, username, user_twofactor_config):
return pyotp.totp.TOTP(
user_twofactor_config['secret'], interval=user_twofactor_config['interval'],
digits=user_twofactor_config['otp_digits'],
).provisioning_uri(
f'{username}-{await self.middleware.call("system.hostname")}'
f'@{ProductName.PRODUCT_NAME}',
'iXsystems'
)
@api_method(UserProvisioningUriArgs, UserProvisioningUriResult)
async def provisioning_uri(self, username):
"""
Returns the provisioning URI for the OTP for `username`. This can then be encoded in a QR code and used
to provision an OTP app like Google Authenticator.
"""
user = await self.translate_username(username)
user_twofactor_config = await self.middleware.call(
'auth.twofactor.get_user_config', user['id' if user['local'] else 'sid'], user['local'],
)
if not user_twofactor_config['secret']:
raise CallError(f'{user["username"]!r} user does not have two factor authentication configured')
return await self.provisioning_uri_internal(username, user_twofactor_config)
@api_method(UserTwofactorConfigArgs, UserTwofactorConfigResult)
async def twofactor_config(self, username):
"""
Returns two-factor authentication configuration settings for specified `username`.
"""
user = await self.translate_username(username)
user_twofactor_config = await self.middleware.call(
'auth.twofactor.get_user_config', user['id' if user['local'] else 'sid'], user['local'],
)
if user_twofactor_config['secret']:
provisioning_uri = await self.provisioning_uri_internal(username, user_twofactor_config)
else:
provisioning_uri = None
return {
'provisioning_uri': provisioning_uri,
'secret_configured': bool(user_twofactor_config['secret']),
'interval': user_twofactor_config['interval'],
'otp_digits': user_twofactor_config['otp_digits'],
}
@api_method(UserVerifyTwofactorTokenArgs, UserVerifyTwofactorTokenResult)
def verify_twofactor_token(self, username, token):
"""
Returns boolean true if provided `token` is successfully authenticated for `username`.
"""
twofactor_config = self.middleware.call_sync('auth.twofactor.config')
if not twofactor_config['enabled']:
raise CallError('Please enable Two Factor Authentication first')
user = self.middleware.call_sync('user.translate_username', username)
if not user['twofactor_auth_configured']:
raise CallError('Two Factor Authentication is not configured for this user')
user_twofactor_config = self.middleware.call_sync(
'auth.twofactor.get_user_config', user['id' if user['local'] else 'sid'], user['local'],
)
totp = pyotp.totp.TOTP(
user_twofactor_config['secret'], interval=user_twofactor_config['interval'],
digits=user_twofactor_config['otp_digits'],
)
return totp.verify(token or '', valid_window=twofactor_config['window'])
@private
async def translate_username(self, username):
"""
Translates `username` to a user object.
"""
try:
user = await self.middleware.call('user.get_user_obj', {'username': username})
except KeyError:
raise CallError(f'User {username!r} does not exist', errno.ENOENT)
return await self.middleware.call('user.query', [['username', '=', user['pw_name']]], {'get': True})
@api_method(UserUnset2faSecretArgs, UserUnset2faSecretResult,
audit='Unset two-factor authentication secret:', audit_extended=lambda username: username)
async def unset_2fa_secret(self, username):
"""
Unset two-factor authentication secret for `username`.
"""
user = await self.translate_username(username)
twofactor_auth = await self.middleware.call(
'auth.twofactor.get_user_config', user['id' if user['local'] else 'sid'], user['local']
)
if not twofactor_auth['exists']:
# This will only happen for AD users and we don't have a db record for them until they configure 2fa
# in this case we don't do anything and the secret is already unset
return
twofactor_config = await self.middleware.call('auth.twofactor.config')
if twofactor_config['enabled']:
# TODO: Let's try to stream line exception behaviour where we change this to either validation error
# when this starts being used in a form or UI changes how they handle call errors
raise CallError('Please disable Two Factor Authentication first')
await self.middleware.call(
'datastore.update',
'account.twofactor_user_auth',
twofactor_auth['id'], {
'secret': None,
}
)
@no_authz_required
@api_method(
UserRenew2faSecretArgs,
UserRenew2faSecretResult,
audit='Renew two-factor authentication secret:',
audit_extended=lambda username, options: username
)
@pass_app()
async def renew_2fa_secret(self, app, username, twofactor_options):
"""
Renew `username` user's two-factor authentication secret.
NOTE: This username must match the authenticated username unless authenticated
credentials have FULL_ADMIN role.
"""
if not app_credential_full_admin_or_user(app, username):
raise CallError(
f'{username}: currently authenticated credential may not renew two-factor '
'authentication for this user.',
errno.EPERM
)
user = await self.translate_username(username)
twofactor_auth = await self.middleware.call(
'auth.twofactor.get_user_config', user['id' if user['local'] else 'sid'], user['local']
)
# Add some sanity checks here
# The sanity check is only for local users because they should always have a db record in our 2fa
# table. For AD users, we don't have a db record for them until they configure 2fa explicitly.
if user['local'] and not twofactor_auth['exists']:
raise CallError(f'Unable to locate two factor authentication configuration for {username!r} user')
secret = await self.middleware.call('auth.twofactor.generate_base32_secret')
if twofactor_auth['exists']:
await self.middleware.call(
'datastore.update',
'account.twofactor_user_auth',
twofactor_auth['id'], {
'secret': secret,
**twofactor_options,
}
)
else:
await self.middleware.call(
'datastore.insert', 'account.twofactor_user_auth', {
'secret': secret,
'user': None,
'user_sid': user['sid'],
**twofactor_options,
}
)
if (await self.middleware.call('auth.twofactor.config'))['services']['ssh']:
# This needs to be reloaded so that the user's new secret can be reflected in the sshd configuration
await self.middleware.call('service.reload', 'ssh')
return await self.translate_username(username)
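# Standalone sketch of the pyotp calls used above; the secret is a throwaway
# value generated on the spot, not a real credential, and the label/issuer
# strings are placeholders for the hostname-derived values the service builds.
if __name__ == '__main__':
    import pyotp
    secret = pyotp.random_base32()
    totp = pyotp.totp.TOTP(secret, interval=30, digits=6)
    print(totp.provisioning_uri('alice-truenas@TrueNAS', 'iXsystems'))
    print(totp.verify(totp.now(), valid_window=0))  # True for the current token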
| 7,920 | Python | .py | 157 | 40.050955 | 112 | 0.641602 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,521 | internal_cache.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/internal_cache.py | import errno
from middlewared.service import CallError, private, Service
class UserService(Service):
SYS_USERS = {}
@private
async def get_builtin_user_id(self, username):
if not self.SYS_USERS:
UserService.SYS_USERS = {
u['username']: u['uid'] for u in await self.middleware.call(
'user.query', [['builtin', '=', True]], {'force_sql_filters': True}
)
}
try:
return self.SYS_USERS[username]
except KeyError:
raise CallError(f'{username!r} user not found', errno.ENOENT)
class GroupService(Service):
SYS_GROUPS = {}
@private
async def get_builtin_group_id(self, group_name):
if not self.SYS_GROUPS:
GroupService.SYS_GROUPS = {
g['group']: g['gid'] for g in await self.middleware.call(
'group.query', [['builtin', '=', True]], {'force_sql_filters': True}
)
}
try:
return self.SYS_GROUPS[group_name]
except KeyError:
raise CallError(f'{group_name!r} group not found', errno.ENOENT)
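# The class-level dicts above behave as process-wide lazy caches: the first
# call populates them, subsequent calls are plain dictionary lookups. A tiny
# standalone analogue of that pattern:
if __name__ == '__main__':
    class LazyCache:
        DATA = {}
        def get(self, key, loader):
            if not self.DATA:
                LazyCache.DATA = loader()  # populate once, on first use
            return self.DATA[key]
    print(LazyCache().get('root', lambda: {'root': 0, 'daemon': 1}))  # 0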
| 1,160 | Python | .py | 30 | 28.3 | 88 | 0.564674 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,522 | privilege.py | truenas_middleware/src/middlewared/middlewared/plugins/account_/privilege.py | import enum
import errno
import wbclient
from middlewared.api import api_method
from middlewared.api.current import *
from middlewared.plugins.account import unixhash_is_valid
from middlewared.service import CallError, CRUDService, filter_list, private, ValidationErrors
from middlewared.service_exception import MatchNotFound
from middlewared.utils.privilege import (
LocalAdminGroups,
privilege_has_webui_access,
privileges_group_mapping
)
import middlewared.sqlalchemy as sa
class BuiltinPrivileges(enum.Enum):
LOCAL_ADMINISTRATOR = "LOCAL_ADMINISTRATOR"
READONLY_ADMINISTRATOR = "READONLY_ADMINISTRATOR"
SHARING_ADMINISTRATOR = "SHARING_ADMINISTRATOR"
class PrivilegeModel(sa.Model):
__tablename__ = "account_privilege"
id = sa.Column(sa.Integer(), primary_key=True)
builtin_name = sa.Column(sa.String(200), nullable=True)
name = sa.Column(sa.String(200))
local_groups = sa.Column(sa.JSON(list))
ds_groups = sa.Column(sa.JSON(list))
allowlist = sa.Column(sa.JSON(list))
roles = sa.Column(sa.JSON(list))
web_shell = sa.Column(sa.Boolean())
class PrivilegeService(CRUDService):
keys = {}
class Config:
namespace = "privilege"
datastore = "account.privilege"
datastore_extend = "privilege.item_extend"
datastore_extend_context = "privilege.item_extend_context"
cli_namespace = "auth.privilege"
entry = PrivilegeEntry
@private
async def item_extend_context(self, rows, extra):
return {
"groups": await self._groups(),
}
@private
async def item_extend(self, item, context):
item["local_groups"] = self._local_groups(context["groups"], item["local_groups"])
item["ds_groups"] = await self._ds_groups(context["groups"], item["ds_groups"])
return item
@api_method(PrivilegeCreateArgs, PrivilegeCreateResult,
audit="Create privilege", audit_extended=lambda data: data["name"])
async def do_create(self, data):
"""
Creates a privilege.
`name` is a name for privilege (must be unique).
`local_groups` is a list of local user account group GIDs that gain this privilege.
`ds_groups` is list of Directory Service group GIDs that will gain this privilege.
`allowlist` is a list of API endpoints allowed for this privilege.
`web_shell` controls whether users with this privilege are allowed to log in to the Web UI.
"""
await self._validate("privilege_create", data)
id_ = await self.middleware.call(
"datastore.insert",
self._config.datastore,
data
)
return await self.get_instance(id_)
@api_method(PrivilegeUpdateArgs, PrivilegeUpdateResult, audit="Update privilege", audit_callback=True)
async def do_update(self, audit_callback, id_, data):
"""
Update the privilege `id`.
"""
old = await self.get_instance(id_)
audit_callback(old["name"])
new = old.copy()
new["local_groups"] = [g["gid"] for g in new["local_groups"]]
# Preference is for SID values rather than GIDS because they are universally unique
new["ds_groups"] = []
for g in old["ds_groups"]:
new["ds_groups"].append(g["gid"] if not g["sid"] else g["sid"])
new.update(data)
verrors = ValidationErrors()
if new["builtin_name"]:
for k in ["name", "allowlist", "roles"]:
if new[k] != old[k]:
verrors.add(f"privilege_update.{k}", "This field is read-only for built-in privileges")
builtin_privilege = BuiltinPrivileges(new["builtin_name"])
if builtin_privilege == BuiltinPrivileges.LOCAL_ADMINISTRATOR:
if LocalAdminGroups.BUILTIN_ADMINISTRATORS not in new["local_groups"]:
verrors.add(
"privilege_update.local_groups",
f"The group {LocalAdminGroups.BUILTIN_ADMINISTRATORS.name.lower()} must be "
"among grantees of the \"Local Administrator\" privilege."
)
if not await self.middleware.call("group.has_password_enabled_user", new["local_groups"]):
verrors.add(
"privilege_update.local_groups",
"None of the members of these groups has password login enabled. At least one grantee of "
"the \"Local Administrator\" privilege must have password login enabled."
)
elif builtin_privilege == BuiltinPrivileges.READONLY_ADMINISTRATOR:
if new["web_shell"]:
verrors.add(
"privilege_update.web_shell",
"Web shell access may not be enabled for the built-in group for "
"read-only administrators."
)
verrors.check()
new.update(data)
await self._validate("privilege_update", new, id_)
await self.middleware.call(
"datastore.update",
self._config.datastore,
id_,
new,
)
return await self.get_instance(id_)
@api_method(PrivilegeDeleteArgs, PrivilegeDeleteResult, audit="Delete privilege", audit_callback=True)
async def do_delete(self, audit_callback, id_):
"""
Delete the privilege `id`.
"""
privilege = await self.get_instance(id_)
audit_callback(privilege["name"])
if privilege["builtin_name"]:
raise CallError("Unable to delete built-in privilege", errno.EPERM)
response = await self.middleware.call(
"datastore.delete",
self._config.datastore,
id_
)
return response
async def _validate(self, schema_name, data, id_=None):
verrors = ValidationErrors()
await self._ensure_unique(verrors, schema_name, "name", data["name"], id_)
groups = await self._groups()
for i, local_group_id in enumerate(data["local_groups"]):
if not self._local_groups(groups, [local_group_id], include_nonexistent=False):
verrors.add(
f"{schema_name}.local_groups.{i}",
f"{local_group_id}: local group does not exist. "
"This error may be addressed by either re-creating the missing group "
"with the specified group id or removing this entry from the privilege."
)
for i, ds_group_id in enumerate(data["ds_groups"]):
if not await self._ds_groups(groups, [ds_group_id], include_nonexistent=False):
verrors.add(
f"{schema_name}.ds_groups.{i}",
f"{ds_group_id}: directory Service group does not exist. "
"If the directory service state is healthy, then this error may be "
"addressed by removing this entry from the privilege."
)
for i, role in enumerate(data["roles"]):
if role not in self.middleware.role_manager.roles:
verrors.add(f"{schema_name}.roles.{i}", "Invalid role")
verrors.check()
async def _groups(self):
groups = await self.middleware.call("group.query")
by_gid = {group["gid"]: group for group in groups}
by_sid = {
group["sid"]: group
for group in filter_list(
groups, [["sid", "!=", None], ["local", "=", False]],
)
}
return {'by_gid': by_gid, 'by_sid': by_sid}
def _local_groups(self, groups, local_groups, *, include_nonexistent=True):
result = []
for gid in local_groups:
if group := groups['by_gid'].get(gid):
if group["local"]:
result.append(group)
else:
if include_nonexistent:
result.append({
"gid": gid,
"group": None,
})
return result
async def _ds_groups(self, groups, ds_groups, *, include_nonexistent=True):
"""
Directory services group privileges may be assigned by either GID or SID.
Preference is for the latter when it is available. The primary case where it
will not be available is when the directory service is not Active Directory.
"""
result = []
if (sids_to_check := [entry for entry in ds_groups if wbclient.sid_is_valid(str(entry))]):
try:
mapped_sids = (await self.middleware.call('idmap.convert_sids', sids_to_check))['mapped']
except Exception:
self.logger.warning('Failed to generate privileges for domain groups', exc_info=True)
return result
else:
mapped_sids = {}
for xid in ds_groups:
if isinstance(xid, int):
if (group := groups['by_gid'].get(xid)) is None:
gid = xid
else:
if (group := groups['by_sid'].get(xid)) is None:
unixid = mapped_sids.get(xid)
if unixid is None or unixid['id_type'] == 'USER':
gid = -1
else:
gid = unixid['id']
if group is None:
try:
group = await self.middleware.call(
"group.query",
[["gid", "=", gid]],
{"get": True},
)
except MatchNotFound:
if include_nonexistent:
result.append({
"gid": gid,
"sid": None,
"group": None,
})
continue
if group["local"]:
continue
result.append(group)
return result
@private
async def before_user_password_disable(self, user):
return await self.before_user_deactivation(
user,
'After disabling password for this user no password-enabled local user',
)
@private
async def before_user_delete(self, user):
return await self.before_user_deactivation(user, 'After deleting this user no local user')
@private
async def before_user_deactivation(self, user, error_text):
for privilege in await self.middleware.call(
'datastore.query',
'account.privilege',
[['builtin_name', '=', 'LOCAL_ADMINISTRATOR']],
):
if not await self.middleware.call('group.has_password_enabled_user', privilege['local_groups'],
[user['id']]):
raise CallError(
f'{error_text} will have built-in privilege {privilege["name"]!r}.',
errno.EACCES,
)
@private
async def before_group_delete(self, group):
for privilege in await self.middleware.call('datastore.query', 'account.privilege'):
if group['gid'] in privilege['local_groups']:
raise CallError(
f'This group is used by privilege {privilege["name"]!r}. Please remove it from that privilege '
'first, then delete the group.',
errno.EACCES,
)
@private
async def used_local_gids(self):
gids = {}
for privilege in await self.middleware.call('datastore.query', 'account.privilege', [], {'order_by': ['id']}):
for gid in privilege['local_groups']:
gids.setdefault(gid, privilege)
return gids
@private
async def privileges_for_groups(self, groups_key, group_ids):
"""
group_ids here are based on NSS group_list output.
Directory services groups may have privileges assigned by SID, which
are set on the domain controller rather than locally on TrueNAS.
This means we expand the set of group_ids to include SID mappings for
permissions evaluation.
If for some reason libwbclient raises an exception during the attempt
to convert unix gids to SIDs, then the domain is probably unhealthy and
permissions failure is acceptable. We do not need to log here as there will
be other failures / alerts and we don't want to spam logs unnecessarily.
"""
if groups_key == 'ds_groups':
try:
sids = await self.middleware.call(
'idmap.convert_unixids',
[{'id_type': 'GROUP', 'id': x} for x in group_ids]
)
except Exception:
group_ids = set(group_ids)
else:
group_ids = set(group_ids) | set([s['sid'] for s in sids['mapped'].values()])
else:
group_ids = set(group_ids)
privileges = await self.middleware.call('datastore.query', 'account.privilege')
return privileges_group_mapping(privileges, group_ids, groups_key)['privileges']
@private
async def compose_privilege(self, privileges):
compose = {
'roles': set(),
'allowlist': [],
'web_shell': False,
'webui_access': False,
}
for privilege in privileges:
for role in privilege['roles']:
compose['roles'] |= self.middleware.role_manager.roles_for_role(role)
compose['allowlist'].extend(self.middleware.role_manager.allowlist_for_role(role))
for item in privilege['allowlist']:
if item == {'method': '*', 'resource': '*'} and 'FULL_ADMIN' not in compose['roles']:
compose['roles'] |= self.middleware.role_manager.roles_for_role('FULL_ADMIN')
compose['webui_access'] = True
compose['allowlist'].append(item)
compose['web_shell'] |= privilege['web_shell']
compose['webui_access'] |= privilege_has_webui_access(privilege)
return compose
@private
async def full_privilege(self):
return {
'roles': {'FULL_ADMIN'},
'allowlist': [{'method': '*', 'resource': '*'}],
'web_shell': True,
'webui_access': True,
}
previous_always_has_root_password_enabled_value = None
@private
async def always_has_root_password_enabled(self, users=None, groups=None):
if users is None:
users = await self.middleware.call('user.query', [['local', '=', True]])
if groups is None:
groups = await self.middleware.call('group.query', [['local', '=', True]])
root_user = filter_list(
users,
[['username', '=', 'root']],
{'get': True},
)
users = await self.local_administrators([root_user['id']], users, groups)
if not users:
value = True
else:
value = False
if self.previous_always_has_root_password_enabled_value:
usernames = [user['username'] for user in users]
self.middleware.send_event(
'user.web_ui_login_disabled', 'ADDED', id=None, fields={'usernames': usernames},
)
self.previous_always_has_root_password_enabled_value = value
return value
@private
async def local_administrators(self, exclude_user_ids=None, users=None, groups=None):
exclude_user_ids = exclude_user_ids or []
if users is None:
users = await self.middleware.call('user.query', [['local', '=', True]])
if groups is None:
groups = await self.middleware.call('group.query', [['local', '=', True]])
local_administrator_privilege = await self.middleware.call(
'datastore.query',
'account.privilege',
[['builtin_name', '=', BuiltinPrivileges.LOCAL_ADMINISTRATOR.value]],
{'get': True},
)
local_administrators = await self.middleware.call(
'group.get_password_enabled_users',
local_administrator_privilege['local_groups'],
exclude_user_ids,
groups,
)
if not local_administrators:
root_user = filter_list(
users,
[['username', '=', 'root']],
{'get': True},
)
if root_user['id'] not in exclude_user_ids:
if unixhash_is_valid(root_user['unixhash']):
# This can only be if `always_has_root_password_enabled` is `True`
local_administrators = [root_user]
return local_administrators
async def setup(middleware):
middleware.event_register(
'user.web_ui_login_disabled',
'Sent when root user login to the Web UI is disabled.'
)
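# Rough standalone illustration of the merge performed by compose_privilege()
# above: role sets union, allowlists concatenate, booleans OR together. Role
# expansion through the role manager is stubbed out, and the privilege dicts
# (including the role names) are fabricated for illustration.
if __name__ == '__main__':
    privileges = [
        {'roles': {'READONLY_ADMIN'}, 'allowlist': [], 'web_shell': False},
        {'roles': {'SHARING_ADMIN'}, 'allowlist': [{'method': 'CALL', 'resource': 'sharing.*'}], 'web_shell': True},
    ]
    compose = {'roles': set(), 'allowlist': [], 'web_shell': False}
    for privilege in privileges:
        compose['roles'] |= privilege['roles']
        compose['allowlist'].extend(privilege['allowlist'])
        compose['web_shell'] |= privilege['web_shell']
    print(compose)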
| 17,109 | Python | .py | 374 | 33.459893 | 118 | 0.571703 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,523 | sec.py | truenas_middleware/src/middlewared/middlewared/plugins/nfs_/sec.py | from middlewared.service import private, Service
from middlewared.plugins.nfs import NFSProtocol
class NFSService(Service):
class Config:
service = "nfs"
service_verb = "restart"
datastore_prefix = "nfs_srv_"
datastore_extend = 'nfs.nfs_extend'
@private
async def sec(self, config, has_nfs_principal):
if NFSProtocol.NFSv4 in config["protocols"]:
if config["v4_krb"]:
return ["krb5", "krb5i", "krb5p"]
elif has_nfs_principal:
return ["sys", "krb5", "krb5i", "krb5p"]
else:
return ["sys"]
return []
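# Quick standalone restatement of the flavor selection above: with NFSv4,
# v4_krb forces Kerberos-only; otherwise an NFS kerberos principal adds the
# krb5 flavors alongside sys, and plain setups export sys only.
if __name__ == '__main__':
    def sec_flavors(nfsv4, v4_krb, has_nfs_principal):
        if not nfsv4:
            return []
        if v4_krb:
            return ['krb5', 'krb5i', 'krb5p']
        if has_nfs_principal:
            return ['sys', 'krb5', 'krb5i', 'krb5p']
        return ['sys']
    print(sec_flavors(True, False, True))  # ['sys', 'krb5', 'krb5i', 'krb5p']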
| 647 | Python | .py | 18 | 26.666667 | 56 | 0.584936 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,524 | status.py | truenas_middleware/src/middlewared/middlewared/plugins/nfs_/status.py | from middlewared.plugins.nfs import NFSServicePathInfo
from middlewared.schema import accepts, Int, returns, Str, Dict
from middlewared.service import Service, private, filterable, filterable_returns
from middlewared.utils import filter_list
from middlewared.service_exception import CallError
from contextlib import suppress
import yaml
import os
class NFSService(Service):
@private
def get_rmtab(self):
"""
In the future we can enhance this based on socket status,
e.g. ss -H -o state established '( sport = :nfs )'
"""
entries = []
with suppress(FileNotFoundError):
with open(os.path.join(NFSServicePathInfo.STATEDIR.path(), "rmtab"), "r") as f:
for line in f:
ip, data = line.split(":", 1)
export, refcnt = data.rsplit(":", 1)
# for now we won't display the refcnt
entries.append({
"ip": ip,
"export": export,
})
return entries
# NFS_WRITE because this exposes hostnames and IP addresses
# READONLY is considered administrative-level permission
@filterable(roles=['READONLY_ADMIN', 'SHARING_NFS_WRITE'])
def get_nfs3_clients(self, filters, options):
"""
Read contents of rmtab. This information may not
be accurate due to stale entries. This is ultimately
a limitation of the NFSv3 protocol.
"""
rmtab = self.get_rmtab()
return filter_list(rmtab, filters, options)
@private
def get_nfs4_client_info(self, id_):
"""
See the following link:
NFS 4.1 spec: https://www.rfc-editor.org/rfc/rfc8881.html
"""
info = {}
with suppress(FileNotFoundError):
with open(f"/proc/fs/nfsd/clients/{id_}/info", "r") as f:
info = yaml.safe_load(f.read())
return info
@private
def get_nfs4_client_states(self, id_):
"""
Detailed information regarding current open files per NFS client
TODO: review formatting of this field
"""
states = []
with suppress(FileNotFoundError):
with open(f"/proc/fs/nfsd/clients/{id_}/states", "r") as f:
states = yaml.safe_load(f.read())
# states file may be empty, which changes it to None type
# return empty list in this case
return states or []
# NFS_WRITE because this exposes hostnames, IP addresses and other details
# READONLY is considered administrative-level permission
@filterable(roles=['READONLY_ADMIN', 'SHARING_NFS_WRITE'])
@filterable_returns(Dict(
'client',
Str('id'),
Dict('info', additional_attrs=True),
Dict('states', additional_attrs=True)
))
def get_nfs4_clients(self, filters, options):
"""
Read information about NFSv4 clients from /proc/fs/nfsd/clients
Sample output:
[{
"id": "4",
"info": {
"clientid": 6273260596088110000,
"address": "192.168.40.247:790",
"status": "confirmed",
"seconds from last renew": 45,
"name": "Linux NFSv4.2 debian12-hv",
"minor version": 2,
"Implementation domain": "kernel.org",
"Implementation name": "Linux 6.1.0-12-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.1.52-1 (2023-09-07) x86_64",
"Implementation time": [0, 0],
"callback state": "UP",
"callback address": "192.168.40.247:0"
},
"states": [
{
"94850248556250062041657638912": {
"type": "deleg",
"access": "r",
"superblock": "00:39:5",
"filename": "/debian12-hv"
}
},
{
"94850248556250062041741524992": {
"type": "open",
"access": "rw",
"deny": "--",
"superblock": "00:39:137",
"filename": "/.debian12-hv.swp",
"owner": "open id:\u0000\u0000\u00008\u0000\u0000\u0000\u0000\u0000\u0000\u0014þÀ²3"
}
}
]
}]
---- Description of the fields (all per NFS client) ----
'clientid': Hash generated for this client connection
'address': The client IP and port. e.g. 10.20.30.40:768
'status': The current client status:
'confirmed' An active connection.
The status will convert to 'courtesy' in 90 seconds if not 'confirmed' by the client.
'courtesy' A stalled connection from an inactive client.
The status will convert to 'expirable' in 24hr.
'expirable' Waiting to be cleaned up.
'seconds from last renew': The session timeout counter. See 'status' field.
Gets reset by confirmation update from the client
'name': Supplied by the client.
Linux clients might offer something like 'Linux NFS4.2 clnt_name'.
FreeBSD clients might supply a UUID like name
'minor version': The NFS4.x minor version. E.G. '2' for NFSv4.2
'Implementation domain': NFSv4.1 info - e.g. 'kernel.org' or 'freebsd.org'.
'Implementation name': NFSv4.1 info - e.g. equivalent to 'uname -a' on the client
'Implementation time': NFSv4.1 info - Timestamp (time nfstime4) of client version (maybe unused?)
'callback state': Current callback 'service' status for this client: 'UP', 'DOWN', 'FAULT' or 'UNKNOWN'
Linux clients usually indicate 'UP'
FreeBSD clients may indicate 'DOWN' but are still functional
"""
clients = []
with suppress(FileNotFoundError):
for client in os.listdir("/proc/fs/nfsd/clients/"):
entry = {
"id": client,
"info": self.get_nfs4_client_info(client),
"states": self.get_nfs4_client_states(client),
}
clients.append(entry)
return filter_list(clients, filters, options)
@accepts(roles=['SHARING_NFS_READ'])
@returns(Int('number_of_clients'))
def client_count(self):
"""
Return currently connected clients count.
Count may not be accurate if NFSv3 protocol is in use
due to potentially stale rmtab entries.
"""
cnt = 0
for op in (self.get_nfs3_clients, self.get_nfs4_clients):
cnt += op([], {"count": True})
return cnt
@private
def close_client_state(self, client_id):
"""
Force the server to immediately revoke all state held by
`client_id`. This only applies to NFSv4. `client_id` is the `id`
returned by `get_nfs4_clients`.
"""
with suppress(FileNotFoundError):
with open(f"/proc/fs/nfsd/clients/{client_id}/ctl", "w") as f:
f.write("expire\n")
@private
def get_threadpool_mode(self):
with open("/sys/module/sunrpc/parameters/pool_mode", "r") as f:
pool_mode = f.readline().strip()
return pool_mode.upper()
@private
@accepts(Str("pool_mode", enum=["AUTO", "GLOBAL", "PERCPU", "PERNODE"]))
def set_threadpool_mode(self, pool_mode):
"""
Control how the NFS server code allocates CPUs to
service thread pools. Depending on how many NICs
you have and where their interrupts are bound, this
option will affect which CPUs will do NFS serving.
Note: this parameter cannot be changed while the
NFS server is running.
auto the server chooses an appropriate mode
automatically using heuristics
global a single global pool contains all CPUs
percpu one pool for each CPU
pernode one pool for each NUMA node (equivalent
to global on non-NUMA machines)
"""
try:
with open("/sys/module/sunrpc/parameters/pool_mode", "w") as f:
f.write(pool_mode.lower())
except OSError as e:
raise CallError(
"NFS service must be stopped before threadpool mode changes",
errno=e.errno
)
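# Standalone sketch of parsing one clients/<id>/info blob with yaml.safe_load,
# the same call get_nfs4_client_info() makes; the sample text is abbreviated
# from the docstring above.
if __name__ == '__main__':
    import yaml
    sample_info = 'address: "192.168.40.247:790"\nstatus: confirmed\nminor version: 2\n'
    print(yaml.safe_load(sample_info))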
| 8,651 | Python | .py | 195 | 32.261538 | 121 | 0.563546 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,525 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/nfs_/utils.py | import re
RE_DOMAIN_WILDCARD = re.compile(r'\*|\?|\[|\]')
def get_domain(hostname):
"""
return the 'domain' part of the hostname
e.g. gruff.billy.goat will return 'billy.goat'
and gruffbillygoat will return None
"""
lst = hostname.split('.', 1)
if len(lst) > 1:
return lst[1]
return None
def leftmost_has_wildcards(hostname):
"""
A bool that returns True if the left most level contains wildcards
"""
return bool(RE_DOMAIN_WILDCARD.search(hostname.split('.')[0]))
def get_wildcard_domain(hostname):
"""
If the left most level of the supplied hostname contains valid wildcard characters
and there is more than one level in the name,
then return the domain part.
e.g. asdf-* will return None
asdf-*.example.com will return example.com
fred.example.com will return None
"""
if leftmost_has_wildcards(hostname):
return get_domain(hostname)
return None
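# Quick self-checks exercising the helpers above with sample hostnames.
if __name__ == '__main__':
    assert get_domain('gruff.billy.goat') == 'billy.goat'
    assert get_domain('gruffbillygoat') is None
    assert leftmost_has_wildcards('asdf-*.example.com')
    assert get_wildcard_domain('asdf-*.example.com') == 'example.com'
    assert get_wildcard_domain('asdf-*') is None
    assert get_wildcard_domain('fred.example.com') is None
    print('ok')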
| 973 | Python | .py | 29 | 28.275862 | 86 | 0.673426 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,526 | debug.py | truenas_middleware/src/middlewared/middlewared/plugins/nfs_/debug.py | from middlewared.schema import accepts, List, Str, Dict
from middlewared.service import Service, private
from contextlib import suppress
import os
import enum
class NFS_DBGFLAGS(enum.Enum):
# include/uapi/linux/nfs_fs.h
NONE = 0x0000
VFS = 0x0001
DIRCACHE = 0x0002
LOOKUPCACHE = 0x0004
PAGECACHE = 0x0008
PROC = 0x0010
XDR = 0x0020
FILE = 0x0040
ROOT = 0x0080
CALLBACK = 0x0100
CLIENT = 0x0200
MOUNT = 0x0400
FSCACHE = 0x0800
PNFS = 0x1000
PNFS_LD = 0x2000
STATE = 0x4000
XATTR_CACHE = 0x8000
ALL = 0xFFFF
class NFSD_DBGFLAGS(enum.Enum):
# include/uapi/linux/nfsd/debug.h
NONE = 0x0000
SOCK = 0x0001
FH = 0x0002
EXPORT = 0x0004
SVC = 0x0008
PROC = 0x0010
FILEOP = 0x0020
AUTH = 0x0040
REPCACHE = 0x0080
XDR = 0x0100
LOCKD = 0x0200
PNFS = 0x0400
ALL = 0x7FFF
# NOCHANGE 0xFFFF
class NLM_DBGFLAGS(enum.Enum):
# include/linux/lockd/debug.h
NONE = 0x0000
SVC = 0x0001
CLIENT = 0x0002
CLNTLOCK = 0x0004
SVCLOCK = 0x0008
MONITOR = 0x0010
CLNTSUBS = 0x0020
SVCSUBS = 0x0040
HOSTCACHE = 0x0080
XDR = 0x0100
ALL = 0x7fff
class RPC_DBGFLAGS(enum.Enum):
# include/uapi/linux/sunrpc/debug.h
NONE = 0x0000
XPRT = 0x0001
CALL = 0x0002
DEBUG = 0x0004
NFS = 0x0008
AUTH = 0x0010
BIND = 0x0020
SCHED = 0x0040
TRANS = 0x0080
SVCXPRT = 0x0100
SVCDSP = 0x0200
MISC = 0x0400
CACHE = 0x0800
ALL = 0x7fff
class NFSService(Service):
'''
NFSService class holds the functions to set and get the debug flags
for nfs_debug, nfsd_debug, nlm_debug and rpc_debug. All of these
are files in /proc/sys/sunrpc.
'''
dbgcls = {'NFS': NFS_DBGFLAGS, 'NFSD': NFSD_DBGFLAGS, 'NLM': NLM_DBGFLAGS, 'RPC': RPC_DBGFLAGS}
@private
def get_debug(self):
'''
Display current debug settings for NFS, NFSD, NLM and RPC
All settings are reported as uppercase.
See man (8) rpcdebug for more information.
'''
output = {}
with suppress(FileNotFoundError):
for svc in os.listdir("/proc/sys/sunrpc"):
flags = []
if not svc.endswith("debug"):
continue
svc_name = svc.upper().split('_')[0]
with open(f"/proc/sys/sunrpc/{svc}", "r") as f:
val = int(f.readline().strip(), 16)
for dbgflg in self.dbgcls[svc_name]:
if dbgflg.name == 'NONE':
continue
if not (val & dbgflg.value):
continue
if dbgflg.name == 'ALL' and dbgflg.value != val:
continue
flags.append(dbgflg.name)
if not flags:
flags = [dbgflg.NONE.name]
if dbgflg.ALL.name in flags:
flags = [dbgflg.ALL.name]
output[svc_name] = flags
return output
@private
@accepts(Dict(
'svcs',
List("NFS", items=[Str("nfs_dbg_opts", enum=[x.name for x in NFS_DBGFLAGS])]),
List("NFSD", items=[Str("nfsd_dbg_opts", enum=[x.name for x in NFSD_DBGFLAGS])]),
List("NLM", items=[Str("nlm_dbg_opts", enum=[x.name for x in NLM_DBGFLAGS])]),
List("RPC", items=[Str("rpc_dbg_opts", enum=[x.name for x in RPC_DBGFLAGS])])
))
def set_debug(self, services):
'''
Set debug flags for NFS, NFSD, NLM and RPC.
All flag names are uppercase.
See man (8) rpcdebug for more information.
'''
def debug_level_to_int(svc, opts):
rv = 0
if "NONE" in opts:
return rv
for x in opts:
rv = rv | self.dbgcls[svc][x].value
return rv
for svc, opts in services.items():
if opts == []:
continue
if "NONE" in opts and len(opts) > 1:
raise ValueError(f"Cannot specify another value with NONE: {svc}={opts}")
to_set = "0x%0.4X" % debug_level_to_int(svc, opts)
with open(f"/proc/sys/sunrpc/{svc.lower()}_debug", "w") as f:
f.write(to_set)
return True
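# Standalone illustration of how set_debug() folds flag names into the hex
# string written under /proc/sys/sunrpc: the enum values are simply OR'd.
if __name__ == '__main__':
    value = 0
    for name in ('SVC', 'FH', 'EXPORT'):
        value |= NFSD_DBGFLAGS[name].value
    print('0x%0.4X' % value)  # 0x000E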
| 4,344 | Python | .py | 137 | 23.131387 | 99 | 0.566196 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,527 | fs_attachment_delegate.py | truenas_middleware/src/middlewared/middlewared/plugins/nfs_/fs_attachment_delegate.py | from middlewared.common.attachment import LockableFSAttachmentDelegate
from middlewared.plugins.nfs import SharingNFSService
class NFSFSAttachmentDelegate(LockableFSAttachmentDelegate):
name = 'nfs'
title = 'NFS Share'
service = 'nfs'
service_class = SharingNFSService
resource_name = 'path'
async def restart_reload_services(self, attachments):
await self._service_change('nfs', 'reload')
async def setup(middleware):
await middleware.call('pool.dataset.register_attachment_delegate', NFSFSAttachmentDelegate(middleware))
| 565 | Python | .py | 12 | 42.583333 | 107 | 0.787934 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,528 | port_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/nfs_/port_attachments.py | from middlewared.common.ports import ServicePortDelegate
class NFSServicePortDelegate(ServicePortDelegate):
bind_address_field = 'bindip'
name = 'nfs'
namespace = 'nfs'
port_fields = ['mountd_port', 'rpcstatd_port', 'rpclockd_port']
title = 'NFS Service'
def bind_address(self, config):
if config[self.bind_address_field] and '0.0.0.0' not in config[self.bind_address_field]:
return config[self.bind_address_field]
else:
return ['0.0.0.0']
async def get_ports_internal(self):
await self.basic_checks()
config = await self.config()
ports = [('0.0.0.0', 2049)]
bind_addresses = self.bind_address(config)
for k in filter(lambda k: config.get(k), self.port_fields):
for bindip in bind_addresses:
ports.append((bindip, config[k]))
return ports
async def setup(middleware):
await middleware.call('port.register_attachment_delegate', NFSServicePortDelegate(middleware))
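# Standalone sketch of the port expansion above: NFS itself always occupies
# 0.0.0.0:2049, then every configured helper port is paired with each bind
# address. The config values below are placeholders.
if __name__ == '__main__':
    config = {'bindip': ['10.0.0.5'], 'mountd_port': 618, 'rpcstatd_port': None, 'rpclockd_port': 4045}
    ports = [('0.0.0.0', 2049)]
    for key in ('mountd_port', 'rpcstatd_port', 'rpclockd_port'):
        if config[key]:
            ports.extend((ip, config[key]) for ip in config['bindip'])
    print(ports)  # [('0.0.0.0', 2049), ('10.0.0.5', 618), ('10.0.0.5', 4045)]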
| 1,019 | Python | .py | 23 | 36.521739 | 98 | 0.662955 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,529 | status.py | truenas_middleware/src/middlewared/middlewared/plugins/ftp_/status.py | from middlewared.service import Service, private
from middlewared.schema import Int, accepts, returns
from middlewared.utils.network_.procfs import read_proc_net
class FTPService(Service):
@private
@accepts(roles=['SHARING_FTP_READ'])
@returns(Int('number_of_connections'))
def connection_count(self):
''' Return the number of active connections '''
# FTP listening port is 21
ftp = 21
try:
proc_data = read_proc_net()
ftp_proclist = list(filter(lambda x: x.local_port == ftp and x.remote_port != 0, proc_data))
except Exception:
num_conn = 0
else:
num_conn = len(ftp_proclist)
# NOTE: This count includes multiple 'connections' from a single client.
# If we want to report the number of distinct 'clients' instead, we could
# de-duplicate the filtered list by remote address before counting, rather
# than counting one entry per connection.
return num_conn
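# Minimal standalone analogue of the filter above, using fabricated
# (local_port, remote_port) pairs in place of read_proc_net() records: only
# established sessions on port 21 are counted.
if __name__ == '__main__':
    conns = [(21, 50412), (21, 0), (22, 41022), (21, 50990)]
    print(len([c for c in conns if c[0] == 21 and c[1] != 0]))  # 2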
| 998 | Python | .py | 22 | 37.090909 | 104 | 0.624486 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,530 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/ftp_/attachments.py | from middlewared.common.ports import ServicePortDelegate
class FTPServicePortDelegate(ServicePortDelegate):
name = 'FTP'
namespace = 'ftp'
port_fields = ['port']
title = 'FTP Service'
async def setup(middleware):
await middleware.call('port.register_attachment_delegate', FTPServicePortDelegate(middleware))
| 333 | Python | .py | 8 | 37.5 | 98 | 0.778125 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,531 | cert_attachment.py | truenas_middleware/src/middlewared/middlewared/plugins/ftp_/cert_attachment.py | from middlewared.common.attachment.certificate import CertificateServiceAttachmentDelegate
class FTPCertificateAttachment(CertificateServiceAttachmentDelegate):
CERT_FIELD = 'ssltls_certificate'
HUMAN_NAME = 'FTP Service'
SERVICE = 'ftp'
async def setup(middleware):
await middleware.call('certificate.register_attachment_delegate', FTPCertificateAttachment(middleware))
| 392 | Python | .py | 7 | 52 | 107 | 0.834211 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,532 | lan.py | truenas_middleware/src/middlewared/middlewared/plugins/ipmi_/lan.py | from subprocess import run, DEVNULL
from functools import cache
from middlewared.schema import accepts, Bool, Dict, Int, IPAddr, List, Password, Ref, returns, Str
from middlewared.service import private, CallError, filterable_returns, CRUDService, ValidationError, ValidationErrors
from middlewared.utils import filter_list
from middlewared.validators import Netmask, PasswordComplexity, Range
@cache
def lan_channels():
channels = []
out = run(['bmc-info', '--get-channel-info'], capture_output=True)
lines = out.stdout.decode().split('\n')
for idx, line in filter(lambda x: x[1], enumerate(lines)):
# lines that we're interested in look like
# Channel : 1
# Medium Type : 802.3 LAN
if (key_value := line.split(':')) and len(key_value) == 2 and '802.3 LAN' in key_value[1]:
try:
channels.append(int(lines[idx - 1].split(':')[-1].strip()))
except (IndexError, ValueError):
continue
return channels
def apply_config(channel, data):
base_cmd = ['ipmitool', 'lan', 'set', str(channel)]
rc = 0
options = {'stdout': DEVNULL, 'stderr': DEVNULL}
if data.get('dhcp'):
rc |= run(base_cmd + ['dhcp'], **options).returncode
else:
rc |= run(base_cmd + ['ipsrc', 'static'], **options).returncode
rc |= run(base_cmd + ['ipaddr', data['ipaddress']], **options).returncode
rc |= run(base_cmd + ['netmask', data['netmask']], **options).returncode
rc |= run(base_cmd + ['defgw', 'ipaddr', data['gateway']], **options).returncode
rc |= run(base_cmd + ['vlan', 'id', f'{data.get("vlan", "off")}'], **options).returncode
rc |= run(base_cmd + ['access', 'on'], **options).returncode
rc |= run(base_cmd + ['auth', 'USER', 'MD2,MD5'], **options).returncode
rc |= run(base_cmd + ['auth', 'OPERATOR', 'MD2,MD5'], **options).returncode
rc |= run(base_cmd + ['auth', 'ADMIN', 'MD2,MD5'], **options).returncode
rc |= run(base_cmd + ['auth', 'CALLBACK', 'MD2,MD5'], **options).returncode
# Apparently tickling these ARP options can "fail" on certain hardware
# which isn't fatal so we ignore returncode in this instance. See #15578.
run(base_cmd + ['arp', 'respond', 'on'], **options)
run(base_cmd + ['arp', 'generate', 'on'], **options)
if passwd := data.get('password'):
cp = run(['ipmitool', 'user', 'set', 'password', '2', passwd], capture_output=True)
if cp.returncode != 0:
err = '\n'.join(cp.stderr.decode().split('\n'))
raise CallError(f'Failed setting password: {err!r}')
cp = run(['ipmitool', 'user', 'enable', '2'], capture_output=True)
if cp.returncode != 0:
err = '\n'.join(cp.stderr.decode().split('\n'))
raise CallError(f'Failed enabling user: {err!r}')
return rc
class IPMILanService(CRUDService):
ENTRY = Dict(
'ipmi_channel',
Int('channel'),
Int('id'),
Str('ip_address_source'),
Str('ip_address'),
Str('mac_address'),
Str('subnet_mask'),
Str('default_gateway_ip_address'),
Str('default_gateway_mac_address'),
Str('backup_gateway_ip_address'),
Str('backup_gateway_mac_address'),
Int('vlan_id'),
Bool('vlan_id_enable'),
Int('vlan_priority'),
)
class Config:
namespace = 'ipmi.lan'
cli_namespace = 'network.ipmi'
@accepts(roles=['IPMI_READ'])
@returns(List('lan_channels', items=[Int('lan_channel')]))
def channels(self):
"""Return a list of available IPMI channels."""
channels = []
if self.middleware.call_sync('ipmi.is_loaded') and (channels := lan_channels()):
if self.middleware.call_sync('truenas.get_chassis_hardware').startswith('TRUENAS-F'):
# We cannot expose IPMI lan channel 8 on the f-series platform
channels = [i for i in channels if i != 8]
return channels
@private
def query_impl(self):
result = []
for channel in self.channels():
section = 'Lan_Conf' if channel == 1 else f'Lan_Conf_Channel_{channel}'
cp = run(['ipmi-config', '--checkout', f'--section={section}', '--verbose'], capture_output=True)
if cp.returncode != 0 and (stderr := cp.stderr.decode()):
# on the F-series platform, if you add the --verbose flag, then the return code is
# set to 1 but the correct information is given to stdout. Just check to see if there
# is stderr
# TODO: fix this in dragonfish (dependent on webUI changes to be made see NAS-123225)
# raise CallError(f'Failed to get details from channel {channel}: {stderr}')
self.logger.error('Failed to get details from channel %r with error %r', channel, stderr)
stdout = cp.stdout.decode().split('\n')
if not stdout:
continue
data = {'channel': channel, 'id': channel}
for i in filter(lambda x: x.startswith('\t') and not x.startswith('\t#'), stdout):
try:
name, value = i.strip().split()
name, value = name.lower(), value.lower()
if value in ('no', 'yes'):
value = True if value == 'yes' else False
elif value.isdigit():
value = int(value)
data[name] = value
except ValueError:
break
result.append(data)
return result
@accepts(
Dict(
'ipmi_lan_query',
Ref('query-filters'),
Ref('query-options'),
Dict('ipmi-options', Bool('query-remote', default=False)),
register=True,
),
roles=['IPMI_READ'],
)
def query(self, data):
"""Query available IPMI Channels with `query-filters` and `query-options`."""
result = []
if not data['ipmi-options']['query-remote']:
result = self.query_impl()
elif self.middleware.call_sync('failover.licensed'):
try:
result = self.middleware.call_sync(
'failover.call_remote', 'ipmi.lan.query_impl'
)
except Exception:
# could be ENOMETHOD on upgrade or could be that
# remote node isn't connected/functioning etc OR
# could be that we're not on an HA system. In
# either of the scenarios, we just need to return
# an empty list
result = []
return filter_list(result, data['query-filters'], data['query-options'])
@accepts(
Int('channel'),
Dict(
'ipmi_update',
IPAddr('ipaddress', v6=False),
Str('netmask', validators=[Netmask(ipv6=False, prefix_length=False)]),
IPAddr('gateway', v6=False),
Password('password', validators=[
PasswordComplexity(["ASCII_UPPER", "ASCII_LOWER", "DIGIT", "SPECIAL"], 3),
Range(8, 16)
]),
Bool('dhcp'),
Int('vlan', validators=[Range(0, 4094)], null=True),
Bool('apply_remote', default=False),
register=True
),
roles=['IPMI_WRITE'],
audit='Update IPMI configuration'
)
def do_update(self, id_, data):
"""
Update IPMI configuration on channel number `id`.
`ipaddress` is an IPv4 address to be assigned to channel number `id`.
`netmask` is the subnet mask associated with `ipaddress`.
`gateway` is an IPv4 address used by `ipaddress` to reach outside the local subnet.
`password` is a password to be assigned to channel number `id`
`dhcp` is a boolean. If False, `ipaddress`, `netmask` and `gateway` must be set.
`vlan` is an integer representing the vlan tag number.
`apply_remote` is a boolean. If True and this is an HA licensed system, will apply
the configuration to the remote controller.
"""
verrors = ValidationErrors()
schema = 'ipmi.lan.update'
if not self.middleware.call_sync('ipmi.is_loaded'):
verrors.add(schema, '/dev/ipmi0 could not be found')
elif id_ not in self.channels():
verrors.add(schema, f'IPMI channel number {id_!r} not found')
elif not data.get('dhcp'):
for k in ['ipaddress', 'netmask', 'gateway']:
if not data.get(k):
verrors.add(schema, f'{k} field is required when dhcp is false.')
verrors.check()
# It's _very_ important to pop this key so that
# we don't have a situation where we send the same
# data across to the other side which turns around
# and sends it back to us causing a loop
apply_remote = data.pop('apply_remote')
if not apply_remote:
return apply_config(id_, data)
elif self.middleware.call_sync('failover.licensed'):
try:
return self.middleware.call_sync('failover.call_remote', 'ipmi.lan.update', [id_, data])
except Exception as e:
raise ValidationError(schema, f'Failed to apply IPMI config on remote controller: {e}')
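# Standalone sketch of the static-IP command sequence apply_config() issues,
# printed rather than executed; the channel and addresses are placeholders.
if __name__ == '__main__':
    channel = 1
    data = {'ipaddress': '10.0.0.50', 'netmask': '255.255.255.0', 'gateway': '10.0.0.1'}
    base_cmd = ['ipmitool', 'lan', 'set', str(channel)]
    for args in (['ipsrc', 'static'], ['ipaddr', data['ipaddress']],
                 ['netmask', data['netmask']], ['defgw', 'ipaddr', data['gateway']]):
        print(' '.join(base_cmd + args))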
| 9,391 | Python | .py | 194 | 37.93299 | 118 | 0.580452 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,533 | chassis.py | truenas_middleware/src/middlewared/middlewared/plugins/ipmi_/chassis.py | from subprocess import run, DEVNULL
from middlewared.service import Service
from middlewared.schema import Str, Dict, accepts, returns
class IpmiChassisService(Service):
class Config:
namespace = 'ipmi.chassis'
cli_namespace = 'service.ipmi.chassis'
@accepts(roles=['IPMI_READ'])
@returns(Dict('chassis_info', additional_attrs=True))
def info(self):
"""Return looks like:
{
"system_power": "on",
"power_overload": "false",
"interlock": "inactive",
"power_fault": "false",
"power_control_fault": "false",
"power_restore_policy": "Always off",
"last_power_event": "unknown",
"chassis_intrusion": "inactive",
"front_panel_lockout": "inactive",
"drive_fault": "false",
"cooling/fan_fault": "false",
"chassis_identify_state": "off"
}
"""
rv = {}
if not self.middleware.call_sync('ipmi.is_loaded'):
return rv
out = run(['ipmi-chassis', '--get-chassis-status'], capture_output=True)
for line in filter(lambda x: x, out.stdout.decode().split('\n')):
ele, status = line.split(':', 1)
rv[ele.strip().replace(' ', '_').lower()] = status.strip()
return rv
@accepts(Str('verb', default='ON', enum=['ON', 'OFF']), roles=['IPMI_WRITE'])
@returns()
def identify(self, verb):
"""
Toggle the chassis identify light.
`verb`: str if 'ON' turn identify light on. if 'OFF' turn identify light off.
"""
verb = 'force' if verb == 'ON' else '0'
run(['ipmi-chassis', f'--chassis-identify={verb}'], stdout=DEVNULL, stderr=DEVNULL)
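# Standalone sketch of the key normalisation info() applies to each line of
# ipmi-chassis output; the sample line is fabricated.
if __name__ == '__main__':
    line = 'System Power                        : on'
    ele, status = line.split(':', 1)
    print({ele.strip().replace(' ', '_').lower(): status.strip()})  # {'system_power': 'on'}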
| 1,757 | Python | .py | 43 | 31.651163 | 91 | 0.570088 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,534 | mc.py | truenas_middleware/src/middlewared/middlewared/plugins/ipmi_/mc.py | from subprocess import run
from middlewared.service import Service
from middlewared.schema import accepts, returns, Dict
class IpmiMcService(Service):
class Config:
namespace = 'ipmi.mc'
cli_namespace = 'service.ipmi.mc'
@accepts(roles=['IPMI_READ'])
@returns(Dict('mc_info', additional_attrs=True))
def info(self):
"""Return looks like:
{
'auxiliary_firmware_revision_information': '00000006h',
'bridge': 'unsupported',
'chassis_device': 'supported',
'device_available': 'yes (normal operation)',
'device_id': '32',
'device_revision': '1',
'device_sdrs': 'unsupported',
'firmware_revision': '6.71',
'fru_inventory_device': 'supported',
'ipmb_event_generator': 'supported',
'ipmb_event_receiver': 'supported',
'ipmi_version': '2.0',
'manufacturer_id': 'Super Micro Computer Inc. (10876)',
'product_id': '2327',
'sdr_repository_device': 'supported',
'sel_device': 'supported',
'sensor_device': 'supported'
}
"""
rv = {}
if not self.middleware.call_sync('ipmi.is_loaded'):
return rv
out = run(['bmc-info', '--get-device-id'], capture_output=True)
for line in filter(lambda x: x, out.stdout.decode().split('\n')):
ele, status = line.split(':', 1)
rv[ele.strip().replace(' ', '_').lower()] = status.strip()
return rv
| 1,640 | Python | .py | 39 | 30.102564 | 73 | 0.53325 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,535 | sensors.py | truenas_middleware/src/middlewared/middlewared/plugins/ipmi_/sensors.py | from random import uniform
from subprocess import run
from time import sleep
from middlewared.service import Service, filterable, filterable_returns, private
from middlewared.utils import filter_list
from middlewared.schema import List, Dict
def get_sensors_data():
cmd = [
'ipmi-sensors',
'--comma-separated',
'--no-header-output',
'--non-abbreviated-units',
'--output-sensor-state',
'--output-sensor-thresholds',
]
rv = []
cp = run(cmd, capture_output=True)
if cp.returncode == 0 and cp.stdout:
rv = cp.stdout.decode().split('\n')
return rv
class IpmiSensorsService(Service):
class Config:
namespace = 'ipmi.sensors'
cli_namespace = 'service.ipmi.sensors'
@private
def query_impl(self):
rv, reread = [], None
if not self.middleware.call_sync('ipmi.is_loaded'):
return rv, reread
mseries = self.middleware.call_sync('failover.hardware') == 'ECHOWARP'
for line in filter(lambda x: x, get_sensors_data()):
if (values := line.split(',')) and len(values) == 13:
sensor = {
'id': values[0],
'name': values[1],
'type': values[2],
'state': values[3],
'reading': values[4],
'units': values[5],
'lower-non-recoverable': values[6],
'lower-critical': values[7],
'lower-non-critical': values[8],
'upper-non-critical': values[9],
'upper-critical': values[10],
'upper-non-recoverable': values[11],
'event': [i.replace("'", '').strip().lower() for i in values[12].split("' '")]
}
if sensor['type'] == 'Power Supply' and mseries and 'no presence detected' in sensor['event']:
                    # PMBus (which reports the PSUs' status) is not a shared bus, so it cannot
                    # be probed by both controllers at once. Because this method is called from
                    # an alert that, by default, runs on both HA controllers, the controllers
                    # can probe the PSUs simultaneously and report a false positive
                    # "no presence detected".
reread = f'"{sensor["name"]}" reporting "no presence detected"'
rv.append(sensor)
return rv, reread
@filterable(roles=['IPMI_READ'])
@filterable_returns(List('sensors', items=[Dict('sensor', additional_attrs=True)]))
def query(self, filters, options):
sensors, reread = self.query_impl()
if reread is not None:
max_retries = 3
while max_retries != 0:
self.logger.info('%s re-reading', reread)
sleep(round(uniform(0.4, 1.2), 2))
sensors, reread = self.query_impl()
if reread is None:
# re-read the sensors list and PSU status came back
# healthy so exit early
break
else:
max_retries -= 1
return filter_list(sensors, filters, options)
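# Illustrative sketch (not part of the original module): the bounded retry-with-jitter
# pattern query() applies, reduced to its essentials. `probe` is a hypothetical callable
# returning (result, error_or_None); the random sleep desynchronizes the two HA
# controllers so they stop probing the PSUs at the same instant.
def _example_retry_with_jitter(probe, max_retries=3):
    result, error = probe()
    while error is not None and max_retries:
        sleep(round(uniform(0.4, 1.2), 2))
        result, error = probe()
        max_retries -= 1
    return result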
| 3,311 | Python | .py | 73 | 32.726027 | 110 | 0.549457 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,536 | sel.py | truenas_middleware/src/middlewared/middlewared/plugins/ipmi_/sel.py | from subprocess import run
from middlewared.service import job, Service, filterable, filterable_returns
from middlewared.utils import filter_list
from middlewared.schema import accepts, returns, Dict
from middlewared.service_exception import CallError
SEL_LOCK = 'sel_lock'
def get_sel_data(data):
cmd = ['ipmi-sel']
if data == 'elist':
cmd.extend(['-v', '--no-header-output', '--comma-separated-output', '--non-abbreviated-units'])
elif data == 'info':
cmd.extend(['--info'])
else:
raise ValueError(f'Invalid value: {data!r}')
rv = []
cp = run(cmd, capture_output=True)
if cp.returncode == 0 and cp.stdout:
rv = cp.stdout.decode().split('\n')
return rv
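# Illustrative sketch (not part of the original module): splitting one comma-separated
# `ipmi-sel` elist row into the fields elist() emits. The sample row is hypothetical.
def _example_parse_elist_row(row: str = '1,Jan-01-2024,00:00:00,PSU1,Power Supply,Assertion,Failure detected'):
    values = row.strip().split(',')
    assert len(values) == 7
    keys = ['id', 'date', 'time', 'name', 'type', 'event_direction', 'event']
    return {k: v.strip() for k, v in zip(keys, values)}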
class IpmiSelService(Service):
class Config:
namespace = 'ipmi.sel'
cli_namespace = 'service.ipmi.sel'
@filterable(roles=['IPMI_READ'])
@filterable_returns(Dict('ipmi_elist', additional_attrs=True))
@job(lock=SEL_LOCK, lock_queue_size=1, transient=True)
def elist(self, job, filters, options):
"""Query IPMI System Event Log (SEL) extended list"""
rv = []
if not self.middleware.call_sync('ipmi.is_loaded'):
return rv
job.set_progress(78, 'Enumerating extended event log info')
for line in get_sel_data('elist'):
if (values := line.strip().split(',')) and len(values) == 7:
rv.append({
'id': values[0].strip(),
'date': values[1].strip(),
'time': values[2].strip(),
'name': values[3].strip(),
'type': values[4].strip(),
'event_direction': values[5].strip(),
'event': values[6].strip(),
})
job.set_progress(100, 'Parsing extended event log complete')
return filter_list(rv, filters, options)
@accepts(roles=['IPMI_READ'])
@returns(Dict('ipmi_sel_info', additional_attrs=True))
@job(lock=SEL_LOCK, lock_queue_size=1, transient=True)
def info(self, job):
"""Query General information about the IPMI System Event Log"""
rv = {}
if not self.middleware.call_sync('ipmi.is_loaded'):
return rv
job.set_progress(78, 'Enumerating general extended event log info')
for line in get_sel_data('info'):
if (values := line.strip().split(':')) and len(values) == 2:
entry, value = values
rv[entry.strip().replace(' ', '_').lower()] = value.strip()
job.set_progress(100, 'Parsing general extended event log complete')
return rv
@accepts(roles=['IPMI_WRITE'])
@returns()
@job(lock=SEL_LOCK, lock_queue_size=1)
def clear(self, job):
if self.middleware.call_sync('ipmi.is_loaded'):
cp = run(['ipmi-sel', '--clear'], capture_output=True)
if cp.returncode:
raise CallError(cp.stderr.decode().strip() or f'Unexpected failure with returncode: {cp.returncode!r}')
| 3,051 | Python | .py | 68 | 35.632353 | 119 | 0.597236 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,537 | rest.py | truenas_middleware/src/middlewared/middlewared/plugins/test/rest.py | import json
import time
from middlewared.schema import accepts, Any
from middlewared.service import job, Service
class RestTestService(Service):
class Config:
private = True
@accepts(Any("arg"))
@job(pipes=["input"])
def test_input_pipe(self, job, arg):
return json.dumps(arg) + job.pipes.input.r.read().decode("utf-8")
@accepts(Any("arg"))
@job(pipes=["input"], check_pipes=False)
def test_input_unchecked_pipe(self, job, arg):
if job.pipes.input:
input_ = job.pipes.input.r.read().decode("utf-8")
else:
input_ = "NONE"
return json.dumps(arg) + input_
@accepts(Any("arg"))
@job(pipes=["output"])
def test_download_pipe(self, job, arg):
job.pipes.output.w.write(json.dumps(arg).encode("utf-8"))
job.pipes.output.w.close()
@accepts(Any("arg"))
@job(pipes=["output"], check_pipes=False)
def test_download_unchecked_pipe(self, job, arg):
if job.pipes.output:
job.pipes.output.w.write(json.dumps(arg).encode("utf-8"))
job.pipes.output.w.close()
else:
return {"wrapped": arg}
@accepts(Any("arg"))
@job(pipes=["output"])
def test_download_slow_pipe(self, job, arg):
time.sleep(2)
job.pipes.output.w.write(json.dumps(arg).encode("utf-8"))
job.pipes.output.w.close()
@accepts(Any("arg"))
@job(lock="test_download_slow_pipe_with_lock", lock_queue_size=0, pipes=["output"])
def test_download_slow_pipe_with_lock(self, job, arg):
time.sleep(5)
job.pipes.output.w.write(json.dumps(arg).encode("utf-8"))
job.pipes.output.w.close()
| 1,684 | Python | .py | 44 | 31.227273 | 87 | 0.623313 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,538 | mock.py | truenas_middleware/src/middlewared/middlewared/plugins/test/mock.py | from middlewared.service import CallError, Service
class TestService(Service):
class Config:
private = True
async def set_mock(self, name, args, description):
if isinstance(description, str):
exec(description)
try:
method = locals()["mock"]
except KeyError:
raise CallError("Your mock declaration must include `def mock` or `async def mock`")
elif isinstance(description, dict):
keys = set(description.keys())
if keys == {"exception"}:
def method(*args, **kwargs):
raise Exception()
elif keys == {"return_value"}:
def method(*args, **kwargs):
return description["return_value"]
else:
raise CallError("Invalid mock declaration")
else:
raise CallError("Invalid mock declaration")
self.middleware.set_mock(name, args, method)
async def remove_mock(self, name, args):
self.middleware.remove_mock(name, args)
# Dummy methods to mock for internal infrastructure testing (i.e. jobs manager)
async def test1(self):
pass
async def test2(self):
pass
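# Illustrative sketch (not part of the original module): the three declaration shapes
# set_mock() accepts. The values are hypothetical.
EXAMPLE_MOCK_DECLARATIONS = [
    "def mock(self, *args):\n    return 42",  # source string defining `mock` (sync or async)
    {"return_value": 42},                     # dict with exactly the "return_value" key
    {"exception": True},                      # dict with exactly the "exception" key; the call raises
]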
| 1,252 | Python | .py | 31 | 29.322581 | 100 | 0.587799 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,539 | log.py | truenas_middleware/src/middlewared/middlewared/plugins/test/log.py | from middlewared.service import Service
class TestService(Service):
class Config:
private = True
def notify_test_start(self, name):
self.middleware.logger.debug("Starting integration test %s", name)
def notify_test_end(self, name):
self.middleware.logger.debug("Ending integration test %s", name)
| 337 | Python | .py | 8 | 36.125 | 74 | 0.72 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,540 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_sync_/crud.py | from middlewared.schema import accepts, Dict, Int, Str
from middlewared.service import item_method, Service
class CloudSyncService(Service):
@item_method
@accepts(
Int("id"),
Dict(
"cloud_sync_restore",
Str("description"),
Str("transfer_mode", enum=["SYNC", "COPY"], required=True),
Str("path", required=True),
),
roles=["CLOUD_SYNC_WRITE"],
)
async def restore(self, id_, data):
"""
Create the opposite of cloud sync task `id` (PULL if it was PUSH and vice versa).
"""
cloud_sync = await self.middleware.call(
"cloudsync.query", [["id", "=", id_]], {"get": True, "extra": {"retrieve_locked_info": False}}
)
credentials = cloud_sync["credentials"]
if cloud_sync["direction"] == "PUSH":
data["direction"] = "PULL"
else:
data["direction"] = "PUSH"
data["credentials"] = credentials["id"]
for k in ["encryption", "filename_encryption", "encryption_password", "encryption_salt", "schedule",
"transfers", "attributes"]:
data[k] = cloud_sync[k]
data["enabled"] = False # Do not run it automatically
return await self.middleware.call("cloudsync.create", data)
| 1,320 | Python | .py | 32 | 31.9375 | 108 | 0.575781 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,541 | remotes.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud/remotes.py | import os
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.utils.plugins import load_modules, load_classes
from middlewared.utils.python import get_middlewared_dir
REMOTES = {}
remote_classes = []
for module in load_modules(os.path.join(get_middlewared_dir(), "rclone", "remote")):
for cls in load_classes(module, BaseRcloneRemote, []):
remote_classes.append(cls)
async def setup(middleware):
for cls in remote_classes:
remote = cls(middleware)
REMOTES[remote.name] = remote
| 534 | Python | .py | 13 | 37.153846 | 84 | 0.75534 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,542 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud/crud.py | import os
import shlex
import textwrap
from middlewared.plugins.cloud.remotes import REMOTES
from middlewared.plugins.zfs_.utils import zvol_path_to_name
from middlewared.schema import Bool, Str
from middlewared.service import CallError, private
from middlewared.utils.privilege import credential_has_full_admin
from middlewared.validators import validate_schema
class CloudTaskServiceMixin:
allow_zvol = False
@private
async def _get_credentials(self, credentials_id):
try:
return await self.middleware.call("datastore.query", "system.cloudcredentials",
[("id", "=", credentials_id)], {"get": True})
except IndexError:
return None
@private
def _common_task_schema(self, provider):
schema = []
if provider.fast_list:
schema.append(Bool("fast_list", default=False, title="Use --fast-list", description=textwrap.dedent("""\
Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
""").rstrip()))
return schema
@private
async def _basic_validate(self, verrors, name, data):
try:
shlex.split(data["args"])
except ValueError as e:
verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")
credentials = await self._get_credentials(data["credentials"])
if not credentials:
verrors.add(f"{name}.credentials", "Invalid credentials")
if verrors:
return
provider = REMOTES[credentials["provider"]]
schema = []
if provider.buckets:
schema.append(Str("bucket", required=True, empty=False))
schema.append(Str("folder", required=True))
schema.extend(provider.task_schema)
schema.extend(self._common_task_schema(provider))
attributes_verrors = validate_schema(schema, data["attributes"])
if not attributes_verrors:
await provider.validate_task_basic(data, credentials, verrors)
verrors.add_child(f"{name}.attributes", attributes_verrors)
@private
async def _validate(self, app, verrors, name, data):
await self._basic_validate(verrors, name, data)
if not verrors:
credentials = await self._get_credentials(data["credentials"])
provider = REMOTES[credentials["provider"]]
await provider.validate_task_full(data, credentials, verrors)
for i, (limit1, limit2) in enumerate(zip(data["bwlimit"], data["bwlimit"][1:])):
if limit1["time"] >= limit2["time"]:
verrors.add(f"{name}.bwlimit.{i + 1}.time", f"Invalid time order: {limit1['time']}, {limit2['time']}")
if self.allow_zvol and (path := await self.get_path_field(data)).startswith("/dev/zvol/"):
zvol = zvol_path_to_name(path)
if not await self.middleware.call('pool.dataset.query', [['name', '=', zvol], ['type', '=', 'VOLUME']]):
verrors.add(f'{name}.{self.path_field}', 'Volume does not exist')
else:
try:
await self.middleware.call(f'{self._config.namespace}.validate_zvol', path)
except CallError as e:
verrors.add(f'{name}.{self.path_field}', e.errmsg)
else:
await self.validate_path_field(data, name, verrors)
if data["snapshot"]:
if await self.middleware.call("pool.dataset.query",
[["name", "^", os.path.relpath(data["path"], "/mnt") + "/"],
["type", "=", "FILESYSTEM"]]):
verrors.add(f"{name}.snapshot", "This option is only available for datasets that have no further "
"nesting")
if app and not credential_has_full_admin(app.authenticated_credentials):
for k in ["pre_script", "post_script"]:
if data[k]:
verrors.add(f"{name}.{k}", "The ability to edit cloud sync pre and post scripts is limited to "
"users who have full administrative credentials")
| 4,350 | Python | .py | 81 | 41.234568 | 118 | 0.59901 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,543 | path.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud/path.py | import os
import stat
from middlewared.service import CallError
def get_remote_path(provider, attributes):
remote_path = attributes["folder"].rstrip("/")
if not remote_path:
remote_path = "/"
if provider.buckets:
remote_path = f"{attributes['bucket']}/{remote_path.lstrip('/')}"
return remote_path
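# Illustrative sketch (not part of the original module): get_remote_path() behaviour for
# providers with and without bucket support. SimpleNamespace stands in for a real
# provider object; the attribute values are hypothetical.
def _example_remote_paths():
    from types import SimpleNamespace
    assert get_remote_path(SimpleNamespace(buckets=False), {"folder": "backups/"}) == "backups"
    assert get_remote_path(SimpleNamespace(buckets=True), {"folder": "/", "bucket": "b1"}) == "b1/"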
async def check_local_path(middleware, path, *, check_mountpoint=True, error_text_path=None):
error_text_path = error_text_path or path
try:
info = await middleware.run_in_thread(os.stat, path)
except FileNotFoundError:
raise CallError(f"Directory {error_text_path!r} does not exist")
else:
if not stat.S_ISDIR(info.st_mode):
raise CallError(f"{error_text_path!r} is not a directory")
if check_mountpoint:
if not await middleware.call("filesystem.is_dataset_path", path):
raise CallError(f"Directory {error_text_path!r} must reside within volume mount point")
| 973 | Python | .py | 22 | 38 | 99 | 0.691737 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,544 | model.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud/model.py | from sqlalchemy.ext.declarative import declared_attr
from middlewared.schema import Bool, Cron, Dict, Int, List, Str
import middlewared.sqlalchemy as sa
from middlewared.validators import Range, Time
class CloudTaskModelMixin:
id = sa.Column(sa.Integer(), primary_key=True)
description = sa.Column(sa.String(150))
path = sa.Column(sa.String(255))
@declared_attr
def credential_id(cls):
return sa.Column(sa.ForeignKey("system_cloudcredentials.id"), index=True)
attributes = sa.Column(sa.JSON())
minute = sa.Column(sa.String(100), default="00")
hour = sa.Column(sa.String(100), default="*")
daymonth = sa.Column(sa.String(100), default="*")
month = sa.Column(sa.String(100), default="*")
dayweek = sa.Column(sa.String(100), default="*")
pre_script = sa.Column(sa.Text())
post_script = sa.Column(sa.Text())
snapshot = sa.Column(sa.Boolean())
bwlimit = sa.Column(sa.JSON(list))
include = sa.Column(sa.JSON(list))
exclude = sa.Column(sa.JSON(list))
transfers = sa.Column(sa.Integer(), nullable=True)
args = sa.Column(sa.Text())
enabled = sa.Column(sa.Boolean(), default=True)
job = sa.Column(sa.JSON(None))
cloud_task_schema = [
Str("description", default=""),
Str("path", required=True),
Int("credentials", required=True),
Dict("attributes", additional_attrs=True, required=True),
Cron(
"schedule",
defaults={"minute": "00"},
required=True
),
Str("pre_script", default="", max_length=None),
Str("post_script", default="", max_length=None),
Bool("snapshot", default=False),
List("bwlimit", items=[Dict("cloud_sync_bwlimit",
Str("time", validators=[Time()]),
Int("bandwidth", validators=[Range(min_=1)], null=True))]),
List("include", items=[Str("path", empty=False)]),
List("exclude", items=[Str("path", empty=False)]),
Int("transfers", null=True, default=None, validators=[Range(min_=1)]),
Str("args", default="", max_length=None),
Bool("enabled", default=True),
]
| 2,108 | Python | .py | 49 | 36.979592 | 91 | 0.646199 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,545 | ssh_connections.py | truenas_middleware/src/middlewared/middlewared/plugins/keychain_/ssh_connections.py | from middlewared.api import api_method
from middlewared.api.current import KeychainCredentialSetupSSHConnectionArgs, KeychainCredentialSetupSSHConnectionResult
from middlewared.service import Service, ValidationErrors
class KeychainCredentialService(Service):
@api_method(KeychainCredentialSetupSSHConnectionArgs, KeychainCredentialSetupSSHConnectionResult,
roles=['KEYCHAIN_CREDENTIAL_WRITE'])
async def setup_ssh_connection(self, options):
"""
        Creates an SSH connection by performing the following steps:
        1) Generating an SSH key pair, if required
        2) Setting up SSH credentials based on `setup_type`
        If step (2) fails, any SSH key pair generated in step (1) is removed.
"""
verrors = ValidationErrors()
pkey_config = options['private_key']
schema_name = 'setup_ssh_connection'
if pkey_config['generate_key']:
if pkey_config.get('existing_key_id'):
verrors.add(
f'{schema_name}.private_key.existing_key_id', 'Should not be specified when "generate_key" is set'
)
if not pkey_config.get('name'):
verrors.add(f'{schema_name}.private_key.name', 'Must be set when SSH Key pair is to be generated')
elif await self.middleware.call('keychaincredential.query', [['name', '=', pkey_config['name']]]):
verrors.add(f'{schema_name}.private_key.name', 'Is already in use by another SSH Key pair')
else:
if not pkey_config.get('existing_key_id'):
verrors.add(
f'{schema_name}.private_key.existing_key_id',
'Must be specified when SSH Key pair is not to be generated'
)
elif not await self.middleware.call(
'keychaincredential.query', [['id', '=', pkey_config['existing_key_id']]]
):
verrors.add(f'{schema_name}.private_key.existing_key_id', 'SSH Key Pair not found')
mapping = {'SEMI-AUTOMATIC': 'semi_automatic_setup', 'MANUAL': 'manual_setup'}
for setup_type, opposite_type in filter(
lambda x: x[0] == options['setup_type'], [['SEMI-AUTOMATIC', 'MANUAL'], ['MANUAL', 'SEMI-AUTOMATIC']]
):
if not options[mapping[setup_type]]:
verrors.add(f'{schema_name}.{mapping[setup_type]}', f'Must be specified for {setup_type!r} setup')
if options[mapping[opposite_type]]:
verrors.add(
f'{schema_name}.{mapping[opposite_type]}', f'Must not be specified for {setup_type!r} setup'
)
if await self.middleware.call('keychaincredential.query', [['name', '=', options['connection_name']]]):
verrors.add(f'{schema_name}.connection_name', 'Is already in use by another Keychain Credential')
verrors.check()
        # Generate an SSH key pair now, if required
if pkey_config['generate_key']:
key_config = await self.middleware.call('keychaincredential.generate_ssh_key_pair')
ssh_key_pair = await self.middleware.call('keychaincredential.create', {
'name': pkey_config['name'],
'type': 'SSH_KEY_PAIR',
'attributes': key_config,
})
else:
ssh_key_pair = await self.middleware.call('keychaincredential.get_instance', pkey_config['existing_key_id'])
try:
if options['setup_type'] == 'SEMI-AUTOMATIC':
resp = await self.middleware.call(
'keychaincredential.remote_ssh_semiautomatic_setup', {
**options['semi_automatic_setup'],
'private_key': ssh_key_pair['id'],
'name': options['connection_name'],
}
)
else:
resp = await self.middleware.call(
'keychaincredential.create', {
'type': 'SSH_CREDENTIALS',
'name': options['connection_name'],
'attributes': {
**options['manual_setup'],
'private_key': ssh_key_pair['id'],
}
}
)
except Exception:
if pkey_config['generate_key']:
await self.middleware.call('keychaincredential.delete', ssh_key_pair['id'])
raise
else:
return resp
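# Illustrative sketch (not part of the original module): what the filter() loop in the
# validation above reduces to -- for the chosen setup_type, its own options key must be
# set and the opposite type's key must be empty.
def _example_required_and_forbidden(setup_type: str):
    mapping = {'SEMI-AUTOMATIC': 'semi_automatic_setup', 'MANUAL': 'manual_setup'}
    opposite = 'MANUAL' if setup_type == 'SEMI-AUTOMATIC' else 'SEMI-AUTOMATIC'
    return mapping[setup_type], mapping[opposite]  # (required key, forbidden key)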
| 4,609 | Python | .py | 85 | 39.882353 | 120 | 0.573992 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,546 | task_retention.py | truenas_middleware/src/middlewared/middlewared/plugins/snapshot_/task_retention.py | from collections import defaultdict
from middlewared.schema import Dict, Int, Patch, returns
from middlewared.service import accepts, item_method, Service
class PeriodicSnapshotTaskService(Service):
class Config:
namespace = "pool.snapshottask"
@item_method
@accepts(
Int("id"),
Patch(
"periodic_snapshot_create",
"periodic_snapshot_update_will_change_retention",
("attr", {"update": True}),
),
)
@returns(Dict("snapshots", additional_attrs=True))
async def update_will_change_retention_for(self, id_, data):
"""
        Returns snapshots, grouped by dataset, whose retention will change if periodic snapshot task `id` is
        updated with `data`.
"""
old = await self.middleware.call("pool.snapshottask.get_instance", id_)
new = dict(old, **data)
result = defaultdict(list)
if old != new:
old_snapshots = await self.middleware.call("zettarepl.periodic_snapshot_task_snapshots", old)
new_snapshots = await self.middleware.call("zettarepl.periodic_snapshot_task_snapshots", new)
if diff := old_snapshots - new_snapshots:
for snapshot in sorted(diff):
dataset, snapshot = snapshot.split("@", 1)
result[dataset].append(snapshot)
return result
@item_method
@accepts(
Int("id"),
)
@returns(Dict("snapshots", additional_attrs=True))
async def delete_will_change_retention_for(self, id_):
"""
        Returns snapshots, grouped by dataset, whose retention will change if periodic snapshot task `id` is deleted.
"""
task = await self.middleware.call("pool.snapshottask.get_instance", id_)
result = defaultdict(list)
snapshots = await self.middleware.call("zettarepl.periodic_snapshot_task_snapshots", task)
for snapshot in sorted(snapshots):
dataset, snapshot = snapshot.split("@", 1)
result[dataset].append(snapshot)
return result
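# Illustrative sketch (not part of the original module): grouping a "dataset@snapshot"
# set difference by dataset, as both methods above do. The sample names are hypothetical.
def _example_group_by_dataset(old_snapshots: set, new_snapshots: set) -> dict:
    result = defaultdict(list)
    for full_name in sorted(old_snapshots - new_snapshots):
        dataset, snapshot = full_name.split('@', 1)
        result[dataset].append(snapshot)
    return result  # {'tank/a': ['s1']} for old={'tank/a@s1', 'tank/a@s2'}, new={'tank/a@s2'}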
| 2,073 | Python | .py | 48 | 34.1875 | 110 | 0.640338 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,547 | removal_date.py | truenas_middleware/src/middlewared/middlewared/plugins/snapshot_/removal_date.py | import hashlib
from middlewared.service import Service, job, private
class PeriodicSnapshotTaskService(Service):
class Config:
namespace = "pool.snapshottask"
@private
async def removal_date_property(self):
host_id = await self.middleware.call("system.host_id")
return f"org.truenas:destroy_at_{host_id[:8]}"
@private
@job(
# Lock by pool name
lock=lambda args: "pool.snapshottask.fixate_removal_date:" + (list(args[0].keys()) + ['-'])[0].split('/')[0],
)
async def fixate_removal_date(self, job, datasets, task):
await self.middleware.call("zettarepl.fixate_removal_date", datasets, task)
| 672 | Python | .py | 16 | 35.875 | 117 | 0.678462 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,548 | state.py | truenas_middleware/src/middlewared/middlewared/plugins/zettarepl_/state.py | from collections import defaultdict
import re
from truenas_api_client import json as ejson
from middlewared.service import periodic, Service
from middlewared.utils.service.task_state import TaskStateMixin
RE_REPLICATION_TASK_ID = re.compile(r"replication_task_([0-9]+)$")
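# Illustrative sketch (not part of the original module): extracting the numeric task id
# that RE_REPLICATION_TASK_ID captures from a zettarepl state key.
def _example_task_id(task_key: str = 'replication_task_12'):
    m = RE_REPLICATION_TASK_ID.match(task_key)
    return int(m.group(1)) if m else None  # -> 12; None for keys like 'periodic_snapshot_3'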
class ZettareplService(Service, TaskStateMixin):
task_state_methods = ["replication.run"]
class Config:
private = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.state = {}
self.error = None
self.definition_errors = {}
self.hold_tasks = {}
self.last_snapshot = {}
self.serializable_state = defaultdict(dict)
def get_state(self):
if self.error:
return {
"error": self.error,
}
context = self._get_state_context()
return {
"tasks": {
task_id: self._get_task_state(task_id, context)
for task_id in self._known_tasks_ids()
}
}
def get_state_internal(self, task_id):
return self.state.get(task_id)
def _known_tasks_ids(self):
return set(self.state.keys()) | set(self.definition_errors.keys()) | set(self.hold_tasks.keys())
def _get_state_context(self):
return self.middleware.call_sync("zettarepl.get_task_state_context")
def _get_task_state(self, task_id, context):
if self.error:
return self.error
if task_id in self.definition_errors:
return self.definition_errors[task_id]
if task_id in self.hold_tasks:
return self.hold_tasks[task_id]
state = self.state.get(task_id, {}).copy()
if m := RE_REPLICATION_TASK_ID.match(task_id):
state["job"] = self.middleware.call_sync("zettarepl.get_task_state_job", context, int(m.group(1)))
state["last_snapshot"] = self.last_snapshot.get(task_id)
return state
def set_error(self, error):
old_error = self.error
self.error = error
if old_error != self.error:
for task_id in self._known_tasks_ids():
self._notify_state_change(task_id)
def set_definition_errors(self, definition_errors):
old_definition_errors = self.definition_errors
self.definition_errors = definition_errors
for task_id in set(old_definition_errors.keys()) | set(self.definition_errors.keys()):
self._notify_state_change(task_id)
def notify_definition(self, definition, hold_tasks):
old_hold_tasks = self.hold_tasks
self.hold_tasks = hold_tasks
for task_id in set(old_hold_tasks.keys()) | set(self.hold_tasks.keys()):
self._notify_state_change(task_id)
task_ids = (
{f"periodic_snapshot_{k}" for k in definition["periodic-snapshot-tasks"]} |
{f"replication_{k}" for k in definition["replication-tasks"]} |
set(hold_tasks.keys())
)
for task_id in list(self.state.keys()):
if task_id not in task_ids:
self.state.pop(task_id, None)
for task_id in list(self.last_snapshot.keys()):
if task_id not in task_ids:
self.last_snapshot.pop(task_id, None)
for task_id in list(self.serializable_state.keys()):
if f"replication_task_{task_id}" not in task_ids:
self.serializable_state.pop(task_id, None)
def get_internal_task_state(self, task_id):
return self.state[task_id]
def set_state(self, task_id, state):
self.state[task_id] = state
if task_id.startswith("replication_task_"):
if state["state"] in ("ERROR", "FINISHED"):
self.serializable_state[int(task_id.split("_")[-1])]["state"] = state
self._notify_state_change(task_id)
def set_last_snapshot(self, task_id, last_snapshot):
self.last_snapshot[task_id] = last_snapshot
if task_id.startswith("replication_task_"):
self.serializable_state[int(task_id.split("_")[-1])]["last_snapshot"] = last_snapshot
self._notify_state_change(task_id)
def _notify_state_change(self, task_id):
state = self._get_task_state(task_id, self._get_state_context())
self.middleware.call_hook_sync("zettarepl.state_change", id_=task_id, fields=state)
async def load_state(self):
for replication in await self.middleware.call("datastore.query", "storage.replication"):
state = ejson.loads(replication["repl_state"])
if "last_snapshot" in state:
self.last_snapshot[f"replication_task_{replication['id']}"] = state["last_snapshot"]
if "state" in state:
self.state[f"replication_task_{replication['id']}"] = state["state"]
@periodic(3600)
async def flush_state(self):
for task_id, state in self.serializable_state.items():
try:
await self.middleware.call("datastore.update", "storage.replication", task_id,
{"repl_state": ejson.dumps(state)})
except RuntimeError:
pass
| 5,200 | Python | .py | 109 | 37.440367 | 110 | 0.611946 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,549 | util.py | truenas_middleware/src/middlewared/middlewared/plugins/zettarepl_/util.py | from types import SimpleNamespace
from zettarepl.replication.task.dataset import get_source_dataset_base, get_target_dataset
from middlewared.service import Service
class ZettareplService(Service):
class Config:
private = True
async def reverse_source_target_datasets(self, source_datasets, target_dataset):
if len(source_datasets) == 1:
return [target_dataset], source_datasets[0]
else:
replication_task = SimpleNamespace(source_datasets=source_datasets, target_dataset=target_dataset)
return (
[
get_target_dataset(replication_task, source_dataset)
for source_dataset in source_datasets
],
get_source_dataset_base(replication_task)
)
| 810 | Python | .py | 18 | 34.333333 | 110 | 0.657761 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,550 | snapshot_create.py | truenas_middleware/src/middlewared/middlewared/plugins/zettarepl_/snapshot_create.py | from middlewared.service import Service
from zettarepl.snapshot.create import create_snapshot
from zettarepl.snapshot.snapshot import Snapshot
from zettarepl.transport.local import LocalShell
class ZettareplService(Service):
def create_recursive_snapshot_with_exclude(self, dataset, snapshot, exclude):
create_snapshot(LocalShell(), Snapshot(dataset, snapshot), True, exclude, {})
| 396 | Python | .py | 7 | 53.428571 | 85 | 0.821244 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,551 | snapshot_removal_date.py | truenas_middleware/src/middlewared/middlewared/plugins/zettarepl_/snapshot_removal_date.py | from collections import defaultdict
import subprocess
from dateutil.tz import tzlocal
import isodate
from middlewared.service import Service
from middlewared.utils.time_utils import utc_now
from zettarepl.snapshot.list import list_snapshots
from zettarepl.snapshot.name import parse_snapshot_name
from zettarepl.snapshot.task.snapshot_owner import PeriodicSnapshotTaskSnapshotOwner
from zettarepl.snapshot.task.task import PeriodicSnapshotTask
from zettarepl.transport.local import LocalShell
class ZettareplService(Service):
removal_dates = defaultdict(dict)
removal_dates_loaded = False
def load_removal_dates(self, pool=None):
property_name = self.middleware.call_sync("pool.snapshottask.removal_date_property")
cmd = ["zfs", "list", "-t", "snapshot", "-H", "-o", f"name,{property_name}"]
if pool is not None:
cmd.extend(["-r", pool])
removal_dates = self.removal_dates.copy()
removal_dates[pool] = {}
else:
removal_dates = defaultdict(dict)
for snapshot, destroy_at in map(lambda s: s.split("\t", 1), subprocess.run(
cmd, check=True, capture_output=True, encoding="utf-8", errors="ignore",
).stdout.splitlines()):
if destroy_at == "-":
continue
snapshot_pool = snapshot.split("/")[0]
try:
destroy_at = isodate.parse_datetime(destroy_at)
except Exception as e:
self.middleware.logger.warning("Error parsing snapshot %r %s: %r", snapshot, property_name, e)
continue
removal_dates[snapshot_pool][snapshot] = destroy_at
self.removal_dates = removal_dates
self.removal_dates_loaded = True
def get_removal_dates(self):
if not self.removal_dates_loaded:
return None
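        # Flatten the per-pool mapping into a single {"dataset@snapshot": destroy_at} dict.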
return dict(sum([list(d.items()) for d in self.removal_dates.values()], []))
def periodic_snapshot_task_snapshots(self, task):
snapshots = list_snapshots(LocalShell(), task["dataset"], task["recursive"])
zettarepl_task = PeriodicSnapshotTask.from_data(None, self.middleware.call_sync(
"zettarepl.periodic_snapshot_task_definition", task,
))
snapshot_owner = PeriodicSnapshotTaskSnapshotOwner(utc_now(), zettarepl_task)
task_snapshots = set()
for snapshot in snapshots:
if snapshot_owner.owns_dataset(snapshot.dataset):
try:
parsed_snapshot_name = parse_snapshot_name(snapshot.name, task["naming_schema"])
except ValueError:
pass
else:
if snapshot_owner.owns_snapshot(snapshot.dataset, parsed_snapshot_name):
task_snapshots.add(str(snapshot))
return task_snapshots
def fixate_removal_date(self, datasets, task):
property_name = self.middleware.call_sync("pool.snapshottask.removal_date_property")
zettarepl_task = PeriodicSnapshotTask.from_data(None, self.middleware.call_sync(
"zettarepl.periodic_snapshot_task_definition", task,
))
for dataset, snapshots in datasets.items():
for snapshot in snapshots:
try:
parsed_snapshot_name = parse_snapshot_name(snapshot, task["naming_schema"])
except ValueError as e:
self.middleware.logger.error("Unexpected error parsing snapshot name %r with naming schema %r: %r",
snapshot, task["naming_schema"], e)
else:
destroy_at = parsed_snapshot_name.datetime + zettarepl_task.lifetime
k1 = dataset.split("/")[0]
k2 = f"{dataset}@{snapshot}"
existing_destroy_at = self.removal_dates.get(k1, {}).get(k2)
if existing_destroy_at is not None and existing_destroy_at >= destroy_at:
continue
try:
subprocess.run(
["zfs", "set", f"{property_name}={destroy_at.isoformat()}", f"{dataset}@{snapshot}"],
check=True, capture_output=True, encoding="utf-8", errors="ignore",
)
except subprocess.CalledProcessError as e:
self.middleware.logger.warning("Error setting snapshot %s@%s removal date: %r", dataset,
snapshot, e.stderr)
else:
self.removal_dates[k1][k2] = destroy_at
def annotate_snapshots(self, snapshots):
property_name = self.middleware.call_sync("pool.snapshottask.removal_date_property")
zettarepl_tasks = [
PeriodicSnapshotTask.from_data(task["id"], self.middleware.call_sync(
"zettarepl.periodic_snapshot_task_definition", task,
))
for task in self.middleware.call_sync("pool.snapshottask.query", [["enabled", "=", True]])
]
snapshot_owners = [
PeriodicSnapshotTaskSnapshotOwner(utc_now(), zettarepl_task)
for zettarepl_task in zettarepl_tasks
]
for snapshot in snapshots:
task_destroy_at = None
task_destroy_at_id = None
for snapshot_owner in snapshot_owners:
if snapshot_owner.owns_dataset(snapshot["dataset"]):
try:
parsed_snapshot_name = parse_snapshot_name(
snapshot["snapshot_name"], snapshot_owner.periodic_snapshot_task.naming_schema
)
except ValueError:
pass
else:
if snapshot_owner.owns_snapshot(snapshot["dataset"], parsed_snapshot_name):
destroy_at = parsed_snapshot_name.datetime + snapshot_owner.periodic_snapshot_task.lifetime
if task_destroy_at is None or task_destroy_at < destroy_at:
task_destroy_at = destroy_at
task_destroy_at_id = snapshot_owner.periodic_snapshot_task.id
property_destroy_at = None
if property_name in snapshot["properties"]:
try:
property_destroy_at = isodate.parse_datetime(snapshot["properties"][property_name]["value"])
except Exception as e:
self.middleware.logger.warning("Error parsing snapshot %r %s: %r", snapshot["name"], property_name,
e)
if task_destroy_at is not None and property_destroy_at is not None:
if task_destroy_at < property_destroy_at:
task_destroy_at = None
else:
property_destroy_at = None
if task_destroy_at is not None:
snapshot["retention"] = {
"datetime": task_destroy_at.replace(tzinfo=tzlocal()),
"source": "periodic_snapshot_task",
"periodic_snapshot_task_id": task_destroy_at_id,
}
elif property_destroy_at is not None:
snapshot["retention"] = {
"datetime": property_destroy_at.replace(tzinfo=tzlocal()),
"source": "property",
}
else:
snapshot["retention"] = None
return snapshots
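# Illustrative sketch (not part of the original module): splitting one tab-separated
# `zfs list -H -o name,<property>` row the way load_removal_dates() does. The sample
# row is hypothetical.
def _example_parse_zfs_row(row: str = 'tank/ds@auto-2024-01-01\t2024-01-15T00:00:00'):
    snapshot, destroy_at = row.split('\t', 1)
    if destroy_at == '-':
        return None  # removal-date property not set on this snapshot
    return snapshot.split('/')[0], snapshot, isodate.parse_datetime(destroy_at)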
async def pool_configuration_change(middleware, *args, **kwargs):
middleware.create_task(middleware.call("zettarepl.load_removal_dates"))
async def setup(middleware):
middleware.create_task(middleware.call("zettarepl.load_removal_dates"))
middleware.register_hook("pool.post_import", pool_configuration_change)
| 7,939 | Python | .py | 147 | 38.877551 | 119 | 0.582066 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,552 | vendor.py | truenas_middleware/src/middlewared/middlewared/plugins/system_vendor/vendor.py | import json
import os
from middlewared.api import api_method
from middlewared.api.current import (
VendorNameArgs, VendorNameResult, UnvendorArgs, UnvendorResult, IsVendoredArgs, IsVendoredResult
)
from middlewared.service import Service
SENTINEL_FILE_PATH = '/data/.vendor'
def get_vendor() -> str | None:
with open(SENTINEL_FILE_PATH, 'r') as file:
return json.load(file).get('name') or None # Don't return an empty string.
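# Illustrative sketch (not part of the original module): the "empty string becomes None"
# normalization get_vendor() applies, shown on hypothetical JSON payloads.
def _example_vendor_name(raw: str) -> str | None:
    return json.loads(raw).get('name') or None
# _example_vendor_name('{"name": "acme"}') -> 'acme'
# _example_vendor_name('{"name": ""}')     -> None
# _example_vendor_name('{}')               -> None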
class VendorService(Service):
class Config:
namespace = 'system.vendor'
cli_private = True
@api_method(VendorNameArgs, VendorNameResult, private=True)
def name(self) -> str | None:
try:
return get_vendor()
except FileNotFoundError:
pass
except json.JSONDecodeError:
            self.logger.exception('Can\'t retrieve vendor name: %r is not valid JSON', SENTINEL_FILE_PATH)
except Exception:
self.logger.exception('Unexpected error while reading %r', SENTINEL_FILE_PATH)
@api_method(UnvendorArgs, UnvendorResult, private=True)
def unvendor(self):
try:
os.remove(SENTINEL_FILE_PATH)
except FileNotFoundError:
pass
except Exception:
self.logger.exception('Unexpected error attempting to remove %r', SENTINEL_FILE_PATH)
self.middleware.call_sync('etc.generate', 'grub')
@api_method(IsVendoredArgs, IsVendoredResult, private=True)
def is_vendored(self):
return os.path.isfile(SENTINEL_FILE_PATH)
| 1,542 | Python | .py | 37 | 34.513514 | 114 | 0.693905 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,553 | hierarchy.py | truenas_middleware/src/middlewared/middlewared/plugins/system_dataset/hierarchy.py | import os
from .utils import SYSDATASET_PATH
SYSTEM_DATASET_JSON_SCHEMA = {
'$schema': 'http://json-schema.org/draft-07/schema#',
'description': 'Schema for the output of get_system_dataset_spec function',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {
'type': 'string'
},
'props': {
'type': 'object',
'properties': {
'mountpoint': {
'type': 'string',
'const': 'legacy'
},
'readonly': {
'type': 'string',
'const': 'off'
},
'snapdir': {
'type': 'string',
'const': 'hidden'
},
'canmount': {'type': 'string'},
},
'required': ['mountpoint', 'readonly', 'snapdir'],
},
'chown_config': {
'type': 'object',
'properties': {
'uid': {'type': 'integer'},
'gid': {'type': 'integer'},
'mode': {'type': 'integer'},
},
'required': ['uid', 'gid', 'mode'],
},
'mountpoint': {
'type': 'string'
},
'create_paths': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'path': {'type': 'string'},
'uid': {'type': 'integer'},
'gid': {'type': 'integer'}
},
'required': ['path', 'uid', 'gid']
}
},
'post_mount_actions': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'method': {'type': 'string'},
'args': {
'type': 'array',
'items': {'type': 'string'},
},
},
'required': ['method']
}
},
},
'required': ['name', 'props', 'chown_config'],
'additionalProperties': False,
}
}
def get_system_dataset_spec(pool_name: str, uuid: str) -> list:
return [
{
'name': os.path.join(pool_name, '.system'),
'props': {
'mountpoint': 'legacy',
'readonly': 'off',
'snapdir': 'hidden',
},
'mountpoint': SYSDATASET_PATH,
'chown_config': {
'uid': 0,
'gid': 0,
'mode': 0o755,
},
},
{
'name': os.path.join(pool_name, '.system/cores'),
'props': {
'mountpoint': 'legacy',
'readonly': 'off',
'snapdir': 'hidden',
},
'chown_config': {
'uid': 0,
'gid': 0,
'mode': 0o775,
},
},
{
'name': os.path.join(pool_name, '.system/nfs'),
'props': {
'mountpoint': 'legacy',
'readonly': 'off',
'snapdir': 'hidden',
},
'chown_config': {
'uid': 0,
'gid': 0,
'mode': 0o755,
},
'post_mount_actions': [
{
'method': 'nfs.setup_directories',
'args': [],
}
]
},
{
'name': os.path.join(pool_name, '.system/samba4'),
'props': {
'mountpoint': 'legacy',
'readonly': 'off',
'snapdir': 'hidden',
},
'chown_config': {
'uid': 0,
'gid': 0,
'mode': 0o755,
},
},
{
'name': os.path.join(pool_name, f'.system/configs-{uuid}'),
'props': {
'mountpoint': 'legacy',
'readonly': 'off',
'snapdir': 'hidden',
},
'chown_config': {
'uid': 0,
'gid': 0,
'mode': 0o755,
},
},
{
'name': os.path.join(pool_name, f'.system/netdata-{uuid}'),
'props': {
'mountpoint': 'legacy',
'readonly': 'off',
'snapdir': 'hidden',
'canmount': 'noauto',
},
'chown_config': {
'uid': 999,
'gid': 997,
'mode': 0o755,
},
'mountpoint': os.path.join(SYSDATASET_PATH, 'netdata'),
'create_paths': [
{'path': '/var/log/netdata', 'uid': 999, 'gid': 997},
],
'post_mount_actions': [
{
'method': 'reporting.post_dataset_mount_action',
'args': [],
}
]
},
]
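# Illustrative sketch (not part of the original module): the spec above is meant to
# satisfy SYSTEM_DATASET_JSON_SCHEMA. Assuming the third-party `jsonschema` package is
# available, this would assert that; the pool name and UUID are hypothetical.
def _example_validate_spec():
    from jsonschema import validate
    validate(get_system_dataset_spec('tank', 'abcd1234'), SYSTEM_DATASET_JSON_SCHEMA)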
| 5,360 | Python | .py | 173 | 15.641618 | 79 | 0.320726 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,554 | ipa_join_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/ipa_join_mixin.py | import base64
import json
import os
import subprocess
from dataclasses import asdict
from functools import cache
from middlewared.job import Job
from middlewared.plugins.ldap_.constants import SERVER_TYPE_FREEIPA
from middlewared.utils.directoryservices import (
ipa, ipa_constants
)
from middlewared.utils.directoryservices.constants import DSType
from middlewared.utils.directoryservices.ipactl_constants import (
ExitCode,
IpaOperation,
)
from middlewared.utils.directoryservices.krb5 import kerberos_ticket, ktutil_list_impl
from middlewared.utils.directoryservices.krb5_error import KRB5ErrCode, KRB5Error
from middlewared.utils.lang import undefined
from middlewared.service_exception import CallError
from tempfile import NamedTemporaryFile
IPACTL = ipa_constants.IPACmd.IPACTL.value
def _parse_ipa_response(resp: subprocess.CompletedProcess) -> dict:
"""
ipactl returns JSON-encoded data and depending on failure
code may also include JSON-RPC response error message from
IPA server.
"""
match resp.returncode:
case ExitCode.SUCCESS:
return json.loads(resp.stdout.decode().strip())
case ExitCode.JSON_ERROR:
err = resp.stderr.decode().strip()
err_decoded = json.loads(err)
raise CallError(err, extra=err_decoded)
case ExitCode.NO_SMB_SUPPORT:
err = resp.stderr.decode().strip()
raise FileNotFoundError(err)
case _:
err = resp.stderr or resp.stdout
raise RuntimeError(f'{resp.returncode}: {err.decode()}')
class IPAJoinMixin:
__ipa_smb_domain = undefined
def _ipa_remove_kerberos_cert_config(self, job: Job | None, ldap_config: dict | None):
if ldap_config is None:
ldap_config = self.middleware.call_sync('ldap.config')
if job:
job.set_progress(80, 'Removing kerberos configuration.')
if ldap_config['kerberos_realm']:
self.middleware.call_sync('kerberos.realm.delete', ldap_config['kerberos_realm'])
if (host_kt := self.middleware.call_sync('kerberos.keytab.query', [
['name', '=', ipa_constants.IpaConfigName.IPA_HOST_KEYTAB.value]
])):
self.middleware.call_sync('kerberos.keytab.delete', host_kt[0]['id'])
if job:
job.set_progress(90, 'Removing IPA certificate.')
if (ipa_cert := self.middleware.call_sync('certificateauthority.query', [
['name', '=', ipa_constants.IpaConfigName.IPA_CACERT.value]
])):
self.middleware.call_sync('certificateauthority.delete', ipa_cert[0]['id'])
def _ipa_leave(self, job: Job, ds_type: DSType, domain: str):
"""
Leave the IPA domain
"""
ldap_config = self.middleware.call_sync('ldap.config')
ipa_config = self.middleware.call_sync('ldap.ipa_config', ldap_config)
if ipa_config['domain'] != domain:
raise CallError(f'{domain}: TrueNAS is joined to {ipa_config["domain"]}')
job.set_progress(0, 'Deleting NFS and SMB service principals.')
self._ipa_del_spn()
job.set_progress(10, 'Removing DNS entries.')
self.unregister_dns(ipa_config['host'], False)
# now leave IPA
job.set_progress(30, 'Leaving IPA domain.')
try:
join = subprocess.run([
IPACTL, '-a', IpaOperation.LEAVE.name
], check=False, capture_output=True)
_parse_ipa_response(join)
except Exception:
self.logger.warning(
'Failed to disable TrueNAS machine account in the IPA domain. '
'Further action by the IPA administrator to fully remove '
'the server from the domain will be required.', exc_info=True
)
# At this point we can start removing local configuration
job.set_progress(50, 'Disabling LDAP service.')
# This disables the LDAP service and cancels any in progress cache
# jobs
ldap_update_job = self.middleware.call_sync('ldap.update', {
'binddn': '',
'bindpw': '',
'kerberos_principal': '',
'kerberos_realm': None,
'enable': False,
})
ldap_update_job.wait_sync()
self._ipa_remove_kerberos_cert_config(job, ldap_config)
job.set_progress(95, 'Removing privileges.')
if (priv := self.middleware.call_sync('privilege.query', [
['name', '=', ipa_config['domain'].upper()]
])):
self.middleware.call_sync('privilege.delete', priv[0]['id'])
job.set_progress(100, 'IPA leave complete.')
def _ipa_activate(self) -> None:
for etc_file in DSType.IPA.etc_files:
self.middleware.call_sync('etc.generate', etc_file)
self.middleware.call_sync('service.stop', 'sssd')
self.middleware.call_sync('service.start', 'sssd', {'silent': False})
self.middleware.call_sync('kerberos.start')
def _ipa_insert_keytab(self, service: ipa_constants.IpaConfigName, keytab_data: str) -> None:
""" Insert a keytab into the TrueNAS config (replacing existing) """
if service is ipa_constants.IpaConfigName.IPA_CACERT:
raise ValueError('Not a keytab file')
kt_name = service.value
if kt_entry := self.middleware.call_sync('kerberos.keytab.query', [
['name', '=', kt_name]
]):
self.middleware.call_sync(
'datastore.update', 'directoryservice.kerberoskeytab',
kt_entry[0]['id'],
{'keytab_name': kt_name, 'keytab_file': keytab_data}
)
else:
self.middleware.call_sync(
'datastore.insert', 'directoryservice.kerberoskeytab',
{'keytab_name': kt_name, 'keytab_file': keytab_data}
)
def _ipa_grant_privileges(self) -> None:
""" Grant domain admins ability to manage TrueNAS """
ipa_config = self.middleware.call_sync('ldap.ipa_config')
existing_privileges = self.middleware.call_sync(
'privilege.query',
[["name", "=", ipa_config['domain'].upper()]]
)
if existing_privileges:
return
try:
admins_grp = self.middleware.call_sync('group.get_group_obj', {
'groupname': 'admins',
'sid_info': True
})
except Exception:
self.logger.debug(
'Failed to look up admins group for IPA domain. API access for admin '
'accounts will have to be manually configured', exc_info=True
)
return
match admins_grp['source']:
case 'LDAP':
pass
case 'LOCAL':
self.logger.warning(
'Local "admins" group collides with name of group provided '
'by IPA domain, which prevents the IPA group from being '
'automatically granted API access.'
)
return
case _:
self.logger.warning(
'%s: unexpected source for "admins" group, which prevents '
'the IPA group from being automatically granted API access.',
admins_grp['source']
)
return
try:
self.middleware.call_sync('privilege.create', {
'name': ipa_config['domain'].upper(),
'ds_groups': [admins_grp['gr_gid']],
'roles': ['FULL_ADMIN'],
'web_shell': True
})
except Exception:
# This should be non-fatal since admin can simply fix via
# our webui
self.logger.warning(
'Failed to grant domain administrators access to the '
'TrueNAS API.', exc_info=True
)
@kerberos_ticket
def _ipa_test_join(self, dstype, domain):
"""
        Rudimentary check for whether we've already joined the IPA domain.
        This allows us to force a re-join if the user has deleted relevant
        config information.
"""
ldap_conf = self.middleware.call_sync('ldap.config')
if ldap_conf['server_type'] != SERVER_TYPE_FREEIPA:
return False
if not ldap_conf['kerberos_realm']:
return False
if not self.middleware.call_sync('ldap.has_ipa_host_keytab'):
return False
ipa_config = self.middleware.call_sync('ldap.ipa_config', ldap_conf)
return ipa_config['domain'].casefold() == domain.casefold()
@kerberos_ticket
def _ipa_set_spn(self):
""" internal method to create service entries on remote IPA server """
output = []
for op, spn_type in (
(IpaOperation.SET_SMB_PRINCIPAL, ipa_constants.IpaConfigName.IPA_SMB_KEYTAB),
(IpaOperation.SET_NFS_PRINCIPAL, ipa_constants.IpaConfigName.IPA_NFS_KEYTAB)
):
setspn = subprocess.run([IPACTL, '-a', op.name], check=False, capture_output=True)
try:
resp = _parse_ipa_response(setspn)
output.append(resp | {'keytab_type': spn_type})
except FileNotFoundError:
self.logger.debug('IPA domain does not provide support for SMB protocol')
continue
except Exception:
self.logger.error('%s: failed to create keytab', op.name, exc_info=True)
return output
@kerberos_ticket
def _ipa_del_spn(self):
""" internal method to delete service principals on remote IPA server
Perform remote operation and then delete keytab from datastore. At this point
the host keytab is not deleted because we need it to remove our DNS entries.
"""
for op, spn_type in (
(IpaOperation.DEL_SMB_PRINCIPAL, ipa_constants.IpaConfigName.IPA_SMB_KEYTAB),
(IpaOperation.DEL_NFS_PRINCIPAL, ipa_constants.IpaConfigName.IPA_NFS_KEYTAB)
):
setspn = subprocess.run([IPACTL, '-a', op.name], check=False, capture_output=True)
try:
_parse_ipa_response(setspn)
except Exception:
self.logger.warning('%s: failed to remove service principal from remote IPA server.',
op.name, exc_info=True)
if (kt := self.middleware.call_sync('kerberos.keytab.query', [['name', '=', spn_type]])):
self.middleware.call_sync('kerberos.keytab.delete', kt[0]['id'])
@kerberos_ticket
def _ipa_setup_services(self, job: Job):
job.set_progress(60, 'Configuring kerberos principals')
resp = self._ipa_set_spn()
domain_info = None
for entry in resp:
self._ipa_insert_keytab(entry['keytab_type'], entry['keytab'])
if entry['keytab_type'] is ipa_constants.IpaConfigName.IPA_SMB_KEYTAB:
domain_info = entry['domain_info'][0]
password = entry['password']
if domain_info:
job.set_progress(70, 'Configuring SMB server for IPA')
self.middleware.call_sync('datastore.update', 'services.cifs', 1, {
'cifs_srv_workgroup': domain_info['netbios_name']
})
# regenerate our SMB config to apply our new domain
self.middleware.call_sync('etc.generate', 'smb')
# write our domain sid to the secrets.tdb
setsid = subprocess.run([
'net', 'setdomainsid', domain_info['domain_sid']
], capture_output=True, check=False)
if setsid.returncode:
raise CallError(f'Failed to set domain SID: {setsid.stderr.decode()}')
# We must write the password encoded in the SMB keytab
# to secrets.tdb at this point.
self.middleware.call_sync(
'directoryservices.secrets.set_ipa_secret',
domain_info['netbios_name'],
base64.b64encode(password.encode())
)
self.middleware.call_sync('directoryservices.secrets.backup')
@kerberos_ticket
def ipa_get_smb_domain_info(self) -> dict | None:
"""
        This information shouldn't change during the normal course of
        operations in a FreeIPA domain, so we cache a copy of it for future
        reference.
There are three possible states for this.
1. we've never checked before. In this case __ipa_smb_domain will be an
`undefined` object
2. we've checked but the IPA LDAP schema contains no SMB-related information
for some reason. In this case __ipa_smb_domain will be set to None
3. we've checked and have SMB domain info in which case we've stored an
IPASmbDomain instance and return it in dictionary form
"""
if self.__ipa_smb_domain is None:
return None
elif self.__ipa_smb_domain is not undefined:
return asdict(self.__ipa_smb_domain)
if self.middleware.call_sync('directoryservices.status')['type'] != DSType.IPA.value:
raise CallError('Not joined to IPA domain')
getdom = subprocess.run([
IPACTL, '-a', IpaOperation.SMB_DOMAIN_INFO.name,
], check=False, capture_output=True)
resp = _parse_ipa_response(getdom)
if not resp:
self.__ipa_smb_domain = None
return None
self.__ipa_smb_domain = ipa_constants.IPASmbDomain(
netbios_name=resp[0]['netbios_name'],
domain_sid=resp[0]['domain_sid'],
domain_name=resp[0]['domain_name'],
range_id_min=resp[0]['range_id_min'],
range_id_max=resp[0]['range_id_max']
)
return asdict(self.__ipa_smb_domain)
@cache
def _ipa_get_cacert(self) -> str:
""" retrieve PEM-encoded CACERT from IPA LDAP server """
getca = subprocess.run([
IPACTL, '-a', IpaOperation.GET_CACERT_FROM_LDAP.name,
], check=False, capture_output=True)
resp = _parse_ipa_response(getca)
return resp['cacert']
def _ipa_join_impl(self, host: str, basedn: str, domain: str, realm: str, server: str) -> dict:
"""
Write the IPA default config file (preliminary step to getting our cacert).
Then obtain the ipa cacert and write it in /etc/ipa (where tools expect to find it).
This allows us to call ipa-join successfully using the kerberos ticket
we already have (checked when _ipa_join() is called).
        Add the cacert to the JSON-RPC response to the ipa-join request so that the
        caller can insert it into our DB. If this fails we should remove the config
        files we wrote so that we don't end up in a semi-configured state.
"""
# First write our freeipa config (this allows us to get our cert)
try:
ipa.write_ipa_default_config(host, basedn, domain, realm, server)
ipa_cacert = self._ipa_get_cacert()
ipa.write_ipa_cacert(ipa_cacert.encode())
# Now we should be able to join
join = subprocess.run([
IPACTL, '-a', IpaOperation.JOIN.name
], check=False, capture_output=True)
resp = _parse_ipa_response(join)
resp['cacert'] = ipa_cacert
except Exception as e:
for p in (
ipa_constants.IPAPath.DEFAULTCONF.path,
ipa_constants.IPAPath.CACERT.path
):
try:
os.remove(p)
except FileNotFoundError:
pass
raise e
return resp
@kerberos_ticket
def _ipa_join(self, job: Job, ds_type: DSType, domain: str):
"""
This method performs all the steps required to join TrueNAS to an
IPA domain and update our TrueNAS configuration with details gleaned
from the IPA domain settings. Once it is completed we will:
1. have created host account in IPA domain
2. registered our IP addresses in IPA
3. stored up to three keytabs on TrueNAS (host, nfs, and smb)
4. stored the IPA cacert on TrueNAS
5. updated samba's secrets.tdb to contain the info from SMB keytab
6. backed up samba's secrets.tdb
"""
ldap_config = self.middleware.call_sync('ldap.config')
ipa_config = self.middleware.call_sync('ldap.ipa_config', ldap_config)
self.__ipa_smb_domain = undefined
job.set_progress(15, 'Performing IPA join')
resp = self._ipa_join_impl(
ipa_config['host'],
ipa_config['basedn'],
ipa_config['domain'],
ipa_config['realm'],
ipa_config['target_server']
)
# resp includes `cacert` for domain and `keytab` for our host principal to use
# in future.
# insert the IPA host principal keytab into our database
job.set_progress(50, 'Updating TrueNAS configuration with IPA domain details.')
self._ipa_insert_keytab(ipa_constants.IpaConfigName.IPA_HOST_KEYTAB, resp['keytab'])
# make sure database also has the IPA realm
ipa_realm = self.middleware.call_sync('kerberos.realm.query', [
['realm', '=', ipa_config['realm']]
])
if ipa_realm:
ipa_realm_id = ipa_realm[0]['id']
else:
ipa_realm_id = self.middleware.call_sync(
'datastore.insert', 'directoryservice.kerberosrealm',
{'krb_realm': ipa_config['realm']}
)
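        # Extract the kerberos principal name from the freshly received host keytab.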
with NamedTemporaryFile() as f:
f.write(base64.b64decode(resp['keytab']))
f.flush()
krb_principal = ktutil_list_impl(f.name)[0]['principal']
# update our cacerts with IPA domain one:
existing_cacert = self.middleware.call_sync('certificateauthority.query', [
['name', '=', ipa_constants.IpaConfigName.IPA_CACERT.value]
])
if existing_cacert:
if existing_cacert[0]['certificate'] != resp['cacert']:
# We'll continue to try joining the IPA domain and hope for the best.
# It's technically possible that we will still be able to validate
# the cert / have working SSL.
self.logger.error(
'[%s]: Stored CA certificate for IPA domain does not match '
'certificate returned from the IPA LDAP server. This may '
'prevent the IPA directory service from properly functioning '
'and should be resolved by the TrueNAS administrator. '
                    'An example of such administrative action would be to remove '
'the possibly incorrect CA certificate from the TrueNAS '
'server and re-join the IPA domain to ensure the correct '
'CA certificate is installed after reviewing the issue with '
'the person or team responsible for maintaining the IPA domain.',
ipa_constants.IpaConfigName.IPA_CACERT.value
)
else:
self.middleware.call_sync('certificateauthority.create', {
'name': ipa_constants.IpaConfigName.IPA_CACERT.value,
'certificate': resp['cacert'],
'add_to_trusted_store': True,
'create_type': 'CA_CREATE_IMPORTED'
})
# make sure ldap service is updated to use realm and principal and
# clear out the bind account password since it is no longer needed. We
# don't insert the IPA cacert into the LDAP configuration since the
# certificate field is for certificate-based authentication and _not_
# providing certificate authority certificates
self.middleware.call_sync('datastore.update', 'directoryservice.ldap', ldap_config['id'], {
'ldap_kerberos_realm': ipa_realm_id,
'ldap_kerberos_principal': krb_principal,
'ldap_bindpw': ''
})
# We've joined API and have a proper host principal. Time to destroy admin keytab.
self.middleware.call_sync('kerberos.kdestroy')
# GSS-TSIG in IPA domain requires using our HOST kerberos principal
try:
self.middleware.call_sync('kerberos.start')
except KRB5Error as err:
match err.krb5_code:
case KRB5ErrCode.KRB5_REALM_UNKNOWN:
# DNS is broken in the IPA domain and so we need to roll back our config
# changes.
self._ipa_remove_kerberos_cert_config(None, None)
self.logger.warning(
'Unable to resolve kerberos realm via DNS. This may indicate misconfigured '
'nameservers on the TrueNAS server or a misconfigured IPA domain.', exc_info=True
)
self.middleware.call_sync('datastore.update', 'directoryservice.ldap', ldap_config['id'], {
'ldap_kerberos_realm': None,
'ldap_kerberos_principal': '',
'ldap_bindpw': ldap_config['bindpw']
})
# remove any configuration files we have written
for p in (
ipa_constants.IPAPath.DEFAULTCONF.path,
ipa_constants.IPAPath.CACERT.path
):
try:
os.remove(p)
except FileNotFoundError:
pass
case _:
# Log the complete error message so that we have an opportunity to improve
# error handling for weird kerberos errors.
self.logger.error('Failed to obtain kerberos ticket with host keytab.', exc_info=True)
raise err
# Verify that starting kerberos got the correct cred
cred = self.middleware.call_sync('kerberos.check_ticket')
if cred['name_type'] != 'KERBEROS_PRINCIPAL':
# This shouldn't happen, but we must fail here since the nsupdate will
# fail with REJECTED.
raise CallError(f'{cred}: unexpected kerberos credential type')
elif not cred['name'].startswith('host/'):
raise CallError(f'{cred}: not host principal.')
self.register_dns(ipa_config['host'])
self._ipa_setup_services(job)
job.set_progress(75, 'Activating IPA service.')
self._ipa_activate()
# Block on the cache fill job because this forces a wait until IPA becomes ready
cache_fill = self.middleware.call_sync('directoryservices.cache.refresh_impl')
cache_fill.wait_sync()
| 22,940 | Python | .py | 468 | 37.017094 | 106 | 0.599375 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,555 | ipa_health_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/ipa_health_mixin.py | import ldap
import os
from middlewared.utils.directoryservices import (
ipa_constants, ldap_utils
)
from middlewared.utils.directoryservices.health import (
IPAHealthCheckFailReason,
IPAHealthError
)
from middlewared.plugins.ldap_.ldap_client import LdapClient
from middlewared.service_exception import CallError
class IPAHealthMixin:
def _recover_ipa_config(self) -> list[dict]:
return self.middleware.call_sync('etc.generate', 'ipa')
def _recover_ldap_config(self) -> list[dict]:
return self.middleware.call_sync('etc.generate', 'ldap')
def _recover_ipa(self, error: IPAHealthError) -> None:
"""
Attempt to recover from an IPAHealthError that was raised during
our health check.
"""
match error.reason:
case IPAHealthCheckFailReason.IPA_NO_CONFIG | IPAHealthCheckFailReason.IPA_CONFIG_PERM:
self._recover_ipa_config()
case IPAHealthCheckFailReason.IPA_NO_CACERT | IPAHealthCheckFailReason.IPA_CACERT_PERM:
self._recover_ipa_config()
case IPAHealthCheckFailReason.LDAP_BIND_FAILED | IPAHealthCheckFailReason.SSSD_STOPPED:
self._recover_ldap_config()
case _:
# not recoverable
raise error from None
self.middleware.call_sync('service.stop', 'sssd')
self.middleware.call_sync('service.start', 'sssd', {'silent': False})
def _health_check_ipa(self) -> None:
"""
Perform basic health checks for IPA connection.
This method is called periodically from our alert framework.
"""
# First check that kerberos is working correctly
self._health_check_krb5()
# Next check that required IPA configuration files exist and have
# correct permissions
try:
st = os.stat(ipa_constants.IPAPath.DEFAULTCONF.path)
except FileNotFoundError:
self._faulted_reason = (
'IPA default configuration file is missing. This may indicate that '
'an administrator has enabled the IPA service through '
'unsupported methods. Rejoining the IPA domain may be required.'
)
raise IPAHealthError(
IPAHealthCheckFailReason.IPA_NO_CONFIG,
self._faulted_reason
)
if (err_str := self._perm_check(st, ipa_constants.IPAPath.DEFAULTCONF.perm)) is not None:
self._faulted_reason = (
'Unexpected permissions or ownership on the IPA default '
f'configuration file {err_str}'
)
raise IPAHealthError(
IPAHealthCheckFailReason.IPA_CONFIG_PERM,
self._faulted_reason
)
try:
st = os.stat(ipa_constants.IPAPath.CACERT.path)
except FileNotFoundError:
self._faulted_reason = (
'IPA CA certificate file is missing. This may indicate that '
'an administrator has enabled the IPA service through '
'unsupported methods. Rejoining the IPA domain may be required.'
)
raise IPAHealthError(
IPAHealthCheckFailReason.IPA_NO_CACERT,
self._faulted_reason
)
if (err_str := self._perm_check(st, ipa_constants.IPAPath.CACERT.perm)) is not None:
self._faulted_reason = (
'Unexpected permissions or ownership on the IPA CA certificate '
f'file {err_str}'
)
raise IPAHealthError(
IPAHealthCheckFailReason.IPA_CACERT_PERM,
self._faulted_reason
)
config = self.middleware.call_sync('ldap.config')
# By this point we know kerberos should be healthy and we should
# have a ticket. Verify we can use our kerberos ticket to access the
# IPA LDAP server.
#
# We're performing a GSSAPI bind with SEAL set, so don't bother with
# ldaps. This is a simple query for the root DSE to detect whether the
# LDAP connection is profoundly broken.
uris = ldap_utils.hostnames_to_uris(config['hostname'], False)
try:
LdapClient.search({
'uri_list': uris,
'bind_type': 'GSSAPI',
'options': {
'timeout': config['timeout'],
'dns_timeout': config['dns_timeout'],
},
'security': {
'ssl': 'OFF',
'sasl': 'SEAL'
}
}, '', ldap.SCOPE_BASE, '(objectclass=*)')
except Exception as e:
self._faulted_reason = str(e)
raise IPAHealthError(
IPAHealthCheckFailReason.LDAP_BIND_FAILED,
self._faulted_reason
)
# Finally check that sssd is running, and if it's not, try non-silent
# start so that we can dump the reason it's failing to start into an alert.
#
# We don't want to move the sssd restart into the alert itself because
# we need to populate the error reason into `_faulted_reason` so that
# it appears in our directory services summary.
if not self.middleware.call_sync('service.started', 'sssd'):
try:
self.middleware.call_sync('service.start', 'sssd', {'silent': False})
except CallError as e:
self._faulted_reason = str(e)
raise IPAHealthError(
IPAHealthCheckFailReason.SSSD_STOPPED,
self._faulted_reason
)
| 5,685 | Python | .py | 127 | 32.732283 | 99 | 0.596282 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,556 | secrets.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/secrets.py | import enum
import json
import struct
import subprocess
from base64 import b64encode, b64decode
from middlewared.service import Service
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.utils import filter_list
from middlewared.utils.tdb import (
get_tdb_handle,
TDBDataType,
TDBOptions,
TDBPathType,
)
SECRETS_FILE = '/var/db/system/samba4/private/secrets.tdb'
SECRETS_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
# c.f. source3/include/secrets.h
class Secrets(enum.Enum):
MACHINE_ACCT_PASS = 'SECRETS/$MACHINE.ACC'
MACHINE_PASSWORD = 'SECRETS/MACHINE_PASSWORD'
MACHINE_PASSWORD_PREV = 'SECRETS/MACHINE_PASSWORD.PREV'
MACHINE_LAST_CHANGE_TIME = 'SECRETS/MACHINE_LAST_CHANGE_TIME'
MACHINE_SEC_CHANNEL_TYPE = 'SECRETS/MACHINE_SEC_CHANNEL_TYPE'
MACHINE_TRUST_ACCOUNT_NAME = 'SECRETS/SECRETS_MACHINE_TRUST_ACCOUNT_NAME'
MACHINE_DOMAIN_INFO = 'SECRETS/MACHINE_DOMAIN_INFO'
DOMTRUST_ACCT_PASS = 'SECRETS/$DOMTRUST.ACC'
SALTING_PRINCIPAL = 'SECRETS/SALTING_PRINCIPAL'
DOMAIN_SID = 'SECRETS/SID'
SAM_SID = 'SAM/SID'
PROTECT_IDS = 'SECRETS/PROTECT/IDS'
DOMAIN_GUID = 'SECRETS/DOMGUID'
SERVER_GUID = 'SECRETS/GUID'
LDAP_BIND_PW = 'SECRETS/LDAP_BIND_PW'
LDAP_IDMAP_SECRET = 'SECRETS/GENERIC/IDMAP_LDAP'
LOCAL_SCHANNEL_KEY = 'SECRETS/LOCAL_SCHANNEL_KEY'
AUTH_USER = 'SECRETS/AUTH_USER'
AUTH_DOMAIN = 'SECRETS/AUTH_DOMAIN'
AUTH_PASSWORD = 'SECRETS/AUTH_PASSWORD'
def fetch_secrets_entry(key: str) -> str:
with get_tdb_handle(SECRETS_FILE, SECRETS_TDB_OPTIONS) as hdl:
return hdl.get(key)
def store_secrets_entry(key: str, val: str) -> str:
with get_tdb_handle(SECRETS_FILE, SECRETS_TDB_OPTIONS) as hdl:
return hdl.store(key, val)
def query_secrets_entries(filters: list, options: dict) -> list:
with get_tdb_handle(SECRETS_FILE, SECRETS_TDB_OPTIONS) as hdl:
return filter_list(hdl.entries(), filters, options)
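# Illustrative usage of the three helpers above (key and value are hypothetical):
#
#   store_secrets_entry('SECRETS/MACHINE_PASSWORD/MYDOM', '<base64 value>')
#   fetch_secrets_entry('SECRETS/MACHINE_PASSWORD/MYDOM')  # raises MatchNotFound on miss
#   query_secrets_entries([['key', '=', 'SECRETS/MACHINE_PASSWORD/MYDOM']], {})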
class DomainSecrets(Service):
class Config:
namespace = 'directoryservices.secrets'
cli_private = True
private = True
def has_domain(self, domain):
"""
Check whether the running version of secrets.tdb has our machine account password.
"""
try:
fetch_secrets_entry(f"{Secrets.MACHINE_PASSWORD.value}/{domain.upper()}")
except MatchNotFound:
return False
return True
def last_password_change(self, domain):
"""
Retrieve the last password change timestamp for the specified domain.
Raises MatchNotFound if entry is not present in secrets.tdb
"""
encoded_change_ts = fetch_secrets_entry(
f"{Secrets.MACHINE_LAST_CHANGE_TIME.value}/{domain.upper()}"
)
try:
bytes_passwd_chng = b64decode(encoded_change_ts)
except Exception:
self.logger.warning("Failed to retrieve last password change time for domain "
"[%s] from domain secrets. Directory service functionality "
"may be impacted.", domain, exc_info=True)
return None
return struct.unpack("<L", bytes_passwd_chng)[0]
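# Decoding sketch (value is hypothetical): the stored entry is a base64-encoded
# little-endian uint32 of unix epoch seconds, i.e. the inverse of:
#
#   b64encode(struct.pack("<L", 1710227457))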
def set_ipa_secret(self, domain, secret):
# The stored secret in secrets.tdb and our kerberos keytab for SMB must be kept in-sync
store_secrets_entry(
f'{Secrets.MACHINE_PASSWORD.value}/{domain.upper()}', b64encode(b"2\x00")
)
# Password changed field must be initialized (but otherwise is not required)
store_secrets_entry(
f"{Secrets.MACHINE_LAST_CHANGE_TIME.value}/{domain.upper()}", b64encode(b"2\x00")
)
setsecret = subprocess.run(
['net', 'changesecretpw', '-f', '-d', '5'],
capture_output=True, check=False, input=secret
)
if setsecret.returncode != 0:
raise CallError(f'Failed to set machine account secret: {setsecret.stdout.decode()}')
# Ensure we back this info up into our sqlite database as well
self.backup()
def set_ldap_idmap_secret(self, domain, user_dn, secret):
"""
Some idmap backends (ldap and rfc2307) store credentials in secrets.tdb.
This method is used by idmap plugin to write the password.
"""
store_secrets_entry(
f'{Secrets.LDAP_IDMAP_SECRET.value}_{domain.upper()}/{user_dn}',
b64encode(secret.encode() + b'\x00')
)
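# Key layout sketch (domain and DN below are hypothetical): the store above
# writes the secret under a key of the form
#   SECRETS/GENERIC/IDMAP_LDAP_MYDOM/uid=idmap,ou=svc,dc=example,dc=com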
def get_ldap_idmap_secret(self, domain, user_dn):
"""
Retrieve the idmap secret for the specified domain and user DN.
"""
return fetch_secrets_entry(f'{Secrets.LDAP_IDMAP_SECRET.value}_{domain.upper()}/{user_dn}')
def get_machine_secret(self, domain):
return fetch_secrets_entry(f'{Secrets.MACHINE_PASSWORD.value}/{domain.upper()}')
def get_salting_principal(self, realm):
return fetch_secrets_entry(f'{Secrets.SALTING_PRINCIPAL.value}/DES/{realm.upper()}')
def dump(self):
"""
Dump contents of secrets.tdb. Values are base64-encoded
"""
entries = query_secrets_entries([], {})
return {entry['key']: entry['value'] for entry in entries}
async def get_db_secrets(self):
"""
Retrieve secrets that are stored currently in freenas-v1.db.
"""
db = await self.middleware.call('datastore.config', 'services.cifs', {
'prefix': 'cifs_srv_', 'select': ['id', 'secrets']
})
if not db['secrets']:
return {'id': db['id']}
try:
secrets = json.loads(db['secrets'])
except json.decoder.JSONDecodeError:
self.logger.warning("Stored secrets are not valid JSON "
"a new backup of secrets should be generated.")
return {'id': db['id']} | secrets
async def backup(self):
"""
Store a backup of secrets.tdb contents (keyed on the current netbios name) in
the freenas-v1.db file.
"""
ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
if ha_mode == "UNIFIED":
failover_status = await self.middleware.call("failover.status")
if failover_status != "MASTER":
self.logger.debug("Current failover status [%s]. Skipping secrets backup.",
failover_status)
return
netbios_name = (await self.middleware.call('smb.config'))['netbiosname']
db_secrets = await self.get_db_secrets()
id_ = db_secrets.pop('id')
if not (secrets := (await self.middleware.call('directoryservices.secrets.dump'))):
self.logger.warning("Unable to parse secrets")
return
db_secrets.update({f"{netbios_name.upper()}$": secrets})
await self.middleware.call(
'datastore.update',
'services.cifs', id_,
{'secrets': json.dumps(db_secrets)},
{'prefix': 'cifs_srv_'}
)
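# Sketch of the resulting database value (names and values are hypothetical):
#
#   {"TRUENAS$": {"SECRETS/MACHINE_PASSWORD/MYDOM": "<base64>",
#                 "SECRETS/MACHINE_LAST_CHANGE_TIME/MYDOM": "<base64>"}}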
async def restore(self, netbios_name=None):
ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
if ha_mode == "UNIFIED":
failover_status = await self.middleware.call("failover.status")
if failover_status != "MASTER":
self.logger.debug("Current failover status [%s]. Skipping secrets restore.",
failover_status)
return False
if netbios_name is None:
netbios_name = (await self.middleware.call('smb.config'))['netbiosname']
db_secrets = await self.get_db_secrets()
server_secrets = db_secrets.get(f"{netbios_name.upper()}$")
if server_secrets is None:
self.logger.warning("Unable to find stored secrets for [%s]. "
"Directory service functionality may be impacted.",
netbios_name)
return False
self.logger.debug('Restoring secrets.tdb for %s', netbios_name)
for key, value in server_secrets.items():
await self.middleware.run_in_thread(store_secrets_entry, key, value)
return True
| 8,235 | Python | .py | 179 | 36.642458 | 99 | 0.63175 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,557 | activedirectory_join_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/activedirectory_join_mixin.py | import os
import subprocess
import wbclient
from middlewared.job import Job
from middlewared.plugins.smb_.constants import SMBCmd, SMBPath
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.utils.directoryservices.ad import (
get_domain_info,
lookup_dc
)
from middlewared.utils.directoryservices.ad_constants import (
MAX_KERBEROS_START_TRIES
)
from middlewared.utils.directoryservices.constants import DSType
from middlewared.utils.directoryservices.krb5 import (
gss_get_current_cred,
kerberos_ticket,
)
from middlewared.utils.directoryservices.krb5_constants import krb5ccache
from middlewared.utils.directoryservices.krb5_error import (
KRB5Error,
KRB5ErrCode,
)
from time import sleep, time
class ADJoinMixin:
def _ad_activate(self) -> None:
for etc_file in DSType.AD.etc_files:
self.middleware.call_sync('etc.generate', etc_file)
self.middleware.call_sync('service.stop', 'idmap')
self.middleware.call_sync('service.start', 'idmap', {'silent': False})
# Wait for winbind to come online to provide some time for sysvol replication
self._ad_wait_wbclient()
self.middleware.call_sync('kerberos.start')
def _ad_wait_wbclient(self) -> None:
waited = 0
ctx = wbclient.Ctx()
while waited <= 60:
if ctx.domain().domain_info()['online']:
return
self.logger.debug('Waiting for domain to come online')
sleep(1)
waited += 1
raise CallError('Timed out while waiting for domain to come online')
def _ad_wait_kerberos_start(self) -> None:
"""
After initial AD join we reconfigure kerberos to find KDC via DNS.
Unfortunately, depending on the AD environment it may take a significant
amount of time to replicate the new machine account to other domain
controllers. This means we have a retry loop on starting the kerberos
service.
"""
tries = 0
while tries < MAX_KERBEROS_START_TRIES:
try:
self.middleware.call_sync('kerberos.start')
return
except KRB5Error as krberr:
# KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN - account doesn't exist yet
# KRB5KDC_ERR_CLIENT_REVOKED - account locked (unlock maybe not replicated)
# KRB5KDC_ERR_PREAUTH_FAILED - bad password (password update not replicated)
if krberr.krb5_code not in (
KRB5ErrCode.KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN,
KRB5ErrCode.KRB5KDC_ERR_CLIENT_REVOKED,
KRB5ErrCode.KRB5KDC_ERR_PREAUTH_FAILED,
):
raise krberr
sleep(1)
tries += 1
def _ad_domain_info(self, domain: str, retry: bool = True) -> dict:
"""
Use libads from Samba to query information about the specified domain.
If it is left unspecified then the value of `domainname` in the
AD configuration will be used.
Args:
domain (str) : name of domain for which to query basic information.
retry (bool) : if specified then flush out possible caches on failure
and retry
Returns:
See get_domain_info() documentation
Raises:
CallError
"""
try:
domain_info = get_domain_info(domain)
except Exception as e:
if not retry:
raise e from None
# samba's gencache may have a stale server affinity entry
# or stale negative cache results
self.middleware.call_sync('idmap.gencache.flush')
domain_info = get_domain_info(domain)
return domain_info
def _ad_lookup_dc(self, domain: str, retry: bool = True) -> dict:
"""
Look up some basic information about the domain controller that
is currently set in the libads server affinity cache.
Args:
domain (str) : name of domain for which to look up domain controller info
retry (bool) : if specified then flush out possible caches on failure
and retry
Returns:
See lookup_dc() documentation
Raises:
CallError
"""
try:
dc_info = lookup_dc(domain)
except Exception as e:
if not retry:
raise e from None
# samba's gencache may have a stale server affinity entry
# or stale negative cache results
self.middleware.call_sync('idmap.gencache.flush')
dc_info = lookup_dc(domain)
return dc_info
def _ad_leave(self, job: Job, ds_type: DSType, domain: str):
""" Delete our computer object from active directory """
username = str(gss_get_current_cred(krb5ccache.SYSTEM.value).name)
netads = subprocess.run([
SMBCmd.NET.value,
'--use-kerberos', 'required',
'--use-krb5-ccache', krb5ccache.SYSTEM.value,
'-U', username,
'ads', 'leave',
], check=False, capture_output=True)
if netads.returncode != 0:
self.logger.warning(
'Failed to cleanly leave domain. Further action may be required '
'by an Active Directory administrator: %s', netads.stderr.decode()
)
@kerberos_ticket
def _ad_set_spn(self):
cmd = [
SMBCmd.NET.value,
'--use-kerberos', 'required',
'--use-krb5-ccache', krb5ccache.SYSTEM.value,
'ads', 'keytab',
'add_update_ads', 'nfs'
]
netads = subprocess.run(cmd, check=False, capture_output=True)
if netads.returncode != 0:
raise CallError(
'Failed to set spn entry: '
f'{netads.stdout.decode().strip()}'
)
self.middleware.call_sync('kerberos.keytab.store_ad_keytab')
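# The subprocess above is roughly equivalent to running (ccache path elided):
#
#   net --use-kerberos required --use-krb5-ccache <SYSTEM ccache> ads keytab add_update_ads nfs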
@kerberos_ticket
def _ad_test_join(self, ds_type: DSType, domain: str):
"""
Test to see whether we're currently joined to an AD domain.
"""
netads = subprocess.run([
SMBCmd.NET.value,
'--use-kerberos', 'required',
'--use-krb5-ccache', krb5ccache.SYSTEM.value,
'--realm', domain,
'-d', '5',
'ads', 'testjoin'
], check=False, capture_output=True)
if netads.returncode == 0:
return True
err_msg = netads.stderr.decode()
log_path = f'{SMBPath.LOGDIR.platform()}/domain_testjoin_{time()}.log'
with open(log_path, 'w') as f:
os.fchmod(f.fileno(), 0o600)
f.write(err_msg)
f.flush()
# We only want to forcibly rejoin active directory if it's clear
# that our credentials are wrong or the computer account doesn't
# exist.
for err_str in (
'Join to domain is not valid',
'0xfffffff6',
'LDAP_INVALID_CREDENTIALS',
'The name provided is not a properly formed account name',
'The attempted logon is invalid.'
):
if err_str in err_msg:
return False
raise CallError(
'Attempt to check AD join status failed unexpectedly. '
f'Please review logs at {log_path} and file a bug report.'
)
def _ad_grant_privileges(self) -> None:
""" Grant domain admins ability to manage TrueNAS """
dom = wbclient.Ctx().domain()
existing_privileges = self.middleware.call_sync(
'privilege.query',
[["name", "=", dom.dns_name.upper()]]
)
if existing_privileges:
return
try:
self.middleware.call_sync('privilege.create', {
'name': dom.dns_name.upper(),
'ds_groups': [f'{dom.sid}-512'],
'allowlist': [{'method': '*', 'resource': '*'}],
'web_shell': True
})
except Exception:
# This should be non-fatal since admin can simply fix via
# our webui
self.logger.warning(
'Failed to grant domain administrators access to the '
'TrueNAS API.', exc_info=True
)
def _ad_post_join_actions(self, job: Job):
self._ad_set_spn()
# The password in secrets.tdb has been replaced so make
# sure we have it backed up in our config.
self.middleware.call_sync('directoryservices.secrets.backup')
self.middleware.call_sync('activedirectory.register_dns')
# start up AD service
try:
self._ad_activate()
except KRB5Error:
job.set_progress(65, 'Waiting for active directory to replicate machine account changes.')
self._ad_wait_kerberos_start()
def _ad_join_impl(self, job: Job, conf: dict):
"""
Join an active directory domain. Requires admin kerberos ticket.
If post-join operations fail, then we attempt to roll back changes on
the DC.
"""
cmd = [
SMBCmd.NET.value,
'--use-kerberos', 'required',
'--use-krb5-ccache', krb5ccache.SYSTEM.value,
'-U', conf['bindname'],
'-d', '5',
'ads', 'join',
]
if conf['createcomputer']:
cmd.append(f'createcomputer={conf["createcomputer"]}')
cmd.extend([
'--no-dns-updates', conf['domainname']
])
netads = subprocess.run(cmd, check=False, capture_output=True)
if netads.returncode != 0:
err_msg = netads.stderr.decode().split(':', 1)[1]
raise CallError(err_msg)
# we've now successfully joined AD and can proceed with post-join
# operations
try:
job.set_progress(60, 'Performing post-join actions')
return self._ad_post_join_actions(job)
except KRB5Error:
# if there's an actual unrecoverable kerberos error
# in our post-join actions then leaving AD will also fail
raise
except Exception as e:
# We failed to set up DNS / keytab cleanly
# roll back and present user with error
self._ad_leave(job, DSType.AD, conf['domainname'])
self.middleware.call_sync('idmap.gencache.flush')
raise e from None
@kerberos_ticket
def _ad_join(self, job: Job, ds_type: DSType, domain: str):
ad_config = self.middleware.call_sync('activedirectory.config')
smb = self.middleware.call_sync('smb.config')
workgroup = smb['workgroup']
if (failover_status := self.middleware.call_sync('failover.status')) not in ('MASTER', 'SINGLE'):
raise CallError(
f'{failover_status}: TrueNAS may only be joined to active directory '
'through the active storage controller and if high availability is healthy.'
)
dc_info = self._ad_lookup_dc(ad_config['domainname'])
job.set_progress(0, 'Preparing to join Active Directory')
self.middleware.call_sync('etc.generate', 'smb')
self.middleware.call_sync('etc.generate', 'hostname')
"""
Kerberos realm field must be populated so that we can perform a kinit
and use the kerberos ticket to execute 'net ads' commands.
"""
job.set_progress(5, 'Configuring Kerberos Settings.')
if not ad_config['kerberos_realm']:
try:
realm_id = self.middleware.call_sync(
'kerberos.realm.query',
[('realm', '=', ad_config['domainname'])],
{'get': True}
)['id']
except MatchNotFound:
realm_id = self.middleware.call_sync(
'datastore.insert', 'directoryservice.kerberosrealm',
{'krb_realm': ad_config['domainname'].upper()}
)
self.middleware.call_sync(
'datastore.update', 'directoryservice.activedirectory', ad_config['id'],
{"kerberos_realm": realm_id}, {'prefix': 'ad_'}
)
ad_config['kerberos_realm'] = realm_id
job.set_progress(20, 'Detecting Active Directory Site.')
site = ad_config['site'] or dc_info['client_site_name']
job.set_progress(30, 'Detecting Active Directory NetBIOS Domain Name.')
if workgroup != dc_info['pre-win2k_domain']:
self.middleware.call_sync('datastore.update', 'services.cifs', smb['id'], {
'cifs_srv_workgroup': dc_info['pre-win2k_domain']
})
workgroup = dc_info['pre-win2k_domain']
# Ensure smb4.conf has the correct workgroup.
self.middleware.call_sync('etc.generate', 'smb')
job.set_progress(50, 'Performing domain join.')
self._ad_join_impl(job, ad_config)
machine_acct = f'{ad_config["netbiosname"].upper()}$@{ad_config["domainname"]}'
self.middleware.call_sync('datastore.update', 'directoryservice.activedirectory', ad_config['id'], {
'kerberos_principal': machine_acct,
'site': site,
'kerberos_realm': ad_config['kerberos_realm']
}, {'prefix': 'ad_'})
job.set_progress(75, 'Performing kinit using new computer account.')
# Remove our temporary administrative ticket and replace with machine account.
# Sysvol replication may not have completed (new account only exists on the DC we're
# talking to) and so during this operation we need to hard-code which KDC we use for
# the new kinit.
domain_info = self._ad_domain_info(ad_config['domainname'])
cred = self.middleware.call_sync('kerberos.get_cred', {
'dstype': DSType.AD.value,
'conf': {
'domainname': ad_config['domainname'],
'kerberos_principal': machine_acct,
}
})
# remove admin ticket
self.middleware.call_sync('kerberos.kdestroy')
# remove stub krb5.conf to allow overriding with a fixed KDC
os.remove('/etc/krb5.conf')
self.middleware.call_sync('kerberos.do_kinit', {
'krb5_cred': cred,
'kinit-options': {
'kdc_override': {'domain': ad_config['domainname'], 'kdc': domain_info['kdc_server']}
}
})
self.middleware.call_sync('kerberos.wait_for_renewal')
self.middleware.call_sync('etc.generate', 'kerberos')
self.middleware.call_sync('service.update', 'cifs', {'enable': True})
| 14,792 | Python | .py | 336 | 33.050595 | 108 | 0.591929 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,558 | ldap_join_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/ldap_join_mixin.py | from middlewared.utils.directoryservices.constants import DSType
class LDAPJoinMixin:
def _ldap_activate(self) -> None:
for etc_file in DSType.LDAP.etc_files:
self.middleware.call_sync('etc.generate', etc_file)
ldap_config = self.middleware.call_sync('ldap.config')
self.middleware.call_sync('service.stop', 'sssd')
self.middleware.call_sync('service.start', 'sssd', {'silent': False})
if ldap_config['kerberos_realm']:
self.middleware.call_sync('kerberos.start')
| 537 | Python | .py | 10 | 45.4 | 77 | 0.683908 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,559 | cache.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/cache.py | from middlewared.schema import Str, Ref, Int, Dict, Bool, accepts
from middlewared.service import Service, job
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.utils.directoryservices.constants import (
DSStatus, DSType
)
from middlewared.utils.nss.pwd import iterpw
from middlewared.utils.nss.grp import itergrp
from middlewared.utils.nss.nss_common import NssModule
from middlewared.plugins.idmap_.idmap_constants import IDType
from middlewared.plugins.idmap_.idmap_winbind import WBClient
from .util_cache import (
DSCacheFill,
insert_cache_entry,
query_cache_entries,
retrieve_cache_entry
)
from time import sleep
class DSCache(Service):
class Config:
namespace = 'directoryservices.cache'
private = True
@accepts(
Str('idtype', enum=['USER', 'GROUP'], required=True),
Dict('cache_entry', additional_attrs=True),
)
def _insert(self, idtype, entry):
"""
Internal method to insert an entry into the cache. Its only consumers should be
within this plugin.
Raises:
RuntimeError (tdb library error / corruption)
"""
match (id_type := IDType[idtype]):
case IDType.GROUP:
insert_cache_entry(id_type, entry['gid'], entry['name'], entry)
case IDType.USER:
insert_cache_entry(id_type, entry['uid'], entry['username'], entry)
case _:
raise ValueError(f'{id_type}: unexpected ID type')
@accepts(
Dict(
'principal_info',
Str('idtype', enum=['USER', 'GROUP']),
Str('who'),
Int('id'),
),
Dict(
'options',
Bool('smb', default=False)
)
)
def _retrieve(self, data, options):
"""
Internal method to retrieve an entry from cache. If the entry does not exist then
a lookup via NSS will be attempted and if successful a cache entry will be generated.
Its only consumers should be within this plugin. Either `who` or `id` should be specified.
Returns:
user.query entry (successful user lookup)
group.query entry (successful group lookup)
None (lookup failure)
Raises:
RuntimeError (tdb library error)
CallError (Idmap lookup failure -- unexpected)
"""
try:
entry = retrieve_cache_entry(IDType[data['idtype']], data.get('who'), data.get('id'))
except MatchNotFound:
entry = None
if not entry:
"""
If the cache lacks the entry, create one from passwd / grp info and insert it into the cache.
user.get_user_obj and group.get_group_obj will raise KeyError if NSS lookup fails.
"""
try:
if data['idtype'] == 'USER':
if data.get('who') is not None:
who = {'username': data['who']}
else:
who = {'uid': data.get('id')}
pwdobj = self.middleware.call_sync('user.get_user_obj', {
'get_groups': False, 'sid_info': True
} | who)
if pwdobj['sid'] is None:
# This indicates that idmapping is significantly broken
return None
entry = self.middleware.call_sync('idmap.synthetic_user',
pwdobj, pwdobj['sid'])
if entry is None:
return None
else:
if data.get('who') is not None:
who = {'groupname': data.get('who')}
else:
who = {'gid': data.get('id')}
grpobj = self.middleware.call_sync('group.get_group_obj', {'sid_info': True} | who)
if grpobj['sid'] is None:
# This indicates that idmapping is significantly broken
return None
entry = self.middleware.call_sync('idmap.synthetic_group',
grpobj, grpobj['sid'])
if entry is None:
return None
self._insert(data['idtype'], entry)
except KeyError:
entry = None
if entry and not options['smb']:
# caller has not requested SMB information and so we should strip it
entry['sid'] = None
if entry is not None:
entry['roles'] = []
return entry
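# Illustrative direct call (account name is hypothetical): resolve a user by
# name, generating a cache entry via NSS on a miss and keeping SID info:
#
#   entry = self._retrieve({'idtype': 'USER', 'who': 'DOM\\alice'}, {'smb': True})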
@accepts(
Str('id_type', enum=['USER', 'GROUP'], default='USER'),
Ref('query-filters'),
Ref('query-options'),
)
def query(self, id_type, filters, options):
"""
Query User / Group cache with `query-filters` and `query-options`.
NOTE: only consumers for this endpoint should be user.query and group.query.
query-options (apart from determining whether to include "SMB" information)
are not evaluated here because user.query and group.query applies pagination
on full results.
"""
ds = self.middleware.call_sync('directoryservices.status')
if ds['type'] is None:
return []
is_name_check = bool(filters and len(filters) == 1 and filters[0][0] in ['username', 'name', 'group'])
is_id_check = bool(filters and len(filters) == 1 and filters[0][0] in ['uid', 'gid'])
if (is_name_check or is_id_check) and filters[0][1] == '=':
# Special case where a single user / group is explicitly being queried.
# If it's not present in cache we will directly issue NSS request and
# generate cache entry based on its results. This allows slowly building
# a cache when user / group enumeration is disabled.
key = 'who' if is_name_check else 'id'
entry = self._retrieve({
'idtype': id_type,
key: filters[0][2],
}, {'smb': True})
return [entry] if entry else []
# options must be omitted to defer pagination logic to caller
entries = query_cache_entries(IDType[id_type], filters, {})
return sorted(entries, key=lambda i: i['id'])
def idmap_online_check_wait_wbclient(self, job):
"""
Check internal winbind status report for the domain. We want to wait
for the domain to come fully online before proceeding with cache fill
to avoid spurious errors.
"""
waited = 0
client = WBClient()
while waited <= 60:
if client.domain_info()['online']:
return
# only log every 10th iteration
if waited % 10 == 0:
job.set_progress(10, 'Waiting for domain to come online')
self.logger.debug('Waiting for domain to come online')
sleep(1)
waited += 1
raise CallError('Timed out while waiting for domain to come online')
def idmap_online_check_wait_sssd(self, job):
"""
SSSD reports a domain as online before it will _actually_ return results
for NSS queries. This is because getpwent and getgrent iterate the SSSD
cache rather than reaching out to remote server. Since we know that
enumeration is enabled if this is called then we can use getpwent and getgrent
calls to determine whether the domain is in a state where we can actually
fill our caches. This does present some minor risk that our initial cache
fill on SSSD join will be incomplete, but there is no easy way to check
internal status of SSSD's cache fill and so getting some users and groups
initially and then retrieving remainder on next scheduled refresh is
a suitable compromise.
"""
waited = 0
has_users = has_groups = False
while waited <= 60:
if not has_users:
for pwd in iterpw(module=NssModule.SSS.name):
has_users = True
break
if not has_groups:
for grp in itergrp(module=NssModule.SSS.name):
has_groups = True
break
if has_users and has_groups:
# allow SSSD a little more time to build cache
sleep(5)
return
# only log every 10th iteration
if waited % 10 == 0:
job.set_progress(10, 'Waiting for domain to come online')
self.logger.debug('Waiting for domain to come online')
sleep(1)
waited += 1
raise CallError('Timed out while waiting for domain to come online')
@job(lock="directoryservices_cache_fill", lock_queue_size=1)
def refresh_impl(self, job):
"""
Rebuild the directory services cache. This is performed in the following
situations:
1. User starts a directory service
2. User triggers manually through API or webui
3. Once every 24 hours via cronjob
"""
ds = self.middleware.call_sync('directoryservices.status')
if ds['type'] is None:
return
if ds['status'] not in (DSStatus.HEALTHY.name, DSStatus.JOINING.name):
self.logger.warning(
'Unable to refresh [%s] cache, state is: %s',
ds['type'], ds['status']
)
return
dom_by_sid = None
ds_type = DSType(ds['type'])
match ds_type:
case DSType.AD:
self.idmap_online_check_wait_wbclient(job)
domain_info = self.middleware.call_sync(
'idmap.query',
[["domain_info", "!=", None]],
{'extra': {'additional_information': ['DOMAIN_INFO']}}
)
dom_by_sid = {dom['domain_info']['sid']: dom for dom in domain_info}
case DSType.IPA | DSType.LDAP:
self.idmap_online_check_wait_sssd(job)
case _:
raise ValueError(f'{ds_type}: unexpected DSType')
with DSCacheFill() as dc:
job.set_progress(15, 'Filling cache')
dc.fill_cache(job, ds_type, dom_by_sid)
async def abort_refresh(self):
cache_job = await self.middleware.call('core.get_jobs', [
['method', '=', 'directoryservices.cache.refresh_impl'],
['state', '=', 'RUNNING']
])
if cache_job:
await self.middleware.call('core.job_abort', cache_job[0]['id'])
| 10,743 | Python | .py | 240 | 32.3875 | 110 | 0.565338 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,560 | kerberos_health_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/kerberos_health_mixin.py | import os
from middlewared.utils.directoryservices import (
krb5, krb5_constants
)
from middlewared.utils.directoryservices.health import (
KRB5HealthCheckFailReason, KRB5HealthError
)
class KerberosHealthMixin:
"""
Mixin class providing common kerberos status-related code for directory
services.
"""
def _recover_krb5(self, error: KRB5HealthError) -> None:
# For now we can simply try to start kerberos
# to recover from the health issue.
#
# This fixes permissions on files (which generates additional
# error messages regarding type of changes made), gets a
# fresh kerberos ticket, and sets up a transient job to
# renew our tickets.
self.logger.warning(
'Attempting to recover kerberos service after health '
'check failure for the following reason: %s',
error.errmsg
)
self.middleware.call_sync('kerberos.start')
def _health_check_krb5(self) -> None:
"""
Individual directory services may call this within their
`_health_check_impl()` method if the directory service uses
kerberos.
"""
try:
st = os.stat('/etc/krb5.conf')
except FileNotFoundError:
faulted_reason = (
'Kerberos configuration file is missing. This may indicate '
'the file was accidentally deleted by a user with '
'admin shell access to the TrueNAS server.'
)
raise KRB5HealthError(
KRB5HealthCheckFailReason.KRB5_NO_CONFIG,
faulted_reason
)
if (err_str := self._perm_check(st, 0o644)) is not None:
faulted_reason = (
'Unexpected permissions or ownership on the kerberos '
f'configuration file: {err_str}'
)
raise KRB5HealthError(
KRB5HealthCheckFailReason.KRB5_CONFIG_PERM,
faulted_reason
)
try:
st = os.stat(krb5_constants.KRB_Keytab.SYSTEM.value)
except FileNotFoundError:
faulted_reason = (
'System keytab is missing. This may indicate that an administrative '
'action was taken to remove the required machine account '
'keytab from the TrueNAS server. Rejoining domain may be '
'required in order to resolve this issue.'
)
raise KRB5HealthError(
KRB5HealthCheckFailReason.KRB5_NO_KEYTAB,
faulted_reason
)
if (err_str := self._perm_check(st, 0o600)) is not None:
faulted_reason = (
'Unexpected permissions or ownership on the kerberos keytab '
f'file: {err_str} '
'This error may have exposed the TrueNAS server\'s host principal '
'credentials to unauthorized users. Revoking keytab and rejoining '
'domain may be required.'
)
raise KRB5HealthError(
KRB5HealthCheckFailReason.KRB5_KEYTAB_PERM,
faulted_reason
)
if not krb5.gss_get_current_cred(krb5_constants.krb5ccache.SYSTEM.value, raise_error=False):
faulted_reason = (
'Kerberos ticket for domain is expired. Failure to renew '
'kerberos ticket may indicate issues with DNS resolution or '
'IPA domain or realm changes that need to be accounted for '
'in the TrueNAS configuration.'
)
raise KRB5HealthError(
KRB5HealthCheckFailReason.KRB5_TKT_EXPIRED,
faulted_reason
)
| 3,770 | Python | .py | 89 | 30.505618 | 100 | 0.59793 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,561 | util_cache.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/util_cache.py | import enum
import os
from collections import defaultdict
from collections.abc import Iterable
from middlewared.utils.directoryservices.constants import (
DSType
)
from middlewared.job import Job
from middlewared.utils import filter_list
from middlewared.utils.itertools import batched
from middlewared.utils.nss import pwd, grp
from middlewared.utils.nss.nss_common import NssModule
from middlewared.plugins.idmap_ import idmap_winbind, idmap_sss
from middlewared.plugins.idmap_.idmap_constants import (
BASE_SYNTHETIC_DATASTORE_ID,
IDType,
MAX_REQUEST_LENGTH,
SID_BUILTIN_PREFIX,
SID_LOCAL_USER_PREFIX,
SID_LOCAL_GROUP_PREFIX,
)
from middlewared.utils.tdb import (
get_tdb_handle,
TDBBatchAction,
TDBBatchOperation,
TDBPathType,
TDBDataType,
TDBHandle,
TDBOptions
)
from threading import Lock
from uuid import uuid4
# Update progress of the job every nth user / group. We expect possibly hundreds
# to a few thousand users and groups, but there are edge cases where they number
# in the tens of thousands. Percentage complete is not updated when generating
# progress messages because retrieving an approximate count of users and groups
# first is as expensive as generating the cache itself.
LOG_CACHE_ENTRY_INTERVAL = 10 # Update progress of job every nth user / group
TDB_LOCKS = defaultdict(Lock)
CACHE_OPTIONS = TDBOptions(TDBPathType.PERSISTENT, TDBDataType.JSON)
class DSCacheFile(enum.Enum):
USER = 'directoryservice_cache_user'
GROUP = 'directoryservice_cache_group'
@property
def path(self):
return os.path.join(TDBPathType.PERSISTENT.value, f'{self.value}.tdb')
class DSCacheFill:
"""
This class creates two temporary TDB files that contain cache entries for
users and groups with the same keys as results for user.query and group.query.
Once the cache is filled via the method `fill_cache()`, the temporary TDB
files are renamed over the current ones in use by middleware. On context manager
exit the handles on the TDB files are closed.
NOTE: the cache fill here is performed without taking the USER_TDB_LOCK or
GROUP_TDB_LOCK because the middleware caches are only renamed over when the fill
is complete. This is to ensure relative continuity in cache results.
"""
users_handle = None
groups_handle = None
def __enter__(self):
file_prefix = f'directory_service_cache_tmp_{uuid4()}'
self.users_handle = TDBHandle(f'{file_prefix}_user', CACHE_OPTIONS)
self.groups_handle = TDBHandle(f'{file_prefix}_group', CACHE_OPTIONS)
# Ensure we have clean initial state and restrictive permissions
self.users_handle.clear()
os.chmod(self.users_handle.full_path, 0o600)
self.groups_handle.clear()
os.chmod(self.groups_handle.full_path, 0o600)
return self
def __exit__(self, tp, value, tb):
stored_exception = None
try:
if self.users_handle:
self.users_handle.close()
except Exception as exc:
stored_exception = exc
try:
if self.groups_handle:
self.groups_handle.close()
except Exception as exc:
stored_exception = exc
if stored_exception:
raise stored_exception
def _commit(self):
"""
Rename our temporary caches over ones in-use by middleware.
This will be detected on next call to read / insert into cache.
Stale handle will be closed and new one opened.
"""
os.rename(self.users_handle.full_path, DSCacheFile.USER.path)
os.rename(self.groups_handle.full_path, DSCacheFile.GROUP.path)
def _add_sid_info_to_entries(
self,
idmap_ctx: idmap_winbind.WBClient | idmap_sss.SSSClient,
nss_entries: list,
dom_by_sid: dict
) -> list[dict]:
"""
Add SID information to entries that NSS has returned. Dictionary
entries in list `nss_entries` are modified in-place.
`idmap_ctx` - is the winbind or sssd client handle to use to resolve
posix accounts to SIDs
`nss_entries` - list of posix accounts to look up
`dom_by_sid` - mapping for trusted domains to provide idmap backend
information for trusted domains. This is used to ensure that synthetic
database IDs are unique and guaranteed to not change.
Returns:
Same list passed in as nss_entries
"""
idmaps = idmap_ctx.users_and_groups_to_idmap_entries(nss_entries)
to_remove = []
for idx, entry in enumerate(nss_entries):
unixkey = f'{IDType[entry["id_type"]].wbc_str()}:{entry["id"]}'
if unixkey not in idmaps['mapped']:
# not all users / groups in SSSD have SIDs
# and so we'll leave them with a null SID
# rather than removing from nss_entries
continue
idmap_entry = idmaps['mapped'][unixkey]
if idmap_entry['sid'].startswith((SID_LOCAL_GROUP_PREFIX, SID_LOCAL_USER_PREFIX)):
# There is a collision between local user / group and our AD one.
# pop from cache
to_remove.append(idx)
continue
if idmap_entry['sid'].startswith(SID_BUILTIN_PREFIX):
# We don't want users to select auto-generated builtin groups
to_remove.append(idx)
continue
entry['sid'] = idmap_entry['sid']
entry['id_type'] = idmap_entry['id_type']
if dom_by_sid:
entry['domain_info'] = dom_by_sid[idmap_entry['sid'].rsplit('-', 1)[0]]
else:
entry['domain_info'] = None
to_remove.reverse()
for idx in to_remove:
nss_entries.pop(idx)
return nss_entries
def _get_entries_for_cache(
self,
idmap_ctx: idmap_winbind.WBClient | idmap_sss.SSSClient | None,
nss_module: NssModule,
entry_type: IDType,
dom_by_sid: dict
) -> Iterable[dict]:
"""
This method yields the users or groups in batches of 100 entries.
If the directory service supports SIDs then these will also be added
to the results.
"""
match entry_type:
case IDType.USER:
nss_fn = pwd.iterpw
case IDType.GROUP:
nss_fn = grp.itergrp
case _:
raise ValueError(f'{entry_type}: unexpected `entry_type`')
nss = nss_fn(module=nss_module.name)
for entries in batched(nss, MAX_REQUEST_LENGTH):
out = []
for entry in entries:
out.append({
'id': entry.pw_uid if entry_type is IDType.USER else entry.gr_gid,
'sid': None,
'nss': entry,
'id_type': entry_type.name,
'domain_info': None
})
# Depending on the directory service we may need to add SID
# information to the NSS entries.
if idmap_ctx is None:
yield out
else:
yield self._add_sid_info_to_entries(idmap_ctx, out, dom_by_sid)
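# Batching sketch: batched() groups the NSS iterator into tuples of at most
# MAX_REQUEST_LENGTH items (assumed to be 100 here), e.g.
#
#   list(batched('ABCDE', 2))  # -> [('A', 'B'), ('C', 'D'), ('E',)]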
def fill_cache(
self,
job: Job,
ds_type: DSType,
dom_by_sid: dict
) -> None:
match ds_type:
case DSType.AD:
nss_module = NssModule.WINBIND
idmap_ctx = idmap_winbind.WBClient()
case DSType.LDAP:
nss_module = NssModule.SSS
idmap_ctx = None
case DSType.IPA:
nss_module = NssModule.SSS
idmap_ctx = idmap_sss.SSSClient()
case _:
raise ValueError(f'{ds_type}: unknown DSType')
user_count = 0
group_count = 0
job.set_progress(40, 'Preparing to add users to cache')
# First grab batches of 100 entries
for users in self._get_entries_for_cache(
idmap_ctx,
nss_module,
IDType.USER,
dom_by_sid
):
# Now iterate members of 100 for insertion
for u in users:
if u['domain_info']:
id_type_both = u['domain_info']['idmap_backend'] in ('AUTORID', 'RID')
else:
id_type_both = False
user_data = u['nss']
entry = {
'id': BASE_SYNTHETIC_DATASTORE_ID + user_data.pw_uid,
'uid': user_data.pw_uid,
'username': user_data.pw_name,
'unixhash': None,
'smbhash': None,
'group': {},
'home': user_data.pw_dir,
'shell': user_data.pw_shell,
'full_name': user_data.pw_gecos,
'builtin': False,
'email': None,
'password_disabled': False,
'locked': False,
'sudo_commands': [],
'sudo_commands_nopasswd': [],
'groups': [],
'sshpubkey': None,
'immutable': True,
'twofactor_auth_configured': False,
'local': False,
'id_type_both': id_type_both,
'smb': u['sid'] is not None,
'sid': u['sid'],
'roles': [],
'api_keys': [],
}
if user_count % LOG_CACHE_ENTRY_INTERVAL == 0:
job.set_progress(50, f'{user_data.pw_name}: adding user to cache. User count: {user_count}')
# Store forward and reverse entries
_tdb_add_entry(self.users_handle, user_data.pw_uid, user_data.pw_name, entry)
user_count += 1
job.set_progress(70, 'Preparing to add groups to cache')
# First grab batches of 100 entries
for groups in self._get_entries_for_cache(
idmap_ctx,
nss_module,
IDType.GROUP,
dom_by_sid
):
for g in groups:
if g['domain_info']:
id_type_both = g['domain_info']['idmap_backend'] in ('AUTORID', 'RID')
else:
id_type_both = False
group_data = g['nss']
entry = {
'id': BASE_SYNTHETIC_DATASTORE_ID + group_data.gr_gid,
'gid': group_data.gr_gid,
'name': group_data.gr_name,
'group': group_data.gr_name,
'builtin': False,
'sudo_commands': [],
'sudo_commands_nopasswd': [],
'users': [],
'local': False,
'id_type_both': id_type_both,
'smb': g['sid'] is not None,
'sid': g['sid'],
'roles': []
}
if group_count % LOG_CACHE_ENTRY_INTERVAL == 0:
job.set_progress(80, f'{group_data.gr_name}: adding group to cache. Group count: {group_count}')
_tdb_add_entry(self.groups_handle, group_data.gr_gid, group_data.gr_name, entry)
group_count += 1
job.set_progress(100, f'Cached {user_count} users and {group_count} groups.')
self._commit()
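# Minimal usage sketch of DSCacheFill (job / dom_by_sid are assumed to come from
# directoryservices.cache.refresh_impl):
#
#   with DSCacheFill() as dc:
#       dc.fill_cache(job, DSType.AD, dom_by_sid)  # renames temp TDBs over live caches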
def _tdb_add_entry(
handle: TDBHandle,
xid: int,
name: str,
entry: dict
) -> None:
"""
Unlocked variant of adding cache entries. Should only be performed during initial cache fill.
Performed without transaction as well because file will be removed in case of failure.
Raises:
RuntimeError via `tdb` library
"""
handle.store(f'ID_{xid}', entry)
handle.store(f'NAME_{name}', entry)
def insert_cache_entry(
id_type: IDType,
xid: int,
name: str,
entry: dict
) -> None:
"""
This method is used to lazily insert cache entries that we don't already have.
We perform this under a transaction lock since we don't want mismatched id and name entries.
Raises:
RuntimeError via `tdb` library
"""
with get_tdb_handle(DSCacheFile[id_type.name].value, CACHE_OPTIONS) as handle:
handle.batch_op([
TDBBatchOperation(action=TDBBatchAction.SET, key=f'ID_{xid}', value=entry),
TDBBatchOperation(action=TDBBatchAction.SET, key=f'NAME_{name}', value=entry),
])
def retrieve_cache_entry(
id_type: IDType,
name: str,
xid: int
) -> dict:
"""
Retrieve cache entry under lock using stored handle. If both name and xid
are specified, preference is given to xid.
Raises:
MatchNotFound
"""
if xid is not None:
key = f'ID_{xid}'
else:
key = f'NAME_{name}'
with get_tdb_handle(DSCacheFile[id_type.name].value, CACHE_OPTIONS) as handle:
return handle.get(key)
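# Key layout sketch (name and id are hypothetical): every account is stored
# under two keys so that both id- and name-based lookups are direct fetches:
#
#   ID_100001 -> {...}    NAME_alice -> {...}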
def query_cache_entries(
id_type: IDType,
filters: list,
options: dict
) -> list:
with get_tdb_handle(DSCacheFile[id_type.name].value, CACHE_OPTIONS) as handle:
return filter_list(handle.entries(include_keys=False, key_prefix='ID_'), filters, options)
| 13,397 | Python | .py | 331 | 29.691843 | 116 | 0.580464 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,562 | health.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/health.py | import os
import stat
from .activedirectory_health_mixin import ADHealthMixin
from .ipa_health_mixin import IPAHealthMixin
from .kerberos_health_mixin import KerberosHealthMixin
from .ldap_health_mixin import LDAPHealthMixin
from middlewared.plugins.ldap_.constants import SERVER_TYPE_FREEIPA
from middlewared.service import Service
from middlewared.service_exception import CallError
from middlewared.utils.directoryservices.constants import DSStatus, DSType
from middlewared.utils.directoryservices.health import (
ADHealthError, DSHealthObj, IPAHealthError, KRB5HealthError,
LDAPHealthError
)
class DomainHealth(
Service,
ADHealthMixin,
IPAHealthMixin,
KerberosHealthMixin,
LDAPHealthMixin,
):
class Config:
namespace = 'directoryservices.health'
cli_private = True
private = True
def _get_enabled_ds(self):
ad = self.middleware.call_sync('datastore.config', 'directoryservice.activedirectory')
if ad['ad_enable']:
return DSType.AD
ldap = self.middleware.call_sync('datastore.config', 'directoryservice.ldap')
if ldap['ldap_enable'] is False:
return None
# For now we are handling the IPA join as a layer on top of LDAP
# plugin.
if ldap['ldap_server_type'] == SERVER_TYPE_FREEIPA:
# there is no way to become healthy for IPA join without a host
# keytab and so we'll try to fall through to a regular LDAP bind
if self.middleware.call_sync('ldap.has_ipa_host_keytab'):
return DSType.IPA
return DSType.LDAP
def _perm_check(
self,
st: os.stat_result,
expected_mode: int
) -> str | None:
"""
Perform basic checks that stat security info matches expectations.
This method is called during health checks.
Returns a string that will be appended to error messages, or None
if no errors are found.
"""
if st.st_uid != 0:
return f'file owned by uid {st.st_uid} rather than root.'
if st.st_gid != 0:
return f'file owned by gid {st.st_gid} rather than root.'
if stat.S_IMODE(st.st_mode) != expected_mode:
return (
f'file permissions {oct(stat.S_IMODE(st.st_mode))} '
f'instead of expected value of {oct(expected_mode)}.'
)
return None
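# Illustrative failure string: a file with mode 0o644 when 0o600 is expected
# yields 'file permissions 0o644 instead of expected value of 0o600.'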
def check(self) -> bool:
"""
Basic health check for directory services
Returns:
True if directory services enabled and healthy
False if directory services disabled
Raises:
KRB5HealthError
ADHealthError
IPAHealthError
LDAPHealthError
"""
if (enabled_ds := self._get_enabled_ds()) is None:
# Nothing is enabled and so reset values
DSHealthObj.update(None, None, None)
return False
initial_status = DSHealthObj.status
if initial_status in (DSStatus.LEAVING, DSStatus.JOINING):
self.logger.debug("Deferring health check due to status of %s", initial_status.name)
return True
elif initial_status is None:
# Our directory service hasn't been initialized.
#
# We'll be optimistic and call it HEALTHY before we run the
# actual health checks below. The reason for this is so that
# if we attempt to etc.generate files during health check a
# second call to directoryservices.status won't land us here again.
DSHealthObj.update(enabled_ds, DSStatus.HEALTHY, None)
try:
match enabled_ds:
case DSType.AD:
self._health_check_krb5()
self._health_check_ad()
case DSType.IPA:
self._health_check_krb5()
self._health_check_ipa()
case DSType.LDAP:
self._health_check_ldap()
case _:
raise ValueError(f'{enabled_ds}: Unexpected directory service.')
except (ADHealthError, IPAHealthError, KRB5HealthError, LDAPHealthError) as e:
# Update our stored status to reflect reason for it being faulted
# then re-raise
DSHealthObj.update(enabled_ds, DSStatus.FAULTED, e.errmsg)
raise
except Exception:
# Not a health-related exception, so simply log it to prevent accidentally
# disrupting services.
self.logger.error('Unexpected error while checking directory service health', exc_info=True)
DSHealthObj.update(enabled_ds, DSStatus.HEALTHY, None)
return True
def recover(self):
"""
Attempt to recover directory services from a failed health check
If the recovery attempt fails, a new exception is raised indicating the
current source of failure.
Returns:
None
Raises:
KRB5HealthError
ADHealthError
IPAHealthError
LDAPHealthError
"""
try:
self.check()
return
except ADHealthError as e:
self._recover_ad(e)
except IPAHealthError as e:
self._recover_ipa(e)
except KRB5HealthError as e:
self._recover_krb5(e)
except LDAPHealthError as e:
self._recover_ldap(e)
# hopefully this fixed the issue
self.check()
def set_state(self, ds_type, ds_status, status_msg=None):
ds = DSType(ds_type)
status = DSStatus[ds_status]
match status:
case DSStatus.HEALTHY | DSStatus.JOINING | DSStatus.LEAVING:
if status_msg is not None:
raise CallError('status_msg may only be set when changing state to FAULTED')
case DSStatus.FAULTED:
if status_msg is None:
raise CallError('status_msg is required when setting state to FAULTED')
case DSStatus.DISABLED:
DSHealthObj.update(None, None, None)
return
DSHealthObj.update(ds, status, status_msg)
| 6,272 | Python | .py | 152 | 30.677632 | 104 | 0.621409 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,563 | join.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/join.py | import ipaddress
from .activedirectory_join_mixin import ADJoinMixin
from .ipa_join_mixin import IPAJoinMixin
from .ldap_join_mixin import LDAPJoinMixin
from middlewared.job import Job
from middlewared.plugins.ldap_.constants import SERVER_TYPE_FREEIPA
from middlewared.service import job, Service
from middlewared.service_exception import CallError
from middlewared.utils.directoryservices.constants import DomainJoinResponse, DSType
from middlewared.utils.directoryservices.krb5 import kerberos_ticket
from os import curdir as dot
class DomainConnection(
Service,
ADJoinMixin,
IPAJoinMixin,
LDAPJoinMixin,
):
class Config:
namespace = 'directoryservices.connection'
cli_private = True
private = True
def _get_enabled_ds(self):
ad = self.middleware.call_sync('datastore.config', 'directoryservice.activedirectory')
if ad['ad_enable']:
return DSType.AD
ldap = self.middleware.call_sync('datastore.config', 'directoryservice.ldap')
if ldap['ldap_enable'] is False:
return None
return DSType.IPA if ldap['ldap_server_type'] == SERVER_TYPE_FREEIPA else DSType.LDAP
def activate(self) -> int:
""" Generate etc files and start services, then start cache fill job and return job id """
match (enabled_ds := self._get_enabled_ds()):
case None:
return
case DSType.IPA:
self._ipa_activate()
case DSType.AD:
self._ad_activate()
case DSType.LDAP:
self._ldap_activate()
case _:
raise ValueError(f'{enabled_ds}: unknown directory service')
return self.middleware.call_sync('directoryservices.cache.refresh_impl').id
def _create_nsupdate_payload(self, fqdn: str, cmd_type: str, do_ptr: bool = False):
if fqdn.startswith('localhost'):
raise CallError(f'{fqdn}: Invalid domain name.')
if not fqdn.endswith(dot):
fqdn += dot
payload = []
if self.middleware.call_sync('failover.licensed'):
master, backup, init = self.middleware.call_sync('failover.vip.get_states')
for master_iface in self.middleware.call_sync('interface.query', [["id", "in", master + backup]]):
for i in master_iface['failover_virtual_aliases']:
addr = ipaddress.ip_address(i['address'])
payload.append({
'command': cmd_type,
'name': fqdn,
'address': str(addr),
'do_ptr': do_ptr,
'type': 'A' if addr.version == 4 else 'AAAA'
})
else:
for i in self.middleware.call_sync('interface.ip_in_use'):
addr = ipaddress.ip_address(i['address'])
payload.append({
'command': cmd_type,
'name': fqdn,
'address': str(addr),
'do_ptr': do_ptr,
'type': 'A' if addr.version == 4 else 'AAAA'
})
return payload
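# Sketch of a generated op (name and address are hypothetical):
#
#   {'command': 'ADD', 'name': 'truenas.example.internal.', 'address': '192.0.2.5',
#    'do_ptr': False, 'type': 'A'}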
@kerberos_ticket
def register_dns(self, fqdn: str, do_ptr: bool = False):
"""
This method performs DNS update via GSS-TSIG using middlewared's current kerberos credential
and should only be called within the context initially joining the domain. In the future
this can be enhanced to be a periodic job that can also perform dynamic DNS updates.
Args:
`fqdn` - should be the fully qualified domain name of the TrueNAS server.
`do_ptr` - set associated PTR record when registering fqdn. Not all domains will
have a reverse zone configured and so detection should be done prior to calling
this method.
Returns:
None
Raises:
TypeError
ValueError
CallError
"""
if not isinstance(fqdn, str):
raise TypeError(f'{type(fqdn)}: must be a string')
elif dot not in fqdn:
raise ValueError(f'{fqdn}: missing domain component of name')
ds_type_str = self.middleware.call_sync('directoryservices.status')['type']
match ds_type_str:
case DSType.AD.value | DSType.IPA.value:
pass
case None:
raise CallError('Directory services must be enabled in order to register DNS')
case _:
raise CallError(f'{ds_type_str}: directory service type does not support DNS registration')
if fqdn.startswith('localhost'):
raise CallError(f'{fqdn}: Invalid domain name.')
if not fqdn.endswith(dot):
fqdn += dot
payload = self._create_nsupdate_payload(fqdn, 'ADD', do_ptr)
self.middleware.call_sync('dns.nsupdate', {'ops': payload})
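# Illustrative internal call (fqdn is hypothetical); a trailing dot is appended
# automatically when absent:
#
#   self.middleware.call_sync(
#       'directoryservices.connection.register_dns', 'truenas.example.internal', False
#   )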
@kerberos_ticket
def unregister_dns(self, fqdn: str, do_ptr: bool = False):
if not isinstance(fqdn, str):
raise TypeError(f'{type(fqdn)}: must be a string')
elif dot not in fqdn:
raise ValueError(f'{fqdn}: missing domain component of name')
ds_type_str = self.middleware.call_sync('directoryservices.status')['type']
match ds_type_str:
case DSType.AD.value | DSType.IPA.value:
pass
case None:
raise CallError('Directory services must be enabled in order to unregister DNS')
case _:
raise CallError(f'{ds_type_str}: directory service type does not support DNS registration')
if fqdn.startswith('localhost'):
raise CallError(f'{fqdn}: Invalid domain name.')
if not fqdn.endswith(dot):
fqdn += dot
payload = self._create_nsupdate_payload(fqdn, 'DELETE', do_ptr)
self.middleware.call_sync('dns.nsupdate', {'ops': payload})
@kerberos_ticket
def _test_is_joined(self, ds_type: DSType, domain: str) -> bool:
""" Test to see whether TrueNAS is already joined to the domain
Args:
ds_type: Type of directory service that is being tested. Choices
are DSType.AD and DSType.IPA
domain: Name of domain to be joined. For AD domains this should
be the pre-win2k domain, and for IPA domains the kerberos
realm.
Returns:
True - joined to domain
False - not joined to domain
Raises:
CallError
TypeError
"""
if not isinstance(ds_type, DSType):
raise TypeError(f'{type(ds_type)}: DSType is required')
match ds_type:
case DSType.AD:
is_joined_fn = self._ad_test_join
case DSType.IPA:
is_joined_fn = self._ipa_test_join
case _:
raise CallError(
f'{ds_type}: specified directory service type does not '
'support domain join functionality.'
)
return is_joined_fn(ds_type, domain)
@job(lock="directoryservices_join_leave")
@kerberos_ticket
    def join_domain(self, job: Job, ds_type_str: str, domain: str, force: bool = False) -> str:
""" Join an IPA or active directory domain
        Create a TrueNAS account on the remote domain controller (DC) and
        update the TrueNAS configuration to reflect settings determined during
        the join process. Requires a valid kerberos ticket for a privileged
        account on the domain because we perform operations on the DC.
If join fails then TrueNAS will attempt to roll back changes to a
clean state.
Args:
ds_type_str: String value of the DSType to be joined. Supported
values are 'ACTIVEDIRECTORY' and 'IPA'
domain: Name of domain to be joined. For AD domains this should
be the pre-win2k domain, and for IPA domains the kerberos
realm.
force: Skip the step where we check whether TrueNAS is already
joined to the domain. Join should not be forced without very
good reason as this will cause auditing events on the domain
controller and may disrupt services on the TrueNAS server.
Returns:
str - One of DomainJoinResponse strings
Raises:
ValueError - ds_type_str is an invalid DSType
"""
ds_type = DSType(ds_type_str)
if not force:
if self._test_is_joined(ds_type, domain):
self.logger.debug(
'%s: server is already joined to domain %s',
ds_type_str, domain
)
return DomainJoinResponse.ALREADY_JOINED.value
match ds_type:
case DSType.AD:
do_join_fn = self._ad_join
case DSType.IPA:
do_join_fn = self._ipa_join
case _:
raise CallError(
f'{ds_type}: specified directory service type does not '
'support domain join functionality.'
)
do_join_fn(job, ds_type, domain)
return DomainJoinResponse.PERFORMED_JOIN.value
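    # Minimal usage sketch (the domain below is hypothetical; assumes a valid
    # kerberos ticket for a privileged domain account is already available):
    #
    #     directoryservices.connection.join_domain('ACTIVEDIRECTORY', 'EXAMPLE')
    #     -> 'PERFORMED_JOIN' or 'ALREADY_JOINED'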
def grant_privileges(self, ds_type_str: str, domain: str) -> None:
ds_type = DSType(ds_type_str)
if not self._test_is_joined(ds_type, domain):
raise CallError('TrueNAS is not joined to domain')
match ds_type:
case DSType.AD:
self._ad_grant_privileges()
case DSType.IPA:
self._ipa_grant_privileges()
case _:
                raise ValueError(f'{ds_type}: unexpected directory service type')
@job(lock="directoryservices_join_leave")
@kerberos_ticket
def leave_domain(self, job: Job, ds_type_str: str, domain: str) -> None:
""" Leave an IPA or active directory domain
        Remove the TrueNAS configuration from the remote domain controller (DC) and clean
        up the local configuration. Requires a valid kerberos ticket for a privileged
        account on the domain because we perform operations on the DC.
Args:
ds_type_str: String value of the DSType to be left. Supported
values are 'ACTIVEDIRECTORY' and 'IPA'
domain: Name of domain to be left. For AD domains this should
be the pre-win2k domain, and for IPA domains the kerberos
realm.
Returns:
None
Raises:
ValueError - ds_type_str is an invalid DSType
"""
ds_type = DSType(ds_type_str)
match ds_type:
case DSType.AD:
do_leave_fn = self._ad_leave
case DSType.IPA:
do_leave_fn = self._ipa_leave
case _:
raise CallError(
                    f'{ds_type}: specified directory service type does not '
                    'support domain leave functionality.'
)
# Only make actual attempt to leave the domain if we have a valid join
if self._test_is_joined(ds_type, domain):
do_leave_fn(job, ds_type, domain)
else:
self.logger.warning(
                '%s: domain join is not healthy. Manual cleanup of the machine account on '
                'the remote domain controller may be required.', domain
)
# TODO move cleanup methods here
| 11,605 | Python | .py | 251 | 34.115538 | 110 | 0.5943 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,564 | activedirectory_health_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/activedirectory_health_mixin.py | import subprocess
from base64 import b64decode
from middlewared.utils.directoryservices.ad import get_domain_info
from middlewared.utils.directoryservices.ad_constants import (
MACHINE_ACCOUNT_KT_NAME,
MAX_SERVER_TIME_OFFSET,
)
from middlewared.utils.directoryservices.constants import DSType
from middlewared.utils.directoryservices.health import (
ADHealthCheckFailReason,
ADHealthError,
)
from middlewared.plugins.idmap_.idmap_winbind import WBClient
from middlewared.service_exception import CallError, MatchNotFound
class ADHealthMixin:
def _test_machine_account_password(
self,
kdc: str,
account_password: bytes
) -> None:
"""
Validate that our machine account password can be used to kinit
"""
config = self.middleware.call_sync('activedirectory.config')
cred = self.middleware.call_sync('kerberos.get_cred', {
'dstype': DSType.AD.value,
'conf': {
'bindname': config['netbiosname'].upper() + '$',
'bindpw': b64decode(account_password).decode(),
'domainname': config['domainname']
}
})
# validate machine account secret can kinit
self.middleware.call_sync('kerberos.do_kinit', {
'krb5_cred': cred,
'kinit-options': {'ccache': 'TEMP', 'kdc_override': {
'domain': config['domainname'].upper(),
'kdc': kdc
}}
})
# remove our ticket
self.middleware.call_sync('kerberos.kdestroy', {'ccache': 'TEMP'})
# regenerate krb5.conf
self.middleware.call_sync('etc.generate', 'kerberos')
def _recover_keytab(self) -> None:
"""
TrueNAS administrator has deleted the active directory machine account
keytab. We can most likely recover it using the stored secrets in Samba's
secrets.tdb file.
"""
self.logger.warning('Attempting to recover from missing machine account keytab')
# Use net command to build a kerberos keytab from our stored secrets
results = subprocess.run(['net', 'ads', 'keytab', 'create'], check=False, capture_output=True)
if results.returncode != 0:
raise CallError(
f'Failed to generate kerberos keytab from stored secrets: {results.stderr.decode()}'
)
self.middleware.call_sync('kerberos.keytab.store_ad_keytab')
self.logger.warning('Recovered from missing machine account keytab')
def _recover_secrets(self) -> None:
"""
The secrets.tdb file is missing or lacks an entry for our server. We keep a backup
copy of this in our database. Restore the old one and attempt to kinit with the
credentials it contains.
"""
self.logger.warning('Attempting to recover from broken or missing AD secrets file')
config = self.middleware.call_sync('activedirectory.config')
smb_config = self.middleware.call_sync('smb.config')
domain_info = get_domain_info(config['domainname'])
if not self.middleware.call_sync('directoryservices.secrets.restore', smb_config['netbiosname']):
raise CallError(
'File containing AD machine account password has been removed without a viable '
'candidate for restoration. Full rejoin of active directory will be required.'
)
machine_pass = self.middleware.call_sync(
'directoryservices.secrets.get_machine_secret',
smb_config['workgroup']
)
self._test_machine_account_password(domain_info['kdc_server'], machine_pass)
self.middleware.call_sync('service.stop', 'idmap')
self.middleware.call_sync('service.start', 'idmap', {'silent': False})
self.logger.warning('Recovered from broken or missing AD secrets file')
def _recover_ad(self, error: ADHealthError) -> None:
"""
Attempt to recover from an ADHealthError that was raised during
our health check.
"""
match error.reason:
case ADHealthCheckFailReason.AD_KEYTAB_INVALID:
self._recover_keytab()
case ADHealthCheckFailReason.AD_SECRET_FILE_MISSING:
self._recover_secrets()
case ADHealthCheckFailReason.AD_SECRET_ENTRY_MISSING:
self._recover_secrets()
case ADHealthCheckFailReason.AD_NETLOGON_FAILURE:
                # It's possible that our smb.conf has incorrect
                # information in it. We'll try to regenerate the config
                # file and then restart winbindd for good measure.
self.middleware.call_sync('etc.generate', 'smb')
case ADHealthCheckFailReason.WINBIND_STOPPED:
# pick up winbind restart below
pass
case _:
# not recoverable
raise error from None
self.middleware.call_sync('service.stop', 'idmap')
self.middleware.call_sync('service.start', 'idmap', {'silent': False})
def _health_check_ad(self):
"""
Perform basic health checks for AD connection.
This method is called periodically from our alert framework.
"""
# We should validate some basic AD configuration before the common
# kerberos health checks. This will expose issues with clock slew
# and invalid stored machine account passwords
config = self.middleware.call_sync('activedirectory.config')
try:
domain_info = get_domain_info(config['domainname'])
except Exception:
domain_info = None
workgroup = self.middleware.call_sync('smb.config')['workgroup']
if domain_info:
if domain_info['server_time_offset'] > MAX_SERVER_TIME_OFFSET:
faulted_reason = (
'Time offset from Active Directory domain exceeds maximum '
'permitted value. This may indicate an NTP misconfiguration.'
)
raise ADHealthError(
ADHealthCheckFailReason.NTP_EXCESSIVE_SLEW,
faulted_reason
)
try:
machine_pass = self.middleware.call_sync('directoryservices.secrets.get_machine_secret', workgroup)
except FileNotFoundError:
# our secrets.tdb file has been deleted for some reason
# unfortunately sometimes users do this when trying to debug issues
faulted_reason = (
'File containing Active Directory machine account password is missing from server.'
)
raise ADHealthError(
ADHealthCheckFailReason.AD_SECRET_FILE_MISSING,
faulted_reason
)
except MatchNotFound:
faulted_reason = (
'Active Directory secrets file lacks an entry for this TrueNAS server.'
)
raise ADHealthError(
ADHealthCheckFailReason.AD_SECRET_ENTRY_MISSING,
faulted_reason
)
if domain_info:
try:
self._test_machine_account_password(
domain_info['kdc_server'],
machine_pass
)
except CallError:
faulted_reason = (
'Stored machine account secret is invalid. This may indicate that '
'the machine account password was reset in Active Directory without '
                    'corresponding changes being made to the TrueNAS server configuration.'
)
raise ADHealthError(
ADHealthCheckFailReason.AD_SECRET_INVALID,
faulted_reason
)
try:
self.middleware.call_sync('kerberos.keytab.query', [
['name', '=', MACHINE_ACCOUNT_KT_NAME]
], {'get': True})
except MatchNotFound:
faulted_reason = (
'Machine account keytab is absent from TrueNAS configuration.'
)
raise ADHealthError(
ADHealthCheckFailReason.AD_KEYTAB_INVALID,
faulted_reason
)
# Now check that winbindd is started
if not self.middleware.call_sync('service.started', 'idmap'):
try:
self.middleware.call_sync('service.start', 'idmap', {'silent': False})
except CallError as e:
faulted_reason = str(e.errmsg)
raise ADHealthError(
ADHealthCheckFailReason.WINBIND_STOPPED,
faulted_reason
)
# Winbind is running and so we can check our netlogon connection
# First open the libwbclient handle. This should in theory never fail.
try:
ctx = WBClient()
except Exception as e:
faulted_reason = str(e)
raise ADHealthError(
ADHealthCheckFailReason.AD_WBCLIENT_FAILURE,
faulted_reason
)
        # If needed we can replace `ping_dc()` with `check_trust()`.
        # For now we're defaulting to the lower-cost test unless it gives
        # false reports of being up.
try:
ctx.ping_dc()
except Exception as e:
faulted_reason = str(e)
raise ADHealthError(
ADHealthCheckFailReason.AD_NETLOGON_FAILURE,
faulted_reason
)
| 9,567 | Python | .py | 210 | 33.542857 | 111 | 0.606883 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,565 | ldap_health_mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices_/ldap_health_mixin.py | from middlewared.utils.directoryservices.health import (
LDAPHealthCheckFailReason,
LDAPHealthError
)
from middlewared.service_exception import CallError
class LDAPHealthMixin:
def _recover_ldap_config(self) -> list[dict]:
return self.middleware.call_sync('etc.generate', 'ldap')
def _recover_ldap(self, error: LDAPHealthError) -> None:
"""
        Attempt to recover from an LDAPHealthError that was raised during
our health check.
"""
match error.reason:
case LDAPHealthCheckFailReason.LDAP_BIND_FAILED | LDAPHealthCheckFailReason.SSSD_STOPPED:
self._recover_ldap_config()
case _:
# not recoverable
raise error from None
self.middleware.call_sync('service.stop', 'sssd')
self.middleware.call_sync('service.start', 'sssd', {'silent': False})
def _health_check_ldap(self) -> None:
"""
        Perform basic health checks for the LDAP connection.
This method is called periodically from our alert framework.
"""
ldap_config = self.middleware.call_sync('ldap.config')
# There is a small chance we have an oddball generic LDAP + KRB5
# domain and will need to perform LDAP health checks.
if ldap_config['kerberos_realm']:
self._health_check_krb5()
# Verify that our stored credentials are sufficient to authenticate
# to LDAP server via python-ldap
try:
self.middleware.call_sync('ldap.get_root_DSE')
except Exception as e:
self._faulted_reason = str(e)
raise LDAPHealthError(
LDAPHealthCheckFailReason.LDAP_BIND_FAILED,
self._faulted_reason
)
# Finally check that sssd is running, and if it's not, try non-silent
# start so that we can dump the reason it's failing to start into an alert.
#
# We don't want to move the sssd restart into the alert itself because
# we need to populate the error reason into `_faulted_reason` so that
# it appears in our directory services summary
if not self.middleware.call_sync('service.started', 'sssd'):
try:
self.middleware.call_sync('service.start', 'sssd', {'silent': False})
except CallError as e:
self._faulted_reason = str(e)
raise LDAPHealthError(
LDAPHealthCheckFailReason.SSSD_STOPPED,
self._faulted_reason
)
| 2,567 | Python | .py | 56 | 35.232143 | 101 | 0.628549 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,566 | restore.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_backup/restore.py | from middlewared.async_validators import check_path_resides_within_volume
from middlewared.plugins.cloud_backup.restic import get_restic_config, run_restic
from middlewared.schema import accepts, Dict, Int, List, Str
from middlewared.service import job, Service, ValidationErrors
from middlewared.validators import NotMatch
class CloudBackupService(Service):
class Config:
cli_namespace = "task.cloud_backup"
namespace = "cloud_backup"
@accepts(
Int("id"),
Str("snapshot_id", validators=[NotMatch(r"^-")]),
Str("subfolder"),
Str("destination_path"),
Dict(
"options",
List("exclude", items=[Str("item")]),
List("include", items=[Str("item")]),
),
)
@job(logs=True)
async def restore(self, job, id_, snapshot_id, subfolder, destination_path, options):
"""
        Restore files to the directory `destination_path` from the subfolder `subfolder` of the snapshot
        `snapshot_id` created by the cloud backup job `id`.
"""
await self.middleware.call("network.general.will_perform_activity", "cloud_backup")
verrors = ValidationErrors()
await check_path_resides_within_volume(verrors, self.middleware, "destination_path", destination_path)
verrors.check()
cloud_backup = await self.middleware.call("cloud_backup.get_instance", id_)
restic_config = get_restic_config(cloud_backup)
cmd = ["restore", f"{snapshot_id}:{subfolder}", "--target", destination_path]
cmd += sum([["--exclude", exclude] for exclude in options["exclude"]], [])
cmd += sum([["--include", include] for include in options["include"]], [])
await run_restic(
job,
restic_config.cmd + cmd,
restic_config.env,
)
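        # Illustrative sketch of the resulting restic invocation (the repository
        # URL, snapshot id and paths below are hypothetical):
        #
        #     restic --no-cache -r s3:<url>/bucket/path restore 1a2b3c4d:/docs \
        #         --target /mnt/tank/restore --include notes.txt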
| 1,831 | Python | .py | 40 | 37.6 | 110 | 0.644944 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,567 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_backup/crud.py | from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, OneShotAlertClass
from middlewared.common.attachment import LockableFSAttachmentDelegate
from middlewared.plugins.cloud.crud import CloudTaskServiceMixin
from middlewared.plugins.cloud.model import CloudTaskModelMixin, cloud_task_schema
from middlewared.schema import accepts, Bool, Cron, Dict, Int, Password, Patch
from middlewared.service import pass_app, private, TaskPathService, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.path import FSLocation
from middlewared.utils.service.task_state import TaskStateMixin
from middlewared.validators import Range
from .init import IncorrectPassword
class CloudBackupModel(CloudTaskModelMixin, sa.Model):
__tablename__ = "tasks_cloud_backup"
password = sa.Column(sa.EncryptedText())
keep_last = sa.Column(sa.Integer())
class CloudBackupService(TaskPathService, CloudTaskServiceMixin, TaskStateMixin):
allow_zvol = True
share_task_type = "CloudBackup"
allowed_path_types = [FSLocation.CLUSTER, FSLocation.LOCAL]
task_state_methods = ["cloud_backup.sync", "cloud_backup.restore"]
class Config:
datastore = "tasks.cloud_backup"
datastore_extend = "cloud_backup.extend"
datastore_extend_context = "cloud_backup.extend_context"
cli_namespace = "task.cloud_backup"
namespace = "cloud_backup"
role_prefix = "CLOUD_BACKUP"
ENTRY = Patch(
'cloud_backup_create',
'cloud_backup_entry',
('add', Int('id')),
("replace", Dict("credentials", additional_attrs=True, private_keys=["attributes"])),
("add", Dict("job", additional_attrs=True, null=True)),
("add", Bool("locked")),
)
@private
async def extend_context(self, rows, extra):
return {
"task_state": await self.get_task_state_context(),
}
@private
async def extend(self, cloud_backup, context):
cloud_backup["credentials"] = cloud_backup.pop("credential")
if job := await self.get_task_state_job(context["task_state"], cloud_backup["id"]):
cloud_backup["job"] = job
Cron.convert_db_format_to_schedule(cloud_backup)
return cloud_backup
@private
async def _compress(self, cloud_backup):
cloud_backup["credential"] = cloud_backup.pop("credentials")
Cron.convert_schedule_to_db_format(cloud_backup)
cloud_backup.pop("job", None)
cloud_backup.pop(self.locked_field, None)
return cloud_backup
@accepts(Dict(
"cloud_backup_create",
*cloud_task_schema,
Password("password", required=True, empty=False),
Int("keep_last", required=True, validators=[Range(min_=1)]),
register=True,
))
@pass_app(rest=True)
async def do_create(self, app, cloud_backup):
"""
"""
verrors = ValidationErrors()
await self._validate(app, verrors, "cloud_backup_create", cloud_backup)
verrors.check()
cloud_backup = await self._compress(cloud_backup)
cloud_backup["id"] = await self.middleware.call("datastore.insert", "tasks.cloud_backup",
{**cloud_backup, "job": None})
await self.middleware.call("service.restart", "cron")
return await self.get_instance(cloud_backup["id"])
@accepts(Int("id"), Patch("cloud_backup_create", "cloud_backup_update", ("attr", {"update": True})))
@pass_app(rest=True)
async def do_update(self, app, id_, data):
"""
Updates the cloud backup entry `id` with `data`.
"""
cloud_backup = await self.get_instance(id_)
# credentials is a foreign key for now
if cloud_backup["credentials"]:
cloud_backup["credentials"] = cloud_backup["credentials"]["id"]
cloud_backup.update(data)
verrors = ValidationErrors()
await self._validate(app, verrors, "cloud_backup_update", cloud_backup)
verrors.check()
cloud_backup = await self._compress(cloud_backup)
await self.middleware.call("datastore.update", "tasks.cloud_backup", id_, cloud_backup)
await self.middleware.call("service.restart", "cron")
return await self.get_instance(id_)
@accepts(Int("id"))
async def do_delete(self, id_):
"""
Deletes cloud backup entry `id`.
"""
await self.middleware.call("cloud_backup.abort", id_)
await self.middleware.call("alert.oneshot_delete", "CloudBackupTaskFailed", id_)
rv = await self.middleware.call("datastore.delete", "tasks.cloud_backup", id_)
await self.middleware.call("service.restart", "cron")
return rv
@private
async def _validate(self, app, verrors, name, data):
await super()._validate(app, verrors, name, data)
if not verrors:
try:
await self.middleware.call("cloud_backup.ensure_initialized", data)
except IncorrectPassword as e:
verrors.add(f"{name}.password", e.errmsg)
class CloudBackupTaskFailedAlertClass(AlertClass, OneShotAlertClass):
category = AlertCategory.TASKS
level = AlertLevel.ERROR
title = "Cloud Backup Task Failed"
text = "Cloud backup task \"%(name)s\" failed."
async def create(self, args):
return Alert(CloudBackupTaskFailedAlertClass, args, key=args["id"])
async def delete(self, alerts, query):
return list(filter(
lambda alert: alert.key != str(query),
alerts
))
class CloudBackupFSAttachmentDelegate(LockableFSAttachmentDelegate):
name = "cloud_backup"
title = "Cloud Backup Task"
service_class = CloudBackupService
resource_name = "path"
async def restart_reload_services(self, attachments):
await self.middleware.call("service.restart", "cron")
async def setup(middleware):
await middleware.call("pool.dataset.register_attachment_delegate", CloudBackupFSAttachmentDelegate(middleware))
await middleware.call("network.general.register_activity", "cloud_backup", "Cloud backup")
await middleware.call("cloud_backup.persist_task_state_on_job_complete")
| 6,256 | Python | .py | 132 | 39.606061 | 115 | 0.675378 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,568 | sync.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_backup/sync.py | import asyncio
import itertools
from middlewared.plugins.cloud.path import check_local_path
from middlewared.plugins.cloud_backup.restic import get_restic_config, run_restic
from middlewared.plugins.zfs_.utils import zvol_name_to_path, zvol_path_to_name
from middlewared.schema import accepts, Bool, Dict, Int
from middlewared.service import CallError, Service, item_method, job, private
from middlewared.utils.time_utils import utc_now
async def restic(middleware, job, cloud_backup, dry_run):
await middleware.call("network.general.will_perform_activity", "cloud_backup")
snapshot = None
clone = None
stdin = None
cmd = None
try:
local_path = cloud_backup["path"]
if local_path.startswith("/dev/zvol"):
await middleware.call("cloud_backup.validate_zvol", local_path)
name = f"cloud_backup-{cloud_backup.get('id', 'onetime')}-{utc_now().strftime('%Y%m%d%H%M%S')}"
snapshot = (await middleware.call("zfs.snapshot.create", {
"dataset": zvol_path_to_name(local_path),
"name": name,
"suspend_vms": True,
"vmware_sync": True,
}))["name"]
clone = zvol_path_to_name(local_path) + f"-{name}"
try:
await middleware.call("zfs.snapshot.clone", {
"snapshot": snapshot,
"dataset_dst": clone,
})
except Exception:
clone = None
raise
# zvol device might take a while to appear
for i in itertools.count():
try:
stdin = await middleware.run_in_thread(open, zvol_name_to_path(clone), "rb")
except FileNotFoundError:
if i >= 5:
raise
await asyncio.sleep(1)
else:
break
cmd = ["--stdin", "--stdin-filename", "volume"]
else:
await check_local_path(middleware, local_path)
if cmd is None:
cmd = [local_path]
if dry_run:
cmd.append("-n")
restic_config = get_restic_config(cloud_backup)
cmd = restic_config.cmd + ["--verbose", "backup"] + cmd
await run_restic(job, cmd, restic_config.env, stdin)
finally:
if stdin:
try:
stdin.close()
except Exception as e:
middleware.logger.warning(f"Error closing snapshot device: {e!r}")
if clone is not None:
try:
await middleware.call("zfs.dataset.delete", clone)
except Exception as e:
middleware.logger.warning(f"Error deleting cloned dataset {clone}: {e!r}")
if snapshot is not None:
try:
await middleware.call("zfs.snapshot.delete", snapshot)
except Exception as e:
middleware.logger.warning(f"Error deleting snapshot {snapshot}: {e!r}")
class CloudBackupService(Service):
class Config:
cli_namespace = "task.cloud_backup"
namespace = "cloud_backup"
@item_method
@accepts(
Int("id"),
Dict(
"cloud_backup_sync_options",
Bool("dry_run", default=False),
register=True,
)
)
@job(lock=lambda args: "cloud_backup:{}".format(args[-1]), lock_queue_size=1, logs=True, abortable=True)
async def sync(self, job, id_, options):
"""
Run the cloud backup job `id`.
"""
cloud_backup = await self.middleware.call("cloud_backup.get_instance", id_)
if cloud_backup["locked"]:
await self.middleware.call("cloud_backup.generate_locked_alert", id_)
raise CallError("Dataset is locked")
await self._sync(cloud_backup, options, job)
async def _sync(self, cloud_backup, options, job):
job.set_progress(0, "Starting")
try:
await self.middleware.call("cloud_backup.ensure_initialized", cloud_backup)
await restic(self.middleware, job, cloud_backup, options["dry_run"])
job.set_progress(100, "Cleaning up")
restic_config = get_restic_config(cloud_backup)
await run_restic(
job,
restic_config.cmd + ["forget", "--keep-last", str(cloud_backup["keep_last"])],
restic_config.env,
)
if "id" in cloud_backup:
await self.middleware.call("alert.oneshot_delete", "CloudBackupTaskFailed", cloud_backup["id"])
except Exception:
if "id" in cloud_backup:
await self.middleware.call("alert.oneshot_create", "CloudBackupTaskFailed", {
"id": cloud_backup["id"],
"name": cloud_backup["description"],
})
raise
@item_method
@accepts(Int("id"))
async def abort(self, id_):
"""
Aborts cloud backup task.
"""
cloud_backup = await self.middleware.call("cloud_backup.get_instance", id_)
if cloud_backup["job"] is None:
return False
if cloud_backup["job"]["state"] not in ["WAITING", "RUNNING"]:
return False
await self.middleware.call("core.job_abort", cloud_backup["job"]["id"])
return True
@private
async def validate_zvol(self, path):
dataset = zvol_path_to_name(path)
if not (
await self.middleware.call("vm.query_snapshot_begin", dataset, False) or
await self.middleware.call("vmware.dataset_has_vms", dataset, False)
):
raise CallError("Backed up zvol must be used by a local or VMware VM")
| 5,768 | Python | .py | 135 | 31.333333 | 111 | 0.574795 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,569 | restic.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_backup/restic.py | import asyncio
from dataclasses import dataclass
import subprocess
from middlewared.plugins.cloud.path import get_remote_path
from middlewared.plugins.cloud.remotes import REMOTES
from middlewared.service import CallError
from middlewared.utils import Popen
@dataclass
class ResticConfig:
cmd: list[str]
env: dict[str, str]
def get_restic_config(cloud_backup):
remote = REMOTES[cloud_backup["credentials"]["provider"]]
remote_path = get_remote_path(remote, cloud_backup["attributes"])
url, env = remote.get_restic_config(cloud_backup)
cmd = ["restic", "--no-cache", "-r", f"{remote.rclone_type}:{url}/{remote_path}"]
env["RESTIC_PASSWORD"] = cloud_backup["password"]
return ResticConfig(cmd, env)
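# Illustrative sketch of what get_restic_config() yields for a hypothetical S3
# credential with remote path 'bucket/backup' (provider keys elided):
#
#     cmd = ['restic', '--no-cache', '-r', 's3:<url>/bucket/backup']
#     env = {..., 'RESTIC_PASSWORD': '<task password>'}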
async def run_restic(job, cmd, env, stdin=None):
job.middleware.logger.debug("Running %r", cmd)
proc = await Popen(
cmd,
env=env,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
check_progress = asyncio.ensure_future(restic_check_progress(job, proc))
cancelled_error = None
try:
try:
await proc.wait()
except asyncio.CancelledError as e:
cancelled_error = e
try:
await job.middleware.call("service.terminate_process", proc.pid)
except CallError as e:
job.middleware.logger.warning(f"Error terminating restic on cloud backup abort: {e!r}")
finally:
await asyncio.wait_for(check_progress, None)
if cancelled_error is not None:
raise cancelled_error
if proc.returncode != 0:
message = "".join(job.internal_data.get("messages", []))
if message and proc.returncode != 1:
if not message.endswith("\n"):
message += "\n"
message += f"restic failed with exit code {proc.returncode}"
raise CallError(message)
async def restic_check_progress(job, proc):
while True:
read = (await proc.stdout.readline()).decode("utf-8", "ignore")
if read == "":
break
await job.logs_fd_write(read.encode("utf-8", "ignore"))
job.internal_data.setdefault("messages", [])
job.internal_data["messages"] = job.internal_data["messages"][-4:] + [read]
| 2,288 | Python | .py | 57 | 32.877193 | 103 | 0.655375 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,570 | init.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_backup/init.py | import subprocess
from middlewared.plugins.cloud_backup.restic import get_restic_config
from middlewared.service import CallError, Service, private
class IncorrectPassword(CallError):
pass
class CloudBackupService(Service):
class Config:
cli_namespace = "task.cloud_backup"
namespace = "cloud_backup"
@private
def ensure_initialized(self, cloud_backup):
self.middleware.call_sync("network.general.will_perform_activity", "cloud_backup")
if isinstance(cloud_backup["credentials"], int):
cloud_backup = {
**cloud_backup,
"credentials": self.middleware.call_sync("cloudsync.credentials.get_instance",
cloud_backup["credentials"]),
}
if self.is_initialized(cloud_backup):
return
self.init(cloud_backup)
@private
def is_initialized(self, cloud_backup):
self.middleware.call_sync("network.general.will_perform_activity", "cloud_backup")
restic_config = get_restic_config(cloud_backup)
try:
subprocess.run(
restic_config.cmd + ["cat", "config"],
env=restic_config.env,
capture_output=True,
text=True,
check=True,
)
return True
except subprocess.CalledProcessError as e:
text = e.stderr.strip()
if "Is there a repository at the following location?" in text:
return False
if "wrong password or no key found" in text:
raise IncorrectPassword(text)
raise CallError(text)
@private
def init(self, cloud_backup):
self.middleware.call_sync("network.general.will_perform_activity", "cloud_backup")
attrs = cloud_backup["attributes"]
cred = cloud_backup["credentials"]["id"]
if "bucket" in attrs:
existing_buckets = [b["Name"] for b in self.middleware.call_sync("cloudsync.list_buckets", cred)]
if attrs["bucket"] not in existing_buckets:
self.middleware.call_sync("cloudsync.create_bucket", cred, attrs["bucket"])
restic_config = get_restic_config(cloud_backup)
try:
subprocess.run(
restic_config.cmd + ["init"],
env=restic_config.env,
capture_output=True,
text=True,
check=True,
)
except subprocess.CalledProcessError as e:
raise CallError(e.stderr)
| 2,600 | Python | .py | 61 | 30.721311 | 109 | 0.595871 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,571 | snapshot.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_backup/snapshot.py | from datetime import datetime
import json
import subprocess
from middlewared.plugins.cloud_backup.restic import get_restic_config
from middlewared.schema import accepts, Datetime, Dict, Int, List, returns, Str
from middlewared.service import CallError, job, Service
from middlewared.validators import NotMatch
class CloudBackupService(Service):
class Config:
cli_namespace = "task.cloud_backup"
namespace = "cloud_backup"
@accepts(Int("id"))
@returns(
List("cloud_backup_snapshots", items=[
Dict(
"cloud_backup_snapshot",
Str("id"),
Str("hostname"),
Datetime("time"),
List("paths", items=[Str("path")]),
additional_attrs=True,
),
]),
)
def list_snapshots(self, id_):
"""
List existing snapshots for the cloud backup job `id`.
"""
self.middleware.call_sync("network.general.will_perform_activity", "cloud_backup")
cloud_backup = self.middleware.call_sync("cloud_backup.get_instance", id_)
restic_config = get_restic_config(cloud_backup)
try:
snapshots = json.loads(subprocess.run(
restic_config.cmd + ["--json", "snapshots"],
env=restic_config.env,
capture_output=True,
text=True,
check=True,
).stdout)
except subprocess.CalledProcessError as e:
raise CallError(e.stderr)
for snapshot in snapshots:
snapshot["time"] = datetime.fromisoformat(snapshot["time"])
return snapshots
@accepts(Int("id"), Str("snapshot_id", validators=[NotMatch(r"^-")]), Str("path", validators=[NotMatch(r"^-")]))
@returns(
List("cloud_backup_snapshot_items", items=[
Dict(
"cloud_backup_snapshot_item",
Str("name"),
Str("path"),
Str("type", enum=["dir", "file"]),
Int("size"),
Datetime("mtime"),
additional_attrs=True,
),
]),
)
def list_snapshot_directory(self, id_, snapshot_id, path):
"""
List files in the directory `path` of the `snapshot_id` created by the cloud backup job `id`.
"""
self.middleware.call_sync("network.general.will_perform_activity", "cloud_backup")
cloud_backup = self.middleware.call_sync("cloud_backup.get_instance", id_)
restic_config = get_restic_config(cloud_backup)
try:
items = list(map(json.loads, subprocess.run(
restic_config.cmd + ["--json", "ls", snapshot_id, path],
env=restic_config.env,
capture_output=True,
text=True,
check=True,
).stdout.splitlines()))
except subprocess.CalledProcessError as e:
raise CallError(e.stderr)
contents = []
for item in items[1:]:
if item["struct_type"] != "node":
continue
for k in ["atime", "ctime", "mtime"]:
item[k] = datetime.fromisoformat(item[k])
contents.append(item)
return contents
@accepts(Int("id"), Str("snapshot_id", validators=[NotMatch(r"^-")]))
@returns()
@job(lock=lambda args: "cloud_backup:{}".format(args[-1]), lock_queue_size=1)
def delete_snapshot(self, job, id_, snapshot_id):
"""
Delete snapshot `snapshot_id` created by the cloud backup job `id`.
"""
self.middleware.call_sync("network.general.will_perform_activity", "cloud_backup")
cloud_backup = self.middleware.call_sync("cloud_backup.get_instance", id_)
restic_config = get_restic_config(cloud_backup)
try:
subprocess.run(
restic_config.cmd + ["forget", snapshot_id, "--prune"],
env=restic_config.env,
capture_output=True,
text=True,
check=True,
)
except subprocess.CalledProcessError as e:
raise CallError(e.stderr)
| 4,179 | Python | .py | 103 | 29.495146 | 116 | 0.568574 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,572 | sysctl_info.py | truenas_middleware/src/middlewared/middlewared/plugins/sysctl/sysctl_info.py | import os
from middlewared.service import CallError, Service
from middlewared.utils import run, MIDDLEWARE_RUN_DIR
ZFS_MODULE_PARAMS_PATH = '/sys/module/zfs/parameters'
DEFAULT_ARC_MAX_FILE = f'{MIDDLEWARE_RUN_DIR}/default_arc_max'
class SysctlService(Service):
class Config:
private = True
async def get_value(self, sysctl_name):
cp = await run(['sysctl', sysctl_name], check=False)
if cp.returncode:
raise CallError(f'Unable to retrieve value of "{sysctl_name}" sysctl : {cp.stderr.decode()}')
return cp.stdout.decode().split('=')[-1].strip()
def store_default_arc_max(self):
"""This method should be called _BEFORE_ we initialize any VMs
so that we can capture what the ARC max value was before we start
        changing the various ARC sysctls based on VM memory configurations."""
val = self.get_arcstats()['c_max']
try:
with open(DEFAULT_ARC_MAX_FILE, 'x') as f:
f.write(str(val))
f.flush()
except FileExistsError:
return self.get_default_arc_max()
else:
return val
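    # Note: opening with mode 'x' (exclusive create) above makes the first caller
    # to store the value win; a concurrent caller that loses the race reads the
    # already-stored value back via get_default_arc_max().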
def get_default_arc_max(self):
try:
with open(DEFAULT_ARC_MAX_FILE) as f:
return int(f.read())
except FileNotFoundError:
return self.store_default_arc_max()
def get_arc_max(self):
return self.get_arcstats()['c_max']
def get_arc_min(self):
return self.get_arcstats()['c_min']
async def get_pagesize(self):
cp = await run(['getconf', 'PAGESIZE'], check=False)
if cp.returncode:
raise CallError(f'Unable to retrieve pagesize value: {cp.stderr.decode()}')
return int(cp.stdout.decode().strip())
def get_arcstats(self):
stats = {}
with open('/proc/spl/kstat/zfs/arcstats') as f:
for lineno, line in enumerate(f, start=1):
if lineno > 2: # skip first 2 lines
try:
key, _, value = line.strip().split()
key, value = key.strip(), value.strip()
except ValueError:
continue
else:
stats[key] = int(value) if value.isdigit() else value
return stats
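    # Illustrative shape of the parsed arcstats (the values are hypothetical):
    #
    #     {'hits': 123456, 'c_min': 1073741824, 'c_max': 8589934592, 'size': 4294967296, ...}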
def get_arcstats_size(self):
return self.get_arcstats()['size']
async def set_value(self, key, value):
await run(['sysctl', f'{key}={value}'])
def write_to_file(self, path, value):
with open(path, 'w') as f:
f.write(str(value))
def set_arc_max(self, value):
return self.write_to_file(os.path.join(ZFS_MODULE_PARAMS_PATH, 'zfs_arc_max'), value)
def set_zvol_volmode(self, value):
return self.write_to_file(os.path.join(ZFS_MODULE_PARAMS_PATH, 'zvol_volmode'), value)
| 2,872 | Python | .py | 65 | 33.861538 | 105 | 0.59412 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,573 | sync.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/sync.py | from middlewared.schema import accepts
from middlewared.service import job, private, returns, Service
from .git_utils import pull_clone_repository
from .utils import OFFICIAL_LABEL, OFFICIAL_CATALOG_REPO, OFFICIAL_CATALOG_BRANCH
class CatalogService(Service):
SYNCED = False
@private
async def synced(self):
return self.SYNCED
@accepts(roles=['CATALOG_WRITE'])
@returns()
@job(lock='official_catalog_sync')
async def sync(self, job):
"""
        Sync the TrueNAS catalog to retrieve the latest changes from upstream.
"""
try:
catalog = await self.middleware.call('catalog.config')
job.set_progress(5, 'Updating catalog repository')
await self.middleware.call(
'catalog.update_git_repository', catalog['location'], OFFICIAL_CATALOG_REPO, OFFICIAL_CATALOG_BRANCH
)
job.set_progress(15, 'Reading catalog information')
# Update feature map cache whenever official catalog is updated
await self.middleware.call('catalog.get_feature_map', False)
await self.middleware.call('catalog.retrieve_recommended_apps', False)
await self.middleware.call('catalog.apps', {
'cache': False,
'cache_only': False,
'retrieve_all_trains': True,
'trains': [],
})
except Exception as e:
await self.middleware.call(
'alert.oneshot_create', 'CatalogSyncFailed', {'catalog': OFFICIAL_LABEL, 'error': str(e)}
)
raise
else:
await self.middleware.call('alert.oneshot_delete', 'CatalogSyncFailed', OFFICIAL_LABEL)
job.set_progress(100, f'Synced {OFFICIAL_LABEL!r} catalog')
self.SYNCED = True
self.middleware.create_task(self.middleware.call('app.check_upgrade_alerts'))
@private
def update_git_repository(self, location, repository, branch):
self.middleware.call_sync('network.general.will_perform_activity', 'catalog')
return pull_clone_repository(repository, location, branch)
| 2,149 | Python | .py | 46 | 36.652174 | 116 | 0.643744 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,574 | features.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/features.py | import errno
import json
import os
from apps_schema.features import FEATURES
from middlewared.service import CallError, private, Service
from .apps_util import min_max_scale_version_check_update_impl
class CatalogService(Service):
@private
def get_feature_map(self, cache=True):
if cache and self.middleware.call_sync('cache.has_key', 'catalog_feature_map'):
return self.middleware.call_sync('cache.get', 'catalog_feature_map')
catalog = self.middleware.call_sync('catalog.config')
path = os.path.join(catalog['location'], 'features_capability.json')
if not os.path.exists(path):
raise CallError('Unable to retrieve feature capability mapping for SCALE versions', errno=errno.ENOENT)
with open(path, 'r') as f:
mapping = json.loads(f.read())
self.middleware.call_sync('cache.put', 'catalog_feature_map', mapping, 86400)
return mapping
@private
async def version_supported_error_check(self, version_details):
if version_details['supported']:
return
if not version_details['healthy']:
raise CallError(version_details['healthy_error'])
# There will be 2 scenarios now because of which a version might not be supported
# 1) Missing features
# 2) Minimum/maximum scale version check specified
error_str = ''
missing_features = set(version_details['required_features']) - set(FEATURES)
if missing_features:
error_str = await self.missing_feature_error_message(missing_features)
if err := min_max_scale_version_check_update_impl(version_details, False):
prefix = '\n\n' if error_str else ''
error_str = f'{error_str}{prefix}{" Also" if error_str else ""}{err}'
raise CallError(error_str)
@private
async def missing_feature_error_message(self, missing_features):
try:
mapping = await self.middleware.call('catalog.get_feature_map')
except Exception as e:
self.logger.error('Unable to retrieve feature mapping for SCALE versions: %s', e)
mapping = {}
error_str = 'Catalog app version is not supported due to following missing features:\n'
for index, feature in enumerate(missing_features):
train_message = ''
for k, v in mapping.get(feature, {}).items():
train_message += f'\nFor {k.capitalize()!r} train:\nMinimum SCALE version: {v["min"]}\n'
if v.get('max'):
train_message += f'Maximum SCALE version: {v["max"]}'
else:
train_message += f'Maximum SCALE version: Latest available {k.capitalize()!r} release'
error_str += f'{index + 1}) {feature}{f"{train_message}" if train_message else ""}\n\n'
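        # Illustrative sketch of the generated message (the feature name and
        # versions below are hypothetical):
        #
        #     Catalog app version is not supported due to following missing features:
        #     1) some_feature
        #     For 'Stable' train:
        #     Minimum SCALE version: 24.04
        #     Maximum SCALE version: Latest available 'Stable' release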
return error_str
| 2,880 | Python | .py | 54 | 43.518519 | 115 | 0.645403 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,575 | apps.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/apps.py | from middlewared.schema import accepts, Bool, Datetime, Dict, List, Ref, returns, Str
from middlewared.service import filterable, filterable_returns, Service
from middlewared.utils import filter_list
class AppService(Service):
class Config:
cli_namespace = 'app'
@filterable(roles=['CATALOG_READ'])
@filterable_returns(Ref('available_apps'))
async def latest(self, filters, options):
"""
Retrieve latest updated apps.
"""
return filter_list(
await self.middleware.call(
'app.available', [
['last_update', '!=', None], ['name', '!=', 'ix-app'],
], {'order_by': ['-last_update']}
), filters, options
)
@filterable(roles=['CATALOG_READ'])
@filterable_returns(Dict(
'available_apps',
Bool('healthy', required=True),
Bool('installed', required=True),
Bool('recommended', required=True),
Datetime('last_update', required=True),
List('capabilities', required=True),
List('run_as_context', required=True),
List('categories', required=True),
List('maintainers', required=True),
List('tags', required=True),
List('screenshots', required=True, items=[Str('screenshot')]),
List('sources', required=True, items=[Str('source')]),
Str('name', required=True),
Str('title', required=True),
Str('description', required=True),
Str('app_readme', required=True),
Str('location', required=True),
Str('healthy_error', required=True, null=True),
Str('home', required=True),
Str('latest_version', required=True),
Str('latest_app_version', required=True),
Str('latest_human_version', required=True),
Str('icon_url', null=True, required=True),
Str('train', required=True),
Str('catalog', required=True),
register=True,
        # We do this because if we change anything in catalog.json, even older releases will
        # get the new field, and different roles will start breaking because of it
additional_attrs=True,
))
def available(self, filters, options):
"""
Retrieve all available applications from all configured catalogs.
"""
if not self.middleware.call_sync('catalog.synced'):
self.middleware.call_sync('catalog.sync').wait_sync()
results = []
installed_apps = [
(app['metadata']['name'], app['metadata']['train'])
for app in self.middleware.call_sync('app.query')
]
catalog = self.middleware.call_sync('catalog.config')
for train, train_data in self.middleware.call_sync('catalog.apps').items():
if train not in catalog['preferred_trains']:
continue
for app_data in train_data.values():
results.append({
'catalog': catalog['label'],
'installed': (app_data['name'], train) in installed_apps,
'train': train,
**app_data,
})
return filter_list(results, filters, options)
@accepts(roles=['CATALOG_READ'])
@returns(List(items=[Str('category')]))
async def categories(self):
"""
Retrieve list of valid categories which have associated applications.
"""
return sorted(list(await self.middleware.call('catalog.retrieve_mapped_categories')))
@accepts(Str('app_name'), Str('train'), roles=['CATALOG_READ'])
@returns(List(items=[Ref('available_apps')]))
def similar(self, app_name, train):
"""
Retrieve applications which are similar to `app_name`.
"""
available_apps = self.available()
app = filter_list(available_apps, [['name', '=', app_name], ['train', '=', train]], {'get': True})
similar_apps = {}
# Calculate the number of common categories/tags between app and other apps
app_categories = set(app['categories'])
app_tags = set(app['tags'])
app_similarity = {}
for to_check_app in available_apps:
if all(to_check_app[k] == app[k] for k in ('name', 'catalog', 'train')):
continue
common_categories = set(to_check_app['categories']).intersection(app_categories)
common_tags = set(to_check_app['tags']).intersection(app_tags)
similarity_score = len(common_categories) + len(common_tags)
if similarity_score:
app_similarity[to_check_app['name']] = similarity_score
similar_apps[to_check_app['name']] = to_check_app
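        # Worked example (hypothetical data): if the target app has categories
        # {'media'} and tags {'movies', 'tv'}, an app sharing 'media' and 'tv'
        # scores 2 and sorts before an app sharing only 'media', which scores 1.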
# Sort apps based on the similarity score in descending order
sorted_apps = sorted(app_similarity.keys(), key=lambda x: app_similarity[x], reverse=True)
return [similar_apps[app] for app in sorted_apps]
| 4,923 | Python | .py | 106 | 36.5 | 106 | 0.60175 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,576 | apps_details.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/apps_details.py | import contextlib
import json
import os
from apps_ci.names import CACHED_CATALOG_FILE_NAME
from apps_validation.json_schema_utils import CATALOG_JSON_SCHEMA
from catalog_reader.app_utils import get_app_details_base
from catalog_reader.catalog import retrieve_train_names
from catalog_reader.train_utils import get_train_path
from catalog_reader.recommended_apps import retrieve_recommended_apps
from datetime import datetime
from jsonschema import validate as json_schema_validate, ValidationError as JsonValidationError
from middlewared.schema import accepts, Bool, Dict, List, returns, Str
from middlewared.service import private, Service
from .apps_util import get_app_version_details
from .utils import get_cache_key, OFFICIAL_LABEL
class CatalogService(Service):
class Config:
cli_namespace = 'app.catalog'
CATEGORIES_SET = set()
@private
def train_to_apps_version_mapping(self):
mapping = {}
for train, train_data in self.apps({
'cache': True,
'cache_only': True,
}).items():
mapping[train] = {}
for app_data in train_data.values():
mapping[train][app_data['name']] = {
'version': app_data['latest_version'],
'app_version': app_data['latest_app_version'],
}
return mapping
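    # Illustrative shape of the returned mapping (names and versions are hypothetical):
    #
    #     {'stable': {'plex': {'version': '1.0.0', 'app_version': '1.40.2.8395'}}}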
@private
def cached(self, label):
return self.middleware.call_sync('cache.has_key', get_cache_key(label))
@accepts(
Dict(
'options',
Bool('cache', default=True),
Bool('cache_only', default=False),
Bool('retrieve_all_trains', default=True),
List('trains', items=[Str('train_name')]),
),
roles=['CATALOG_READ']
)
@returns(Dict(
'trains',
additional_attrs=True,
example={
'stable': {
'plex': {
'app_readme': '<h1>Plex</h1>',
'categories': ['media'],
'description': 'Plex is a media server that allows you to stream your media to any Plex client.',
'healthy': True,
'healthy_error': None,
'home': 'https://plex.tv',
'location': '/mnt/.ix-apps/truenas_catalog/stable/plex',
'latest_version': '1.0.0',
'latest_app_version': '1.40.2.8395',
'latest_human_version': '1.40.2.8395_1.0.0',
'last_update': '2024-07-30 13:40:47+00:00',
'name': 'plex',
'recommended': False,
'title': 'Plex',
'maintainers': [
{'email': 'dev@ixsystems.com', 'name': 'truenas', 'url': 'https://www.truenas.com/'},
],
'tags': ['plex', 'media', 'entertainment', 'movies', 'series', 'tv', 'streaming'],
'screenshots': ['https://media.sys.truenas.net/apps/plex/screenshots/screenshot2.png'],
'sources': ['https://plex.tv', 'https://hub.docker.com/r/plexinc/pms-docker'],
'icon_url': 'https://media.sys.truenas.net/apps/plex/icons/icon.png'
},
},
}
))
def apps(self, options):
"""
        Retrieve app details for the configured catalog.
        `options.cache` is a boolean which when set will try to get app details for the catalog from cache,
        if available.
        `options.cache_only` is a boolean which when set will force usage of cache only for retrieving catalog
        information. If the content for the catalog in question is not cached, no content is returned. If
        `options.cache` is unset, this attribute has no effect.
        `options.retrieve_all_trains` is a boolean value which when set will retrieve information for all the trains
        present in the catalog (it is set by default).
        `options.trains` is a list of train name(s) which allows selective filtering to retrieve only information
        for the desired trains in the catalog. If `options.retrieve_all_trains` is set, it has precedence over
        `options.trains`.
"""
catalog = self.middleware.call_sync('catalog.config')
all_trains = options['retrieve_all_trains']
cache_available = False
if options['cache']:
cache_key = get_cache_key(catalog['label'])
try:
orig_cached_data = self.middleware.call_sync('cache.get', cache_key)
except KeyError:
orig_cached_data = None
cache_available = orig_cached_data is not None
if options['cache'] and options['cache_only'] and not cache_available:
return {}
if options['cache'] and cache_available:
cached_data = {}
for train in orig_cached_data:
if not all_trains and train not in options['trains']:
continue
train_data = {}
for catalog_app in orig_cached_data[train]:
train_data[catalog_app] = {k: v for k, v in orig_cached_data[train][catalog_app].items()}
cached_data[train] = train_data
return cached_data
elif not os.path.exists(catalog['location']):
return {}
if all_trains:
# We can only safely say that the catalog is healthy if we retrieve data for all trains
self.middleware.call_sync('alert.oneshot_delete', 'CatalogNotHealthy', catalog['label'])
trains = self.get_trains(catalog, options)
if all_trains:
            # We will only update the cache if we are retrieving data of all trains for a catalog,
            # which happens when we sync catalog(s) periodically or manually.
            # We cache for 90000 seconds, giving the system an extra 1 hour to refresh its cache, which
            # happens after 24h. This means that for a small amount of time it's possible that a user
            # hits a case where the system is trying to access cached data that has expired, so it is
            # read again from disk - hence the extra 1 hour.
self.middleware.call_sync('cache.put', get_cache_key(catalog['label']), trains, 90000)
return trains
@private
def get_trains(self, catalog, options):
if os.path.exists(os.path.join(catalog['location'], CACHED_CATALOG_FILE_NAME)):
            # If the data is malformed or something similar, log the error and return no trains data
try:
return self.retrieve_trains_data_from_json(catalog, options)
except (json.JSONDecodeError, JsonValidationError):
self.logger.error('Invalid catalog json file specified for %r catalog', catalog['id'])
return {}
@private
def retrieve_trains_data_from_json(self, catalog, options):
trains_to_traverse = retrieve_train_names(
get_train_path(catalog['location']), options['retrieve_all_trains'], options['trains']
)
with open(os.path.join(catalog['location'], CACHED_CATALOG_FILE_NAME), 'r') as f:
catalog_data = json.loads(f.read())
json_schema_validate(catalog_data, CATALOG_JSON_SCHEMA)
data = {k: v for k, v in catalog_data.items() if k in trains_to_traverse}
recommended_apps = self.retrieve_recommended_apps(False) if catalog['label'] == OFFICIAL_LABEL else {}
unhealthy_apps = set()
for train in data:
for app in data[train]:
                # We normalize keys here. Specifying some keys in each version of an app would
                # probably be monotonous for an app dev if his app does not consume them, so we
                # fill in the defaults instead. This way we can ensure that all the keys are
                # present for each app in each train from our consumers' perspective.
data[train][app].update({
**{k: v for k, v in get_app_details_base(False).items() if k not in data[train][app]},
'location': os.path.join(get_train_path(catalog['location']), train, app),
})
if data[train][app]['last_update']:
data[train][app]['last_update'] = datetime.strptime(
data[train][app]['last_update'], '%Y-%m-%d %H:%M:%S'
)
if data[train][app]['healthy'] is False:
unhealthy_apps.add(f'{app} ({train} train)')
if train in recommended_apps and app in recommended_apps[train]:
data[train][app]['recommended'] = True
self.CATEGORIES_SET.update(data[train][app].get('categories') or [])
if unhealthy_apps:
self.middleware.call_sync(
'alert.oneshot_create', 'CatalogNotHealthy', {
'catalog': catalog['id'], 'apps': ', '.join(unhealthy_apps)
}
)
return data
@private
def app_version_details(self, version_path, questions_context=None):
if not questions_context:
questions_context = self.middleware.call_sync('catalog.get_normalized_questions_context')
return get_app_version_details(version_path, questions_context)
@private
def retrieve_recommended_apps(self, cache=True):
cache_key = 'recommended_apps'
if cache:
with contextlib.suppress(KeyError):
return self.middleware.call_sync('cache.get', cache_key)
data = retrieve_recommended_apps(self.middleware.call_sync('catalog.config')['location'])
self.middleware.call_sync('cache.put', cache_key, data)
return data
@private
async def get_normalized_questions_context(self):
return {
'timezones': await self.middleware.call('system.general.timezone_choices'),
'system.general.config': await self.middleware.call('system.general.config'),
'certificates': await self.middleware.call('app.certificate_choices'),
'certificate_authorities': await self.middleware.call('app.certificate_authority_choices'),
'ip_choices': await self.middleware.call('app.ip_choices'),
'gpu_choices': await self.middleware.call('app.gpu_choices_internal'),
}
@private
def retrieve_train_names(self, location, all_trains=True, trains_filter=None):
return retrieve_train_names(get_train_path(location), all_trains, trains_filter)
@private
def retrieve_mapped_categories(self):
return self.CATEGORIES_SET
| 10,729 | Python | .py | 203 | 40.871921 | 120 | 0.606295 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,577 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/utils.py | import os
from middlewared.utils import MIDDLEWARE_RUN_DIR
COMMUNITY_TRAIN = 'community'
OFFICIAL_ENTERPRISE_TRAIN = 'enterprise'
OFFICIAL_LABEL = 'TRUENAS'
OFFICIAL_CATALOG_REPO = 'https://github.com/truenas/apps'
OFFICIAL_CATALOG_BRANCH = 'master'
TMP_IX_APPS_CATALOGS = os.path.join(MIDDLEWARE_RUN_DIR, 'ix-apps/catalogs')
def get_cache_key(label: str) -> str:
return f'catalog_{label}_train_details'
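# For example, get_cache_key('TRUENAS') returns 'catalog_TRUENAS_train_details'.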
| 413 | Python | .py | 10 | 39.4 | 75 | 0.778894 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,578 | apps_util.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/apps_util.py | import errno
import json
import jsonschema
import os
import re
from apps_ci.names import CACHED_VERSION_FILE_NAME
from apps_validation.json_schema_utils import VERSION_VALIDATION_SCHEMA
from catalog_reader.app import get_app_version_details as get_catalog_app_version_details
from catalog_reader.questions import normalize_questions
from middlewared.plugins.apps.schema_utils import construct_schema
from middlewared.plugins.update_.utils import can_update
from middlewared.service import CallError
from middlewared.utils import sw_info
RE_VERSION_PATTERN = re.compile(r'(\d{2}\.\d{2}(?:\.\d)*)') # We are only interested in XX.XX here
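# For example, RE_VERSION_PATTERN.findall('24.04.1-MASTER') returns ['24.04.1'].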
def get_app_default_values(version_details: dict) -> dict:
return construct_schema(version_details, {}, False)['new_values']
def custom_scale_version_checks(min_scale_version: str, max_scale_version: str, system_scale_version: str) -> str:
if not (normalized_system_version := RE_VERSION_PATTERN.findall(system_scale_version)):
return 'Unable to determine your TrueNAS system version'
normalized_system_version = normalized_system_version[0]
if min_scale_version and min_scale_version != normalized_system_version and not can_update(
min_scale_version, normalized_system_version
):
return (f'Your TrueNAS system version ({normalized_system_version}) is less than the minimum version '
f'({min_scale_version}) required by this application.')
if max_scale_version and max_scale_version != normalized_system_version and not can_update(
normalized_system_version, max_scale_version
):
return (f'Your TrueNAS system version ({normalized_system_version}) is greater than the maximum version '
f'({max_scale_version}) required by this application.')
return ''
def min_max_scale_version_check_update_impl(version_details: dict, check_supported_key: bool = True) -> str:
    # `check_supported_key` is used because when catalog validation returns the data, it only checks the
    # missing features and makes its decision based on those. So if something is not already supported,
    # we do not want to validate the minimum scale version in that case. However, when we want to report
    # to the user exactly why the app version is not supported, we need to be able to make that distinction.
system_scale_version = sw_info()['version']
min_scale_version = version_details.get('chart_metadata', {}).get('annotations', {}).get('min_scale_version')
max_scale_version = version_details.get('chart_metadata', {}).get('annotations', {}).get('max_scale_version')
if (
version_details.get('healthy', True) and (not check_supported_key or version_details['supported'])
and (min_scale_version or max_scale_version)
):
try:
if any(k in system_scale_version for k in ('MASTER', 'INTERNAL', 'CUSTOM')):
return custom_scale_version_checks(min_scale_version, max_scale_version, system_scale_version)
else:
if (
min_scale_version and min_scale_version != system_scale_version and
not can_update(min_scale_version, system_scale_version)
):
return (f'Your TrueNAS system version ({system_scale_version}) is less than the minimum version '
f'({min_scale_version}) required by this application.')
if (
max_scale_version and system_scale_version != max_scale_version and
not can_update(system_scale_version, max_scale_version)
):
return (f'Your TrueNAS system version ({system_scale_version}) is greater than the maximum version '
f'({max_scale_version}) required by this application.')
except Exception:
# In case invalid version string is specified we don't want a traceback here
# let's just explicitly not support the app version in question
return 'Unable to complete TrueNAS system version compatibility checks'
return ''
def minimum_scale_version_check_update(version_details: dict) -> dict:
version_details['supported'] = not bool(min_max_scale_version_check_update_impl(version_details))
return version_details
def get_app_version_details(version_path: str, questions_context: dict) -> dict:
return minimum_scale_version_check_update(get_catalog_app_version_details(version_path, questions_context, {
'default_values_callable': get_app_default_values,
}))
def get_app_details(app_location: str, app_data: dict, questions_context: dict) -> dict:
app_name = os.path.basename(app_location)
app_data['versions'] = retrieve_cached_versions_data(os.path.join(app_location, CACHED_VERSION_FILE_NAME), app_name)
# At this point, we have cached versions and apps data - now we want to do the following:
# 1) Update location in each version entry
# 2) Make sure default values have been normalised
# 3) Normalise questions
for version_name, version_data in app_data['versions'].items():
minimum_scale_version_check_update(version_data)
version_data.update({
'location': os.path.join(app_location, version_name),
'values': get_app_default_values(version_data),
})
normalize_questions(version_data, questions_context)
return app_data
def retrieve_cached_versions_data(version_path: str, app_name: str) -> dict:
try:
with open(version_path, 'r') as f:
data = json.loads(f.read())
jsonschema.validate(data, VERSION_VALIDATION_SCHEMA)
except FileNotFoundError:
raise CallError(f'Unable to locate {app_name!r} versions', errno=errno.ENOENT)
except IsADirectoryError:
raise CallError(f'{version_path!r} must be a file')
except json.JSONDecodeError:
raise CallError(f'Unable to parse {version_path!r} file')
except jsonschema.ValidationError as e:
raise CallError(f'Unable to validate {version_path!r} file: {e}')
else:
return data
| 24,579 | app_version.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/app_version.py |
import errno
import os
import stat
from catalog_reader.train_utils import get_train_path
from middlewared.schema import accepts, Bool, Dict, List, returns, Str
from middlewared.service import CallError, Service
from .apps_util import get_app_details
class CatalogService(Service):
class Config:
cli_namespace = 'app.catalog'
@accepts(
Str('app_name'),
Dict(
'app_version_details',
Str('train', required=True),
),
roles=['CATALOG_READ'],
)
@returns(Dict(
# TODO: Make sure keys here are mapped appropriately
'app_details',
Str('name', required=True),
List('categories', items=[Str('category')], required=True),
List('maintainers', required=True),
List('tags', required=True),
List('screenshots', required=True, items=[Str('screenshot')]),
List('sources', required=True, items=[Str('source')]),
Str('app_readme', null=True, required=True),
Str('location', required=True),
Bool('healthy', required=True),
Bool('recommended', required=True),
        Str('healthy_error', required=True, null=True),
Dict('versions', required=True, additional_attrs=True),
Str('latest_version', required=True, null=True),
Str('latest_app_version', required=True, null=True),
Str('latest_human_version', required=True, null=True),
Str('last_update', required=True, null=True),
Str('icon_url', required=True, null=True),
Str('home', required=True),
))
def get_app_details(self, app_name, options):
"""
        Retrieve details of the `app_name` app in the specified `app_version_details.train` train.
"""
catalog = self.middleware.call_sync('catalog.config')
app_location = os.path.join(get_train_path(catalog['location']), options['train'], app_name)
try:
if not stat.S_ISDIR(os.stat(app_location).st_mode):
raise CallError(f'{app_location!r} must be a directory')
except FileNotFoundError:
raise CallError(f'Unable to locate {app_name!r} at {app_location!r}', errno=errno.ENOENT)
train_data = self.middleware.call_sync('catalog.apps', {
'retrieve_all_trains': False,
'trains': [options['train']],
})
if options['train'] not in train_data:
raise CallError(f'Unable to locate {options["train"]!r} train')
elif app_name not in train_data[options['train']]:
raise CallError(f'Unable to locate {app_name!r} app in {options["train"]!r} train')
questions_context = self.middleware.call_sync('catalog.get_normalized_questions_context')
app_details = get_app_details(app_location, train_data[options['train']][app_name], questions_context)
recommended_apps = self.middleware.call_sync('catalog.retrieve_recommended_apps')
if options['train'] in recommended_apps and app_name in recommended_apps[options['train']]:
app_details['recommended'] = True
return app_details
| 24,580 | git_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/git_utils.py |
import logging
import threading
import typing
from collections import defaultdict
from middlewared.service import CallError
from middlewared.utils.git import clone_repository, checkout_repository, update_repo, validate_git_repo
GIT_LOCK = defaultdict(threading.Lock)
logger = logging.getLogger('catalog_utils')
def convert_repository_to_path(git_repository_uri: str, branch: str) -> str:
return git_repository_uri.split('://', 1)[-1].replace('/', '_').replace('.', '_') + f'_{branch}'
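# Illustrative example:
#   convert_repository_to_path('https://github.com/truenas/apps', 'master')
#   -> 'github_com_truenas_apps_master'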
def pull_clone_repository(repository_uri: str, destination: str, branch: str, depth: typing.Optional[int] = 1):
with GIT_LOCK[repository_uri]:
valid_repo = validate_git_repo(destination)
clone_repo = not bool(valid_repo)
if valid_repo:
            # We will try to check out the branch and do a git pull; if either of these
            # operations fails, we will clone the repository again. They might fail if the
            # user has been manually modifying the repo or if the repo was force-pushed.
try:
checkout_repository(destination, branch)
update_repo(destination, branch)
except CallError:
clone_repo = True
if clone_repo:
try:
clone_repository(repository_uri, destination, branch, depth)
except CallError as e:
raise CallError(f'Failed to clone {repository_uri!r} repository at {destination!r} destination: {e}')
return True
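# A minimal usage sketch (the destination path shown is hypothetical):
#   pull_clone_repository(OFFICIAL_CATALOG_REPO, '/tmp/ix-apps/catalogs/github_com_truenas_apps_master', 'master')
# serializes concurrent callers per-repository via GIT_LOCK, and falls back to a fresh
# shallow clone (default depth=1) whenever checkout/pull of the existing repo fails.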
| 24,581 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/catalog/update.py |
import os
import middlewared.sqlalchemy as sa
from middlewared.plugins.docker.state_utils import catalog_ds_path, CATALOG_DATASET_NAME
from middlewared.schema import accepts, Dict, List, returns, Str
from middlewared.service import ConfigService, private, ValidationErrors
from middlewared.utils import ProductType
from middlewared.validators import Match
from .utils import OFFICIAL_ENTERPRISE_TRAIN, OFFICIAL_LABEL, TMP_IX_APPS_CATALOGS
class CatalogModel(sa.Model):
__tablename__ = 'services_catalog'
label = sa.Column(sa.String(255), nullable=False, unique=True, primary_key=True)
preferred_trains = sa.Column(sa.JSON(list))
class CatalogService(ConfigService):
class Config:
datastore = 'services.catalog'
datastore_extend = 'catalog.extend'
datastore_extend_context = 'catalog.extend_context'
datastore_primary_key = 'label'
datastore_primary_key_type = 'string'
cli_namespace = 'app.catalog'
namespace = 'catalog'
role_prefix = 'CATALOG'
ENTRY = Dict(
'catalog_create',
List('preferred_trains'),
Str('id'),
Str(
'label', required=True, validators=[Match(
r'^\w+[\w.-]*$',
explanation='Label must start with an alphanumeric character and can include dots and dashes.'
)],
max_length=60,
),
register=True,
)
@private
def extend(self, data, context):
data.update({
'id': data['label'],
'location': context['catalog_dir'],
})
return data
@accepts()
@returns(List('trains', items=[Str('train')]))
async def trains(self):
"""
Retrieve available trains.
"""
return list(await self.middleware.call('catalog.apps', {'cache': True, 'cache_only': True}))
@private
async def extend_context(self, rows, extra):
if await self.dataset_mounted():
catalog_dir = catalog_ds_path()
else:
# FIXME: This can eat lots of memory if it's a large catalog
catalog_dir = TMP_IX_APPS_CATALOGS
return {
'catalog_dir': catalog_dir,
}
@private
async def dataset_mounted(self):
if docker_ds := (await self.middleware.call('docker.config'))['dataset']:
return bool(await self.middleware.call(
'filesystem.mount_info', [
['mount_source', '=', os.path.join(docker_ds, CATALOG_DATASET_NAME)],
['fs_type', '=', 'zfs'],
],
))
return False
@private
async def common_validation(self, schema, data):
verrors = ValidationErrors()
if not data['preferred_trains']:
verrors.add(
f'{schema}.preferred_trains',
'At least 1 preferred train must be specified.'
)
if (
await self.middleware.call('system.product_type') == ProductType.SCALE_ENTERPRISE and
OFFICIAL_ENTERPRISE_TRAIN not in data['preferred_trains']
):
verrors.add(
f'{schema}.preferred_trains',
f'Enterprise systems must at least have {OFFICIAL_ENTERPRISE_TRAIN!r} train enabled'
)
verrors.check()
@accepts(
Dict(
'catalog_update',
List('preferred_trains'),
update=True
)
)
async def do_update(self, data):
"""
Update catalog preferences.
"""
await self.common_validation('catalog_update', data)
await self.middleware.call('datastore.update', self._config.datastore, OFFICIAL_LABEL, data)
return await self.config()
@private
async def update_train_for_enterprise(self):
catalog = await self.middleware.call('catalog.config')
if await self.middleware.call('system.product_type') == ProductType.SCALE_ENTERPRISE:
preferred_trains = []
# Logic coming from here
# https://github.com/truenas/middleware/blob/e7f2b29b6ff8fadcc9fdd8d7f104cbbf5172fc5a/src/middlewared
# /middlewared/plugins/catalogs_linux/update.py#L341
can_have_multiple_trains = not await self.middleware.call('system.is_ha_capable') and not (
await self.middleware.call('failover.hardware')
).startswith('TRUENAS-R')
if OFFICIAL_ENTERPRISE_TRAIN not in catalog['preferred_trains'] and can_have_multiple_trains:
preferred_trains = catalog['preferred_trains'] + [OFFICIAL_ENTERPRISE_TRAIN]
elif not can_have_multiple_trains:
preferred_trains = [OFFICIAL_ENTERPRISE_TRAIN]
if preferred_trains:
await self.middleware.call(
'datastore.update', self._config.datastore, OFFICIAL_LABEL, {
'preferred_trains': preferred_trains,
},
)
async def enterprise_train_update(middleware, prev_product_type, *args, **kwargs):
await middleware.call('catalog.update_train_for_enterprise')
async def setup(middleware):
middleware.register_hook('system.post_license_update', enterprise_train_update)
| 24,582 | host_initiator.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/host_initiator.py |
from middlewared.schema import accepts, Bool, Int, List
from middlewared.service import CallError, Service, ServiceChangeMixin
class iSCSIHostService(Service, ServiceChangeMixin):
class Config:
namespace = "iscsi.host"
@accepts(Int("id"), roles=['SHARING_ISCSI_HOST_READ'])
async def get_initiators(self, id_):
"""
Returns initiator groups associated with host `id`.
"""
host = await self.middleware.call("iscsi.host.get_instance", id_)
return [
initiator
for initiator in await self.middleware.call("iscsi.initiator.query")
if set(host["iqns"]) & set(initiator["initiators"])
]
@accepts(Int("id"), List("ids", items=[Int("id")]), Bool("force", default=False), roles=['SHARING_ISCSI_HOST_WRITE'])
async def set_initiators(self, id_, ids, force):
"""
Associates initiator groups `ids` with host `id`.
        Use `force` to allow adding the first initiator to, or removing the last initiator from, an initiator group.
"""
host = await self.middleware.call("iscsi.host.get_instance", id_)
update = []
for initiator in await self.middleware.call("iscsi.initiator.query"):
initiators = set(initiator["initiators"])
had_initiators = bool(initiators)
if initiator["id"] in ids:
initiators |= set(host["iqns"])
if not force and not had_initiators and initiators:
raise CallError(
f"Unable to add initiator to the Initiator Group {initiator['id']} " +
(f"({initiator['comment']}) " if initiator["comment"] else "") +
"that includes all initiators."
)
else:
initiators -= set(host["iqns"])
if not force and had_initiators and not initiators:
raise CallError(
f"Unable to remove the last remaining initiator from Initiator Group {initiator['id']}" +
(f" ({initiator['comment']})" if initiator["comment"] else "")
)
update.append((initiator["id"], list(initiators)))
for id_, initiators in update:
await self.middleware.call("iscsi.initiator.update", id_, {"initiators": initiators})
await self._service_change("iscsitarget", "reload")
| 24,583 | iscsi_global.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/iscsi_global.py |
import asyncio
import re
import socket
import middlewared.sqlalchemy as sa
from middlewared.async_validators import validate_port
from middlewared.schema import Bool, Dict, Int, List, Str, accepts
from middlewared.service import SystemServiceService, ValidationErrors, private
from middlewared.utils import run
from middlewared.validators import IpAddress, Port, Range
RE_IP_PORT = re.compile(r'^(.+?)(:[0-9]+)?$')
class ISCSIGlobalModel(sa.Model):
__tablename__ = 'services_iscsitargetglobalconfiguration'
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_basename = sa.Column(sa.String(120))
iscsi_isns_servers = sa.Column(sa.Text())
iscsi_pool_avail_threshold = sa.Column(sa.Integer(), nullable=True)
iscsi_alua = sa.Column(sa.Boolean(), default=False)
iscsi_listen_port = sa.Column(sa.Integer(), nullable=False, default=3260)
class ISCSIGlobalService(SystemServiceService):
class Config:
datastore = 'services.iscsitargetglobalconfiguration'
datastore_extend = 'iscsi.global.config_extend'
datastore_prefix = 'iscsi_'
service = 'iscsitarget'
namespace = 'iscsi.global'
cli_namespace = 'sharing.iscsi.global'
role_prefix = 'SHARING_ISCSI_GLOBAL'
@private
def port_is_listening(self, host, port, timeout=5):
ret = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout:
s.settimeout(timeout)
try:
s.connect((host, port))
ret = True
except Exception:
self.logger.debug("connection to %s failed", host, exc_info=True)
ret = False
finally:
s.close()
return ret
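    # e.g. port_is_listening('192.0.2.10', 3205) -> True only if a TCP connection to the
    # (hypothetical) address succeeds within the timeout, False otherwise.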
@private
def validate_isns_server(self, server, verrors):
"""
        Check whether a valid IP[:PORT] was supplied. Returns None on failure,
        or a (server, ip, port) tuple on success.
"""
invalid_ip_port_tuple = f'Server "{server}" is not a valid IP(:PORT)? tuple.'
reg = RE_IP_PORT.search(server)
if not reg:
verrors.add('iscsiglobal_update.isns_servers', invalid_ip_port_tuple)
return None
ip = reg.group(1)
if ip and ip[0] == '[' and ip[-1] == ']':
ip = ip[1:-1]
# First check that a valid IP was supplied
try:
ip_validator = IpAddress()
ip_validator(ip)
except ValueError:
verrors.add('iscsiglobal_update.isns_servers', invalid_ip_port_tuple)
return None
        # Next check the port number (if supplied). Use the regex capture rather than a
        # naive split(':') so that bracketed IPv6 addresses with a port parse correctly.
        if reg.group(2):
            try:
                port = int(reg.group(2)[1:])
                port_validator = Port()
                port_validator(port)
            except ValueError:
                verrors.add('iscsiglobal_update.isns_servers', invalid_ip_port_tuple)
                return None
        else:
            port = 3205
return (server, ip, port)
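    # Illustrative parses (hedged examples; addresses are hypothetical, `v` is a ValidationErrors):
    #   validate_isns_server('192.168.0.1', v)      -> ('192.168.0.1', '192.168.0.1', 3205)
    #   validate_isns_server('192.168.0.1:3205', v) -> ('192.168.0.1:3205', '192.168.0.1', 3205)
    #   validate_isns_server('[::1]:3205', v)       -> ('[::1]:3205', '::1', 3205)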
@private
def config_extend(self, data):
data['isns_servers'] = data['isns_servers'].split()
return data
@accepts(Dict(
'iscsiglobal_update',
Str('basename'),
List('isns_servers', items=[Str('server')]),
Int('listen_port', validators=[Range(min_=1025, max_=65535)], default=3260),
Int('pool_avail_threshold', validators=[Range(min_=1, max_=99)], null=True),
Bool('alua'),
update=True
), audit='Update iSCSI')
async def do_update(self, data):
"""
`alua` is a no-op for FreeNAS.
"""
old = await self.config()
new = old.copy()
new.update(data)
verrors = ValidationErrors()
servers = data.get('isns_servers') or []
server_addresses = []
for server in servers:
if result := self.validate_isns_server(server, verrors):
server_addresses.append(result)
if server_addresses:
# For the valid addresses, we will check connectivity in parallel
coroutines = [
self.middleware.call(
'iscsi.global.port_is_listening', ip, port
) for (server, ip, port) in server_addresses
]
results = await asyncio.gather(*coroutines)
for (server, ip, port), result in zip(server_addresses, results):
if not result:
verrors.add('iscsiglobal_update.isns_servers', f'Server "{server}" could not be contacted.')
verrors.extend(await validate_port(
self.middleware, 'iscsiglobal_update.listen_port', new['listen_port'], 'iscsi.global'
))
verrors.check()
new['isns_servers'] = '\n'.join(servers)
licensed = await self.middleware.call('failover.licensed')
if licensed and old['alua'] != new['alua']:
if not new['alua']:
await self.middleware.call('failover.call_remote', 'service.stop', ['iscsitarget'])
await self.middleware.call('failover.call_remote', 'iscsi.target.logout_ha_targets')
await self._update_service(old, new, options={'ha_propagate': False})
if licensed and old['alua'] != new['alua']:
if new['alua']:
await self.middleware.call('failover.call_remote', 'service.start', ['iscsitarget'])
# Force a scst.conf update
# When turning off ALUA we want to clean up scst.conf, and when turning it on
# we want to give any existing target a kick to come up as a dev_disk
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
# If we have just turned off iSNS then work around a short-coming in scstadmin reload
if old['isns_servers'] != new['isns_servers'] and not servers:
await self.middleware.call('iscsi.global.stop_active_isns')
if licensed:
try:
await self.middleware.call('failover.call_remote', 'iscsi.global.stop_active_isns')
except Exception:
self.logger.error('Unhandled exception in stop_active_isns on remote controller', exc_info=True)
return await self.config()
@private
async def stop_active_isns(self):
"""
Unfortunately a SCST reload does not stop a previously active iSNS config, so
need to be able to perform an explicit action.
"""
cp = await run([
'scstadmin', '-force', '-noprompt', '-set_drv_attr', 'iscsi',
'-attributes', 'iSNSServer=""'
], check=False)
if cp.returncode:
self.logger.warning('Failed to stop active iSNS: %s', cp.stderr.decode())
@accepts(roles=['SHARING_ISCSI_GLOBAL_READ'])
async def alua_enabled(self):
"""
Returns whether iSCSI ALUA is enabled or not.
"""
if not await self.middleware.call('system.is_enterprise'):
return False
if not await self.middleware.call('failover.licensed'):
return False
# TODO: FIBRECHANNEL not currently supported in SCALE
# license_ = await self.middleware.call('system.license')
# if license_ is not None and 'FIBRECHANNEL' in license_['features']:
# return True
return (await self.middleware.call('iscsi.global.config'))['alua']
| 24,584 | global_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/global_linux.py |
import glob
import os
from middlewared.schema import Bool, Dict, Int, Str
from middlewared.service import filterable, filterable_returns, private, Service
from middlewared.utils import filter_list, run
class ISCSIGlobalService(Service):
class Config:
namespace = 'iscsi.global'
@filterable(roles=['SHARING_ISCSI_GLOBAL_READ'])
@filterable_returns(Dict(
'session',
Str('initiator'),
Str('initiator_addr'),
Str('initiator_alias', null=True),
Str('target'),
Str('target_alias'),
Str('header_digest', null=True),
Str('data_digest', null=True),
Int('max_data_segment_length', null=True),
Int('max_receive_data_segment_length', null=True),
Int('max_burst_length', null=True),
Int('first_burst_length', null=True),
Bool('immediate_data'),
Bool('iser'),
Bool('offload'),
))
def sessions(self, filters, options):
"""
Get a list of currently running iSCSI sessions. This includes initiator and target names
and the unique connection IDs.
"""
sessions = []
global_info = self.middleware.call_sync('iscsi.global.config')
base_path = '/sys/kernel/scst_tgt/targets/iscsi'
for target_dir in glob.glob(f'{base_path}/{global_info["basename"]}*'):
target = target_dir.rsplit('/', 1)[-1]
if target.startswith(f'{global_info["basename"]}:HA:'):
continue
for session in os.listdir(os.path.join(target_dir, 'sessions')):
session_dir = os.path.join(target_dir, 'sessions', session)
ip_file = glob.glob(f'{session_dir}/*/ip')
if not ip_file:
continue
                # The initiator alias is an additional name sent by the initiator, but we are unable to retrieve it from SCST
session_dict = {
'initiator': session.rsplit('#', 1)[0],
'initiator_alias': None,
'target': target,
'target_alias': target.rsplit(':', 1)[-1],
'header_digest': None,
'data_digest': None,
'max_data_segment_length': None,
'max_receive_data_segment_length': None,
'max_xmit_data_segment_length': None,
'max_burst_length': None,
'first_burst_length': None,
'immediate_data': False,
'iser': False,
                    'offload': False,  # Offload refers to the Chelsio NIC driver for iSCSI offload, which we are not using so far
}
with open(ip_file[0], 'r') as f:
session_dict['initiator_addr'] = f.read().strip()
for k, f, op in (
('header_digest', 'HeaderDigest', None),
('data_digest', 'DataDigest', None),
('max_burst_length', 'MaxBurstLength', lambda i: int(i)),
('max_receive_data_segment_length', 'MaxRecvDataSegmentLength', lambda i: int(i)),
('max_xmit_data_segment_length', 'MaxXmitDataSegmentLength', lambda i: int(i)),
('first_burst_length', 'FirstBurstLength', lambda i: int(i)),
('immediate_data', 'ImmediateData', lambda i: True if i == 'Yes' else False),
):
f_path = os.path.join(session_dir, f)
if os.path.exists(f_path):
with open(f_path, 'r') as fd:
data = fd.read().strip()
if data != 'None':
if op:
data = op(data)
session_dict[k] = data
                # We get the recv/xmit data segment lengths; staying consistent with FreeBSD, we
                # take the maximum of the two and report it as max_data_segment_length
if session_dict['max_xmit_data_segment_length'] and session_dict['max_receive_data_segment_length']:
session_dict['max_data_segment_length'] = max(
session_dict['max_receive_data_segment_length'], session_dict['max_xmit_data_segment_length']
)
sessions.append(session_dict)
return filter_list(sessions, filters, options)
@private
def resync_lun_size_for_zvol(self, id_):
if not self.middleware.call_sync('service.started', 'iscsitarget'):
return
extent = self.middleware.call_sync(
'iscsi.extent.query', [['enabled', '=', True], ['path', '=', f'zvol/{id_}']],
{'select': ['name', 'enabled', 'path']}
)
if not extent:
return
try:
# CORE ctl device names are incompatible with SCALE SCST
# so (similarly to scst.mako.conf) replace period with underscore, slash with dash
extent_name = extent[0]["name"].replace('.', '_').replace('/', '-')
with open(f'/sys/kernel/scst_tgt/devices/{extent_name}/resync_size', 'w') as f:
f.write('1')
except Exception as e:
if isinstance(e, OSError) and e.errno == 124:
# 124 == Wrong medium type
# This is raised when all the iscsi targets are removed causing /etc/scst.conf to
# be written with a "blank" config. Once this occurs, any time a new iscsi target
# is added and the size gets changed, it will raise this error. In my testing,
# SCST sees the zvol size change and so does the initiator so it's safe to ignore.
pass
else:
self.logger.warning('Failed to resync lun size for %r', extent[0]['name'], exc_info=True)
@private
def resync_lun_size_for_file(self, path):
if not self.middleware.call_sync('service.started', 'iscsitarget'):
return
extent = self.middleware.call_sync(
'iscsi.extent.query', [
['enabled', '=', True], ['type', '=', 'FILE'], ['path', '=', path]
], {'select': ['enabled', 'type', 'path', 'name']}
)
if not extent:
return
try:
extent_name = extent[0]["name"].replace('.', '_')
with open(f'/sys/kernel/scst_tgt/devices/{extent_name}/resync_size', 'w') as f:
f.write('1')
except Exception as e:
if isinstance(e, OSError) and e.errno == 124:
# 124 == Wrong medium type
# This is raised when all the iscsi targets are removed causing /etc/scst.conf to
# be written with a "blank" config. Once this occurs, any time a new iscsi target
# is added and the size gets changed, it will raise this error. In my testing,
# SCST sees the zvol size change and so does the initiator so it's safe to ignore.
pass
else:
self.logger.warning('Failed to resync lun size for %r', extent[0]['name'], exc_info=True)
@private
async def terminate_luns_for_pool(self, pool_name):
if not await self.middleware.call('service.started', 'iscsitarget'):
return
g_config = await self.middleware.call('iscsi.global.config')
targets = {t['id']: t for t in await self.middleware.call('iscsi.target.query')}
extents = {
t['id']: t for t in await self.middleware.call(
'iscsi.extent.query', [['enabled', '=', True]], {'select': ['enabled', 'path', 'id']}
)
}
for associated_target in filter(
lambda a: a['extent'] in extents and extents[a['extent']]['path'].startswith(f'zvol/{pool_name}/'),
await self.middleware.call('iscsi.targetextent.query')
):
self.middleware.logger.debug('Terminating associated target %r', associated_target['id'])
cp = await run([
'scstadmin', '-noprompt', '-rem_lun', str(associated_target['lunid']), '-driver',
'iscsi', '-target', f'{g_config["basename"]}:{targets[associated_target["target"]]["name"]}',
'-group', 'security_group'
], check=False)
if cp.returncode:
self.middleware.logger.error(
'Failed to remove associated target %r : %s', associated_target['id'], cp.stderr.decode()
)
| 24,585 | initiator.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/initiator.py |
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Dict, Int, List, Patch, Str
from middlewared.service import CRUDService, private
def initiator_summary(data):
"""Select a human-readable string representing this initiator"""
if title := data.get('comment'):
return title
initiators = data.get('initiators', [])
count = len(initiators)
if count == 0:
return 'Allow All initiators'
elif count == 1:
return initiators[0]
else:
return initiators[0] + ',...'
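# Illustrative summaries (initiator names are hypothetical):
#   initiator_summary({'comment': 'web tier'})                           -> 'web tier'
#   initiator_summary({'initiators': []})                                -> 'Allow All initiators'
#   initiator_summary({'initiators': ['iqn.1993-08.org.debian:01:abc']}) -> 'iqn.1993-08.org.debian:01:abc'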
class iSCSITargetAuthorizedInitiatorModel(sa.Model):
__tablename__ = 'services_iscsitargetauthorizedinitiator'
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_target_initiator_initiators = sa.Column(sa.Text(), default="ALL")
iscsi_target_initiator_comment = sa.Column(sa.String(120))
class iSCSITargetAuthorizedInitiator(CRUDService):
class Config:
namespace = 'iscsi.initiator'
datastore = 'services.iscsitargetauthorizedinitiator'
datastore_prefix = 'iscsi_target_initiator_'
datastore_extend = 'iscsi.initiator.extend'
cli_namespace = 'sharing.iscsi.target.authorized_initiator'
role_prefix = 'SHARING_ISCSI_INITIATOR'
@accepts(Dict(
'iscsi_initiator_create',
List('initiators'),
Str('comment'),
register=True
), audit='Create iSCSI initiator', audit_extended=lambda data: initiator_summary(data))
async def do_create(self, data):
"""
Create an iSCSI Initiator.
`initiators` is a list of initiator hostnames which are authorized to access an iSCSI Target. To allow all
possible initiators, `initiators` can be left empty.
"""
await self.compress(data)
data['id'] = await self.middleware.call(
'datastore.insert', self._config.datastore, data,
{'prefix': self._config.datastore_prefix})
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(data['id'])
@accepts(
Int('id'),
Patch(
'iscsi_initiator_create',
'iscsi_initiator_update',
('attr', {'update': True})
),
audit='Update iSCSI initiator',
audit_callback=True,
)
async def do_update(self, audit_callback, id_, data):
"""
Update iSCSI initiator of `id`.
"""
old = await self.get_instance(id_)
audit_callback(initiator_summary(old))
new = old.copy()
new.update(data)
await self.compress(new)
await self.middleware.call(
'datastore.update', self._config.datastore, id_, new,
{'prefix': self._config.datastore_prefix})
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(id_)
@accepts(Int('id'),
audit='Delete iSCSI initiator',
audit_callback=True,
)
async def do_delete(self, audit_callback, id_):
"""
Delete iSCSI initiator of `id`.
"""
old = await self.get_instance(id_)
audit_callback(initiator_summary(old))
result = await self.middleware.call(
'datastore.delete', self._config.datastore, id_
)
await self._service_change('iscsitarget', 'reload')
return result
@private
async def compress(self, data):
initiators = data['initiators']
initiators = 'ALL' if not initiators else '\n'.join(initiators)
data['initiators'] = initiators
return data
@private
async def extend(self, data):
initiators = data['initiators']
initiators = [] if initiators == 'ALL' else initiators.split()
data['initiators'] = initiators
return data
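    # Round-trip sketch (hypothetical names): compress({'initiators': ['iqn.a', 'iqn.b']})
    # stores the list as the newline-joined string 'iqn.a\niqn.b' (or 'ALL' for an empty
    # list), and extend() splits it back into a list ('ALL' becomes []).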
| 24,586 | extents.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/extents.py |
import hashlib
import os
import pathlib
import secrets
import subprocess
import uuid
import middlewared.sqlalchemy as sa
from middlewared.async_validators import check_path_resides_within_volume
from middlewared.plugins.zfs_.utils import zvol_path_to_name
from middlewared.schema import accepts, Bool, Dict, Int, Patch, Str
from middlewared.service import CallError, private, SharingService, ValidationErrors
from middlewared.utils.size import format_size
from middlewared.validators import Range
from collections import defaultdict
from .utils import MAX_EXTENT_NAME_LEN
class iSCSITargetExtentModel(sa.Model):
__tablename__ = 'services_iscsitargetextent'
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_target_extent_name = sa.Column(sa.String(120), unique=True)
iscsi_target_extent_serial = sa.Column(sa.String(16))
iscsi_target_extent_type = sa.Column(sa.String(120))
iscsi_target_extent_path = sa.Column(sa.String(120))
iscsi_target_extent_filesize = sa.Column(sa.String(120), default=0)
iscsi_target_extent_blocksize = sa.Column(sa.Integer(), default=512)
iscsi_target_extent_pblocksize = sa.Column(sa.Boolean(), default=False)
iscsi_target_extent_avail_threshold = sa.Column(sa.Integer(), nullable=True)
iscsi_target_extent_comment = sa.Column(sa.String(120))
iscsi_target_extent_naa = sa.Column(sa.String(34), unique=True)
iscsi_target_extent_insecure_tpc = sa.Column(sa.Boolean(), default=True)
iscsi_target_extent_xen = sa.Column(sa.Boolean(), default=False)
iscsi_target_extent_rpm = sa.Column(sa.String(20), default='SSD')
iscsi_target_extent_ro = sa.Column(sa.Boolean(), default=False)
iscsi_target_extent_enabled = sa.Column(sa.Boolean(), default=True)
iscsi_target_extent_vendor = sa.Column(sa.Text(), nullable=True)
class iSCSITargetExtentService(SharingService):
share_task_type = 'iSCSI Extent'
class Config:
namespace = 'iscsi.extent'
datastore = 'services.iscsitargetextent'
datastore_prefix = 'iscsi_target_extent_'
datastore_extend = 'iscsi.extent.extend'
cli_namespace = 'sharing.iscsi.extent'
role_prefix = 'SHARING_ISCSI_EXTENT'
@private
async def sharing_task_determine_locked(self, data, locked_datasets):
"""
`mountpoint` attribute of zvol will be unpopulated and so we
first try direct comparison between the two strings.
The parent dataset of a zvol may also be locked, which renders
the zvol inaccessible as well, and so we need to continue to the
common check for whether the path is in the locked datasets.
"""
path = await self.get_path_field(data)
if data['type'] == 'DISK' and any(path == os.path.join('/mnt', d['id']) for d in locked_datasets):
return True
return await self.middleware.call('pool.dataset.path_in_locked_datasets', path, locked_datasets)
@accepts(Dict(
'iscsi_extent_create',
Str('name', required=True, max_length=MAX_EXTENT_NAME_LEN),
Str('type', enum=['DISK', 'FILE'], default='DISK'),
Str('disk', default=None, null=True),
Str('serial', default=None, null=True),
Str('path', default=None, null=True),
Int('filesize', default=0),
Int('blocksize', enum=[512, 1024, 2048, 4096], default=512),
Bool('pblocksize'),
Int('avail_threshold', validators=[Range(min_=1, max_=99)], null=True),
Str('comment'),
Bool('insecure_tpc', default=True),
Bool('xen'),
Str('rpm', enum=['UNKNOWN', 'SSD', '5400', '7200', '10000', '15000'],
default='SSD'),
Bool('ro', default=False),
Bool('enabled', default=True),
register=True
), audit='Create iSCSI extent', audit_extended=lambda data: data["name"])
async def do_create(self, data):
"""
Create an iSCSI Extent.
        When `type` is set to FILE, the `filesize` attribute is used and represents a number of bytes; if
        not zero, `filesize` should be a multiple of `blocksize`. `path` is a required attribute when `type` is FILE.
        When `type` is set to DISK, a valid ZFS volume is required.
`insecure_tpc` when enabled allows an initiator to bypass normal access control and access any scannable
target. This allows xcopy operations otherwise blocked by access control.
`xen` is a boolean value which is set to true if Xen is being used as the iSCSI initiator.
`ro` when set to true prevents the initiator from writing to this LUN.
"""
verrors = ValidationErrors()
await self.middleware.call('iscsi.extent.validate', data)
await self.clean(data, 'iscsi_extent_create', verrors)
verrors.check()
await self.middleware.call('iscsi.extent.save', data, 'iscsi_extent_create', verrors)
# This change is being made in conjunction with threads_num being specified in scst.conf
if data['type'] == 'DISK' and data['path'].startswith('zvol/'):
zvolname = zvol_path_to_name(os.path.join('/dev', data['path']))
await self.middleware.call('zfs.dataset.update', zvolname, {'properties': {'volthreading': {'value': 'off'}}})
data['id'] = await self.middleware.call(
'datastore.insert', self._config.datastore, {**data, 'vendor': 'TrueNAS'},
{'prefix': self._config.datastore_prefix}
)
return await self.get_instance(data['id'])
@accepts(
Int('id'),
Patch(
'iscsi_extent_create',
'iscsi_extent_update',
('attr', {'update': True})
),
audit='Update iSCSI extent',
audit_callback=True,
)
async def do_update(self, audit_callback, id_, data):
"""
Update iSCSI Extent of `id`.
"""
verrors = ValidationErrors()
old = await self.get_instance(id_)
audit_callback(old['name'])
new = old.copy()
new.update(data)
await self.middleware.call('iscsi.extent.validate', new)
await self.clean(
new, 'iscsi_extent_update', verrors, old=old
)
verrors.check()
await self.middleware.call('iscsi.extent.save', new, 'iscsi_extent_create', verrors, old)
verrors.check()
new.pop(self.locked_field)
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
new,
{'prefix': self._config.datastore_prefix}
)
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(id_)
@accepts(
Int('id'),
Bool('remove', default=False),
Bool('force', default=False),
audit='Delete iSCSI extent',
audit_callback=True,
)
async def do_delete(self, audit_callback, id_, remove, force):
"""
Delete iSCSI Extent of `id`.
If `id` iSCSI Extent's `type` was configured to FILE, `remove` can be set to remove the configured file.
"""
data = await self.get_instance(id_)
audit_callback(data['name'])
target_to_extents = await self.middleware.call('iscsi.targetextent.query', [['extent', '=', id_]])
active_sessions = await self.middleware.call(
'iscsi.target.active_sessions_for_targets', [t['target'] for t in target_to_extents]
)
if active_sessions:
sessions_str = f'Associated target(s) {",".join(active_sessions)} ' \
f'{"is" if len(active_sessions) == 1 else "are"} in use.'
if force:
self.middleware.logger.warning('%s. Forcing deletion of extent.', sessions_str)
else:
raise CallError(sessions_str)
if remove:
delete = await self.remove_extent_file(data)
if delete is not True:
raise CallError('Failed to remove extent file')
for target_to_extent in target_to_extents:
await self.middleware.call('iscsi.targetextent.delete', target_to_extent['id'], force)
# This change is being made in conjunction with threads_num being specified in scst.conf
if data['type'] == 'DISK' and data['path'].startswith('zvol/'):
zvolname = zvol_path_to_name(os.path.join('/dev', data['path']))
# Only try to set volthreading if the volume still exists.
if await self.middleware.call('pool.dataset.query', [['name', '=', zvolname], ['type', '=', 'VOLUME']]):
await self.middleware.call('zfs.dataset.update', zvolname, {'properties': {'volthreading': {'value': 'on'}}})
try:
return await self.middleware.call(
'datastore.delete', self._config.datastore, id_
)
finally:
await self._service_change('iscsitarget', 'reload')
if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call('failover.remote_connected'):
await self.middleware.call('iscsi.alua.wait_for_alua_settled')
@private
def validate(self, data):
data['serial'] = self.extent_serial(data['serial'])
data['naa'] = self.extent_naa(data.get('naa'))
@private
async def extend(self, data):
if data['type'] == 'DISK':
data['disk'] = data['path']
elif data['type'] == 'FILE':
data['disk'] = None
extent_size = data['filesize']
# Legacy Compat for having 2[KB, MB, GB, etc] in database
if not str(extent_size).isdigit():
suffixes = {
'PB': 1125899906842624,
'TB': 1099511627776,
'GB': 1073741824,
'MB': 1048576,
'KB': 1024,
'B': 1
}
for x in suffixes.keys():
if str(extent_size).upper().endswith(x):
extent_size = str(extent_size).upper().strip(x)
extent_size = int(extent_size) * suffixes[x]
data['filesize'] = extent_size
return data
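    # Legacy-compat sketch: a stored filesize of '2KB' extends to 2 * 1024 == 2048 bytes.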
@private
async def clean(self, data, schema_name, verrors, old=None):
await self.clean_name(data, schema_name, verrors, old=old)
await self.clean_serial(data, schema_name, verrors, old=old)
await self.middleware.call('iscsi.extent.clean_type_and_path', data, schema_name, verrors)
await self.middleware.call('iscsi.extent.clean_size', data, schema_name, verrors)
@private
async def clean_name(self, data, schema_name, verrors, old=None):
name = data['name']
old = old['name'] if old is not None else None
name_filters = [('name', '=', name)]
if '"' in name:
verrors.add(f'{schema_name}.name', 'Double quotes are not allowed')
if name != old or old is None:
name_result = await self.middleware.call(
'datastore.query',
self._config.datastore,
name_filters,
{'prefix': self._config.datastore_prefix}
)
if name_result:
verrors.add(f'{schema_name}.name', 'Extent name must be unique')
@private
async def clean_serial(self, data, schema_name, verrors, old=None):
serial = data['serial']
old = old['serial'] if old is not None else None
serial_filters = [('serial', '=', serial)]
max_serial_len = 20 # SCST max length
if '"' in serial:
verrors.add(f'{schema_name}.serial', 'Double quotes are not allowed')
if len(serial) > max_serial_len:
verrors.add(
f'{schema_name}.serial',
f'Extent serial can not exceed {max_serial_len} characters'
)
if serial != old or old is None:
serial_result = await self.middleware.call(
'datastore.query',
self._config.datastore,
serial_filters,
{'prefix': self._config.datastore_prefix}
)
if serial_result:
verrors.add(f'{schema_name}.serial', 'Serial number must be unique')
@private
async def validate_path_resides_in_volume(self, verrors, schema, path):
await check_path_resides_within_volume(verrors, self.middleware, schema, path)
@private
async def get_path_field(self, data):
if data['type'] == 'DISK' and data[self.path_field].startswith('zvol/'):
return os.path.join('/mnt', zvol_path_to_name(os.path.join('/dev', data[self.path_field])))
return data[self.path_field]
@private
def clean_type_and_path(self, data, schema_name, verrors):
if data['type'] is None:
return data
extent_type = data['type']
disk = data['disk']
path = data['path']
if extent_type == 'DISK':
if not disk:
verrors.add(f'{schema_name}.disk', 'This field is required')
raise verrors
if not disk.startswith('zvol/'):
verrors.add(f'{schema_name}.disk', 'Disk name must start with "zvol/"')
raise verrors
device = os.path.join('/dev', disk)
zvol_name = zvol_path_to_name(device)
if not os.path.exists(device):
verrors.add(f'{schema_name}.disk', f'Device {device!r} for volume {zvol_name!r} does not exist')
self.middleware.call_sync('iscsi.extent.validate_zvol_path', verrors, f'{schema_name}.disk', device)
if '@' in zvol_name and not data['ro']:
verrors.add(f'{schema_name}.ro', 'Must be set when disk is a ZFS Snapshot')
elif extent_type == 'FILE':
if not path:
verrors.add(f'{schema_name}.path', 'This field is required')
                raise verrors  # A path is required before any further validation can proceed
if os.path.exists(path):
if not os.path.isfile(path) or path[-1] == '/':
verrors.add(
f'{schema_name}.path',
'You need to specify a filepath not a directory'
)
self.middleware.call_sync(
'iscsi.extent.validate_path_resides_in_volume',
verrors, f'{schema_name}.path', path
)
return data
@private
def clean_size(self, data, schema_name, verrors):
# only applies to files
if data['type'] != 'FILE':
return data
path = data['path']
size = data['filesize']
blocksize = data['blocksize']
if not path:
verrors.add(f'{schema_name}.path', 'This field is required')
elif size == 0:
if not os.path.exists(path) or not os.path.isfile(path):
verrors.add(
f'{schema_name}.path',
'The file must exist if the extent size is set to auto (0)'
)
elif float(size) % blocksize:
verrors.add(
f'{schema_name}.filesize',
f'File size ({size}) must be a multiple of block size ({blocksize})'
)
return data
@private
def extent_serial(self, serial):
if serial in [None, '']:
used_serials = [i['serial'] for i in (
self.middleware.call_sync('iscsi.extent.query', [], {'select': ['serial']})
)]
            for _ in range(5):
                serial = secrets.token_hex()[:15]
                if serial not in used_serials:
                    break
            else:
                raise CallError('Failed to generate a random extent serial')
return serial
@private
def extent_naa(self, naa):
if naa is None:
return '0x6589cfc000000' + hashlib.sha256(str(uuid.uuid4()).encode()).hexdigest()[0:19]
else:
return naa
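    # extent_naa(None) yields a 0x-prefixed, 32-hex-digit NAA identifier: the fixed
    # prefix '0x6589cfc000000' followed by 19 pseudo-random hex digits derived from a UUID.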
@accepts()
async def disk_choices(self):
"""
Return a dict of available zvols that can be used
when creating an extent.
"""
diskchoices = {}
zvols = await self.middleware.call(
'zfs.dataset.unlocked_zvols_fast',
[['attachment', '=', None]], {},
['SIZE', 'RO', 'ATTACHMENT']
)
for zvol in zvols:
key = os.path.relpath(zvol['path'], '/dev')
if zvol['ro']:
description = f'{zvol["name"]} [ro]'
else:
description = f'{zvol["name"]} ({format_size(zvol["size"])})'
diskchoices[key] = description
return diskchoices
@private
def save(self, data, schema_name, verrors, old=None):
if data['type'] == 'FILE':
path = data['path']
dirs = '/'.join(path.split('/')[:-1])
# create extent directories
try:
pathlib.Path(dirs).mkdir(parents=True, exist_ok=True)
except Exception as e:
raise CallError(
f'Failed to create {dirs} with error: {e}'
)
# create the extent, or perhaps extend it
if not os.path.exists(path):
# create the extent
subprocess.run(['truncate', '-s', str(data['filesize']), path])
else:
if old:
old_size = int(old['filesize'])
new_size = int(data['filesize'])
# Only allow expansion
if new_size > old_size:
subprocess.run(['truncate', '-s', str(data['filesize']), path])
# resync so connected initiators can see the new size
self.middleware.call_sync('iscsi.global.resync_lun_size_for_file', path)
elif old_size > new_size:
verrors.add(f'{schema_name}.filesize',
'Shrinking an extent is not allowed. This can lead to data loss.')
data.pop('disk', None)
else:
data['path'] = data.pop('disk', None)
@private
async def remove_extent_file(self, data):
if data['type'] == 'FILE':
try:
os.unlink(data['path'])
except FileNotFoundError:
pass
except Exception as e:
return e
return True
@private
async def logged_in_extents(self):
"""
        Obtain the unsurfaced disk names for all extents currently logged in to
        on an HA STANDBY controller.
:return: dict keyed by extent name, with unsurfaced disk name as the value
"""
result = {}
# First check if *anything* is logged in.
iqns = await self.middleware.call('iscsi.target.logged_in_iqns')
if not iqns:
return result
target_to_id = {t['name']: t['id'] for t in await self.middleware.call('iscsi.target.query', [], {'select': ['id', 'name']})}
extents = {e['id']: e for e in await self.middleware.call('iscsi.extent.query', [], {'select': ['id', 'name', 'locked']})}
assoc = await self.middleware.call('iscsi.targetextent.query')
# Generate a dict, keyed by target ID whose value is a set of (lunID, extent name) tuples
target_luns = defaultdict(set)
for a_tgt in filter(
lambda a: a['extent'] in extents and not extents[a['extent']]['locked'],
assoc
):
target_id = a_tgt['target']
extent_name = extents[a_tgt['extent']]['name']
target_luns[target_id].add((a_tgt['lunid'], extent_name))
global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
ha_basename = f'{global_basename}:HA:'
for iqn in filter(lambda x: x.startswith(ha_basename), iqns):
target_name = iqn.split(':')[-1]
target_id = target_to_id[target_name]
for ctl in iqns[iqn]:
lun = int(ctl.split(':')[-1])
for (l, extent_name) in target_luns[target_id]:
if l == lun:
result[extent_name] = ctl
break
return result
@private
async def logged_in_extent(self, iqn, lun):
"""Return the device name (e.g. 13:0:0:0) of the logged in IQN/lun"""
p = pathlib.Path('/sys/devices/platform')
for targetname in p.glob('host*/session*/iscsi_session/session*/targetname'):
logged_in_iqn = targetname.read_text().strip()
if logged_in_iqn == iqn:
for disk in targetname.parent.glob('device/target*/*/scsi_disk'):
device = disk.parent.name
if device.split(':')[-1] == str(lun):
return device
return None
@private
async def active_extents(self):
"""
Returns the names of all extents who are neither disabled nor locked, and which are
associated with a target.
"""
filters = [['enabled', '=', True], ['locked', '=', False]]
extents = await self.middleware.call('iscsi.extent.query', filters, {'select': ['id', 'name']})
assoc = [a_tgt['extent'] for a_tgt in await self.middleware.call('iscsi.targetextent.query')]
result = []
for extent in extents:
if extent['id'] in assoc:
result.append(extent['name'])
return result
@private
async def pool_import(self, pool=None):
"""
On pool import we will ensure that any ZVOLs used as iSCSI extents have the
necessary properties set (i.e. turn off volthreading).
"""
filters = [['type', '=', 'DISK']]
options = {'select': ['path']}
if pool is not None:
filters.append(['path', '^', f'zvol/{pool["name"]}/'])
zvols = [extent['path'][5:] for extent in await self.middleware.call('iscsi.extent.query', filters, options)]
filters = [['name', 'in', zvols], ['properties.volthreading.value', '=', 'on']]
options = {'select': ['name']}
for zvol in await self.middleware.call('zfs.dataset.query', filters, options):
await self.middleware.call('zfs.dataset.update', zvol['name'], {'properties': {'volthreading': {'value': 'off'}}})
async def pool_post_import(middleware, pool):
await middleware.call('iscsi.extent.pool_import', pool)
async def setup(middleware):
middleware.register_hook('pool.post_import', pool_post_import, sync=True)
| 24,587 | target_to_extent.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/target_to_extent.py |
import asyncio
import middlewared.sqlalchemy as sa
from middlewared.schema import Bool, Dict, Int, Patch, accepts
from middlewared.service import CallError, CRUDService, ValidationErrors, private
class iSCSITargetToExtentModel(sa.Model):
__tablename__ = 'services_iscsitargettoextent'
__table_args__ = (
sa.Index(
'services_iscsitargettoextent_iscsi_target_id_757cc851_uniq',
'iscsi_target_id', 'iscsi_extent_id', unique=True
),
)
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_extent_id = sa.Column(sa.ForeignKey('services_iscsitargetextent.id'), index=True)
iscsi_target_id = sa.Column(sa.ForeignKey('services_iscsitarget.id'), index=True)
iscsi_lunid = sa.Column(sa.Integer())
class iSCSITargetToExtentService(CRUDService):
class Config:
namespace = 'iscsi.targetextent'
datastore = 'services.iscsitargettoextent'
datastore_prefix = 'iscsi_'
datastore_extend = 'iscsi.targetextent.extend'
cli_namespace = 'sharing.iscsi.target.extent'
role_prefix = 'SHARING_ISCSI_TARGETEXTENT'
@accepts(Dict(
'iscsi_targetextent_create',
Int('target', required=True),
Int('lunid', null=True),
Int('extent', required=True),
register=True
), audit='Create iSCSI target/LUN/extent mapping', audit_callback=True)
async def do_create(self, audit_callback, data):
"""
Create an Associated Target.
`lunid` will be automatically assigned if it is not provided based on the `target`.
"""
        # It is unusual to do an audit_callback on a do_create, but we want to perform
        # more extensive operations than is usual for a create, because the parameters
        # supplied are so opaque to the user.
audit_callback(await self._mapping_summary(data))
verrors = ValidationErrors()
await self.validate(data, 'iscsi_targetextent_create', verrors)
verrors.check()
data['id'] = await self.middleware.call(
'datastore.insert', self._config.datastore, data,
{'prefix': self._config.datastore_prefix}
)
await self._service_change('iscsitarget', 'reload', options={'ha_propagate': False})
if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call('failover.remote_connected'):
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
await self.middleware.call('iscsi.alua.wait_cluster_mode', data['target'], data['extent'])
await self.middleware.call('iscsi.alua.wait_for_alua_settled')
return await self.get_instance(data['id'])
def _set_null_false(name):
def set_null_false(attr):
attr.null = False
return {'name': name, 'method': set_null_false}
async def _mapping_summary(self, data):
try:
target = (await self.middleware.call('iscsi.target.query', [['id', '=', data.get('target')]], {'get': True}))['name']
except Exception:
target = data.get('target')
try:
extent = (await self.middleware.call('iscsi.extent.query', [['id', '=', data.get('extent')]], {'get': True}))['name']
except Exception:
extent = data.get('extent')
return f'{target}/{data.get("lunid")}/{extent}'
@accepts(
Int('id'),
Patch(
'iscsi_targetextent_create',
'iscsi_targetextent_update',
('edit', _set_null_false('lunid')),
('attr', {'update': True})
),
audit='Update iSCSI target/LUN/extent mapping',
audit_callback=True,
)
async def do_update(self, audit_callback, id_, data):
"""
Update Associated Target of `id`.
"""
verrors = ValidationErrors()
old = await self.get_instance(id_)
audit_callback(await self._mapping_summary(old))
new = old.copy()
new.update(data)
await self.validate(new, 'iscsi_targetextent_update', verrors, old)
verrors.check()
await self.middleware.call(
'datastore.update', self._config.datastore, id_, new,
{'prefix': self._config.datastore_prefix})
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(id_)
@accepts(Int('id'),
Bool('force', default=False),
audit='Delete iSCSI target/LUN/extent mapping',
audit_callback=True,)
async def do_delete(self, audit_callback, id_, force):
"""
Delete Associated Target of `id`.
"""
associated_target = await self.get_instance(id_)
active_sessions = await self.middleware.call(
'iscsi.target.active_sessions_for_targets', [associated_target['target']]
)
if active_sessions:
if force:
self.middleware.logger.warning('Associated target %s is in use.', active_sessions[0])
else:
raise CallError(f'Associated target {active_sessions[0]} is in use.')
audit_callback(await self._mapping_summary(associated_target))
result = await self.middleware.call(
'datastore.delete', self._config.datastore, id_
)
# Reload the target, so that the LUN is removed from what is being offered ... including
# on the internal target, if this is an ALUA system.
await self._service_change('iscsitarget', 'reload', options={'ha_propagate': False})
# Next, perform any necessary fixup on the STANDBY system if ALUA is enabled.
if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call('failover.remote_connected'):
target_name = (await self.middleware.call('iscsi.target.query',
[['id', '=', associated_target['target']]],
{'select': ['name'], 'get': True}))['name']
extent_name = (await self.middleware.call('iscsi.extent.query',
[['id', '=', associated_target['extent']]],
{'select': ['name'], 'get': True}))['name']
# Check that the HA target is no longer offering the LUN that we just deleted. Wait a short period
# if necessary (though this should not be required).
retries = 5
lun_removed = False
iqn = await self.middleware.call('iscsi.target.ha_iqn', target_name)
while retries:
if associated_target['lunid'] not in await self.middleware.call('iscsi.target.iqn_ha_luns', iqn):
lun_removed = True
break
retries -= 1
await asyncio.sleep(1)
if not lun_removed:
self.logger.warning('Failed to remove lun %r from internal target %r', associated_target['lunid'], iqn, exc_info=True)
try:
# iscsi.alua.removed_target_extent includes a local service reload
await self.middleware.call('failover.call_remote', 'iscsi.alua.removed_target_extent', [target_name, associated_target['lunid'], extent_name])
except CallError as e:
if e.errno != CallError.ENOMETHOD:
self.logger.warning('Failed up update STANDBY node', exc_info=True)
# Better to continue than to raise the exception
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
await self.middleware.call('iscsi.alua.wait_for_alua_settled')
return result
@private
async def extend(self, data):
data['target'] = data['target']['id']
data['extent'] = data['extent']['id']
return data
@private
async def validate(self, data, schema_name, verrors, old=None):
if old is None:
old = {}
old_lunid = old.get('lunid')
target = data['target']
old_target = old.get('target')
extent = data['extent']
old_extent = old.get('extent')
if data.get('lunid') is None:
lunids = [
o['lunid'] for o in await self.query(
[('target', '=', target)], {'order_by': ['lunid'], 'force_sql_filters': True}
)
]
if not lunids:
lunid = 0
else:
diff = sorted(set(range(0, lunids[-1] + 1)).difference(lunids))
lunid = diff[0] if diff else max(lunids) + 1
data['lunid'] = lunid
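            # First-free-LUN selection above, illustrated:
            #   existing lunids [0, 1, 3] -> diff == [2] -> assign 2
            #   existing lunids [0, 1, 2] -> diff == []  -> assign 3 (max + 1)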
else:
lunid = data['lunid']
# For Linux we have
# http://github.com/bvanassche/scst/blob/d483590da4de7d32c8371e0712fc186f3d8c509c/scst/include/scst_const.h#L69
lun_map_size = 16383
if lunid < 0 or lunid > lun_map_size - 1:
verrors.add(
f'{schema_name}.lunid',
                f'LUN ID must be a non-negative integer no greater than {lun_map_size - 1}'
)
# If either the LUN or the target name have changed then
# ensure that we are not clashing with something pre-existing
if (old_lunid != lunid or old_target != target) and await self.query([
('lunid', '=', lunid), ('target', '=', target)
], {'force_sql_filters': True}):
verrors.add(
f'{schema_name}.lunid',
'LUN ID is already being used for this target.'
)
# Need to ensure that a particular extent is only ever used in
# a single target (at a single LUN) at a time. Failure to
# do so would result in a mechanism to avoid any SCSI based
# locking, and therefore could result in data corruption.
if old_extent != extent and await self.query([
('extent', '=', extent)
], {'force_sql_filters': True}):
verrors.add(
f'{schema_name}.extent',
'Extent is already in use.'
)
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/status.py
from middlewared.service import accepts, Service
class ISCSIGlobalService(Service):
class Config:
namespace = 'iscsi.global'
cli_namespace = 'sharing.iscsi.global'
@accepts(roles=['SHARING_ISCSI_GLOBAL_READ'])
async def client_count(self):
"""
Return currently connected clients count.
"""
return len({host.ip for host in await self.middleware.call("iscsi.host.injection.collect")})
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/auth.py
import middlewared.sqlalchemy as sa
from middlewared.api import api_method
from middlewared.api.current import (IscsiAuthCreateArgs, IscsiAuthCreateResult, IscsiAuthDeleteArgs,
IscsiAuthDeleteResult, IscsiAuthEntry, IscsiAuthUpdateArgs, IscsiAuthUpdateResult)
from middlewared.service import CallError, CRUDService, private, ValidationErrors
def _auth_summary(data):
user = data.get('user', '')
tag = data.get('tag', '')
if peeruser := data.get('peeruser'):
return f'{user}/{peeruser} ({tag})'
return f'{user} ({tag})'
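# Illustration with hypothetical values (not in the original source):
#     >>> _auth_summary({'user': 'alice', 'tag': 2})
#     'alice (2)'
#     >>> _auth_summary({'user': 'alice', 'tag': 2, 'peeruser': 'bob'})
#     'alice/bob (2)'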
class iSCSITargetAuthCredentialModel(sa.Model):
__tablename__ = 'services_iscsitargetauthcredential'
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_target_auth_tag = sa.Column(sa.Integer(), default=1)
iscsi_target_auth_user = sa.Column(sa.String(120))
iscsi_target_auth_secret = sa.Column(sa.EncryptedText())
iscsi_target_auth_peeruser = sa.Column(sa.String(120))
iscsi_target_auth_peersecret = sa.Column(sa.EncryptedText())
class iSCSITargetAuthCredentialService(CRUDService):
class Config:
namespace = 'iscsi.auth'
datastore = 'services.iscsitargetauthcredential'
datastore_prefix = 'iscsi_target_auth_'
cli_namespace = 'sharing.iscsi.target.auth_credential'
role_prefix = 'SHARING_ISCSI_AUTH'
entry = IscsiAuthEntry
@api_method(IscsiAuthCreateArgs, IscsiAuthCreateResult, audit='Create iSCSI Authorized Access', audit_extended=lambda data: _auth_summary(data))
async def do_create(self, data):
"""
Create an iSCSI Authorized Access.
`tag` should be unique among all configured iSCSI Authorized Accesses.
`secret` and `peersecret` should have length between 12-16 letters inclusive.
`peeruser` and `peersecret` are provided only when configuring mutual CHAP. `peersecret` should not be
similar to `secret`.
"""
verrors = ValidationErrors()
await self.validate(data, 'iscsi_auth_create', verrors)
verrors.check()
orig_peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
data['id'] = await self.middleware.call(
'datastore.insert', self._config.datastore, data,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('iscsi.discoveryauth.recalc_mutual_chap_alert', orig_peerusers)
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(data['id'])
@api_method(IscsiAuthUpdateArgs, IscsiAuthUpdateResult, audit='Update iSCSI Authorized Access', audit_callback=True)
async def do_update(self, audit_callback, id_, data):
"""
Update iSCSI Authorized Access of `id`.
"""
old = await self.get_instance(id_)
audit_callback(_auth_summary(old))
new = old.copy()
new.update(data)
verrors = ValidationErrors()
await self.validate(new, 'iscsi_auth_update', verrors)
if new['tag'] != old['tag'] and not await self.query([['tag', '=', old['tag']], ['id', '!=', id_]]):
usages = await self.is_in_use(id_)
if usages['in_use']:
verrors.add('iscsi_auth_update.tag', usages['usages'])
verrors.check()
orig_peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
await self.middleware.call(
'datastore.update', self._config.datastore, id_, new,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('iscsi.discoveryauth.recalc_mutual_chap_alert', orig_peerusers)
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(id_)
@api_method(IscsiAuthDeleteArgs, IscsiAuthDeleteResult, audit='Delete iSCSI Authorized Access', audit_callback=True)
async def do_delete(self, audit_callback, id_):
"""
Delete iSCSI Authorized Access of `id`.
"""
config = await self.get_instance(id_)
audit_callback(_auth_summary(config))
if not await self.query([['tag', '=', config['tag']], ['id', '!=', id_]]):
# We are attempting to delete the last auth in a particular group (aka tag)
usages = await self.is_in_use(id_)
if usages['in_use']:
raise CallError(usages['usages'])
orig_peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
result = await self.middleware.call(
'datastore.delete', self._config.datastore, id_
)
if orig_peerusers:
await self.middleware.call('iscsi.discoveryauth.recalc_mutual_chap_alert', orig_peerusers)
return result
@private
async def is_in_use(self, id_):
config = await self.get_instance(id_)
usages = []
# Check discovery auth
discovery_auths = await self.middleware.call(
'iscsi.discoveryauth.query', [['authgroup', '=', config['tag']]], {'select': ['id']}
)
if discovery_auths:
usages.append(
f'Authorized access of {id_} is being used by discovery auth(s): {", ".join(str(a["id"]) for a in discovery_auths)}'
)
# Check targets
groups = await self.middleware.call(
'datastore.query', 'services.iscsitargetgroups', [['iscsi_target_authgroup', '=', config['tag']]]
)
if groups:
usages.append(
f'Authorized access of {id_} is being used by following target(s): '
f'{", ".join(str(g["iscsi_target"]["id"]) for g in groups)}'
)
return {'in_use': bool(usages), 'usages': '\n'.join(usages)}
@private
async def validate(self, data, schema_name, verrors):
secret = data.get('secret')
peer_secret = data.get('peersecret')
peer_user = data.get('peeruser', '')
if not peer_user and peer_secret:
verrors.add(
f'{schema_name}.peersecret',
'The peer user is required if you set a peer secret.'
)
if len(secret) < 12 or len(secret) > 16:
verrors.add(
f'{schema_name}.secret',
'Secret must be between 12 and 16 characters.'
)
if not peer_user:
return
if not peer_secret:
verrors.add(
f'{schema_name}.peersecret',
'The peer secret is required if you set a peer user.'
)
elif peer_secret == secret:
verrors.add(
f'{schema_name}.peersecret',
'The peer secret cannot be the same as user secret.'
)
        else:
if len(peer_secret) < 12 or len(peer_secret) > 16:
verrors.add(
f'{schema_name}.peersecret',
'Peer Secret must be between 12 and 16 characters.'
)
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/utils.py
import bidict
AUTHMETHOD_LEGACY_MAP = bidict.bidict({
'None': 'NONE',
'CHAP': 'CHAP',
'CHAP Mutual': 'CHAP_MUTUAL',
})
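# Illustration (assumed usage, not in the original source): a bidict gives
# two-way lookup between the legacy labels and the API enum values, e.g.
#     AUTHMETHOD_LEGACY_MAP['CHAP Mutual']      # -> 'CHAP_MUTUAL'
#     AUTHMETHOD_LEGACY_MAP.inv['CHAP_MUTUAL']  # -> 'CHAP Mutual'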
# Currently SCST has this limit (scst_vdisk_dev->name)
MAX_EXTENT_NAME_LEN = 64
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/host_injection.py
import asyncio
from collections import namedtuple
from ipaddress import ip_address
import os
from middlewared.service import Service
CollectedHost = namedtuple("CollectedHost", ["ip", "iqn"])
class iSCSIHostsInjectionService(Service):
class Config:
namespace = "iscsi.host.injection"
private = True
control_lock = asyncio.Lock()
run_event = None
stop_event = None
async def start(self):
async with self.control_lock:
if self.run_event:
self.logger.debug("iscsi.host.injection is already running")
return
self.logger.debug("Starting iscsi.host.injection")
self.run_event = asyncio.Event()
self.stop_event = asyncio.Event()
self.middleware.create_task(self._run(self.run_event, self.stop_event))
async def stop(self):
async with self.control_lock:
if not self.run_event:
self.logger.debug("iscsi.host.injection is already stopped")
return
self.logger.debug("Stopping iscsi.host.injection")
self.run_event.set()
await self.stop_event.wait()
self.run_event = None
self.stop_event = None
async def _run(self, run_event, stop_event):
try:
while True:
try:
await self.middleware.call(
"iscsi.host.batch_update",
[
dict(host._asdict(), added_automatically=True)
for host in await self.middleware.call("iscsi.host.injection.collect")
],
)
except Exception:
self.middleware.logger.error("Unhandled exception in iscsi.host.injection", exc_info=True)
try:
await asyncio.wait_for(self.middleware.create_task(run_event.wait()), 5)
return
except asyncio.TimeoutError:
continue
finally:
stop_event.set()
def collect(self):
hosts = set()
targets_path = "/sys/kernel/scst_tgt/targets/iscsi"
try:
targets = os.listdir(targets_path)
except FileNotFoundError:
return hosts
for target in targets:
target_path = os.path.join(targets_path, target)
if not os.path.isdir(target_path):
continue
sessions_path = os.path.join(target_path, "sessions")
for session in os.listdir(sessions_path):
if "#" not in session:
continue
iqn, target_ip = session.split("#", 1)
session_path = os.path.join(sessions_path, session)
if not os.path.isdir(session_path):
continue
for ip in os.listdir(session_path):
try:
ip_address(ip)
except ValueError:
continue
ip_path = os.path.join(session_path, ip)
if not os.path.isdir(ip_path):
continue
hosts.add(CollectedHost(ip, iqn))
return hosts
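# Illustration of the sysfs layout collect() walks (hypothetical names): a
# session directory such as
#     /sys/kernel/scst_tgt/targets/iscsi/<target>/sessions/iqn.1993-08.org.debian:01:abcd#192.168.0.10/192.168.0.20/
# encodes "<initiator iqn>#<target ip>", and its subdirectories are the
# initiator's addresses, so it would yield
# CollectedHost(ip='192.168.0.20', iqn='iqn.1993-08.org.debian:01:abcd').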
async def setup(middleware):
if await middleware.call('service.started_or_enabled', 'iscsitarget'):
await middleware.call("iscsi.host.injection.start")
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/host_crud.py
import asyncio
from collections import defaultdict
import errno
from sqlalchemy.exc import IntegrityError
from middlewared.schema import accepts, Bool, Dict, IPAddr, Int, List, Patch, Ref, Str
from middlewared.service import CRUDService, private, ValidationErrors
import middlewared.sqlalchemy as sa
LOCK = asyncio.Lock()
class iSCSIHostModel(sa.Model):
__tablename__ = "services_iscsihost"
id = sa.Column(sa.Integer(), primary_key=True)
ip = sa.Column(sa.String(45), unique=True)
description = sa.Column(sa.Text())
added_automatically = sa.Column(sa.Boolean())
class iSCSIHostIqnModel(sa.Model):
__tablename__ = "services_iscsihostiqn"
id = sa.Column(sa.Integer(), primary_key=True)
iqn = sa.Column(sa.String(223), unique=True)
host_id = sa.Column(sa.Integer(), sa.ForeignKey("services_iscsihost.id", ondelete="CASCADE"))
class iSCSIHostService(CRUDService):
hosts = {}
class Config:
namespace = "iscsi.host"
datastore = "services.iscsihost"
datastore_extend = "iscsi.host.extend"
datastore_extend_context = "iscsi.host.extend_context"
cli_namespace = "sharing.iscsi.host"
role_prefix = 'SHARING_ISCSI_HOST'
@private
async def extend_context(self, rows, extra):
id_to_iqns = defaultdict(list)
for row in await self.middleware.call("datastore.query", "services.iscsihostiqn", [], {"relationships": False}):
id_to_iqns[row["host_id"]].append(row["iqn"])
return {
"id_to_iqns": id_to_iqns,
}
@private
async def extend(self, row, context):
row["iqns"] = context["id_to_iqns"][row["id"]]
return row
@accepts(Dict(
"iscsi_host_create",
IPAddr("ip", required=True),
Str("description", default=""),
List("iqns", items=[Str("iqn", empty=False)], default=[]),
Bool("added_automatically", default=False),
register=True,
), audit='Create iSCSI host', audit_extended=lambda data: data["ip"])
async def do_create(self, data):
"""
Creates iSCSI host.
`ip` indicates an IP address of the host.
`description` is a human-readable name for the host.
`iqns` is a list of initiator iSCSI Qualified Names.
"""
async with LOCK:
return await self.create_unlocked(data)
@accepts(Ref("iscsi_host_create"))
@private
async def create_unlocked(self, data):
iqns = data.pop("iqns")
try:
id_ = await self.middleware.call("datastore.insert", self._config.datastore, data)
except IntegrityError:
verrors = ValidationErrors()
verrors.add("iscsi_host_create.ip", "This IP address already exists", errno.EEXIST)
raise verrors
await self._set_datastore_iqns(id_, iqns)
host = await self.get_instance(id_)
self.hosts[host["ip"]] = host
self._set_cache_iqns(id_, iqns)
return host
@accepts(
Int("id"),
Patch(
"iscsi_host_create",
"iscsi_host_update",
("attr", {"update": True}),
register=True,
),
audit='Update iSCSI host',
audit_callback=True,
)
async def do_update(self, audit_callback, id_, data):
"""
Update iSCSI host `id`.
"""
async with LOCK:
return await self.update_unlocked(audit_callback, id_, data)
@private
@accepts(
Int("id"),
Ref("iscsi_host_update"),
audit='Update iSCSI host',
audit_callback=True,
)
async def update_unlocked(self, audit_callback, id_, data):
old = await self.get_instance(id_)
audit_callback(old['ip'])
new = old.copy()
new.update(data)
iqns = new.pop("iqns")
try:
await self.middleware.call("datastore.update", self._config.datastore, id_, new)
except IntegrityError:
verrors = ValidationErrors()
verrors.add("iscsi_host_update.ip", "This IP address already exists", errno.EEXIST)
raise verrors
await self._set_datastore_iqns(id_, iqns)
host = await self.get_instance(id_)
self.hosts.pop(old["ip"], None)
self.hosts[host["ip"]] = host
self._set_cache_iqns(id_, iqns)
return host
@accepts(Int("id"),
audit='Delete iSCSI host',
audit_callback=True,
)
async def do_delete(self, audit_callback, id_):
"""
        Delete iSCSI host `id`.
"""
async with LOCK:
return await self.delete_unlocked(audit_callback, id_)
@private
@accepts(Int("id"),
audit='Delete iSCSI host',
audit_callback=True,
)
async def delete_unlocked(self, audit_callback, id_):
host = await self.get_instance(id_)
audit_callback(host['ip'])
await self.middleware.call("datastore.delete", self._config.datastore, id_)
self.hosts.pop(host["ip"], None)
return host
async def _set_datastore_iqns(self, id_, iqns):
await self.middleware.call("datastore.delete", "services.iscsihostiqn", [["iqn", "in", iqns]])
for iqn in iqns:
await self.middleware.call("datastore.insert", "services.iscsihostiqn", {
"iqn": iqn,
"host_id": id_,
})
def _set_cache_iqns(self, id_, iqns):
for host in self.hosts.values():
if host["id"] != id_:
for iqn in iqns:
try:
host["iqns"].remove(iqn)
except ValueError:
pass
@private
async def read_cache(self):
self.hosts = {}
for host in await self.query():
self.hosts[host["ip"]] = host
@accepts(
List(
"hosts",
items=[
Dict(
"host",
IPAddr("ip", required=True),
Str("iqn", required=True),
Bool("added_automatically", default=False),
),
],
),
audit_callback=True,
)
@private
async def batch_update(self, audit_callback, hosts):
async with LOCK:
try:
for host in hosts:
if host["ip"] not in self.hosts:
await self.create_unlocked({
"ip": host["ip"],
"added_automatically": host["added_automatically"],
})
db_host = self.hosts[host["ip"]]
if host["iqn"] not in db_host["iqns"]:
await self.update_unlocked(audit_callback, db_host["id"], {"iqns": db_host["iqns"] + [host["iqn"]]})
except Exception:
await self.read_cache()
raise
async def setup(middleware):
await middleware.call("iscsi.host.read_cache")
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/host_target.py
from collections import defaultdict
from middlewared.schema import accepts, Int
from middlewared.service import private, Service, ServiceChangeMixin
import middlewared.sqlalchemy as sa
class iSCSIHostTargetModel(sa.Model):
__tablename__ = "services_iscsihosttarget"
id = sa.Column(sa.Integer(), primary_key=True)
host_id = sa.Column(sa.Integer(), sa.ForeignKey("services_iscsihost.id", ondelete="CASCADE"))
target_id = sa.Column(sa.Integer(), sa.ForeignKey("services_iscsitarget.id", ondelete="CASCADE"))
class iSCSIHostService(Service, ServiceChangeMixin):
class Config:
namespace = "iscsi.host"
@accepts(Int("id"), roles=['SHARING_ISCSI_HOST_READ'])
async def get_targets(self, id_):
"""
Returns targets associated with host `id`.
"""
return await self.middleware.call("iscsi.target.query", [["id", "in", [
row["target_id"]
for row in await self.middleware.call("datastore.query", "services.iscsihosttarget", [
["host_id", "=", id_],
], {"relationships": False})
]]])
async def _audit_summary(self, id_, ids):
"""
Return a summary string of the data provided, to be used in the audit summary.
"""
try:
host = (await self.middleware.call('iscsi.host.query', [['id', '=', id_]], {'get': True}))['ip']
except Exception:
host = id_
try:
targets = [target['name'] for target in await self.middleware.call('iscsi.target.query', [['id', 'in', ids]], {'select': ['name']})]
except Exception:
targets = ids
if len(targets) > 3:
return f'{host}: {",".join(targets[:3])},...'
else:
return f'{host}: {",".join(targets)}'
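    # Illustration (hypothetical data): host ip '10.0.0.5' with targets
    # ['t1', 't2', 't3', 't4'] summarises as '10.0.0.5: t1,t2,t3,...';
    # three or fewer targets are listed in full.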
@private
async def get_target_hosts(self):
target_hosts = defaultdict(list)
for row in await self.middleware.call("datastore.query", "services.iscsihosttarget"):
target_hosts[row["target"]["id"]].append(row["host"])
return target_hosts
@private
async def get_hosts_iqns(self):
hosts_iqns = defaultdict(list)
for row in await self.middleware.call("datastore.query", "services.iscsihostiqn", [], {"relationships": False}):
hosts_iqns[row["host_id"]].append(row["iqn"])
return hosts_iqns
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/discovery_auth.py
import middlewared.sqlalchemy as sa
from middlewared.alert.source.discovery_auth import UPGRADE_ALERTS
from middlewared.schema import accepts, Dict, Int, Patch, Str
from middlewared.service import CRUDService, private, ValidationErrors
from middlewared.validators import Range
def _auth_summary(data):
authmethod = data.get('authmethod', '')
authgroup = data.get('authgroup', '')
return f'{authmethod} Group ID {authgroup}'
class iSCSIDiscoveryAuthModel(sa.Model):
__tablename__ = 'services_iscsidiscoveryauth'
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_discoveryauth_authmethod = sa.Column(sa.String(120), default='CHAP')
iscsi_discoveryauth_authgroup = sa.Column(sa.Integer(), unique=True)
class iSCSIDiscoveryAuthService(CRUDService):
class Config:
namespace = 'iscsi.discoveryauth'
datastore = 'services.iscsidiscoveryauth'
datastore_prefix = 'iscsi_discoveryauth_'
role_prefix = 'SHARING_ISCSI_AUTH'
cli_namespace = 'sharing.iscsi.discoveryauth'
ENTRY = Patch(
'iscsi_discoveryauth_create',
'iscsi_discoveryauth_entry',
('add', Int('id', required=True)),
)
@accepts(Dict(
'iscsi_discoveryauth_create',
Str('authmethod', enum=['CHAP', 'CHAP_MUTUAL'], default='CHAP'),
Int('authgroup', validators=[Range(min_=0)]),
register=True
), audit='Create iSCSI Discovery Authorized Access', audit_extended=lambda data: _auth_summary(data))
async def do_create(self, data):
"""
Create an iSCSI Discovery Authorized Access.
`authmethod` specifies the CHAP mechanism that will be used for discovery authentication (only).
Note that only a single Mutual CHAP user may be specified system-wide for discovery auth.
`authgroup` specifies an authorized access group id to be used for discovery auth.
"""
verrors = ValidationErrors()
await self.validate(data, 'iscsi_discoveryauth_create', verrors)
verrors.check()
orig_peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
data['id'] = await self.middleware.call(
'datastore.insert', self._config.datastore, data,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('iscsi.discoveryauth.recalc_mutual_chap_alert', orig_peerusers)
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(data['id'])
@accepts(
Int('id'),
Patch(
'iscsi_discoveryauth_create',
'iscsi_discoveryauth_update',
('attr', {'update': True})
),
audit='Update iSCSI Discovery Authorized Access',
audit_callback=True,
)
async def do_update(self, audit_callback, id_, data):
"""
        Update iSCSI Discovery Authorized Access of `id`.
"""
old = await self.get_instance(id_)
audit_callback(_auth_summary(old))
new = old.copy()
new.update(data)
verrors = ValidationErrors()
await self.validate(new, 'iscsi_discoveryauth_update', verrors)
verrors.check()
orig_peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
await self.middleware.call(
'datastore.update', self._config.datastore, id_, new,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('iscsi.discoveryauth.recalc_mutual_chap_alert', orig_peerusers)
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(id_)
@accepts(Int('id'),
audit='Delete iSCSI Discovery Authorized Access',
audit_callback=True,)
async def do_delete(self, audit_callback, id_):
"""
Delete iSCSI Discovery Authorized Access of `id`.
"""
config = await self.get_instance(id_)
audit_callback(_auth_summary(config))
orig_peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
result = await self.middleware.call(
'datastore.delete', self._config.datastore, id_
)
if not await self.middleware.call('iscsi.discoveryauth.query', [], {'count': True}):
# If we have cleared all the discovery auth, then don't need any alerts
await self.middleware.call('iscsi.discoveryauth.clear_alerts')
elif orig_peerusers and len(orig_peerusers) > 1:
# Have we eliminated the multiple mutual CHAP alert?
await self.middleware.call('iscsi.discoveryauth.recalc_mutual_chap_alert', orig_peerusers)
await self._service_change('iscsitarget', 'reload')
return result
@private
async def validate(self, data, schema_name, verrors):
"""
If this is an update then data will contain an `id`
"""
authgroup = data['authgroup']
authmethod = data['authmethod']
id_ = data.get('id')
# Check the specified authgroup
if authgroup >= 0:
if id_ is None:
# Adding a new entry
filters = [['authgroup', '=', authgroup]]
else:
# Updating an existing entry
filters = [['authgroup', '=', authgroup], ['id', '!=', id_]]
if await self.middleware.call('iscsi.discoveryauth.query', filters, {'count': True}):
verrors.add(
f'{schema_name}.authgroup',
'The specified authgroup is already in use.'
)
if not await self.middleware.call('iscsi.auth.query', [['tag', '=', authgroup]], {'count': True}):
verrors.add(
f'{schema_name}.authgroup',
'The specified authgroup does not contain any entries.'
)
if authmethod == 'CHAP_MUTUAL':
# Ensure that we don't add more than one MUTUAL
if id_ is None:
# Adding a new entry
filters = [['authmethod', '=', 'CHAP_MUTUAL']]
else:
# Updating an existing entry
filters = [['authmethod', '=', 'CHAP_MUTUAL'], ['id', '!=', id_]]
if await self.middleware.call('iscsi.discoveryauth.query', filters, {'count': True}):
verrors.add(
f'{schema_name}.authmethod',
'Another Mutual CHAP discovery auth has already been specified.'
)
else:
# Ensure that this auth does not have more than one peeruser
filters = [['tag', '=', authgroup], ['peeruser', '!=', '']]
if await self.middleware.call('iscsi.auth.query', filters, {'count': True}) > 1:
verrors.add(
f'{schema_name}.authgroup',
'The specified authgroup has multiple peerusers.'
)
# Note: we may have upgraded and found ourselves in the above situation,
# so we will also raise an alert if that is the case ... in addition to
# preventing it here.
@private
async def mutual_chap_peers(self):
"""
Return a list of (peeruser, peersecret) tuples that are in use for Mutual CHAP discovery auth.
"""
filters = [['authmethod', '=', 'CHAP_MUTUAL']]
options = {'select': ['authgroup']}
groups = await self.middleware.call('iscsi.discoveryauth.query', filters, options)
group_ids = [item['authgroup'] for item in groups]
filters = [['peeruser', '!=', ""], ['tag', 'in', group_ids]]
options = {'select': ['peeruser', 'peersecret']}
peers = await self.middleware.call('iscsi.auth.query', filters, options)
return [(peer['peeruser'], peer['peersecret']) for peer in peers]
@private
async def mutual_chap_peerusers(self):
"""
Return a list of peerusers that are in use for Mutual CHAP discovery auth.
"""
return [peer[0] for peer in await self.middleware.call('iscsi.discoveryauth.mutual_chap_peers')]
@private
async def recalc_mutual_chap_alert(self, orig_peerusers):
alert_name = 'ISCSIDiscoveryAuthMultipleMutualCHAP'
peerusers = await self.middleware.call('iscsi.discoveryauth.mutual_chap_peerusers')
if len(orig_peerusers) > 1:
# Alert was in place, do we need to update or remove it?
if len(peerusers) <= 1:
# Clear the existing alert
await self.middleware.call("alert.oneshot_delete", alert_name, {'peeruser': orig_peerusers[0]})
elif peerusers[0] != orig_peerusers[0]:
# Remove old event and replace with new one.
await self.middleware.call("alert.oneshot_delete", alert_name, {'peeruser': orig_peerusers[0]})
await self.middleware.call("alert.oneshot_create", alert_name, {'peeruser': peerusers[0]})
elif len(peerusers) > 1:
# Alert was not in place, add one.
await self.middleware.call("alert.oneshot_create", alert_name, {'peeruser': peerusers[0]})
@private
async def load_upgrade_alerts(self):
"""
Load any events that may have been generated during an alembic migration.
"""
for alert in UPGRADE_ALERTS:
try:
args = await self.middleware.call("keyvalue.get", alert)
await self.middleware.call("alert.oneshot_create", alert, args)
await self.middleware.call("keyvalue.delete", alert)
except KeyError:
pass
@private
async def clear_alerts(self):
alerts = [alert for alert in await self.middleware.call('alert.list') if alert['klass'].startswith('ISCSIDiscoveryAuth')]
for alert in alerts:
await self.middleware.call("alert.oneshot_delete", alert['klass'], alert['args'])
async def __event_system_ready(middleware, event_type, args):
await middleware.call('iscsi.discoveryauth.load_upgrade_alerts')
async def setup(middleware):
if await middleware.call('system.ready'):
await middleware.call('iscsi.discoveryauth.load_upgrade_alerts')
else:
middleware.event_subscribe('system.ready', __event_system_ready)
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/portal.py
import errno
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Dict, Int, IPAddr, List, Patch, Str
from middlewared.service import CRUDService, private, ValidationErrors
from .utils import AUTHMETHOD_LEGACY_MAP
def portal_summary(data):
"""Select a human-readable string representing this portal"""
if title := data.get('comment'):
return title
ips = []
for pair in data.get('listen', []):
if ip := pair.get('ip'):
ips.append(ip)
return ','.join(ips)
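# Illustration with a hypothetical portal (not in the original source):
#     >>> portal_summary({'comment': '', 'listen': [{'ip': '10.0.0.1'}, {'ip': '10.0.0.2'}]})
#     '10.0.0.1,10.0.0.2'
# A non-empty comment takes precedence: portal_summary({'comment': 'lab'}) == 'lab'.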
class ISCSIPortalModel(sa.Model):
__tablename__ = 'services_iscsitargetportal'
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_target_portal_tag = sa.Column(sa.Integer(), default=1)
iscsi_target_portal_comment = sa.Column(sa.String(120))
class ISCSIPortalIPModel(sa.Model):
__tablename__ = 'services_iscsitargetportalip'
__table_args__ = (
sa.Index('services_iscsitargetportalip_iscsi_target_portalip_ip', 'iscsi_target_portalip_ip', unique=True),
)
id = sa.Column(sa.Integer(), primary_key=True)
iscsi_target_portalip_portal_id = sa.Column(sa.ForeignKey('services_iscsitargetportal.id'), index=True)
iscsi_target_portalip_ip = sa.Column(sa.CHAR(15))
class ISCSIPortalService(CRUDService):
class Config:
datastore = 'services.iscsitargetportal'
datastore_extend = 'iscsi.portal.config_extend'
datastore_extend_context = 'iscsi.portal.config_extend_context'
datastore_prefix = 'iscsi_target_portal_'
namespace = 'iscsi.portal'
cli_namespace = 'sharing.iscsi.portal'
role_prefix = 'SHARING_ISCSI_PORTAL'
@private
async def config_extend_context(self, rows, extra):
return {
'global_config': await self.middleware.call('iscsi.global.config'),
}
@private
async def config_extend(self, data, context):
data['listen'] = []
for portalip in await self.middleware.call(
'datastore.query',
'services.iscsitargetportalip',
[('portal', '=', data['id'])],
{'prefix': 'iscsi_target_portalip_'}
):
data['listen'].append({
'ip': portalip['ip'],
'port': context['global_config']['listen_port'],
})
# Temporary until new API being used: START
# data['discovery_authmethod'] = AUTHMETHOD_LEGACY_MAP.get(
# data.pop('discoveryauthmethod')
# )
# data['discovery_authgroup'] = data.pop('discoveryauthgroup')
auths = await self.middleware.call('iscsi.discoveryauth.query')
if auths:
data['discovery_authmethod'] = auths[0]['authmethod']
data['discovery_authgroup'] = auths[0]['authgroup']
else:
data['discovery_authmethod'] = "NONE"
data['discovery_authgroup'] = None
# Temporary until new API being used: END
return data
@accepts()
async def listen_ip_choices(self):
"""
Returns possible choices for `listen.ip` attribute of portal create and update.
"""
choices = {'0.0.0.0': '0.0.0.0', '::': '::'}
if (await self.middleware.call('iscsi.global.config'))['alua']:
# If ALUA is enabled we actually want to show the user the IPs of each node
            # instead of the VIP, so it's clear it's not going to bind to the VIP even though
            # that's the value used under the hood.
filters = [('int_vip', 'nin', [None, ''])]
for i in await self.middleware.call('datastore.query', 'network.Interfaces', filters):
choices[i['int_vip']] = f'{i["int_address"]}/{i["int_address_b"]}'
filters = [('alias_vip', 'nin', [None, ''])]
for i in await self.middleware.call('datastore.query', 'network.Alias', filters):
choices[i['alias_vip']] = f'{i["alias_address"]}/{i["alias_address_b"]}'
else:
if await self.middleware.call('failover.licensed'):
# If ALUA is disabled, HA system should only offer Virtual IPs
for i in await self.middleware.call('interface.query'):
for alias in i.get('failover_virtual_aliases') or []:
choices[alias['address']] = alias['address']
else:
# Non-HA system should offer all addresses
for i in await self.middleware.call('interface.query'):
for alias in i['aliases']:
choices[alias['address']] = alias['address']
return choices
async def __validate(self, verrors, data, schema, old=None):
if not data['listen']:
verrors.add(f'{schema}.listen', 'At least one listen entry is required.')
else:
system_ips = await self.listen_ip_choices()
new_ips = set(i['ip'] for i in data['listen']) - set(i['ip'] for i in old['listen']) if old else set()
for i in data['listen']:
filters = [('iscsi_target_portalip_ip', '=', i['ip'])]
if schema == 'iscsiportal_update':
filters.append(('iscsi_target_portalip_portal', '!=', data['id']))
if await self.middleware.call(
'datastore.query', 'services.iscsitargetportalip', filters
):
verrors.add(f'{schema}.listen', f'{i["ip"]!r} IP is already in use.')
if (
(i['ip'] in new_ips or not new_ips) and
i['ip'] not in system_ips
):
verrors.add(f'{schema}.listen', f'IP {i["ip"]} not configured on this system.')
# Temporary until new API being used: START
if data['discovery_authgroup']:
if not await self.middleware.call(
'datastore.query', 'services.iscsitargetauthcredential',
[('iscsi_target_auth_tag', '=', data['discovery_authgroup'])]
):
verrors.add(
f'{schema}.discovery_authgroup',
f'Auth Group "{data["discovery_authgroup"]}" not found.',
errno.ENOENT,
)
elif data['discovery_authmethod'] in ('CHAP', 'CHAP_MUTUAL'):
verrors.add(f'{schema}.discovery_authgroup', 'This field is required if discovery method is '
'set to CHAP or CHAP Mutual.')
# Temporary until new API being used: END
@accepts(Dict(
'iscsiportal_create',
Str('comment'),
Str('discovery_authmethod', default='NONE', enum=['NONE', 'CHAP', 'CHAP_MUTUAL']),
Int('discovery_authgroup', default=None, null=True),
List('listen', required=True, items=[
Dict(
'listen',
IPAddr('ip', required=True),
),
]),
register=True,
), audit='Create iSCSI portal', audit_extended=lambda data: portal_summary(data))
async def do_create(self, data):
"""
Create a new iSCSI Portal.
`discovery_authgroup` is required for CHAP and CHAP_MUTUAL.
"""
verrors = ValidationErrors()
await self.__validate(verrors, data, 'iscsiportal_create')
verrors.check()
# tag attribute increments sequentially
data['tag'] = (await self.middleware.call(
'datastore.query', self._config.datastore, [], {'count': True}
)) + 1
listen = data.pop('listen')
# Temporary until new API being used: START
authgroup = data.pop('discovery_authgroup', None)
authmethod = AUTHMETHOD_LEGACY_MAP.inv.get(data.pop('discovery_authmethod'), 'None')
if authmethod in ['CHAP', 'CHAP_MUTUAL']:
filters = [['authmethod', '=', authmethod], ['authgroup', '=', authgroup]]
if not await self.middleware.call('iscsi.discoveryauth.query', filters):
await self.middleware.call('iscsi.discoveryauth.create', {'authmethod': authmethod,
'authgroup': authgroup})
# Temporary until new API being used: END
pk = await self.middleware.call(
'datastore.insert', self._config.datastore, data,
{'prefix': self._config.datastore_prefix}
)
try:
await self.__save_listen(pk, listen)
except Exception as e:
await self.middleware.call('datastore.delete', self._config.datastore, pk)
raise e
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(pk)
async def __save_listen(self, pk, new, old=None):
"""
Update database with new listen IPs.
It will delete no longer existing addresses and add new ones.
"""
# We only want to compare 'ip', weed out any 'port' present
new_listen_set = set([(('ip', i.get('ip')),) for i in new])
old_listen_set = set([(('ip', i.get('ip')),) for i in old]) if old else set()
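        # The nested ('ip', value) tuples make each listen entry hashable, so the
        # plain set differences below yield exactly the IPs to add and remove.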
for i in new_listen_set - old_listen_set:
i = dict(i)
await self.middleware.call(
'datastore.insert',
'services.iscsitargetportalip',
{'portal': pk, 'ip': i['ip']},
{'prefix': 'iscsi_target_portalip_'}
)
for i in old_listen_set - new_listen_set:
i = dict(i)
portalip = await self.middleware.call(
'datastore.query',
'services.iscsitargetportalip',
[('portal', '=', pk), ('ip', '=', i['ip'])],
{'prefix': 'iscsi_target_portalip_'}
)
if portalip:
await self.middleware.call(
'datastore.delete', 'services.iscsitargetportalip', portalip[0]['id']
)
@accepts(
Int('id'),
Patch(
'iscsiportal_create',
'iscsiportal_update',
('attr', {'update': True})
),
audit='Update iSCSI portal',
audit_callback=True,
)
async def do_update(self, audit_callback, pk, data):
"""
Update iSCSI Portal `id`.
"""
old = await self.get_instance(pk)
audit_callback(portal_summary(old))
new = old.copy()
new.update(data)
verrors = ValidationErrors()
await self.__validate(verrors, new, 'iscsiportal_update', old)
verrors.check()
listen = new.pop('listen')
# Temporary until new API being used: START
# new['discoveryauthgroup'] = new.pop('discovery_authgroup', None)
# new['discoveryauthmethod'] = AUTHMETHOD_LEGACY_MAP.inv.get(new.pop('discovery_authmethod'), 'None')
new.pop('discovery_authgroup')
new.pop('discovery_authmethod')
# Temporary until new API being used: END
await self.__save_listen(pk, listen, old['listen'])
await self.middleware.call(
'datastore.update', self._config.datastore, pk, new,
{'prefix': self._config.datastore_prefix}
)
await self._service_change('iscsitarget', 'reload')
return await self.get_instance(pk)
@accepts(Int('id'),
audit='Delete iSCSI portal',
audit_callback=True,)
async def do_delete(self, audit_callback, id_):
"""
Delete iSCSI Portal `id`.
"""
old = await self.get_instance(id_)
audit_callback(portal_summary(old))
await self.middleware.call(
'datastore.delete', 'services.iscsitargetgroups', [['iscsi_target_portalgroup', '=', id_]]
)
await self.middleware.call(
'datastore.delete', 'services.iscsitargetportalip', [['iscsi_target_portalip_portal', '=', id_]]
)
result = await self.middleware.call('datastore.delete', self._config.datastore, id_)
for i, portal in enumerate(await self.middleware.call('iscsi.portal.query', [], {'order_by': ['tag']})):
await self.middleware.call(
'datastore.update', self._config.datastore, portal['id'], {'tag': i + 1},
{'prefix': self._config.datastore_prefix}
)
await self._service_change('iscsitarget', 'reload')
return result
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/fs_attachment_delegate.py
from middlewared.common.attachment import LockableFSAttachmentDelegate
from .extents import iSCSITargetExtentService
class ISCSIFSAttachmentDelegate(LockableFSAttachmentDelegate):
name = 'iscsi'
title = 'iSCSI Extent'
service = 'iscsitarget'
service_class = iSCSITargetExtentService
async def get_query_filters(self, enabled, options=None):
return [['type', '=', 'DISK']] + (await super().get_query_filters(enabled, options))
async def delete(self, attachments):
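        # Remove each extent's target/extent mappings first; any target whose
        # mappings are all gone afterwards is an orphan and is deleted as well.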
orphan_targets_ids = set()
for attachment in attachments:
for te in await self.middleware.call('iscsi.targetextent.query', [['extent', '=', attachment['id']]]):
orphan_targets_ids.add(te['target'])
await self.middleware.call('datastore.delete', 'services.iscsitargettoextent', te['id'])
await self.middleware.call('datastore.delete', 'services.iscsitargetextent', attachment['id'])
await self.remove_alert(attachment)
for te in await self.middleware.call('iscsi.targetextent.query', [['target', 'in', orphan_targets_ids]]):
orphan_targets_ids.discard(te['target'])
for target_id in orphan_targets_ids:
await self.middleware.call('iscsi.target.delete', target_id, True)
await self._service_change('iscsitarget', 'reload')
async def restart_reload_services(self, attachments):
await self._service_change('iscsitarget', 'reload')
async def stop(self, attachments):
await self.restart_reload_services(attachments)
async def setup(middleware):
await middleware.call('pool.dataset.register_attachment_delegate', ISCSIFSAttachmentDelegate(middleware))
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/portal_listen_delegate.py
from middlewared.common.listen import ListenDelegate
from middlewared.service import ServiceChangeMixin
class ISCSIPortalListenDelegate(ListenDelegate, ServiceChangeMixin):
def __init__(self, middleware):
self.middleware = middleware
async def get_listen_state(self, ips):
return await self.middleware.call(
'datastore.query', 'services.iscsitargetportalip', [['ip', 'in', ips]], {'prefix': 'iscsi_target_portalip_'}
)
async def set_listen_state(self, state):
for row in state:
await self.middleware.call(
'datastore.update', 'services.iscsitargetportalip', row['id'],
{'ip': row['ip']}, {'prefix': 'iscsi_target_portalip_'}
)
await self._service_change('iscsitarget', 'reload')
async def listens_on(self, state, ip):
return any(row['ip'] == ip for row in state)
async def reset_listens(self, state):
for row in state:
await self.middleware.call(
'datastore.update', 'services.iscsitargetportalip', row['id'],
{'ip': '0.0.0.0'}, {'prefix': 'iscsi_target_portalip_'}
)
await self._service_change('iscsitarget', 'reload')
async def repr(self, state):
return {'type': 'SERVICE', 'service': 'iscsi.portal'}
async def setup(middleware):
await middleware.call(
'interface.register_listen_delegate',
ISCSIPortalListenDelegate(middleware),
)
# truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/alua.py
import asyncio
import itertools
from middlewared.service import CallError, Service, job
from middlewared.utils import run
CHUNK_SIZE = 20
RETRY_SECONDS = 5
SLOW_RETRY_SECONDS = 30
HA_TARGET_SETTLE_SECONDS = 10
GET_UNIT_STATE_SECONDS = 2
RELOAD_REMOTE_QUICK_RETRIES = 10
STANDBY_ENABLE_DEVICES_RETRIES = 10
REMOTE_RELOAD_LONG_DELAY_SECS = 300
def chunker(it, size):
iterator = iter(it)
while chunk := list(itertools.islice(iterator, size)):
yield chunk
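# Illustration only (not in the original source):
#     >>> list(chunker(range(7), 3))
#     [[0, 1, 2], [3, 4, 5], [6]]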
class iSCSITargetAluaService(Service):
"""
Support iSCSI ALUA configuration.
    The ALUA mechanism is based upon DLM support baked into SCST, along with other
    portions of middleware (dlm, iscsi.targets, etc.) to handle the coordination
    between the two nodes in a HA pair. This is performed in response to
cluster_mode being set on target extents.
    However, when a LARGE number of extents (and targets) are present it becomes
impractical to leave/enter lockspaces on scst startup.
To avoid this, SCST on the ACTIVE will start without cluster_mode being
set on extents. Likewise on the STANDBY node, so the targets there will be
present but disabled. However, the STANDBY will then initiate a job to
(gradually) enable cluster_mode on the ACTIVE and react.
"""
class Config:
private = True
namespace = 'iscsi.alua'
# See HA_PROPAGATE in event.py. Only required when running command
# on MASTER, and don't want it to propagate.
HA_PROPAGATE = {'ha_propagate': False}
def __init__(self, middleware):
super().__init__(middleware)
self.enabled = set()
self.standby_starting = False
self.standby_alua_ready = False
self.active_elected_job = None
self.activate_extents_job = None
# standby_write_empty_config will be used to control whether the
# STANDBY node initially writes a minimal scst.conf
# We initialize it to None here, as we could just be restarting
# middleware, then in the getter it will query the state of
# the iscsitarget to decide what the initial value should be
self._standby_write_empty_config = None
async def before_start(self):
if await self.middleware.call('iscsi.global.alua_enabled'):
if await self.middleware.call('failover.status') == 'BACKUP':
self._standby_write_empty_config = True
await self.middleware.call('etc.generate', 'scst')
async def after_start(self):
if await self.middleware.call('iscsi.global.alua_enabled'):
if await self.middleware.call('failover.status') == 'BACKUP':
await self.middleware.call('iscsi.alua.standby_after_start')
async def before_stop(self):
self.standby_starting = False
async def standby_enable_devices(self, devices):
await self.middleware.call('iscsi.target.login_ha_targets')
extents = await self.middleware.call('iscsi.extent.logged_in_extents')
asked = set(devices)
if extents and devices and asked.issubset(set(extents)):
tochange = [extents[name] for name in devices]
await self.middleware.call('iscsi.scst.set_devices_cluster_mode', tochange, 1)
# We could expose the targets as we go along, but will just wait until the end.
# await self.middleware.call('service.reload', 'iscsitarget')
return True
else:
return False
async def standby_write_empty_config(self, value=None):
if value is not None:
self._standby_write_empty_config = value
if self._standby_write_empty_config is None:
if await self.middleware.call('service.get_unit_state', 'iscsitarget') == 'active':
self._standby_write_empty_config = False
else:
self._standby_write_empty_config = True
return self._standby_write_empty_config
@job(lock='active_elected', transient=True, lock_queue_size=1)
async def active_elected(self, job):
self.active_elected_job = job
self.standby_starting = False
job.set_progress(0, 'Start ACTIVE node ALUA reset on election')
self.logger.debug('Start ACTIVE node ALUA reset on election')
if await self.middleware.call('iscsi.global.alua_enabled'):
# Just do the bare minimum here. This API will only be called
# on the new MASTER.
try:
await self.middleware.call('dlm.eject_peer')
except Exception:
self.logger.warning('Unexpected failure while dlm.eject_peer', exc_info=True)
job.set_progress(100, 'ACTIVE node ALUA reset completed')
self.logger.debug('ACTIVE node ALUA reset completed')
return
job.set_progress(100, 'ACTIVE node ALUA reset NOOP')
self.logger.debug('ACTIVE node ALUA reset NOOP')
@job(lock='activate_extents', transient=True, lock_queue_size=1)
async def activate_extents(self, job):
self.activate_extents_job = job
job.set_progress(0, 'Start activate_extents')
if self.active_elected_job:
self.logger.debug('Waiting for active_elected to complete')
await self.active_elected_job.wait()
self.logger.debug('Waited for active_elected to complete')
self.active_elected_job = None
job.set_progress(10, 'Previous job completed')
# First get all the currently active extents
extents = await self.middleware.call('iscsi.extent.query',
[['enabled', '=', True], ['locked', '=', False]],
{'select': ['name', 'id', 'type', 'path', 'disk']})
# Calculate what we want to do
todo = []
for extent in extents:
if extent['type'] == 'DISK':
path = f'/dev/{extent["disk"]}'
else:
path = extent['path']
todo.append([extent['name'], extent['type'], path])
        job.set_progress(20, 'Ready to activate')
if todo:
self.logger.debug(f'Activating {len(todo)} extents')
retries = 10
while todo and retries:
do_again = []
for item in todo:
# Mark them active
if not await self.middleware.call('iscsi.scst.activate_extent', *item):
self.logger.debug(f'Cannot Activate extent {item}')
do_again.append(item)
if not do_again:
break
await asyncio.sleep(1)
retries -= 1
todo = do_again
self.logger.debug('Activated extents')
await asyncio.sleep(2)
else:
self.logger.debug('No extent to activate')
job.set_progress(100, 'All extents activated')
async def become_active(self):
self.logger.debug('Becoming active upon failover event starting')
iqn_basename = (await self.middleware.call('iscsi.global.config'))['basename']
thisnode = await self.middleware.call('failover.node')
# extents: dict[id] : {id, name, type}
extents = {ext['id']: ext for ext in await self.middleware.call('iscsi.extent.query',
[['enabled', '=', True], ['locked', '=', False]],
{'select': ['name', 'id', 'type']})}
# targets: dict[id]: name
targets = {t['id']: t['name'] for t in await self.middleware.call('iscsi.target.query', [], {'select': ['id', 'name']})}
assocs = await self.middleware.call('iscsi.targetextent.query')
if self.activate_extents_job:
self.logger.debug('Waiting for activate to complete')
await self.activate_extents_job.wait()
self.logger.debug('Waited for activate to complete')
self.activate_extents_job = None
# If we have NOT completed standby_after_start then we cannot just
# become ready, instead we will need to restart iscsitarget
if not self.standby_alua_ready:
self.logger.debug('STANDBY node was not yet ready, skip become_active shortcut')
await self.middleware.call('service.restart', 'iscsitarget')
self.logger.debug('iscsitarget restarted')
return
self.logger.debug('Updating LUNs')
await self.middleware.call('iscsi.scst.suspend', 10)
self.logger.debug('iSCSI suspended')
for assoc in assocs:
extent_id = assoc['extent']
if extent_id in extents:
target_id = assoc['target']
if target_id in targets:
iqn = f'{iqn_basename}:{targets[target_id]}'
await self.middleware.call('iscsi.scst.replace_lun', iqn, extents[extent_id]['name'], assoc['lunid'])
self.logger.debug('Updated LUNs')
await self.middleware.call('iscsi.scst.set_node_optimized', thisnode)
self.logger.debug('Switched optimized node')
if await self.middleware.call('iscsi.scst.clear_suspend'):
self.logger.debug('iSCSI unsuspended')
@job(lock='standby_after_start', transient=True, lock_queue_size=1)
async def standby_after_start(self, job):
job.set_progress(0, 'ALUA starting on STANDBY')
self.logger.debug('ALUA starting on STANDBY')
self.standby_starting = True
self.standby_alua_ready = False
self.enabled = set()
local_requires_reload = False
remote_requires_reload = False
# We are the STANDBY node. Tell the ACTIVE it can logout any HA targets it had left over.
prefix = await self.middleware.call('iscsi.target.ha_iqn_prefix')
while self.standby_starting:
try:
iqns = (await self.middleware.call('failover.call_remote', 'iscsi.target.logged_in_iqns')).keys()
ha_iqns = list(filter(lambda iqn: iqn.startswith(prefix), iqns))
if not ha_iqns:
break
await self.middleware.call('failover.call_remote', 'iscsi.target.logout_ha_targets')
# If we have logged out targets on the ACTIVE node, then we will want to regenerate
# the scst.conf (to remove any left-over dev_disk)
remote_requires_reload = True
await asyncio.sleep(1)
except Exception:
await asyncio.sleep(RETRY_SECONDS)
if not self.standby_starting:
job.set_progress(20, 'Abandoned job.')
return
else:
job.set_progress(20, 'Logged out HA targets (remote node)')
self.logger.debug('Logged out HA targets (remote node)')
        # Ensure that the iSCSI service on the remote node is fully up. Since it
        # has been switched to systemd_async_start, poll get_unit_state until it
        # reports active.
while self.standby_starting:
try:
state = await self.middleware.call('failover.call_remote', 'service.get_unit_state', ['iscsitarget'])
if state == 'active':
break
await asyncio.sleep(GET_UNIT_STATE_SECONDS)
except Exception as e:
# This is a fail-safe exception catch. Should never occur.
self.logger.warning('standby_start job: %r', e, exc_info=True)
await asyncio.sleep(RETRY_SECONDS)
if not self.standby_starting:
job.set_progress(22, 'Abandoned job.')
return
else:
job.set_progress(22, 'Remote iscsitarget is active')
self.logger.debug('Remote iscsitarget is active')
# Next turn off cluster_mode for all the extents.
# this will avoid "ignore dlm msg because seq mismatch" errors when we reconnect
# Rather than try to execute in parallel, we will take our time
cr_opts = {'timeout': 10, 'connect_timeout': 10}
logged_enomethod = False
while self.standby_starting:
try:
try:
devices = await self.middleware.call('failover.call_remote', 'iscsi.scst.cluster_mode_devices_set', [], cr_opts)
except CallError as e:
if e.errno != CallError.ENOMETHOD:
raise
# We have not yet upgraded the other node
if not logged_enomethod:
self.logger.debug('Awaiting the ACTIVE node being upgraded.')
logged_enomethod = True
await asyncio.sleep(SLOW_RETRY_SECONDS)
continue
# We did manage to call cluster_mode_devices_set
if not devices:
break
for device in devices:
await self.middleware.call('failover.call_remote', 'iscsi.scst.set_device_cluster_mode', [device, 0], cr_opts)
except Exception:
# This is a fail-safe exception catch. Should never occur.
self.logger.warning('Unexpected failure while cleaning up ACTIVE cluster_mode', exc_info=True)
await asyncio.sleep(RETRY_SECONDS)
if not self.standby_starting:
job.set_progress(24, 'Abandoned job.')
return
else:
job.set_progress(24, 'Cleared cluster_mode on ACTIVE node')
self.logger.debug('Cleared cluster_mode on ACTIVE node')
# Reload on ACTIVE node. This will ensure the HA targets are available
if self.standby_starting:
try:
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget', self.HA_PROPAGATE])
except Exception:
self.logger.warning('Failed to reload ACTIVE iscsitarget', exc_info=True)
# Next login the HA targets.
reload_remote_quick_retries = RELOAD_REMOTE_QUICK_RETRIES
while self.standby_starting:
try:
while self.standby_starting:
try:
# Logout any targets that have no associated LUN (may have been BUSY during login)
await self.middleware.call('iscsi.target.logout_empty_ha_targets')
# Login any missing targets
before_iqns = await self.middleware.call('iscsi.target.logged_in_iqns')
await self.middleware.call('iscsi.target.login_ha_targets')
after_iqns = await self.middleware.call('iscsi.target.logged_in_iqns')
if before_iqns != after_iqns:
await asyncio.sleep(HA_TARGET_SETTLE_SECONDS)
active_iqns = await self.middleware.call('iscsi.target.active_ha_iqns')
after_iqns_set = set(after_iqns.keys())
active_iqns_set = set(active_iqns.values())
if active_iqns_set.issubset(after_iqns_set):
break
job.set_progress(23, f'Detected {len(active_iqns_set - after_iqns_set)} missing HA targets')
if reload_remote_quick_retries > 0:
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget', self.HA_PROPAGATE])
reload_remote_quick_retries -= 1
await asyncio.sleep(HA_TARGET_SETTLE_SECONDS)
else:
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget', self.HA_PROPAGATE])
await asyncio.sleep(REMOTE_RELOAD_LONG_DELAY_SECS)
except Exception:
if reload_remote_quick_retries > 0:
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget', self.HA_PROPAGATE])
reload_remote_quick_retries -= 1
await asyncio.sleep(RETRY_SECONDS)
if not self.standby_starting:
job.set_progress(25, 'Abandoned job.')
return
else:
job.set_progress(25, 'Logged in HA targets')
# Now that we've logged in the HA targets, regenerate the config so that the
# dev_disk DEVICEs are present (we cleared _standby_write_empty_config above).
# We will need these, so that then we can switch them to cluster_mode
await self.middleware.call('service.reload', 'iscsitarget')
job.set_progress(30, 'Non cluster_mode config written')
# Sanity check that all the targets surfaced up thru SCST okay.
devices = list(itertools.chain.from_iterable([x for x in after_iqns.values() if x is not None]))
if await self.middleware.call('iscsi.scst.check_cluster_mode_paths_present', devices):
break
self.logger.debug('Detected missing cluster_mode. Retrying.')
self._standby_write_empty_config = False
await self.middleware.call('iscsi.target.logout_ha_targets')
await self.middleware.call('service.reload', 'iscsitarget')
job.set_progress(20, 'Logged out HA targets (local node)')
except Exception:
self.logger.warning('Failed to login and surface HA targets', exc_info=True)
# Now that the ground is cleared, start enabling cluster_mode on extents
while self.standby_starting:
try:
# We'll refetch the extents each time round the loop in case more have been added
extents = set((await self.middleware.call('iscsi.extent.logged_in_extents')).keys())
# Choose the next batch of extents to enable.
to_enable = set(itertools.islice(extents - self.enabled, CHUNK_SIZE))
if to_enable:
# First we will ensure they are in cluster_mode on the ACTIVE
while self.standby_starting:
try:
remote_clustered_extents = set(await self.middleware.call('failover.call_remote', 'iscsi.target.clustered_extents'))
todo_remote = to_enable - remote_clustered_extents
if todo_remote:
remote_requires_reload = True
await self.middleware.call('failover.call_remote', 'iscsi.scst.set_devices_cluster_mode', [list(todo_remote), 1])
else:
break
except Exception:
await asyncio.sleep(RETRY_SECONDS)
# Enable on STANDBY. If we fail here, we'll still go back around the main loop.
ok = False
enable_retries = STANDBY_ENABLE_DEVICES_RETRIES
while not ok and enable_retries:
ok = await self.middleware.call('iscsi.alua.standby_enable_devices', list(to_enable))
if not ok:
await asyncio.sleep(1)
enable_retries -= 1
if not ok:
# This shouldn't ever occur.
self.logger.error('Failed to enable cluster mode on devices: %r', to_enable)
progress = 30 + (70 * (len(self.enabled) / len(extents)))
job.set_progress(progress, 'Failed to enable cluster mode on devices. Retrying.')
await asyncio.sleep(SLOW_RETRY_SECONDS)
else:
local_requires_reload = True
# Update progress
self.enabled.update(to_enable)
progress = 30 + (70 * (len(self.enabled) / len(extents)))
job.set_progress(progress, f'Enabled {len(self.enabled)} extents')
self.logger.info('Set cluster_mode on for %r extents', len(self.enabled))
else:
break
except Exception:
# This is a fail-safe exception catch. Should never occur.
self.logger.warning('standby_start job', exc_info=True)
await asyncio.sleep(RETRY_SECONDS)
if not self.standby_starting:
job.set_progress(100, 'Abandoned job.')
return
# No point trying to write a full config until we have HA targets
self._standby_write_empty_config = False
if remote_requires_reload:
try:
if local_requires_reload:
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
else:
await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget', self.HA_PROPAGATE])
except Exception as e:
self.logger.warning('Failed to reload iscsitarget: %r', e, exc_info=True)
elif local_requires_reload:
await self.middleware.call('service.reload', 'iscsitarget')
job.set_progress(100, 'All targets in cluster_mode')
self.standby_starting = False
self.standby_alua_ready = True
self.logger.debug('ALUA started on STANDBY')
@job(lock='standby_delayed_reload', transient=True)
async def standby_delayed_reload(self, job):
await asyncio.sleep(30)
# Verify again that we are ALUA STANDBY
if await self.middleware.call('iscsi.global.alua_enabled'):
if await self.middleware.call('failover.status') == 'BACKUP':
await self.middleware.call('service.reload', 'iscsitarget', {'ha_propagate': False})
@job(lock='standby_fix_cluster_mode', transient=True)
async def standby_fix_cluster_mode(self, job, devices):
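        """Ensure cluster_mode is enabled on both nodes for the supplied devices, reloading iscsitarget if needed."""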
if self._standby_write_empty_config is not False:
self.logger.debug('Skipping standby_fix_cluster_mode')
return
job.set_progress(0, 'Fixing cluster_mode')
logged_in_extents = await self.middleware.call('iscsi.extent.logged_in_extents')
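        # logged_in_extents maps source extent name -> local device; invert it so devices can be mapped back to extents.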
device_to_srcextent = {v: k for k, v in logged_in_extents.items()}
pruned_devices = [device for device in devices if device in device_to_srcextent]
need_to_reload = False
for chunk in chunker(pruned_devices, 10):
# First wait to ensure cluster_mode paths are present (10 x 0.2 = 2 secs)
retries = 10
while retries:
present = await self.middleware.call('iscsi.scst.check_cluster_mode_paths_present', chunk)
if present:
break
await asyncio.sleep(0.2)
retries -= 1
if not retries:
self.logger.warning(f'Timed out waiting for cluster_mode to surface for some of {chunk}')
# Next ensure cluster_mode is enabled on the ACTIVE node
try:
rextents = [device_to_srcextent[device] for device in chunk]
lextents = chunk
except KeyError:
                # Things may have been logged out since we last checked
logged_in_extents = await self.middleware.call('iscsi.extent.logged_in_extents')
device_to_srcextent = {v: k for k, v in logged_in_extents.items()}
rextents = []
lextents = []
for device in chunk:
if device in device_to_srcextent:
rextents.append(device_to_srcextent[device])
lextents.append(device)
if rextents:
self.logger.debug(f'Setting cluster_mode on ACTIVE node for {rextents}')
await self.middleware.call('failover.call_remote', 'iscsi.scst.set_devices_cluster_mode', [rextents, 1])
# Then ensure cluster_mode is enabled on the STANDBY node. Retry if necessary.
retries = 10
while retries:
try:
self.logger.debug(f'Setting cluster_mode on STANDBY node for {lextents}')
await self.middleware.call('iscsi.scst.set_devices_cluster_mode', lextents, 1)
break
except Exception:
self.logger.warning(f'Failed to set cluster_mode on STANDBY node for {lextents}', exc_info=True)
retries -= 1
await asyncio.sleep(1)
need_to_reload = True
if need_to_reload:
job.set_progress(90, 'Fixed cluster_mode')
await asyncio.sleep(1)
# Now that we have enabled cluster_mode, need to reload iscsitarget so that
# it will now offer the targets to the world.
await self.middleware.call('service.reload', 'iscsitarget')
job.set_progress(100, 'Reloaded iscsitarget service')
else:
job.set_progress(100, 'Fixed cluster_mode')
self.logger.debug(f'Fixed cluster_mode for {len(devices)} extents')
async def wait_cluster_mode(self, target_id, extent_id):
"""After we add a target/extent mapping we wish to wait for the ALUA state to settle."""
self.logger.debug(f'Wait for extent with ID {extent_id}')
retries = 30
while retries:
# Do some basic checks each time round the loop to ensure we're still valid.
if not await self.middleware.call("iscsi.global.alua_enabled"):
return
if not await self.middleware.call('failover.remote_connected'):
return
if await self.middleware.call('service.get_unit_state', 'iscsitarget') not in ['active', 'activating']:
return
# We can only deal with active targets. Otherwise we cannot login to the HA target from the STANDBY node.
targetname = (await self.middleware.call('iscsi.target.query', [['id', '=', target_id]], {'select': ['name']}))[0]['name']
active_targets = await self.middleware.call('iscsi.target.active_targets')
if targetname not in active_targets:
self.logger.debug(f'Target {targetname} is not active (in an ALUA sense)')
return
retries -= 1
            # The locked and enabled checks are already covered by the active_targets check above
lextent = (await self.middleware.call('iscsi.extent.query', [['id', '=', extent_id]], {'select': ['name']}))[0]['name']
# Check to see if the extent is available on the remote node yet
logged_in_extents = await self.middleware.call('failover.call_remote', 'iscsi.extent.logged_in_extents')
if lextent not in logged_in_extents:
self.logger.debug(f'Sleep while we wait for {lextent} to get logged in')
await asyncio.sleep(1)
continue
rextent = logged_in_extents[lextent]
# Have the dev_handlers surfaced cluster_mode yet:
# - local
if not await self.middleware.call('iscsi.scst.check_cluster_mode_paths_present', [lextent]):
self.logger.debug(f'Sleep while we wait for {lextent} cluster_mode to surface')
await asyncio.sleep(1)
continue
# - remote
if not await self.middleware.call('failover.call_remote', 'iscsi.scst.check_cluster_mode_paths_present', [[rextent]]):
self.logger.debug(f'Sleep while we wait for {rextent} cluster_mode to surface')
await asyncio.sleep(1)
continue
# OK, now check whether we've made it into cluster mode yet
# - local
if await self.middleware.call('iscsi.scst.get_cluster_mode', lextent) != "1":
self.logger.debug(f'Sleep while we wait for {lextent} to enter cluster_mode')
await asyncio.sleep(1)
continue
# - remote
if await self.middleware.call('failover.call_remote', 'iscsi.scst.get_cluster_mode', [rextent]) != "1":
self.logger.debug(f'Sleep while we wait for {rextent} to enter cluster_mode')
await asyncio.sleep(1)
continue
# If we get here, we're good to go!
self.logger.debug(f'Completed wait for {lextent}/{rextent} to enter cluster_mode')
return
async def removed_target_extent(self, target_name, lun, extent_name):
"""This is called on the STANDBY node to remove an extent from a target."""
if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call("failover.status") == 'BACKUP':
try:
# First we will remove the LUN from the target.
global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
iqn = f'{global_basename}:{target_name}'
await self.middleware.call('iscsi.scst.delete_lun', iqn, lun)
# Next we will disable cluster_mode for the extent
ha_iqn = f'{global_basename}:HA:{target_name}'
device = await self.middleware.call('iscsi.extent.logged_in_extent', ha_iqn, lun)
if device:
await self.middleware.call('iscsi.scst.set_devices_cluster_mode', [device], 0)
# If we have removed a LUN from a target, it'd be nice to think that we could just do one of the following
# - for i in /sys/class/scsi_device/*/device/rescan ; do echo 1 > $i ; done
# - iscsiadm -m node -R
                # etc., but (currently) these don't work. Therefore we'll use a sledgehammer.
await self.middleware.call('iscsi.target.logout_ha_target', target_name)
finally:
await self.middleware.call('service.reload', 'iscsitarget')
async def has_active_jobs(self):
"""Return whether any ALUA jobs are running or queued."""
running_jobs = await self.middleware.call(
'core.get_jobs', [
('method', 'in', [
'iscsi.alua.active_elected',
'iscsi.alua.activate_extents',
'iscsi.alua.standby_after_start',
'iscsi.alua.standby_delayed_reload',
'iscsi.alua.standby_fix_cluster_mode',
]),
('state', 'in', ['RUNNING', 'WAITING']),
]
)
return bool(running_jobs)
async def settled(self):
"""Check whether the ALUA state is settled"""
if not await self.middleware.call("iscsi.global.alua_enabled"):
return True
# Check local: running & no active ALUA jobs
if (await self.middleware.call("service.get_unit_state", 'iscsitarget')) != 'active':
return False
if await self.middleware.call('iscsi.alua.has_active_jobs'):
return False
# Check remote: running & no active ALUA jobs
try:
if (await self.middleware.call('failover.call_remote', 'service.get_unit_state', ['iscsitarget'])) != 'active':
return False
if await self.middleware.call('failover.call_remote', 'iscsi.alua.has_active_jobs'):
return False
except Exception:
# If we fail to communicate with the other node, then we cannot be said to be settled.
return False
return True
async def wait_for_alua_settled(self, sleep_interval=1, retries=10):
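        """Poll until the ALUA state settles, giving up after the specified number of retries."""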
while retries > 0:
if await self.middleware.call('iscsi.alua.settled'):
return
# self.logger.debug('Waiting for ALUA settle')
await asyncio.sleep(sleep_interval)
retries -= 1
self.logger.warning('Gave up waiting for ALUA to settle')
@job(lock='force_close_sessions', transient=True, lock_queue_size=1)
async def force_close_sessions(self, job):
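        """Force-close all open iSCSI sessions on this node (delegates to scst_util.sh)."""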
job.set_progress(0, 'Start force-close of iSCSI sessions')
self.logger.debug('Start force-close of iSCSI sessions')
await run('scst_util.sh', 'force-close')
job.set_progress(100, 'Complete force-close of iSCSI sessions')
self.logger.debug('Complete force-close of iSCSI sessions')
@job(lock='reset_active', transient=True, lock_queue_size=1)
async def reset_active(self, job):
"""Job to be run on the ACTIVE node before the STANDBY node will join."""
job.set_progress(0, 'Start logout HA targets')
self.logger.debug('Start logout HA targets')
# This is similar, but not identical to iscsi.target.logout_ha_targets
# The main difference is these are logged out in series, to allow e.g. cluster_mode settle
        # This is also why it is a job: it may take longer to run.
iqns = await self.middleware.call('iscsi.target.active_ha_iqns')
# Check what's already logged in
existing = await self.middleware.call('iscsi.target.logged_in_iqns')
        # Generate the set of things we want to log out (don't assume every IQN, just the HA ones)
todo = set(iqn for iqn in iqns.values() if iqn in existing)
count = 0
remote_ip = await self.middleware.call('failover.remote_ip')
while todo and (iqn := todo.pop()):
try:
await self.middleware.call('iscsi.target.logout_iqn', remote_ip, iqn)
count += 1
except Exception:
self.logger.warning('Failed to logout %r', iqn, exc_info=True)
self.logger.debug('Logged out %d HA targets', count)
job.set_progress(50, 'Logged out HA targets')
await self.middleware.call('dlm.eject_peer')
self.logger.debug('Ejected peer')
        job.set_progress(100, 'Ejected peer')
| 34,336 | Python | .py | 601 | 42.823627 | 145 | 0.593355 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,599 | port_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/port_attachments.py | from middlewared.common.ports import ServicePortDelegate
class ISCSIGlobalServicePortDelegate(ServicePortDelegate):
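    """Report the iSCSI service's listen port to the middleware port attachment framework."""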
name = 'iSCSI'
namespace = 'iscsi.global'
port_fields = ['listen_port']
title = 'iSCSI Service'
async def setup(middleware):
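    # Called automatically when the plugin is loaded; registers the delegate with the port framework.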
await middleware.call('port.register_attachment_delegate', ISCSIGlobalServicePortDelegate(middleware))
| 369 | Python | .py | 8 | 42 | 106 | 0.794944 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |