id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,600 | scst.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/scst.py | import asyncio
import pathlib
from middlewared.service import Service
SCST_BASE = '/sys/kernel/scst_tgt'
SCST_TARGETS_ISCSI_ENABLED_PATH = '/sys/kernel/scst_tgt/targets/iscsi/enabled'
SCST_DEVICES = '/sys/kernel/scst_tgt/devices'
SCST_SUSPEND = '/sys/kernel/scst_tgt/suspend'
SCST_CONTROLLER_A_TARGET_GROUPS_STATE = '/sys/kernel/scst_tgt/device_groups/targets/target_groups/controller_A/state'
SCST_CONTROLLER_B_TARGET_GROUPS_STATE = '/sys/kernel/scst_tgt/device_groups/targets/target_groups/controller_B/state'
class iSCSITargetService(Service):
    """Thin, guarded wrappers around the SCST sysfs tree (/sys/kernel/scst_tgt)."""
    class Config:
        namespace = 'iscsi.scst'
        private = True
def path_write(self, path, text):
p = pathlib.Path(path)
realpath = str(p.resolve())
if realpath.startswith(SCST_BASE) and p.exists():
p.write_text(text)
else:
raise ValueError(f'Unexpected path "{realpath}"')
async def set_all_cluster_mode(self, value):
text = f'{int(value)}\n'
paths = await self.middleware.call('iscsi.scst.cluster_mode_paths')
if paths:
await asyncio.gather(*[self.middleware.call('iscsi.scst.path_write', path, text) for path in paths])
def cluster_mode_paths(self):
scst_tgt_devices = pathlib.Path(SCST_DEVICES)
if scst_tgt_devices.exists():
return [str(p) for p in scst_tgt_devices.glob('*/cluster_mode')]
else:
return []
def cluster_mode_devices_set(self):
devices = []
scst_tgt_devices = pathlib.Path(SCST_DEVICES)
if scst_tgt_devices.exists():
for p in scst_tgt_devices.glob('*/cluster_mode'):
if p.read_text().splitlines()[0] == '1':
devices.append(p.parent.name)
return devices
def check_cluster_modes_clear(self):
scst_tgt_devices = pathlib.Path(SCST_DEVICES)
if scst_tgt_devices.exists():
for p in scst_tgt_devices.glob('*/cluster_mode'):
if p.read_text().splitlines()[0] == '1':
return False
return True
def check_cluster_mode_paths_present(self, devices):
for device in devices:
if not pathlib.Path(f'{SCST_DEVICES}/{device}/cluster_mode').exists():
return False
return True
def get_cluster_mode(self, device):
try:
return pathlib.Path(f'{SCST_DEVICES}/{device}/cluster_mode').read_text().splitlines()[0]
except Exception:
return "UNKNOWN"
async def set_device_cluster_mode(self, device, value):
await self.middleware.call('iscsi.scst.path_write', f'{SCST_DEVICES}/{device}/cluster_mode', f'{int(value)}\n')
async def set_devices_cluster_mode(self, devices, value):
text = f'{int(value)}\n'
paths = [f'{SCST_DEVICES}/{device}/cluster_mode' for device in devices]
if paths:
await asyncio.gather(*[self.middleware.call('iscsi.scst.path_write', path, text) for path in paths])
    def disable(self):
        # Flip the iSCSI target driver off at the sysfs level.
        pathlib.Path(SCST_TARGETS_ISCSI_ENABLED_PATH).write_text('0\n')
    def enable(self):
        # Flip the iSCSI target driver on at the sysfs level.
        pathlib.Path(SCST_TARGETS_ISCSI_ENABLED_PATH).write_text('1\n')
    def suspend(self, value=10):
        # Write `value` to SCST's suspend node.  NOTE(review): the semantics
        # of the number (count vs. timeout) are defined by the SCST sysfs
        # interface — confirm against SCST documentation.
        pathlib.Path(SCST_SUSPEND).write_text(f'{value}\n')
    def clear_suspend(self):
        """suspend could have been called several times, and will need to be decremented
        several times to clean"""
        try:
            p = pathlib.Path(SCST_SUSPEND)
            # Decrement at most 30 times; writing '-1' lowers the suspend
            # count by one each pass.  Returns True once it reads back '0'.
            for i in range(30):
                if p.read_text().strip() == '0':
                    return True
                else:
                    p.write_text('-1\n')
        except FileNotFoundError:
            # No suspend node => nothing to clear; fall through to False.
            pass
        return False
def enabled(self):
try:
return pathlib.Path(SCST_TARGETS_ISCSI_ENABLED_PATH).read_text().strip() == '1'
except FileNotFoundError:
return False
    def is_kernel_module_loaded(self):
        # The scst_tgt sysfs root only exists while the SCST module is loaded.
        return pathlib.Path(SCST_BASE).exists()
def activate_extent(self, extent_name, extenttype, path):
if pathlib.Path(path).exists():
if extenttype == 'DISK':
p = pathlib.Path(f'{SCST_BASE}/handlers/vdisk_blockio/{extent_name}/active')
else:
p = pathlib.Path(f'{SCST_BASE}/handlers/vdisk_fileio/{extent_name}/active')
try:
p.write_text('1\n')
return True
except Exception:
# Return False on ANY exception
return False
else:
return False
    def delete_lun(self, iqn, lun):
        # 'del <lun>' through the security_group mgmt node removes the LUN
        # from the running target configuration.
        pathlib.Path(f'{SCST_BASE}/targets/iscsi/{iqn}/ini_groups/security_group/luns/mgmt').write_text(f'del {lun}\n')
    def replace_lun(self, iqn, extent, lun):
        # 'replace <extent> <lun>' swaps the device backing a LUN in place.
        pathlib.Path(f'{SCST_BASE}/targets/iscsi/{iqn}/ini_groups/security_group/luns/mgmt').write_text(f'replace {extent} {lun}\n')
    def set_node_optimized(self, node):
        """Update which node is reported as being the active/optimized path."""
        # The peer controller is demoted to 'nonoptimized' before the chosen
        # controller is promoted to 'active'.
        if node == 'A':
            pathlib.Path(SCST_CONTROLLER_B_TARGET_GROUPS_STATE).write_text("nonoptimized\n")
            pathlib.Path(SCST_CONTROLLER_A_TARGET_GROUPS_STATE).write_text("active\n")
        else:
            pathlib.Path(SCST_CONTROLLER_A_TARGET_GROUPS_STATE).write_text("nonoptimized\n")
            pathlib.Path(SCST_CONTROLLER_B_TARGET_GROUPS_STATE).write_text("active\n")
| 5,442 | Python | .py | 115 | 37.443478 | 132 | 0.623161 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,601 | targets.py | truenas_middleware/src/middlewared/middlewared/plugins/iscsi_/targets.py | import asyncio
import errno
import os
import pathlib
import re
import subprocess
from collections import defaultdict
import middlewared.sqlalchemy as sa
from middlewared.schema import Bool, Dict, Int, IPAddr, List, Patch, Str, accepts
from middlewared.service import CallError, CRUDService, ValidationErrors, private
from middlewared.utils import UnexpectedFailure, run
from .utils import AUTHMETHOD_LEGACY_MAP
RE_TARGET_NAME = re.compile(r'^[-a-z0-9\.:]+$')
class iSCSITargetModel(sa.Model):
    """ORM model for an iSCSI target row (services_iscsitarget)."""
    __tablename__ = 'services_iscsitarget'
    id = sa.Column(sa.Integer(), primary_key=True)
    iscsi_target_name = sa.Column(sa.String(120), unique=True)
    iscsi_target_alias = sa.Column(sa.String(120), nullable=True, unique=True)
    iscsi_target_mode = sa.Column(sa.String(20), default='iscsi')
    iscsi_target_auth_networks = sa.Column(sa.JSON(list))
    iscsi_target_rel_tgt_id = sa.Column(sa.Integer(), unique=True)
class iSCSITargetGroupModel(sa.Model):
    """ORM model linking a target to a portal/initiator group plus auth settings."""
    __tablename__ = 'services_iscsitargetgroups'
    __table_args__ = (
        sa.Index(
            'services_iscsitargetgroups_iscsi_target_id__iscsi_target_portalgroup_id',
            'iscsi_target_id', 'iscsi_target_portalgroup_id',
            unique=True
        ),
    )
    id = sa.Column(sa.Integer(), primary_key=True)
    iscsi_target_id = sa.Column(sa.ForeignKey('services_iscsitarget.id'), index=True)
    iscsi_target_portalgroup_id = sa.Column(sa.ForeignKey('services_iscsitargetportal.id'), index=True)
    iscsi_target_initiatorgroup_id = sa.Column(
        sa.ForeignKey('services_iscsitargetauthorizedinitiator.id', ondelete='SET NULL'), index=True, nullable=True
    )
    iscsi_target_authtype = sa.Column(sa.String(120), default='None')
    iscsi_target_authgroup = sa.Column(sa.Integer(), nullable=True)
    iscsi_target_initialdigest = sa.Column(sa.String(120), default='Auto')
class iSCSITargetService(CRUDService):
    """CRUD service for iSCSI targets, including SCST / HA-ALUA handling."""
    class Config:
        namespace = 'iscsi.target'
        datastore = 'services.iscsitarget'
        datastore_prefix = 'iscsi_target_'
        datastore_extend = 'iscsi.target.extend'
        cli_namespace = 'sharing.iscsi.target'
        role_prefix = 'SHARING_ISCSI_TARGET'
    @private
    async def extend(self, data):
        """Normalize a raw datastore row: uppercase the mode and flatten groups."""
        data['mode'] = data['mode'].upper()
        data['groups'] = await self.middleware.call(
            'datastore.query',
            'services.iscsitargetgroups',
            [('iscsi_target', '=', data['id'])],
        )
        for group in data['groups']:
            # Strip datastore bookkeeping fields and collapse FK dicts to ids.
            group.pop('id')
            group.pop('iscsi_target')
            group.pop('iscsi_target_initialdigest')
            for i in ('portal', 'initiator'):
                val = group.pop(f'iscsi_target_{i}group')
                if val:
                    val = val['id']
                group[i] = val
            group['auth'] = group.pop('iscsi_target_authgroup')
            # Map the legacy datastore auth method name to the API enum.
            group['authmethod'] = AUTHMETHOD_LEGACY_MAP.get(
                group.pop('iscsi_target_authtype')
            )
        return data
    @accepts(Dict(
        'iscsi_target_create',
        Str('name', required=True),
        Str('alias', null=True),
        Str('mode', enum=['ISCSI', 'FC', 'BOTH'], default='ISCSI'),
        List('groups', items=[
            Dict(
                'group',
                Int('portal', required=True),
                Int('initiator', default=None, null=True),
                Str('authmethod', enum=['NONE', 'CHAP', 'CHAP_MUTUAL'], default='NONE'),
                Int('auth', default=None, null=True),
            ),
        ]),
        List('auth_networks', items=[IPAddr('ip', network=True)]),
        register=True
    ), audit='Create iSCSI target', audit_extended=lambda data: data["name"])
    async def do_create(self, data):
        """
        Create an iSCSI Target.
        `groups` is a list of group dictionaries which provide information related to using a `portal`, `initiator`,
        `authmethod` and `auth` with this target. `auth` represents a valid iSCSI Authorized Access and defaults to
        null.
        `auth_networks` is a list of IP/CIDR addresses which are allowed to use this initiator. If all networks are
        to be allowed, this field should be left empty.
        """
        verrors = ValidationErrors()
        await self.__validate(verrors, data, 'iscsi_target_create')
        verrors.check()
        await self.compress(data)
        groups = data.pop('groups')
        data['rel_tgt_id'] = await self.middleware.call('iscsi.target.get_rel_tgt_id')
        pk = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        try:
            await self.__save_groups(pk, groups)
        except Exception as e:
            # Roll back the freshly inserted target row if group creation failed.
            await self.middleware.call('datastore.delete', self._config.datastore, pk)
            raise e
        # First process the local (MASTER) config
        await self._service_change('iscsitarget', 'reload', options={'ha_propagate': False})
        # Then process the remote (BACKUP) config if we are HA and ALUA is enabled.
        if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call('failover.remote_connected'):
            await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
            await self.middleware.call('iscsi.alua.wait_for_alua_settled')
        return await self.get_instance(pk)
    async def __save_groups(self, pk, new, old=None):
        """
        Update database with a set of new target groups.
        It will delete no longer existing groups and add new ones.
        """
        # Compare groups as sets of item tuples so dict order is irrelevant.
        new_set = set([tuple(i.items()) for i in new])
        old_set = set([tuple(i.items()) for i in old]) if old else set()
        # Remove groups present before but absent from the new configuration.
        for i in old_set - new_set:
            i = dict(i)
            targetgroup = await self.middleware.call(
                'datastore.query',
                'services.iscsitargetgroups',
                [
                    ('iscsi_target', '=', pk),
                    ('iscsi_target_portalgroup', '=', i['portal']),
                    ('iscsi_target_initiatorgroup', '=', i['initiator']),
                    ('iscsi_target_authtype', '=', i['authmethod']),
                    ('iscsi_target_authgroup', '=', i['auth']),
                ],
            )
            if targetgroup:
                await self.middleware.call(
                    'datastore.delete', 'services.iscsitargetgroups', targetgroup[0]['id']
                )
        # Insert groups that are new in this configuration.
        for i in new_set - old_set:
            i = dict(i)
            await self.middleware.call(
                'datastore.insert',
                'services.iscsitargetgroups',
                {
                    'iscsi_target': pk,
                    'iscsi_target_portalgroup': i['portal'],
                    'iscsi_target_initiatorgroup': i['initiator'],
                    'iscsi_target_authtype': i['authmethod'],
                    'iscsi_target_authgroup': i['auth'],
                },
            )
    async def __validate(self, verrors, data, schema_name, old=None):
        """Accumulate validation errors for a target create/update payload into `verrors`."""
        if name_error := await self.validate_name(data['name'], old['id'] if old is not None else None):
            verrors.add(f'{schema_name}.name', name_error)
        if data.get('alias') is not None:
            if '"' in data['alias']:
                verrors.add(f'{schema_name}.alias', 'Double quotes are not allowed')
            elif data['alias'] == 'target':
                verrors.add(f'{schema_name}.alias', 'target is a reserved word')
            else:
                # Alias must be unique among other targets.
                filters = [('alias', '=', data['alias'])]
                if old:
                    filters.append(('id', '!=', old['id']))
                aliases = await self.middleware.call(
                    f'{self._config.namespace}.query', filters, {'force_sql_filters': True}
                )
                if aliases:
                    verrors.add(f'{schema_name}.alias', 'Alias already exists')
        if (
            data['mode'] != 'ISCSI' and
            not await self.middleware.call('system.feature_enabled', 'FIBRECHANNEL')
        ):
            verrors.add(f'{schema_name}.mode', 'Fibre Channel not enabled')
        # Creating target without groups should be allowed for API 1.0 compat
        # if not data['groups']:
        #     verrors.add(f'{schema_name}.groups', 'At least one group is required')
        db_portals = list(
            map(
                lambda v: v['id'],
                await self.middleware.call('datastore.query', 'services.iSCSITargetPortal', [
                    ['id', 'in', list(map(lambda v: v['portal'], data['groups']))]
                ])
            )
        )
        db_initiators = list(
            map(
                lambda v: v['id'],
                await self.middleware.call('datastore.query', 'services.iSCSITargetAuthorizedInitiator', [
                    ['id', 'in', list(map(lambda v: v['initiator'], data['groups']))]
                ])
            )
        )
        # Each portal may appear at most once per target, and every referenced
        # portal/initiator must exist in the database.
        portals = []
        for i, group in enumerate(data['groups']):
            if group['portal'] in portals:
                verrors.add(
                    f'{schema_name}.groups.{i}.portal',
                    f'Portal {group["portal"]} cannot be duplicated on a target'
                )
            elif group['portal'] not in db_portals:
                verrors.add(
                    f'{schema_name}.groups.{i}.portal',
                    f'{group["portal"]} Portal not found in database'
                )
            else:
                portals.append(group['portal'])
            if group['initiator'] and group['initiator'] not in db_initiators:
                verrors.add(
                    f'{schema_name}.groups.{i}.initiator',
                    f'{group["initiator"]} Initiator not found in database'
                )
            if not group['auth'] and group['authmethod'] in ('CHAP', 'CHAP_MUTUAL'):
                verrors.add(
                    f'{schema_name}.groups.{i}.auth',
                    'Authentication group is required for CHAP and CHAP Mutual'
                )
            elif group['auth'] and group['authmethod'] == 'CHAP_MUTUAL':
                # Mutual CHAP additionally requires a peer user on the auth group.
                auth = await self.middleware.call('iscsi.auth.query', [('tag', '=', group['auth'])])
                if not auth:
                    verrors.add(f'{schema_name}.groups.{i}.auth', 'Authentication group not found', errno.ENOENT)
                else:
                    if not auth[0]['peeruser']:
                        verrors.add(
                            f'{schema_name}.groups.{i}.auth',
                            f'Authentication group {group["auth"]} does not support CHAP Mutual'
                        )
    @accepts(Str('name'),
             Int('existing_id', null=True, default=None),
             roles=['SHARING_ISCSI_TARGET_WRITE'])
    async def validate_name(self, name, existing_id):
        """
        Returns validation error for iSCSI target name
        :param name: name to be validated
        :param existing_id: id of an existing iSCSI target that will receive this name (or `None` if a new target
        is being created)
        :return: error message (or `None` if there is no error)
        """
        # RE_TARGET_NAME is anchored (^...$), so search() behaves like a full match.
        if not RE_TARGET_NAME.search(name):
            return 'Only lowercase alphanumeric characters plus dot (.), dash (-), and colon (:) are allowed.'
        else:
            filters = [('name', '=', name)]
            if existing_id is not None:
                filters.append(('id', '!=', existing_id))
            names = await self.middleware.call('iscsi.target.query', filters, {'force_sql_filters': True})
            if names:
                return 'Target with this name already exists'
    @accepts(
        Int('id'),
        Patch(
            'iscsi_target_create',
            'iscsi_target_update',
            ('attr', {'update': True})
        ),
        audit='Update iSCSI target',
        audit_callback=True,
    )
    async def do_update(self, audit_callback, id_, data):
        """
        Update iSCSI Target of `id`.
        """
        old = await self.get_instance(id_)
        audit_callback(old['name'])
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        # NOTE(review): the create schema name is reused here, so validation
        # errors are reported under 'iscsi_target_create' — confirm intended.
        await self.__validate(verrors, new, 'iscsi_target_create', old=old)
        verrors.check()
        await self.compress(new)
        groups = new.pop('groups')
        oldgroups = old.copy()
        await self.compress(oldgroups)
        oldgroups = oldgroups['groups']
        await self.middleware.call(
            'datastore.update', self._config.datastore, id_, new,
            {'prefix': self._config.datastore_prefix}
        )
        await self.__save_groups(id_, groups, oldgroups)
        # First process the local (MASTER) config
        await self._service_change('iscsitarget', 'reload', options={'ha_propagate': False})
        # Then process the BACKUP config if we are HA and ALUA is enabled.
        if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call('failover.remote_connected'):
            await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
        return await self.get_instance(id_)
    @accepts(Int('id'),
             Bool('force', default=False),
             audit='Delete iSCSI target',
             audit_callback=True,
    )
    async def do_delete(self, audit_callback, id_, force):
        """
        Delete iSCSI Target of `id`.
        Deleting an iSCSI Target makes sure we delete all Associated Targets which use `id` iSCSI Target.
        """
        target = await self.get_instance(id_)
        audit_callback(target['name'])
        # Refuse to delete an in-use target unless `force` was given.
        if await self.active_sessions_for_targets([target['id']]):
            if force:
                self.middleware.logger.warning('Target %s is in use.', target['name'])
            else:
                raise CallError(f'Target {target["name"]} is in use.')
        for target_to_extent in await self.middleware.call('iscsi.targetextent.query', [['target', '=', id_]]):
            await self.middleware.call('iscsi.targetextent.delete', target_to_extent['id'], force)
        await self.middleware.call(
            'datastore.delete', 'services.iscsitargetgroups', [['iscsi_target', '=', id_]]
        )
        rv = await self.middleware.call('datastore.delete', self._config.datastore, id_)
        # If HA and ALUA handle BACKUP first
        if await self.middleware.call("iscsi.global.alua_enabled") and await self.middleware.call('failover.remote_connected'):
            await self.middleware.call('failover.call_remote', 'iscsi.target.remove_target', [target["name"]])
            await self.middleware.call('failover.call_remote', 'service.reload', ['iscsitarget'])
            await self.middleware.call('failover.call_remote', 'iscsi.target.logout_ha_target', [target["name"]])
            await self.middleware.call('iscsi.alua.wait_for_alua_settled')
        await self.middleware.call('iscsi.target.remove_target', target["name"])
        await self._service_change('iscsitarget', 'reload', options={'ha_propagate': False})
        # Attempt to cleanup initiators as the wizard may have created a single-use one
        try:
            initiators = [group['initiator'] for group in target['groups'] if group['initiator'] is not None]
            for initiator in initiators:
                # Ensure not used elsewhere
                targets = await self.middleware.call('iscsi.target.query', [['groups.*.initiator', '=', initiator]])
                if not targets:
                    await self.middleware.call('iscsi.initiator.delete', initiator)
        except Exception:
            # Best-effort cleanup; deletion of the target itself already succeeded.
            self.logger.error('Failed to clean up target initiators for %r', target['name'], exc_info=True)
        return rv
    @private
    @accepts(Str('name'))
    async def remove_target(self, name):
        """Force-remove a target from the running SCST config via scstadmin."""
        # We explicitly need to do this unfortunately as scst does not accept these changes with a reload
        # So this is the best way to do this without going through a restart of the service
        if await self.middleware.call('service.started', 'iscsitarget'):
            g_config = await self.middleware.call('iscsi.global.config')
            cp = await run([
                'scstadmin', '-force', '-noprompt', '-rem_target',
                f'{g_config["basename"]}:{name}', '-driver', 'iscsi'
            ], check=False)
            if cp.returncode:
                self.middleware.logger.error('Failed to remove %r target: %s', name, cp.stderr.decode())
@private
async def get_rel_tgt_id(self):
existing = {target['rel_tgt_id'] for target in await self.middleware.call(f'{self._config.namespace}.query', [], {'select': ['rel_tgt_id']})}
for i in range(1, 32000):
if i not in existing:
return i
raise ValueError("Unable to deletmine rel_tgt_id")
    @private
    async def active_sessions_for_targets(self, target_id_list):
        """Return the session target names for any of the given target ids with active sessions."""
        targets = await self.middleware.call(
            'iscsi.target.query', [['id', 'in', target_id_list]],
            {'force_sql_filters': True},
        )
        check_targets = []
        global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
        for target in targets:
            name = target['name']
            # Fully-qualified names (iqn./naa./eui.) are used as-is; others
            # get the global basename prefix.
            if not name.startswith(('iqn.', 'naa.', 'eui.')):
                name = f'{global_basename}:{name}'
            check_targets.append(name)
        return [
            s['target'] for s in await self.middleware.call(
                'iscsi.global.sessions', [['target', 'in', check_targets]]
            )
        ]
    @private
    async def compress(self, data):
        """Inverse of `extend`: convert API-shaped data back to datastore form."""
        data['mode'] = data['mode'].lower()
        for group in data['groups']:
            group['authmethod'] = AUTHMETHOD_LEGACY_MAP.inv.get(group.pop('authmethod'), 'NONE')
        # If we specified the alias as the empty string, store it as NULL instead to prevent clash
        # on UNIQUE in the database.
        if data.get("alias", None) == "":
            data['alias'] = None
        return data
    @private
    async def discover(self, ip):
        """Run an iscsiadm sendtargets discovery against `ip`."""
        cmd = ['iscsiadm', '-m', 'discovery', '-t', 'st', '-p', ip]
        err = f'DISCOVER: {ip!r}'
        try:
            cp = await run(cmd, stderr=subprocess.STDOUT, encoding='utf-8')
        except Exception as e:
            err += f' ERROR: {str(e)}'
            raise UnexpectedFailure(err)
        else:
            if cp.returncode != 0:
                err += f' ERROR: {cp.stdout}'
                # NOTE(review): the process exit code is handed to os.strerror
                # as if it were an errno; the text may be misleading.
                raise OSError(cp.returncode, os.strerror(cp.returncode), err)
    @private
    async def login_iqn(self, ip, iqn, no_wait=False):
        """Login to the given iqn at `ip` via iscsiadm; raise on any failure."""
        cmd = ['iscsiadm', '-m', 'node', '-p', ip, '-T', iqn, '--login']
        if no_wait:
            cmd.append('--no_wait')
        err = f'LOGIN: {ip!r} {iqn!r}'
        try:
            cp = await run(cmd, stderr=subprocess.STDOUT, encoding='utf-8')
        except Exception as e:
            err += f' ERROR: {str(e)}'
            raise UnexpectedFailure(err)
        else:
            if cp.returncode != 0:
                err += f' ERROR: {cp.stdout}'
                raise OSError(cp.returncode, os.strerror(cp.returncode), err)
    @private
    async def logout_iqn(self, ip, iqn, no_wait=False, timeout=30):
        """Logout of the given iqn at `ip` via iscsiadm; raise on any failure."""
        cmd = ['iscsiadm', '-m', 'node', '-p', ip, '-T', iqn, '--logout']
        if no_wait:
            cmd.append('--no_wait')
        err = f'LOGOUT: {ip!r} {iqn!r}'
        try:
            cp = await run(cmd, stderr=subprocess.STDOUT, encoding='utf-8', timeout=timeout)
        except Exception as e:
            err += f' ERROR: {str(e)}'
            raise UnexpectedFailure(err)
        else:
            if cp.returncode != 0:
                err += f' ERROR: {cp.stdout}'
                raise OSError(cp.returncode, os.strerror(cp.returncode), err)
@private
def logged_in_iqns(self):
"""
:return: dict keyed by iqn, with list of the unsurfaced disk names as the value
"""
results = defaultdict(list)
p = pathlib.Path('/sys/devices/platform')
for targetname in p.glob('host*/session*/iscsi_session/session*/targetname'):
iqn = targetname.read_text().strip()
for disk in targetname.parent.glob('device/target*/*/scsi_disk'):
results[iqn].append(disk.parent.name)
return results
@private
def logged_in_empty_iqns(self):
"""
:return: list of logged in iqns without any associated unsurfaced disks
"""
results = []
p = pathlib.Path('/sys/devices/platform')
for targetname in p.glob('host*/session*/iscsi_session/session*/targetname'):
found = False
iqn = targetname.read_text().strip()
for _item in targetname.parent.glob('device/target*/*/scsi_disk'):
found = True
break
if not found:
results.append(iqn)
return results
@private
def set_genhd_hidden_ips(self, ips):
"""
Set the kernel parameter /sys/module/iscsi_tcp/parameters/genhd_hidden_ips to the
specified string, if not already set to it.
"""
p = pathlib.Path('/sys/module/iscsi_tcp/parameters/genhd_hidden_ips')
if not p.exists():
try:
subprocess.run(["modprobe", "iscsi_tcp"])
except subprocess.CalledProcessError as e:
self.logger.error('Failed to load iscsi_tcp kernel module. Error %r', e)
if p.read_text().rstrip() != ips:
p.write_text(ips)
    @private
    async def login_ha_targets(self, no_wait=False, raise_error=False):
        """
        When called on a HA BACKUP node will attempt to login to all internal HA targets,
        used in ALUA.

        :param no_wait: pass --no_wait to iscsiadm logins
        :param raise_error: raise CallError on login failures instead of only logging
        :return: dict keyed by target name, with list of the unsurfaced disk names or None as the value
        """
        iqns = await self.middleware.call('iscsi.target.active_ha_iqns')
        global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
        # Check what's already logged in
        existing = await self.middleware.call('iscsi.target.logged_in_iqns')
        # Generate the set of things we want to login
        todo = set()
        for iqn in iqns.values():
            if iqn not in existing:
                todo.add(iqn)
        if todo:
            remote_ip = await self.middleware.call('failover.remote_ip')
            # Ensure we have configured our kernel so that when we login to the
            # peer controller's iSCSI targets no disk surfaces.
            await self.middleware.call('iscsi.target.set_genhd_hidden_ips', remote_ip)
            # Now we need to do an iscsiadm discovery
            await self.discover(remote_ip)
            # Then login the targets (in parallel)
            exceptions = await asyncio.gather(*[self.login_iqn(remote_ip, iqn, no_wait) for iqn in todo], return_exceptions=True)
            failures = []
            for iqn, exc in zip(todo, exceptions):
                if isinstance(exc, Exception):
                    failures.append(str(exc))
                else:
                    self.logger.info('Successfully logged into %r', iqn)
            if failures:
                err = f'Failure logging in to targets: {", ".join(failures)}'
                if raise_error:
                    raise CallError(err)
                else:
                    self.logger.error(err)
            # Regen existing as it should have now changed
            existing = await self.middleware.call('iscsi.target.logged_in_iqns')
            # This below one does NOT have the desired impact, despite the output from 'iscsiadm -m node -o show'
            # cmd = ['iscsiadm', '-m', 'node', '-o', 'update', '-n', 'node.session.timeo.replacement_timeout', '-v', '10']
            # await run(cmd, stderr=subprocess.STDOUT, encoding='utf-8')
            # So instead do this.
            await self.middleware.call('iscsi.target.set_ha_targets_sys', f'{global_basename}:HA:', 'recovery_tmo', '10\n')
        # Now calculate the result to hand back.
        result = {}
        for name, iqn in iqns.items():
            result[name] = existing.get(iqn, None)
        return result
    @private
    async def logout_ha_target(self, name, no_wait=False):
        """Logout of the internal HA target `name` on the peer, if currently logged in."""
        global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
        iqn = f'{global_basename}:HA:{name}'
        existing = await self.middleware.call('iscsi.target.logged_in_iqns')
        if iqn in existing:
            remote_ip = await self.middleware.call('failover.remote_ip')
            await self.middleware.call('iscsi.target.logout_iqn', remote_ip, iqn, no_wait)
    @private
    async def logout_ha_targets(self, no_wait=False, raise_error=False):
        """
        When called on a HA BACKUP node will attempt to logout of all internal HA targets,
        used in ALUA.
        """
        ha_iqn_prefix_str = await self.middleware.call('iscsi.target.ha_iqn_prefix')
        # Check what's already logged in
        existing = await self.middleware.call('iscsi.target.logged_in_iqns')
        # Generate the set of things we want to logout (don't assume every IQN, just the HA ones)
        todo = set()
        for iqn in existing.keys():
            if iqn.startswith(ha_iqn_prefix_str):
                todo.add(iqn)
        if todo:
            remote_ip = await self.middleware.call('failover.remote_ip')
            # Logout the targets (in parallel)
            exceptions = await asyncio.gather(*[self.logout_iqn(remote_ip, iqn, no_wait) for iqn in todo], return_exceptions=True)
            failures = []
            for iqn, exc in zip(todo, exceptions):
                if isinstance(exc, Exception):
                    failures.append(str(exc))
                else:
                    self.logger.info('Successfully logged out from %r', iqn)
            if failures:
                err = f'Failure logging out from targets: {", ".join(failures)}'
                if raise_error:
                    raise CallError(err)
                else:
                    self.logger.error(err)
    @private
    async def logout_empty_ha_targets(self, no_wait=False, raise_error=False):
        """
        When called on a HA BACKUP node will attempt to logout of all HA targets,
        used in ALUA, which are not currently associated with a LUN.
        This can occur if the target is reporting as BUSY (i.e. suspended) during login.
        """
        iqns = await self.middleware.call('iscsi.target.active_ha_iqns')
        # Check what's already logged in, but has no LUNs
        existing = await self.middleware.call('iscsi.target.logged_in_empty_iqns')
        # Generate the set of things we want to logout (don't assume every IQN, just the HA ones)
        todo = set()
        for iqn in iqns.values():
            if iqn in existing:
                todo.add(iqn)
        if todo:
            remote_ip = await self.middleware.call('failover.remote_ip')
            # Logout the targets (in parallel)
            exceptions = await asyncio.gather(*[self.logout_iqn(remote_ip, iqn, no_wait) for iqn in todo], return_exceptions=True)
            failures = []
            for iqn, exc in zip(todo, exceptions):
                if isinstance(exc, Exception):
                    failures.append(str(exc))
                else:
                    self.logger.info('Successfully logged out from %r', iqn)
            if failures:
                err = f'Failure logging out from targets: {", ".join(failures)}'
                if raise_error:
                    raise CallError(err)
                else:
                    self.logger.error(err)
@private
def clustered_extents(self):
extents = []
basepath = pathlib.Path('/sys/kernel/scst_tgt/handlers')
for p in basepath.glob('*/*/cluster_mode'):
with p.open() as f:
if f.readline().strip() == '1':
extents.append(p.parent.name)
return extents
    @private
    async def cluster_mode_targets(self):
        """
        Returns a list of target names that are currently in cluster_mode on this controller.
        """
        targets = await self.middleware.call('iscsi.target.query')
        extents = {extent['id']: extent for extent in await self.middleware.call('iscsi.extent.query', [['enabled', '=', True]])}
        assoc = await self.middleware.call('iscsi.targetextent.query')
        # Generate a dict, keyed by target ID whose value is a set of associated extent names
        target_extents = defaultdict(set)
        # Locked extents are excluded from consideration.
        for a_tgt in filter(
                lambda a: a['extent'] in extents and not extents[a['extent']]['locked'],
                assoc
        ):
            target_extents[a_tgt['target']].add(extents[a_tgt['extent']]['name'])
        # Check sysfs to see what extents are in cluster mode
        cl_extents = set(await self.middleware.call('iscsi.target.clustered_extents'))
        # Now iterate over all the targets and return a list of those whose extents are all
        # in cluster mode. Exclude targets with no extents.
        result = []
        for target in targets:
            if target_extents[target['id']] and target_extents[target['id']].issubset(cl_extents):
                result.append(target['name'])
        return result
    @private
    async def cluster_mode_targets_luns(self):
        """
        Returns a tuple containing:
        - list of target names that are currently in cluster_mode on this controller.
        - dict keyed by target name, where the value is a list of luns that are currently in cluster_mode on this controller.
        """
        targets = await self.middleware.call('iscsi.target.query')
        extents = {extent['id']: extent for extent in await self.middleware.call('iscsi.extent.query', [['enabled', '=', True]])}
        assoc = await self.middleware.call('iscsi.targetextent.query')
        # Generate a dict, keyed by target ID whose value is a set of associated extent names
        target_extents = defaultdict(set)
        # Also Generate a dict, keyed by target ID whose value is a set of (lunID, extent name) tuples
        target_luns = defaultdict(set)
        for a_tgt in filter(
                lambda a: a['extent'] in extents and not extents[a['extent']]['locked'],
                assoc
        ):
            target_id = a_tgt['target']
            extent_name = extents[a_tgt['extent']]['name']
            target_extents[target_id].add(extent_name)
            target_luns[target_id].add((a_tgt['lunid'], extent_name))
        # Check sysfs to see what extents are in cluster mode
        cl_extents = set(await self.middleware.call('iscsi.target.clustered_extents'))
        cluster_mode_targets = []
        cluster_mode_luns = defaultdict(list)
        for target in targets:
            # Find targets whose extents are all in cluster mode. Exclude targets with no extents.
            if target_extents[target['id']] and target_extents[target['id']].issubset(cl_extents):
                cluster_mode_targets.append(target['name'])
                for (lunid, extent_name) in target_luns.get(target['id'], {}):
                    if extent_name in cl_extents:
                        cluster_mode_luns[target['name']].append(lunid)
        return (cluster_mode_targets, cluster_mode_luns)
@private
async def active_targets(self):
"""
Returns the names of all targets whose extents are neither disabled nor locked,
and which have at least one extent configured.
"""
filters = [['OR', [['enabled', '=', False], ['locked', '=', True]]]]
bad_extents = []
for extent in await self.middleware.call('iscsi.extent.query', filters):
bad_extents.append(extent['id'])
targets = {t['id']: t['name'] for t in await self.middleware.call('iscsi.target.query', [], {'select': ['id', 'name']})}
assoc = {a_tgt['extent']: a_tgt['target'] for a_tgt in await self.middleware.call('iscsi.targetextent.query')}
for bad_extent in bad_extents:
del targets[assoc[bad_extent]]
# Also discount targets that do not have any extents
targets_with_extents = assoc.values()
for target_id in list(targets.keys()):
if target_id not in targets_with_extents:
del targets[target_id]
return list(targets.values())
@private
async def active_ha_iqns(self):
"""Return a dict keyed by target name with a value of the corresponding HA IQN for all
targets that are deemed to be active_targets (i.e. no disabled of locked extents, and
at least one extent configured)."""
targets = await self.middleware.call('iscsi.target.active_targets')
global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
iqns = {}
for name in targets:
iqns[name] = f'{global_basename}:HA:{name}'
return iqns
    @private
    def set_ha_targets_sys(self, iqn_prefix, param, text):
        """Write `text` to the sysfs session attribute `param` for every session whose targetname starts with `iqn_prefix`."""
        sys_platform = pathlib.Path('/sys/devices/platform')
        for targetname in sys_platform.glob('host*/session*/iscsi_session/session*/targetname'):
            if targetname.read_text().startswith(iqn_prefix):
                targetname.parent.joinpath(param).write_text(text)
    @private
    async def ha_iqn_prefix(self):
        """Return the IQN prefix ('<basename>:HA:') used by internal HA targets."""
        global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
        return f'{global_basename}:HA:'
@private
async def ha_iqn(self, name):
"""Return the IQN of the specified internal target."""
prefix = await self.middleware.call('iscsi.target.ha_iqn_prefix')
return f'{prefix}{name}'
@private
def iqn_ha_luns(self, iqn):
"""Return a list of (integer) LUNs which are offered by the specified IQN."""
result = []
try:
with os.scandir(f'/sys/kernel/scst_tgt/targets/iscsi/{iqn}/luns') as entries:
for entry in filter(lambda x: x.name.isnumeric(), entries):
result.append(int(entry.name))
except FileNotFoundError:
pass
return result
| 34,518 | Python | .py | 702 | 37.749288 | 149 | 0.587254 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,602 | snapshot_delete.py | truenas_middleware/src/middlewared/middlewared/plugins/vmware_/snapshot_delete.py | from datetime import timedelta
from middlewared.service import periodic, private, Service
import middlewared.sqlalchemy as sa
from middlewared.utils.time_utils import utc_now
# How often the periodic retry job (delete_pending_snapshots) runs.
PENDING_SNAPSHOT_DELETE_INTERVAL = timedelta(hours=3)
# Pending rows older than this are dropped even if the snapshot was never deleted.
PENDING_SNAPSHOT_DELETE_LIFETIME = timedelta(days=7)
class VMWarePendingSnapshotDelete(sa.Model):
    """Queue of VMware snapshot deletions that failed and are retried periodically."""
    __tablename__ = "storage_vmwarependingsnapshotdelete"
    id = sa.Column(sa.Integer(), primary_key=True)
    # Connection payload later passed to `vmware.connect` when retrying.
    vmware = sa.Column(sa.JSON())
    vm_uuid = sa.Column(sa.String(200))
    snapshot_name = sa.Column(sa.String(200))
    # When the pending delete was recorded; compared against the lifetime constant.
    datetime = sa.Column(sa.DateTime())
class VMWareService(Service):
    @private
    async def defer_deleting_snapshot(self, vmware, vm_uuid, snapshot_name):
        """Record a snapshot whose deletion failed so `delete_pending_snapshots` retries it later."""
        await self.middleware.call(
            "datastore.insert",
            "storage.vmwarependingsnapshotdelete",
            {
                "vmware": vmware,
                "vm_uuid": vm_uuid,
                "snapshot_name": snapshot_name,
            },
        )
    @periodic(PENDING_SNAPSHOT_DELETE_INTERVAL.total_seconds(), run_on_start=False)
    @private
    async def delete_pending_snapshots(self):
        """Periodically retry deleting VMware snapshots queued by `defer_deleting_snapshot`.

        A row is removed once its snapshot is deleted, or once it has been queued
        longer than PENDING_SNAPSHOT_DELETE_LIFETIME.
        """
        if not await self.middleware.call("network.general.can_perform_activity", "vmware"):
            return
        for pending_snapshot_delete in await self.middleware.call(
            "datastore.query",
            "storage.vmwarependingsnapshotdelete",
        ):
            deleted = False
            try:
                si = await self.middleware.call("vmware.connect", pending_snapshot_delete["vmware"])
                await self.middleware.call("vmware.delete_vmware_login_failed_alert", pending_snapshot_delete["vmware"])
            except Exception as e:
                # Login failed: raise an alert and leave the row for the next run.
                await self.middleware.call("vmware.alert_vmware_login_failed", pending_snapshot_delete["vmware"], e)
            else:
                try:
                    for vm in await self.middleware.call("vmware.find_vms_by_uuid", si,
                                                         pending_snapshot_delete["vm_uuid"]):
                        try:
                            await self.middleware.call(
                                "vmware.delete_snapshot",
                                vm,
                                pending_snapshot_delete["snapshot_name"],
                            )
                            deleted = True
                        except Exception:
                            # Best effort: keep trying other matching VMs / next run.
                            pass
                except Exception:
                    pass
                await self.middleware.call("vmware.disconnect", si)
            if deleted or utc_now() - pending_snapshot_delete["datetime"] > PENDING_SNAPSHOT_DELETE_LIFETIME:
                await self.middleware.call(
                    "datastore.delete",
                    "storage.vmwarependingsnapshotdelete",
                    pending_snapshot_delete["id"],
                )
| 2,909 | Python | .py | 62 | 32.548387 | 120 | 0.572487 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,603 | cpu_temperatures.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/cpu_temperatures.py | from middlewared.service import private, Service
class ReportingService(Service):
    @private
    async def cpu_temperatures(self):
        """Return per-core CPU temperatures from netdata, or {} when no core
        reported a truthy value."""
        metrics = await self.middleware.call('netdata.get_all_metrics')
        dimensions = metrics.get('cputemp.temperatures', {'dimensions': {}})['dimensions']
        readings = {core: entry['value'] for core, entry in dimensions.items()}
        # An all-falsy result (e.g. every value None/0) is treated as "no data".
        return readings if any(readings.values()) else {}
| 563 | Python | .py | 12 | 38.333333 | 116 | 0.649635 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,604 | netdata_configure.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata_configure.py | import os
import subprocess
from middlewared.service import private, Service
from middlewared.utils.filesystem.copy import copytree, CopyTreeConfig
from .utils import get_netdata_state_path
class ReportingService(Service):
    @private
    def netdata_storage_location(self):
        """Return the netdata data directory on the system dataset, or None if the
        system dataset has no path configured."""
        systemdataset_config = self.middleware.call_sync('systemdataset.config')
        if not systemdataset_config['path']:
            return None
        return f'{systemdataset_config["path"]}/netdata'
    @private
    def netdata_state_location(self):
        # We don't check if system dataset is properly configured here because netdata conf won't be generated
        # if storage location is not properly configured which we check in the netdata etc file.
        return get_netdata_state_path()
    @private
    def post_dataset_mount_action(self):
        """Seed netdata's state directory on the system dataset after it is mounted.

        A no-op if the directory already exists (i.e. it was seeded previously).
        """
        netdata_state_path = get_netdata_state_path()
        # We want to make sure this path exists always regardless of an error so that
        # at least netdata can start itself gracefully
        try:
            os.makedirs(netdata_state_path, exist_ok=False)
        except FileExistsError:
            # Already seeded on a previous mount; leave existing state untouched.
            return
        try:
            copytree('/var/lib/netdata', netdata_state_path, config=CopyTreeConfig())
        except Exception:
            # Best effort: netdata can still start with an empty state directory.
            self.logger.error('Failed to copy netdata state over from /var/lib/netdata', exc_info=True)
        # uid=999/gid=997 presumably match the netdata user/group -- TODO confirm
        os.chown(netdata_state_path, uid=999, gid=997)
        os.chmod(netdata_state_path, mode=0o755)
    @private
    async def start_service(self):
        """Start netdata, skipped on HA-licensed systems (NOTE(review): presumably
        failover handles it there -- confirm)."""
        if await self.middleware.call('failover.licensed'):
            return
        await self.middleware.call('service.start', 'netdata')
| 1,697 | Python | .py | 37 | 37.864865 | 110 | 0.69436 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,605 | processes.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/processes.py | import time
import psutil
from middlewared.event import EventSource
from middlewared.service import CallError
class ProcessesEventSource(EventSource):
    """
    Retrieve currently running processes stats.

    Usage: reporting.processes:{"interval": 10, "cpu_percent": 0.1, "memory_percent": 0.1}
    """
    def run_sync(self):
        """Emit an ADDED event every `interval` seconds listing processes whose CPU
        or memory usage is at or above the configured thresholds.

        The first iteration only primes psutil's per-process CPU counters and
        sends nothing, so cpu_percent values are measured over a full interval.
        """
        options = {
            "interval": 10,
            "cpu_percent": 0.1,
            "memory_percent": 0.1,
            **(self.arg or {})
        }
        if options["interval"] < 5:
            raise CallError("Interval should be >= 5")
        processes = {}
        first_iteration = True
        while not self._cancel_sync.is_set():
            # Re-use Process objects across iterations: psutil hashes them by pid,
            # and cpu_percent() is relative to the previous call on the same object.
            iteration_processes = {}
            for p in psutil.process_iter(["cmdline", "cpu_percent", "memory_percent", "num_threads"]):
                existing_process = processes.get(p)
                if existing_process is not None:
                    p = existing_process  # Keep previously observed CPU time value
                iteration_processes[p] = p
            processes = iteration_processes
            result = []
            for process in processes.values():
                # Skip processes below BOTH thresholds.
                if (
                    process.memory_percent() < options["memory_percent"] and
                    process.cpu_percent() < options["cpu_percent"]
                ):
                    continue
                row = {
                    "cmdline": " ".join(process.cmdline()).strip(),
                    "cpu_percent": process.cpu_percent(),
                    "memory_percent": process.memory_percent(),
                    "num_threads": process.num_threads(),
                    "pid": process.pid,
                }
                result.append(row)
            if not first_iteration:
                self.send_event("ADDED", fields={"processes": result})
            first_iteration = False
            time.sleep(options["interval"])
def setup(middleware):
    # Expose process statistics as the `reporting.processes` event source.
    middleware.register_event_source("reporting.processes", ProcessesEventSource, roles=['REPORTING_READ'])
| 2,073 | Python | .py | 49 | 29.693878 | 107 | 0.550075 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,606 | rest.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/rest.py | import glob
import logging
from middlewared.service import accepts, Service
from middlewared.schema import Str, Dict, Int
from middlewared.utils.cpu import cpu_info
from middlewared.utils.disk_stats import get_disk_stats
from middlewared.utils.zfs import query_imported_fast_impl
from .netdata import ClientConnectError, Netdata
from .utils import calculate_disk_space_for_netdata, get_metrics_approximation, TIER_0_POINT_SIZE, TIER_1_POINT_SIZE
# Module-level logger for netdata client connectivity diagnostics.
logger = logging.getLogger('netdata_api')
class NetdataService(Service):
    """Thin wrappers around the local Netdata client plus disk-space sizing helpers."""
    class Config:
        private = True
    async def get_charts(self):
        """Return all charts known to the local netdata instance."""
        return await Netdata.get_charts()
    async def active_total_metrics(self):
        """Count the total number of dimensions across all netdata charts."""
        number = 0
        for chart_details in (await Netdata.get_charts()).values():
            number += len(chart_details['dimensions'])
        return number
    @accepts(Str('chart', required=True))
    async def get_chart_details(self, chart):
        """Return details of a single netdata chart."""
        return await Netdata.get_chart_details(chart)
    @accepts(
        Str('chart', required=True),
        Dict(
            Int('before', required=False, default=0),
            Int('after', required=False, default=-1),
        ),
    )
    async def get_chart_metrics(self, chart, data):
        """Return metric values of `chart` within the `before`/`after` window."""
        return await Netdata.get_chart_metrics(chart, data)
    async def get_all_metrics(self):
        """Return the latest values of all metrics, or {} if netdata is unreachable."""
        try:
            return await Netdata.get_all_metrics()
        except ClientConnectError:
            logger.debug('Failed to connect to netdata when retrieving all metrics')
            return {}
    def calculated_metrics_count(self):
        """Approximate metric counts (keyed by collection interval) this system
        generates, based on disk/CPU/interface/pool/VM/service counts."""
        return get_metrics_approximation(
            len(self.middleware.call_sync('device.get_disks', False, True)),
            cpu_info()['core_count'],
            self.middleware.call_sync('interface.query', [], {'count': True}),
            len(query_imported_fast_impl()),
            self.middleware.call_sync('datastore.query', 'vm.vm', [], {'count': True}),
            len(glob.glob('/sys/fs/cgroup/**/*.service')),
        )
    def get_disk_space_for_tier0(self):
        """Disk space (MB) needed for tier0 retention per the reporting config."""
        config = self.middleware.call_sync('reporting.config')
        return calculate_disk_space_for_netdata(
            self.calculated_metrics_count(), config['tier0_days'], TIER_0_POINT_SIZE, 1,
        )
    def get_disk_space_for_tier1(self):
        """Disk space (MB) needed for tier1 retention per the reporting config."""
        config = self.middleware.call_sync('reporting.config')
        return calculate_disk_space_for_netdata(
            self.calculated_metrics_count(), config['tier1_days'], TIER_1_POINT_SIZE, config['tier1_update_interval'],
        )
    def get_disk_stats(self, disk_identifier_mapping=None):
        """Pass-through to middlewared.utils.disk_stats.get_disk_stats."""
        return get_disk_stats(disk_identifier_mapping)
| 2,668 | Python | .py | 59 | 37.372881 | 118 | 0.667952 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,607 | export.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/export.py | import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, Str, returns
from middlewared.service import CRUDService, private, ValidationErrors
from .exporters.factory import export_factory
class ReportingExportsModel(sa.Model):
    """One configured reporting exporter (type + its type-specific attributes)."""
    __tablename__ = 'reporting_exporters'
    id = sa.Column(sa.Integer(), primary_key=True)
    enabled = sa.Column(sa.Boolean())
    # Exporter type key, one of export_factory.get_exporters().
    type = sa.Column(sa.String())
    # User-defined, unique name of this exporter configuration.
    name = sa.Column(sa.String())
    # Type-specific settings validated by the exporter's validate_config().
    attributes = sa.Column(sa.JSON())
class ReportingExportsService(CRUDService):
    """CRUD service managing reporting exporter configurations.

    Netdata is restarted after every create/update/delete so the exporter
    configuration takes effect.
    """
    class Config:
        namespace = 'reporting.exporters'
        datastore = 'reporting.exporters'
        cli_namespace = 'reporting.exporters'
        role_prefix = 'REPORTING'
    ENTRY = Dict(
        'reporting_exporter_entry',
        Int('id', required=True),
        Bool('enabled', required=True),
        Str(
            'type', enum=[authenticator for authenticator in export_factory.get_exporters()],
            required=True,
        ),
        Dict(
            'attributes',
            additional_attrs=True,
            description='Specific attributes of each `exporter`'
        ),
        Str('name', description='User defined name of exporter configuration', required=True),
    )
    def __init__(self, *args, **kwargs):
        super(ReportingExportsService, self).__init__(*args, **kwargs)
        # Cache {type: schema} of all supported exporter types for validation.
        self.exporters = self.get_exporter_schemas()
    @private
    async def common_validation(self, data, schema_name, old=None):
        """Validate name uniqueness, exporter type and type-specific attributes.

        On success `data['attributes']` is replaced with the exporter's
        normalized configuration; raises ValidationErrors otherwise.
        """
        verrors = ValidationErrors()
        # Exclude the record being updated from the uniqueness check.
        filters = [['name', '!=', old['name']]] if old else []
        filters.append(['name', '=', data['name']])
        if await self.query(filters):
            verrors.add(f'{schema_name}.name', 'Specified name is already in use')
        if data['type'] not in self.exporters:
            verrors.add(
                f'{schema_name}.type',
                f'System does not support {data["type"]} as a reporting exporter type.'
            )
        else:
            exporter_obj = self.get_exporter_object(data)
            try:
                data['attributes'] = await exporter_obj.validate_config(data['attributes'])
            except ValidationErrors as ve:
                verrors.extend(ve)
        verrors.check()
    async def do_create(self, data):
        """
        Create a specific reporting exporter configuration containing required details for exporting reporting metrics.
        """
        await self.common_validation(data, 'reporting_exporter_create')
        oid = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
        )
        if data['enabled']:
            # Only restart if this is enabled
            await self.middleware.call('service.restart', 'netdata')
        return await self.get_instance(oid)
    @accepts(
        Int('id'),
        Patch(
            'reporting_exporter_entry',
            'reporting_exporter_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'type'}),
            ('attr', {'update': True}),
        ),
    )
    async def do_update(self, oid, data):
        """
        Update Reporting Exporter of `id`.
        """
        old = await self.get_instance(oid)
        new = old.copy()
        attrs = data.pop('attributes', {})
        new.update(data)
        new['attributes'].update(attrs)  # this is to be done separately so as to not overwrite the dict
        await self.common_validation(new, 'reporting_exporter_update', old)
        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            oid,
            new
        )
        await self.middleware.call('service.restart', 'netdata')
        return await self.get_instance(oid)
    async def do_delete(self, oid):
        """
        Delete Reporting Exporter of `id`.
        """
        await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            oid,
        )
        await self.middleware.call('service.restart', 'netdata')
        return True
    @accepts(roles=['REPORTING_READ'])
    @returns(List(
        title='Reporting Exporter Schemas',
        items=[Dict(
            'schema_entry',
            Str('key', required=True),
            List(
                'schema',
                items=[Dict(
                    'attribute_schema',
                    additional_attrs=True,
                    title='Attribute Schema',
                )],
            ),
            title='Reporting Exporter Schema'
        )],
    ))
    def exporter_schemas(self):
        """
        Get the schemas for all the reporting export types we support with their respective attributes
        required for successfully exporting reporting metrics to them.
        """
        return [
            {'schema': [v.to_json_schema() for v in value.attrs.values()], 'key': key}
            for key, value in self.exporters.items()
        ]
    @private
    def get_exporter_object(self, data):
        """Instantiate the exporter class registered for `data['type']`."""
        return export_factory.exporter(data['type'])()
    @private
    def get_exporter_schemas(self):
        """Return {type: SCHEMA} for all registered exporter types."""
        return {k: klass.SCHEMA for k, klass in export_factory.get_exporters().items()}
| 5,278 | Python | .py | 139 | 28.179856 | 119 | 0.585371 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,608 | netdata_web.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata_web.py | import os
import shutil
import threading
import time
from middlewared.schema import accepts, returns, Str
from middlewared.service import cli_private, job, pass_app, periodic, private, CallError, Service
from middlewared.utils import MIDDLEWARE_RUN_DIR
from middlewared.utils.crypto import generate_string
from passlib.apache import HtpasswdFile
# htpasswd file backing HTTP Basic auth for the netdata web UI.
BASIC_FILE = f'{MIDDLEWARE_RUN_DIR}/netdata-basic'
# Serializes all reads/writes of BASIC_FILE across threads.
HTPASSWD_LOCK = threading.Lock()
class ReportingService(Service):
    @private
    async def netdataweb_basic_file(self):
        """Path of the htpasswd file used for netdata web HTTP Basic auth."""
        return BASIC_FILE
    @cli_private
    @accepts(roles=['READONLY_ADMIN'])
    @returns(Str('password'))
    @pass_app()
    def netdataweb_generate_password(self, app):
        """
        Generate a password to access netdata web.

        That password will be stored in htpasswd format for HTTP Basic access.

        Concurrent access for the same user is not supported and may lead to undesired behavior.
        """
        # Password schema is not used here because for READONLY_ADMIN
        # will make it return "******" instead, breaking this method for that role.
        if app and app.authenticated_credentials.is_user_session:
            authenticated_user = app.authenticated_credentials.user['username']
        else:
            raise CallError('This method needs to be called from an authenticated user only.')
        if not os.path.exists(BASIC_FILE):
            # Create the file with restrictive mode before passlib touches it.
            with open(os.open(BASIC_FILE, flags=os.O_CREAT, mode=0o640)):
                shutil.chown(BASIC_FILE, 'root', 'www-data')
        with HTPASSWD_LOCK:
            ht = HtpasswdFile(BASIC_FILE, autosave=True, default_scheme='bcrypt')
            if ht.get_hash(authenticated_user):
                self.logger.warning('Password for %r already exists, overwriting...', authenticated_user)
            password = generate_string(16, punctuation_chars=True)
            ht.set_password(authenticated_user, password)
        try:
            expire = self.middleware.call_sync('cache.get', 'NETDATA_WEB_EXPIRE')
        except KeyError:
            expire = {}
        # Password will be valid for 8 hours
        expire[authenticated_user] = int(time.monotonic() + 60 * 60 * 8)
        self.middleware.call_sync('cache.put', 'NETDATA_WEB_EXPIRE', expire)
        return password
    @periodic(600)
    @private
    @job(lock='netdataweb_expire', transient=True, lock_queue_size=1)
    def netdataweb_expire(self, job):
        """
        Generated passwords are placed in the HTTP Basic file and should be valid for 8 hours.

        We allow ourselves a 10 minutes wiggle room for simplicity sake, e.g. token can be valid
        for up to 8 hours and 10 minutes.
        """
        if not os.path.exists(BASIC_FILE):
            return
        try:
            expire = self.middleware.call_sync('cache.get', 'NETDATA_WEB_EXPIRE')
        except KeyError:
            expire = {}
        with HTPASSWD_LOCK:
            ht = HtpasswdFile(BASIC_FILE)
            time_now = int(time.monotonic())
            for user in ht.users():
                if expire_time := expire.get(user):
                    if time_now < expire_time:
                        continue
                # User is not in our cache or expired, should be deleted
                ht.delete(user)
            ht.save()
| 3,317 | Python | .py | 73 | 36.315068 | 105 | 0.64921 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,609 | graphs.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/graphs.py | import collections
import errno
import time
import typing
from middlewared.schema import accepts, Dict, List, Ref, returns, Str
from middlewared.service import (
CallError, cli_private, filterable, filterable_returns, private, Service, ValidationErrors
)
from middlewared.utils import filter_list
from .netdata import GRAPH_PLUGINS
from .netdata.graph_base import GraphBase
from .utils import convert_unit, fetch_data_from_graph_plugins
class ReportingService(Service):
    class Config:
        cli_namespace = 'system.reporting'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One plugin instance per registered graph, keyed by graph name.
        # Name-mangled (_ReportingService__graphs) on purpose.
        self.__graphs: typing.Dict[str, GraphBase] = {}
        for name, klass in GRAPH_PLUGINS.items():
            self.__graphs[name] = klass(self.middleware)
    @private
    async def graph_names(self):
        """Names of all registered graph plugins."""
        return list(self.__graphs.keys())
    @cli_private
    @accepts(
        Str('name', required=True),
        Ref('reporting_query'),
        roles=['REPORTING_READ']
    )
    @returns(Ref('netdata_graph_reporting_data'))
    async def netdata_graph(self, name, query):
        """
        Get reporting data for `name` graph.
        """
        graph_plugin = self.__graphs.get(name)
        if graph_plugin is None:
            raise CallError(f'{name!r} is not a valid graph plugin.', errno.ENOENT)
        query_params = await self.middleware.call('reporting.translate_query_params', query)
        await graph_plugin.build_context()
        # Plugins without identifiers are exported once with identifier None.
        identifiers = await graph_plugin.get_identifiers() if graph_plugin.uses_identifiers else [None]
        return await graph_plugin.export_multiple_identifiers(query_params, identifiers, query['aggregate'])
    @cli_private
    @filterable(roles=['REPORTING_READ'])
    @filterable_returns(Dict(
        'reporting_graph',
        Str('name'),
        Str('title'),
        Str('vertical_label'),
        List('identifiers', items=[Str('identifier')], null=True),
        register=True
    ))
    async def netdata_graphs(self, filters, options):
        """
        Get reporting netdata graphs.
        """
        return filter_list([await i.as_dict() for i in self.__graphs.values()], filters, options)
    @cli_private
    @accepts(
        List('graphs', items=[
            Dict(
                'graph',
                Str('name', required=True, enum=[i for i in GRAPH_PLUGINS]),
                Str('identifier', default=None, null=True),
            ),
        ], empty=False),
        Ref('reporting_query'),
        roles=['REPORTING_READ']
    )
    @returns(List('reporting_data', items=[Dict(
        'netdata_graph_reporting_data',
        Str('name', required=True),
        Str('identifier', required=True, null=True),
        List('data'),
        Dict(
            'aggregations',
            List('min'),
            List('max'),
            List('mean'),
        ),
        additional_attrs=True,
        register=True,
    )]))
    async def netdata_get_data(self, graphs, query):
        """
        Get reporting data for given graphs.

        List of possible graphs can be retrieved using `reporting.netdata_graphs` call.

        For the time period of the graph either `unit` and `page` OR `start` and `end` should be
        used, not both.

        `aggregate` will return aggregate available data for each graph (e.g. min, max, mean).

        .. examples(websocket)::

          Get graph data of "nfsstat" from the last hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.netdata_get_data",
                "params": [
                    [{"name": "cpu"}],
                    {"unit": "HOURLY"},
                ]
            }
        """
        query_params = await self.middleware.call('reporting.translate_query_params', query)
        # Group requested identifiers by their owning plugin so each plugin
        # is queried once.
        graph_plugins = collections.defaultdict(list)
        for graph in graphs:
            graph_plugins[self.__graphs[graph['name']]].append(graph['identifier'])
        results = []
        async for result in fetch_data_from_graph_plugins(graph_plugins, query_params, query['aggregate']):
            results.extend(result)
        return results
    @private
    @accepts(Ref('reporting_query'))
    async def netdata_get_all(self, query):
        """Export data of every registered graph plugin for the given query."""
        query_params = await self.middleware.call('reporting.translate_query_params', query)
        rv = []
        for graph_plugin in self.__graphs.values():
            await graph_plugin.build_context()
            identifiers = await graph_plugin.get_identifiers() if graph_plugin.uses_identifiers else [None]
            rv.extend(await graph_plugin.export_multiple_identifiers(query_params, identifiers, query['aggregate']))
        return rv
    @private
    def translate_query_params(self, query):
        """Translate a `reporting_query` into netdata's `before`/`after` timestamps.

        `unit`/`page` and `start`/`end` are mutually exclusive; with neither
        provided, the last hour is used.
        """
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOUR'
            else:
                start_time = int(query['start'])
        end_time = int(query.get('end') or time.time())
        return {
            'before': end_time,
            # Parses as: (end_time - convert_unit(...)) if unit else start_time
            'after': end_time - convert_unit(unit, query['page']) if unit else start_time,
        }
| 5,590 | Python | .py | 142 | 29.760563 | 116 | 0.590892 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,610 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/utils.py | import collections
import contextlib
import os.path
import typing
from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
from .netdata.graph_base import GraphBase
# https://learn.netdata.cloud/docs/netdata-agent/configuration/optimizing-metrics-database/
# change-how-long-netdata-stores-metrics
# On-disk bytes per stored sample in netdata tier0 (full resolution).
TIER_0_POINT_SIZE = 1
# On-disk bytes per stored sample in netdata tier1 (down-sampled).
TIER_1_POINT_SIZE = 4
def calculate_disk_space_for_netdata(
    metric_intervals: dict, days: int, bytes_per_point: int, tier_interval: int
) -> int:
    """Estimate, in whole megabytes, the disk space one netdata storage tier needs.

    `metric_intervals` maps a collection interval (seconds) to the number of
    metrics collected at that interval; `tier_interval` is how often (seconds)
    the tier stores a point and `bytes_per_point` the size of each point.
    """
    points_per_second = sum(count / interval for interval, count in metric_intervals.items())
    total_bytes = days * (86400 / tier_interval) * bytes_per_point * points_per_second
    # 1 MB = 1024 * 1024 bytes; truncate to an int MB figure.
    return int(total_bytes / (1024 * 1024))
def convert_unit(unit: str, page: int) -> int:
    """Convert `page` units of time into seconds.

    `unit` is one of HOUR/DAY/WEEK/MONTH/YEAR (MONTH = 30 days, YEAR = 365
    days); an unknown unit raises KeyError.
    """
    seconds_per_unit = {
        'HOUR': 3600,
        'DAY': 86400,
        'WEEK': 604800,
        'MONTH': 2592000,
        'YEAR': 31536000,
    }
    return seconds_per_unit[unit] * page
async def fetch_data_from_graph_plugins(
    graph_plugins: typing.Dict[GraphBase, list], query_params: dict, aggregate: bool,
) -> collections.abc.AsyncIterable:
    """Yield exported reporting data for each (plugin, identifier list) pair.

    Best effort: a plugin whose export raises is silently skipped.
    """
    for graph_plugin, identifiers in graph_plugins.items():
        await graph_plugin.build_context()
        with contextlib.suppress(Exception):
            yield await graph_plugin.export_multiple_identifiers(query_params, identifiers, aggregate=aggregate)
def get_netdata_state_path() -> str:
    """Return the directory (on the system dataset) holding netdata's persistent state."""
    netdata_state_dir = os.path.join(SYSDATASET_PATH, 'netdata/ix_state')
    return netdata_state_dir
def get_metrics_approximation(
    disk_count: int, core_count: int, interface_count: int, pool_count: int, vms_count: int,
    systemd_service_count: int, containers_count: typing.Optional[int] = 10,
) -> dict:
    """Approximate how many netdata metrics this system produces.

    Returns {collection_interval_seconds: total_metric_count}, derived from the
    hardware/service counts by scaling per-chart dimension counts.

    NOTE(review): despite the Optional annotation, passing containers_count=None
    would break the arithmetic below -- the default of 10 is always expected.
    """
    data = {
        1: {
            'system.cpu': 10,
            'cpu.cpu': 10 * core_count,
            'cpu.cpu0_cpuidle': 4 * core_count,
            'cpu.cpufreq': core_count,
            'system.intr': 1,
            'system.ctxt': 1,
            'system.forks': 1,
            'system.processes': 2,
            'zfs_state_pool': pool_count * 6,
            'system.clock_sync_state': 1,
            'system.clock_status': 2,
            'system.clock_sync_offset': 1,
            # diskstats
            'system.io': 2,
            'truenas_disk_stats.ops': 2 * disk_count,
            'truenas_disk_stats.io': 2 * disk_count,
            'truenas_disk_stats.busy': 1 * disk_count,
            # meminfo
            'system.ram': 4,
            'mem.available': 1,
            'mem.committed': 1,
            'mem.writeback': 5,
            'mem.kernel': 5,
            'mem.slab': 2,
            'mem.transparent_hugepages': 2,
            # net
            'system.net': 2,
            'net': 2 * interface_count,
            'net_speed': interface_count,
            'net_duplex': 3 * interface_count,
            'net_operstate': 7 * interface_count,
            'net_mtu': interface_count,
            'net_packets': 3 * interface_count,
            'net_drops': 2 * interface_count,
            'net_carrier': 2 * interface_count,
            # uptime
            'system.uptime': 1,
            # loadavg
            'system.load': 3,
            'system.active_processes': 1,
            # zfs arcstats
            'zfs.arc_size': 4,
            'zfs.reads': 5,
            'zfs.hits': 2,
            'zfs.hits_rate': 2,
            'zfs.dhits': 2,
            'zfs.dhits_rate': 2,
            'zfs.phits': 2,
            'zfs.phits_rate': 2,
            'zfs.mhits': 2,
            'zfs.mhits_rate': 2,
            'zfs.list_hits': 4,
            'zfs.arc_size_breakdown': 2,
            'zfs.important_ops': 4,
            'zfs.actual_hits': 2,
            'zfs.actual_hits_rate': 2,
            'zfs.demand_data_hits': 2,
            'zfs.demand_data_hits_rate': 2,
            'zfs.prefetch_data_hits': 2,
            'zfs.prefetch_data_hits_rate': 2,
            'zfs.hash_elements': 2,
            'zfs.hash_chains': 2,
            # cputemp
            'cputemp.temperatures': core_count,
            # ups
            'nut_ups.charge': 1,
            'nut_ups.runtime': 1,
            'nut_ups.battery_voltage': 4,
            'nut_ups.input_voltage': 3,
            'nut_ups.input_current': 1,
            'nut_ups.input_frequency': 2,
            'nut_ups.output_voltage': 1,
            'nut_ups.load': 1,
            'nut_ups.temp': 1,
            'netdata.plugin_chartsd_nut': 1,
            # cgroups
            'services.io_ops_write': systemd_service_count,
            'services.io_ops_read': systemd_service_count,
            'services.io_write': systemd_service_count,
            'services.io_read': systemd_service_count,
            'services.mem_usage': systemd_service_count,
            'services.cpu': systemd_service_count,
            'cgroup_qemu_vm.cpu_limit': vms_count,
            'cgroup_qemu_vm.cpu': 2 * vms_count,
            'cgroup_qemu_vm.throttled': vms_count,
            'cgroup_qemu_vm.throttled_duration': vms_count,
            'cgroup_qemu_vm.mem': 6 * vms_count,
            'cgroup_qemu_vm.writeback': 2 * vms_count,
            'cgroup_qemu_vm.pgfaults': 2 * vms_count,
            'cgroup_qemu_vm.mem_usage': 2 * vms_count,
            'cgroup_qemu_vm.mem_usage_limit': 2 * vms_count,
            'cgroup_qemu_vm.mem_utilization': vms_count,
            'cgroup_qemu_vm.io': 2 * vms_count,
            'cgroup_qemu_vm.serviced_ops': 2 * vms_count,
            'cgroup_qemu_vm.cpu_some_pressure': 3 * vms_count,
            'cgroup_qemu_vm.cpu_some_pressure_stall_time': vms_count,
            'cgroup_qemu_vm.cpu_full_pressure': 3 * vms_count,
            'cgroup_qemu_vm.cpu_full_pressure_stall_time': vms_count,
            'cgroup_qemu_vm.mem_some_pressure': 3 * vms_count,
            'cgroup_qemu_vm.memory_some_pressure_stall_time': vms_count,
            'cgroup_qemu_vm.mem_full_pressure': 3 * vms_count,
            'cgroup_qemu_vm.memory_full_pressure_stall_time': vms_count,
            'cgroup_qemu_vm.io_some_pressure': 3 * vms_count,
            'cgroup_qemu_vm.io_some_pressure_stall_time': vms_count,
            'cgroup_qemu_vm.io_full_pressure': 3 * vms_count,
            'cgroup_qemu_vm.io_full_pressure_stall_time': vms_count,
            'cgroup_hash.cpu_limit': containers_count,
            'cgroup_hash.cpu': 2 * containers_count,
            'cgroup_hash.throttled': containers_count,
            'cgroup_hash.throttled_duration': containers_count,
            'cgroup_hash.mem': 6 * containers_count,
            'cgroup_hash.writeback': 2 * containers_count,
            'cgroup_hash.pgfaults': 2 * containers_count,
            'cgroup_hash.mem_usage': 2 * containers_count,
            'cgroup_hash.mem_usage_limit': 2 * containers_count,
            'cgroup_hash.mem_utilization': containers_count,
            'cgroup_hash.cpu_some_pressure': 3 * containers_count,
            'cgroup_hash.cpu_some_pressure_stall_time': containers_count,
            'cgroup_hash.cpu_full_pressure': 3 * containers_count,
            'cgroup_hash.cpu_full_pressure_stall_time': containers_count,
            'cgroup_hash.mem_some_pressure': 3 * containers_count,
            'cgroup_hash.memory_some_pressure_stall_time': containers_count,
            'cgroup_hash.mem_full_pressure': 3 * containers_count,
            'cgroup_hash.memory_full_pressure_stall_time': containers_count,
            'cgroup_hash.io_some_pressure': 3 * containers_count,
            'cgroup_hash.io_some_pressure_stall_time': containers_count,
            'cgroup_hash.io_full_pressure': 3 * containers_count,
            'cgroup_hash.io_full_pressure_stall_time': containers_count,
        },
        60: {  # smartd_logs
            'smart_log.temperature_celsius': disk_count}
    }
    # Collapse per-chart counts to a single total per collection interval.
    return {
        sec: sum(d.values()) for sec, d in data.items()
    }
| 8,080 | Python | .py | 180 | 34.1 | 112 | 0.577597 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,611 | events.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/events.py | import psutil
import time
from middlewared.event import EventSource
from middlewared.schema import Dict, Float, Int
from middlewared.utils.disks import get_disk_names, get_disks_with_identifiers
from middlewared.validators import Range
from .realtime_reporting import get_arc_stats, get_cpu_stats, get_disk_stats, get_interface_stats, get_memory_info
class RealtimeEventSource(EventSource):
    """
    Retrieve real time statistics for CPU, network,
    virtual memory and zfs arc.
    """
    ACCEPTS = Dict(
        Int('interval', default=2, validators=[Range(min_=2)]),
    )
    RETURNS = Dict(
        Dict('cpu', additional_attrs=True),
        Dict(
            'disks',
            Float('busy'),
            Float('read_bytes'),
            Float('write_bytes'),
            Float('read_ops'),
            Float('write_ops'),
        ),
        Dict('interfaces', additional_attrs=True),
        Dict(
            'memory',
            Dict(
                'classes',
                Int('apps'),
                Int('arc'),
                Int('buffers'),
                Int('cache'),
                Int('page_tables'),
                Int('slab_cache'),
                Int('unused'),
            ),
            Dict('extra', additional_attrs=True),
        ),
        Dict('virtual_memory', additional_attrs=True),
        Dict(
            'zfs',
            Int('arc_max_size'),
            Int('arc_size'),
            Float('cache_hit_ratio'),
        ),
    )
    def run_sync(self):
        """Emit an ADDED event every `interval` seconds with current system stats.

        If netdata cannot be reached (after one retry) the event only carries
        `failed_to_connect: True`.
        """
        interval = self.arg['interval']
        cores = self.middleware.call_sync('system.info')['cores']
        disk_mapping = get_disks_with_identifiers()
        while not self._cancel_sync.is_set():
            # this gathers the most recent metric recorded via netdata (for all charts)
            retries = 2
            while retries > 0:
                try:
                    netdata_metrics = self.middleware.call_sync('netdata.get_all_metrics')
                except Exception:
                    retries -= 1
                    if retries <= 0:
                        raise
                    time.sleep(0.5)
                else:
                    break
            if failed_to_connect := not bool(netdata_metrics):
                data = {'failed_to_connect': failed_to_connect}
            else:
                disks = get_disk_names()
                # Refresh the disk identifier mapping when disks appear/disappear.
                if len(disks) != len(disk_mapping):
                    disk_mapping = get_disks_with_identifiers()
                data = {
                    'zfs': get_arc_stats(netdata_metrics),  # ZFS ARC Size
                    'memory': get_memory_info(netdata_metrics),
                    'virtual_memory': psutil.virtual_memory()._asdict(),
                    'cpu': get_cpu_stats(netdata_metrics, cores),
                    'disks': get_disk_stats(netdata_metrics, disks, disk_mapping),
                    'interfaces': get_interface_stats(
                        netdata_metrics, [
                            iface['name'] for iface in self.middleware.call_sync(
                                'interface.query', [], {'extra': {'retrieve_names_only': True}}
                            )
                        ]
                    ),
                    'failed_to_connect': False,
                }
                # CPU temperature
                data['cpu']['temperature_celsius'] = self.middleware.call_sync('reporting.cpu_temperatures') or None
            self.send_event('ADDED', fields=data)
            time.sleep(interval)
def setup(middleware):
    # Expose real-time system statistics as the `reporting.realtime` event source.
    middleware.register_event_source('reporting.realtime', RealtimeEventSource, roles=['REPORTING_READ'])
| 3,669 | Python | .py | 92 | 26.423913 | 116 | 0.515577 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,612 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/attachments.py | from middlewared.common.ports import ServicePortDelegate
from .netdata.utils import NETDATA_PORT
class ReportingServicePortDelegate(ServicePortDelegate):
    """Advertises netdata's port to the ports subsystem for conflict detection."""
    name = 'reporting'
    namespace = 'reporting'
    title = 'Reporting Service'
    async def get_ports_bound_on_wildcards(self):
        """Ports this service binds on wildcard addresses."""
        return [NETDATA_PORT]
async def setup(middleware):
    """Register the reporting port delegate with the port attachment registry."""
    delegate = ReportingServicePortDelegate(middleware)
    await middleware.call('port.register_attachment_delegate', delegate)
| 458 | Python | .py | 10 | 41.3 | 104 | 0.795918 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,613 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/update.py | import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str, Timestamp
from middlewared.service import cli_private, filterable, filterable_returns, ConfigService, private
from middlewared.validators import Range
from .netdata import GRAPH_PLUGINS
class ReportingModel(sa.Model):
    """Single-row configuration table backing the reporting service."""
    __tablename__ = 'reporting'
    id = sa.Column(sa.Integer(), primary_key=True)
    # presumably tier0 (high-resolution) retention days, mirroring tier1_days -- TODO confirm
    tier0_days = sa.Column(sa.Integer(), default=7)
    # Days of history netdata keeps in tier1 storage (see ReportingService.do_update).
    tier1_days = sa.Column(sa.Integer(), default=30)
    tier1_update_interval = sa.Column(sa.Integer(), default=300)  # This is in seconds
class ReportingService(ConfigService):
    """Configuration and query endpoints for the netdata-backed reporting subsystem."""
    class Config:
        cli_namespace = 'system.reporting'
        datastore = 'reporting'
        role_prefix = 'REPORTING'
    # Only `tier1_days` is exposed through the API; the datastore row also carries
    # tier0_days / tier1_update_interval defaults (see ReportingModel).
    ENTRY = Dict(
        'reporting_entry',
        Int('tier1_days', validators=[Range(min_=1)], required=True),
    )
    async def do_update(self, data):
        """
        `tier1_days` can be set to specify for how many days we want to store reporting history which in netdata
        terms specifies the number of days netdata should be storing data in tier1 storage.
        """
        old_config = await self.config()
        config = old_config.copy()
        config.update(data)
        await self.middleware.call('datastore.update', self._config.datastore, old_config['id'], config)
        # netdata only picks up retention changes on restart.
        await self.middleware.call('service.restart', 'netdata')
        return await self.config()
    @cli_private
    @filterable(roles=['REPORTING_READ'])
    @filterable_returns(Ref('reporting_graph'))
    async def graphs(self, filters, options):
        """Query the list of available reporting graphs."""
        return await self.middleware.call('reporting.netdata_graphs', filters, options)
    @cli_private
    @accepts(
        List('graphs', items=[
            Dict(
                'graph',
                Str('name', required=True, enum=[i for i in GRAPH_PLUGINS]),
                Str('identifier', default=None, null=True),
            ),
        ], empty=False),
        Dict(
            'reporting_query',
            Str('unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']),
            Int('page', default=1, validators=[Range(min_=1)]),
            Timestamp('start'),
            Timestamp('end'),
            Bool('aggregate', default=True),
            register=True,
        ),
        roles=['REPORTING_READ']
    )
    @returns(Ref('netdata_graph_reporting_data'))
    async def get_data(self, graphs, query):
        """
        Get reporting data for given graphs.
        List of possible graphs can be retrieved using `reporting.graphs` call.
        For the time period of the graph either `unit` and `page` OR `start` and `end` should be
        used, not both.
        `aggregate` will return aggregate available data for each graph (e.g. min, max, mean).
        .. examples(websocket)::
        Get graph data of "nfsstat" from the last hour.
        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "reporting.get_data",
            "params": [
                [{"name": "nfsstat"}],
                {"unit": "HOUR"},
            ]
        }
        """
        return await self.middleware.call('reporting.netdata_get_data', graphs, query)
    @private
    @accepts(Ref('reporting_query'))
    async def get_all(self, query):
        """Internal helper: fetch reporting data for every graph at once."""
        return await self.middleware.call('reporting.netdata_get_all', query)
    @cli_private
    @accepts(
        Str('name', required=True),
        Ref('reporting_query'),
        roles=['REPORTING_READ']
    )
    @returns(Ref('netdata_graph_reporting_data'))
    async def graph(self, name, query):
        """
        Get reporting data for `name` graph.
        """
        return await self.middleware.call('reporting.netdata_graph', name, query)
| 3,891 | Python | .py | 94 | 32.382979 | 112 | 0.610758 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,614 | client.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/client.py | import typing
import aiohttp
import aiohttp.client_exceptions
import asyncio
import contextlib
import json
import logging
from .exceptions import ApiException, ClientConnectError
from .utils import NETDATA_URI, NETDATA_REQUEST_TIMEOUT
logger = logging.getLogger('netdata_api')
class ClientMixin:
    """Low-level async HTTP helpers for talking to the local netdata REST API."""
    @classmethod
    @contextlib.asynccontextmanager
    async def request(
        cls, resource: str, timeout: int = NETDATA_REQUEST_TIMEOUT, version: str = 'v1',
    ) -> aiohttp.ClientResponse:
        """
        Async context manager yielding the GET response for `resource`.

        Raises ApiException on non-200 status or timeout/response errors, and
        ClientConnectError when the TCP connection itself fails.
        NOTE(review): the `yield` sits inside the try block, so matching exceptions
        raised by the caller's body are rewrapped here as well -- confirm intended.
        """
        assert version in ('v1', 'v2'), f'Invalid API version {version!r}'
        resource = resource.removeprefix('/')
        uri = f'{NETDATA_URI}/{version}/{resource}'
        try:
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
                async with session.get(uri) as resp:
                    if resp.status != 200:
                        raise ApiException(f'Received {resp.status!r} response code from {uri!r}')
                    yield resp
        except (asyncio.TimeoutError, aiohttp.ClientResponseError) as e:
            raise ApiException(f'Failed {resource!r} call: {e!r}')
        except (aiohttp.client_exceptions.ClientConnectorError, aiohttp.client_exceptions.ClientOSError) as e:
            raise ClientConnectError(f'Failed to connect to {uri!r}: {e!r}')
    @classmethod
    async def api_call(cls, resource: str, timeout: int = NETDATA_REQUEST_TIMEOUT, version: str = 'v1') -> dict:
        """
        GET `resource` and return the parsed JSON body.

        The body is streamed and decoded with errors ignored; note that a
        json.JSONDecodeError from the final parse propagates unwrapped -- only
        ContentTypeError is converted to ApiException here.
        """
        try:
            async with cls.request(resource, timeout, version) as resp:
                output = ''
                async for line in resp.content.iter_any():
                    output += line.decode(errors='ignore')
                return json.loads(output)
        except aiohttp.client_exceptions.ContentTypeError as e:
            raise ApiException(f'Malformed response received from {resource!r} endpoint: {e}')
    @classmethod
    async def fetch(cls, uri: str, session: aiohttp.ClientSession, identifier: typing.Optional[str]) -> dict:
        """
        Best-effort GET of a single URI on an existing session.

        Never raises for HTTP/parse problems; instead returns an envelope dict
        {'error', 'data', 'uri', 'identifier'} with exactly one of error/data set.
        """
        output = ''
        response = {'error': None, 'data': None, 'uri': uri, 'identifier': identifier}
        async with session.get(uri) as call_resp:
            if call_resp.status != 200:
                response['error'] = f'Received {call_resp.status!r} response code from {uri!r}'
            else:
                try:
                    async for line in call_resp.content.iter_any():
                        output += line.decode(errors='ignore')
                    response['data'] = json.loads(output)
                except aiohttp.client_exceptions.ContentTypeError as e:
                    response['error'] = f'Malformed response received from {uri!r} endpoint: {e}'
                except json.JSONDecodeError:
                    response['error'] = f'Failed to decode response from {uri!r}'
        return response
    @classmethod
    @contextlib.asynccontextmanager
    async def multiple_requests(
        cls, resources: typing.List[typing.Tuple[str, str]], timeout: int = NETDATA_REQUEST_TIMEOUT, version: str = 'v1'
    ) -> typing.List[dict]:
        """
        Fetch many (identifier, resource) pairs concurrently on one shared session
        and yield the list of fetch() envelopes, in input order.
        """
        assert version in ('v1', 'v2'), f'Invalid API version {version!r}'
        uri = f'{NETDATA_URI}/{version}'
        tasks = []
        try:
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
                for identifier, resource in resources:
                    resource = resource.removeprefix('/')
                    tasks.append(cls.fetch(f'{uri}/{resource}', session, identifier))
                yield await asyncio.gather(*tasks)
        except (asyncio.TimeoutError, aiohttp.ClientResponseError) as e:
            raise ApiException(f'Failed {resources!r} call: {e!r}')
        except (aiohttp.client_exceptions.ClientConnectorError, aiohttp.client_exceptions.ClientOSError) as e:
            raise ClientConnectError(f'Failed to connect to {uri!r}: {e!r}')
    @classmethod
    async def api_calls(
        cls, resources: typing.List[typing.Tuple[str, str]], timeout: int = NETDATA_REQUEST_TIMEOUT, version: str = 'v1'
    ) -> typing.List[typing.Tuple[typing.Optional[str], dict]]:
        """
        Fetch many resources and return [(identifier, data), ...].

        Endpoints that errored contribute an empty dataset ({'labels': ['time'],
        'data': []}) instead of raising; a connection-level failure is logged and
        whatever accumulated so far (often an empty list) is returned.
        """
        responses = []
        try:
            async with cls.multiple_requests(resources, timeout, version) as tasks:
                for task in tasks:
                    if task['error']:
                        responses.append((task['identifier'], {
                            'labels': ['time'],
                            'data': [],
                        }))
                    else:
                        responses.append((task['identifier'], task['data']))
        except Exception as e:
            logger.debug('Failed to connect to netdata: %s', e)
        return responses
| 4,775 | Python | .py | 92 | 40.086957 | 120 | 0.61235 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,615 | connector.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/connector.py | import errno
import typing
from .client import ClientMixin
from .exceptions import ApiException
from .utils import get_query_parameters
class Netdata(ClientMixin):
    """Typed convenience wrappers over the local netdata v1 REST API."""
    @classmethod
    async def get_info(cls):
        """Get information about the running netdata instance"""
        return await cls.api_call('info', version='v1')
    @classmethod
    async def get_all_metrics(cls):
        """Return the latest value of every metric netdata collects."""
        return await cls.api_call('allmetrics?format=json', version='v1')
    @classmethod
    async def get_charts(cls):
        """
        Get available charts/metrics. Each chart/metric points out information about 1 type of data.
        """
        response = await cls.api_call('charts', version='v1')
        return response['charts']
    @classmethod
    async def get_chart_details(cls, metric):
        """Get details for `chart`/`metric`"""
        try:
            charts = await cls.get_charts()
            return charts[metric]
        except KeyError:
            raise ApiException(f'Metric {metric!r} does not exist', errno=errno.ENOENT)
    @classmethod
    async def get_chart_metrics(cls, chart, query_params=None):
        """Get metrics for `chart`"""
        resource = f'data?chart={chart}&options=null2zero{get_query_parameters(query_params)}'
        return await cls.api_call(resource, version='v1')
    @classmethod
    async def get_charts_metrics(
        cls, charts: dict, parameters: dict
    ) -> typing.List[typing.Tuple[typing.Optional[str], dict]]:
        """Get metrics for multiple charts; `charts` maps identifier -> chart name."""
        suffix = get_query_parameters(parameters)
        requests = [
            (identifier, f'data?chart={chart_name}&options=null2zero{suffix}')
            for identifier, chart_name in charts.items()
        ]
        return await cls.api_calls(requests)
| 1,728 | Python | .py | 43 | 32.744186 | 100 | 0.655728 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,616 | graphs.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/graphs.py | import typing
from middlewared.utils.disk_temperatures import get_disks_for_temperature_reading
from .graph_base import GraphBase
from .utils import get_human_disk_name
class CPUPlugin(GraphBase):
    """Overall CPU usage graph limited to the headline system.cpu dimensions."""
    title = 'CPU Usage'
    uses_identifiers = False
    vertical_label = '%CPU'
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'system.cpu'
    def query_parameters(self) -> dict:
        params = dict(super().query_parameters())
        # Only chart the primary dimensions; netdata exposes several more.
        params['dimensions'] = 'system|user|idle|softirq|nice|iowait'
        return params
class CPUTempPlugin(GraphBase):
    """CPU temperature graph sourced from the cputemp netdata chart."""
    title = 'CPU Temperature'
    uses_identifiers = False
    vertical_label = 'Celsius'
    # A 0 reading means the sensor produced nothing usable; exclude it from min/mean/max.
    skip_zero_values_in_aggregation = True
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return 'cputemp.temperatures'
class DISKPlugin(GraphBase):
    """Disk I/O bandwidth graphs, one identifier per disk netdata is charting."""
    title = 'Disk I/O Bandwidth'
    vertical_label = 'Kibibytes/s'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Human-readable disk name -> identifier used in the chart name.
        self.disk_mapping = {}
    def get_title(self):
        return 'Disk I/O ({identifier})'
    async def build_context(self):
        charts = await self.all_charts()
        mapping = {}
        for disk in await self.middleware.call('disk.query'):
            # Only expose disks that netdata actually has an I/O chart for.
            if f'truenas_disk_stats.io.{disk["identifier"]}' in charts:
                mapping[get_human_disk_name(disk)] = disk['identifier']
        self.disk_mapping = mapping
    async def get_identifiers(self) -> typing.Optional[list]:
        return list(self.disk_mapping)
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return f'truenas_disk_stats.io.{self.disk_mapping[identifier]}'
class InterfacePlugin(GraphBase):
    """Per-interface network traffic graphs (one identifier per NIC)."""
    title = 'Interface Traffic'
    vertical_label = 'Kilobits/s'
    def get_title(self):
        return 'Interface Traffic ({identifier})'
    async def get_identifiers(self) -> typing.Optional[list]:
        # Only expose interfaces netdata is actually charting (net.<name> charts).
        ifaces = {f'net.{i["name"]}' for i in await self.middleware.call('interface.query')}
        return [iface.split('.')[-1] for iface in (ifaces & set(await self.all_charts()))]
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return f'net.{identifier}'
    def normalize_metrics(self, metrics) -> dict:
        metrics = super().normalize_metrics(metrics)
        # Guarantee the time/received/sent columns exist even if netdata returned fewer.
        # NOTE(review): iterating a set difference appends missing labels in arbitrary
        # order while rows below are padded positionally -- confirm columns cannot end
        # up mislabelled when more than one label is missing.
        if len(metrics['legend']) < 3:
            for to_add in {'time', 'received', 'sent'} - set(metrics['legend']):
                metrics['legend'].append(to_add)
        sent_column = metrics['legend'].index('sent')
        for index, data in enumerate(metrics['data']):
            data_length = len(data)
            if data_length < 3:
                for i in range(3 - data_length):
                    data.append(0)
            # Sent traffic is reported negative; present it as a magnitude.
            if data[sent_column] is not None:
                metrics['data'][index][sent_column] = abs(data[sent_column])
        return metrics
class LoadPlugin(GraphBase):
    """System load-average graph with netdata labels renamed to short/mid/longterm."""
    title = 'System Load Average'
    uses_identifiers = False
    vertical_label = 'Load'
    LOAD_MAPPING = {
        'load1': 'shortterm',
        'load5': 'midterm',
        'load15': 'longterm',
    }
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'system.load'
    def normalize_metrics(self, metrics) -> dict:
        metrics = super().normalize_metrics(metrics)
        renamed = []
        for label in metrics['legend']:
            # Unknown labels pass through unchanged.
            renamed.append(self.LOAD_MAPPING.get(label, label))
        metrics['legend'] = renamed
        return metrics
class ProcessesPlugin(GraphBase):
    """Count of active system processes (system.active_processes chart)."""
    title = 'System Active Processes'
    uses_identifiers = False
    vertical_label = 'Processes'
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'system.active_processes'
class MemoryPlugin(GraphBase):
    """Physical memory utilization graph (system.ram chart, values in MiB)."""
    title = 'Physical memory utilization'
    uses_identifiers = False
    vertical_label = 'Mebibytes'
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'system.ram'
class UptimePlugin(GraphBase):
    """System uptime graph (system.uptime chart, seconds)."""
    title = 'System Uptime'
    uses_identifiers = False
    vertical_label = 'Seconds'
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'system.uptime'
# TODO: Revisit these zfs plugins and see the maximum parity we can achieve with old stats
# we were collecting
class ARCActualRatePlugin(GraphBase):
    """ZFS actual cache hit rate graph (zfs.actual_hits_rate chart)."""
    title = 'ZFS Actual Cache Hits Rate'
    uses_identifiers = False
    vertical_label = 'Events/s'
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'zfs.actual_hits_rate'
class ARCRatePlugin(GraphBase):
    """ZFS ARC hit rate graph (zfs.hits_rate chart)."""
    title = 'ZFS ARC Hits Rate'
    uses_identifiers = False
    vertical_label = 'Events/s'
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'zfs.hits_rate'
class ARCSizePlugin(GraphBase):
    """ZFS ARC size graph; netdata's 'arcsz' label is renamed to 'arc_size'."""
    title = 'ZFS ARC Size'
    uses_identifiers = False
    vertical_label = 'Mebibytes'
    LABEL_MAPPING = {
        'arcsz': 'arc_size',
    }
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return 'zfs.arc_size'
    def query_parameters(self) -> dict:
        params = dict(super().query_parameters())
        # Only the ARC size dimension is of interest here.
        params['dimensions'] = 'arcsz'
        return params
    def normalize_metrics(self, metrics) -> dict:
        metrics = super().normalize_metrics(metrics)
        renamed = []
        for label in metrics['legend']:
            renamed.append(self.LABEL_MAPPING.get(label, label))
        metrics['legend'] = renamed
        return metrics
class ARCResultPlugin(GraphBase):
    """ZFS ARC hit percentage graphs, one per request class (demand vs prefetch)."""
    title = 'ZFS ARC Result'
    vertical_label = 'Percentage'
    # identifier exposed through the API -> backing netdata chart
    IDENTIFIER_MAPPING = {
        'demand_data': 'zfs.demand_data_hits',
        'prefetch_data': 'zfs.prefetch_data_hits',
    }
    async def get_identifiers(self) -> typing.Optional[list]:
        return list(self.IDENTIFIER_MAPPING.keys())
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return self.IDENTIFIER_MAPPING[identifier]
class DiskTempPlugin(GraphBase):
    """Per-disk temperature graphs fed by the SMART/NVMe temperature collector."""
    title = 'Disks Temperature'
    vertical_label = 'Celsius'
    # Human-readable disk name -> identifier used in the netdata chart name.
    disk_mapping = {}
    # A 0 reading means no temperature was collected; keep it out of aggregations.
    skip_zero_values_in_aggregation = True
    def get_title(self):
        return 'Disk Temperature {identifier}'
    async def build_context(self):
        self.disk_mapping = {}
        all_charts = await self.all_charts()
        for disk in (await self.middleware.run_in_thread(get_disks_for_temperature_reading)).values():
            # NVMe charts are keyed by device name, others by serial number.
            identifier = disk.id if disk.id.startswith('nvme') else disk.serial
            # Try both the raw identifier and a '-'-to-'_' spelling of it.
            for k in (identifier, identifier.replace('-', '_')):
                if f'smart_log_smart.disktemp.{k}' in all_charts:
                    self.disk_mapping[get_human_disk_name(disk.__dict__)] = k
                    break
    async def get_identifiers(self) -> typing.Optional[list]:
        return list(self.disk_mapping.keys())
    def normalize_metrics(self, metrics) -> dict:
        metrics = super().normalize_metrics(metrics)
        # Present a stable 'temperature_value' column regardless of the sensor label.
        if len(metrics['legend']) < 2:
            for to_add in {'time', 'temperature_value'} - set(metrics['legend']):
                metrics['legend'].append(to_add)
        else:
            metrics['legend'][1] = 'temperature_value'
        return metrics
    def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
        return f'smart_log_smart.disktemp.{self.disk_mapping[identifier]}'
class UPSBase(GraphBase):
    """Shared plumbing for UPS graphs: resolves the NUT identifier before export."""
    UPS_IDENTIFIER = None
    # A 0 reading from NUT carries no information; keep it out of aggregations.
    skip_zero_values_in_aggregation = True
    async def export_multiple_identifiers(
        self, query_params: dict, identifiers: list, aggregate: bool = True
    ) -> typing.List[dict]:
        # Chart names embed the configured UPS identifier, so fetch it first.
        ups_config = await self.middleware.call('ups.config')
        self.UPS_IDENTIFIER = ups_config['identifier']
        return await super().export_multiple_identifiers(query_params, identifiers, aggregate)
    def query_parameters(self) -> dict:
        params = dict(super().query_parameters())
        params['group'] = 'median'
        return params
class UPSChargePlugin(UPSBase):
    """UPS battery charge graph (nut_<id>.charge chart, percent)."""
    title = 'UPS Charging'
    vertical_label = 'Percentage'
    uses_identifiers = False
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.charge'
class UPSRuntimePlugin(UPSBase):
    """UPS remaining runtime graph (nut_<id>.runtime chart, seconds)."""
    title = 'UPS Runtime'
    vertical_label = 'Seconds'
    uses_identifiers = False
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.runtime'
class UPSVoltagePlugin(UPSBase):
    """UPS voltage graphs, one identifier per measured circuit."""
    title = 'UPS Voltage'
    vertical_label = 'Volts'
    # identifier exposed through the API -> chart-name suffix
    IDENTIFIER_MAPPING = {
        'battery': 'battery_voltage',
        'input': 'input_voltage',
        'output': 'output_voltage'
    }
    async def get_identifiers(self) -> typing.Optional[list]:
        return list(self.IDENTIFIER_MAPPING.keys())
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.{self.IDENTIFIER_MAPPING[identifier]}'
class UPSCurrentPlugin(UPSBase):
    """UPS input current graph (nut_<id>.input_current chart, ampere)."""
    title = 'UPS Input Current'
    vertical_label = 'Ampere'
    uses_identifiers = False
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.input_current'
class UPSFrequencyPlugin(UPSBase):
    """UPS input frequency graph (nut_<id>.input_frequency chart, Hz)."""
    title = 'UPS Input Frequency'
    vertical_label = 'Hz'
    uses_identifiers = False
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.input_frequency'
class UPSLoadPlugin(UPSBase):
    """UPS load graph (nut_<id>.load chart, percent)."""
    title = 'UPS Input Load'
    vertical_label = 'Percentage'
    uses_identifiers = False
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.load'
class UPSTemperaturePlugin(UPSBase):
    """UPS temperature graph (nut_<id>.temp chart, Celsius)."""
    title = 'UPS Temperature'
    vertical_label = 'Celsius'
    uses_identifiers = False
    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        return f'nut_{self.UPS_IDENTIFIER}.temp'
| 9,916 | Python | .py | 227 | 36.45815 | 102 | 0.660297 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,617 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/utils.py | from urllib.parse import urlencode
NETDATA_PORT = 6999
NETDATA_REQUEST_TIMEOUT = 30 # seconds
NETDATA_URI = f'http://127.0.0.1:{NETDATA_PORT}/api'
NETDATA_UPDATE_EVERY = 2 # seconds
def get_query_parameters(query_params: dict | None, prefix: str = '&') -> str:
"""
retrieve complete uri by adding query params to the uri by properly normalizing
each query param and their special characters
"""
if query_params is None:
return ''
# Normalize query parameters
normalized_params = {key: str(value) for key, value in query_params.items()}
# Encode and append query parameters to the base URI
encoded_params = urlencode(normalized_params)
return f'{prefix}{encoded_params}'
def get_human_disk_name(disk_details: dict) -> str:
    """
    Build a human-readable, unique label for a disk:
    "<TYPE> | <MODEL> Model | <identifier>". The type and model segments are
    omitted when falsy; the identifier is always present, keeping the result unique.
    """
    disk_type = disk_details['type']
    if disk_type == 'SSD' and disk_details['name'].startswith('nvme'):
        # NVMe devices report as SSD; label them more specifically.
        disk_type = 'NVME'
    parts = []
    if disk_type:
        parts.append(f'{disk_type} | ')
    if disk_details['model']:
        parts.append(f'{disk_details["model"]} Model | ')
    parts.append(f'{disk_details["identifier"]}')
    return ''.join(parts)
| 1,321 | Python | .py | 33 | 35.030303 | 88 | 0.683386 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,618 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/__init__.py | from .connector import Netdata # noqa
from .exceptions import ApiException, ClientConnectError # noqa
from .graph_base import GraphBase, GRAPH_PLUGINS # noqa
from .graphs import * # noqa
| 187 | Python | .py | 4 | 45.75 | 63 | 0.808743 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,619 | exceptions.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/exceptions.py | from middlewared.service import CallError
class ApiException(CallError):
    """Raised when the netdata API responds with an error or a malformed payload."""
    pass
class ClientConnectError(CallError):
    """Raised when a connection to the local netdata instance cannot be established."""
    pass
| 132 | Python | .py | 5 | 23 | 41 | 0.837398 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,620 | graph_base.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/netdata/graph_base.py | import re
import statistics
import typing
from .connector import Netdata
GRAPH_PLUGINS = {}
RE_GRAPH_PLUGIN = re.compile(r'^(?P<name>.+)Plugin$')
class GraphMeta(type):
    """
    Metaclass for graph plugins: derives `plugin`/`name` from the class name
    (FooPlugin -> 'foo') and records every named class in GRAPH_PLUGINS.
    Classes whose names end in 'Base' and define neither attribute are treated
    as abstract and skipped.
    """
    def __new__(cls, name, bases, dct):
        klass = type.__new__(cls, name, bases, dct)
        reg = RE_GRAPH_PLUGIN.search(name)
        # Derive `plugin` from the XxxPlugin naming convention unless already set
        # (hasattr also sees attributes inherited from a base class).
        if reg and not hasattr(klass, 'plugin'):
            klass.plugin = reg.group('name').lower()
        elif not name.endswith('Base') and not hasattr(klass, 'plugin'):
            raise ValueError(f'Could not determine plugin name for {name!r}')
        # Same for `name`; any class carrying a `name` lands in the registry.
        if reg and not hasattr(klass, 'name'):
            klass.name = reg.group('name').lower()
            GRAPH_PLUGINS[klass.name] = klass
        elif hasattr(klass, 'name'):
            GRAPH_PLUGINS[klass.name] = klass
        elif not name.endswith('Base'):
            raise ValueError(f'Could not determine class name for {name!r}')
        return klass
class GraphBase(metaclass=GraphMeta):
    """
    Base class for netdata-backed reporting graphs.

    Subclasses point at one or more netdata charts (via `get_chart_name`) and may
    customize titles, labels and query parameters. `export_multiple_identifiers`
    is the entry point that pulls, normalizes and optionally aggregates the data.
    """
    # Aggregations computed per data column when the caller requests them.
    aggregations = ('min', 'mean', 'max')
    title = None
    # Whether this graph exists per-identifier (e.g. per disk / per interface).
    uses_identifiers = True
    vertical_label = None
    # When True, zero samples are treated as "no reading" and excluded from aggregation.
    skip_zero_values_in_aggregation = False
    AGG_MAP = {
        'min': min,
        'mean': statistics.mean,
        'max': max,
    }

    def __init__(self, middleware):
        self.middleware = middleware

    def __repr__(self) -> str:
        return f"<Graph: {self.plugin}>"

    async def all_charts(self) -> typing.Dict[str, dict]:
        """Return every chart netdata knows about, or {} if netdata is unreachable."""
        try:
            return await Netdata.get_charts()
        except Exception as e:
            self.middleware.logger.warning('Failed to connect to netdata: %s', e)
            return {}

    def get_title(self) -> str:
        return self.title

    def get_vertical_label(self) -> str:
        return self.vertical_label

    async def build_context(self):
        """Hook for subclasses to gather state (e.g. disk mappings) before use."""
        pass

    async def as_dict(self) -> dict:
        await self.build_context()
        return {
            'name': self.name,
            'title': self.get_title(),
            'vertical_label': self.vertical_label,
            'identifiers': await self.get_identifiers() if self.uses_identifiers else None,
        }

    async def get_identifiers(self) -> list:
        return []

    def normalize_metrics(self, metrics) -> dict:
        metrics['legend'] = metrics.pop('labels')
        if metrics['data'] and metrics['data'][-1] and all(m == 0 for m in metrics['data'][-1][1:]):
            # When `end` refers to a timestamp netdata does not actually have, netdata
            # appends a final all-zero row; drop it so it is not charted.
            metrics['data'].pop()
        return metrics

    def get_chart_name(self, identifier: typing.Optional[str]) -> str:
        raise NotImplementedError()

    def aggregate_metrics(self, data):
        """
        Compute the aggregations in `self.aggregations` for every column of
        `data['data']` and store them in `data['aggregations']` keyed as
        {aggregation: {legend: value}}. None samples -- and zero samples when
        `skip_zero_values_in_aggregation` is set -- are ignored; a column with no
        usable samples at all is omitted from the result.
        """
        wanted = set(self.aggregations)
        template = {
            'min': float('inf'),
            'max': float('-inf'),
            'mean': 0.0,
            'total_points': 0
        }
        running = {
            legend: {key: template[key] for key in wanted | {'total_points'}}
            for legend in data['legend'][1:]
        }
        final_aggregated_values = {k: {} for k in self.aggregations}
        # Traverse the data matrix and accumulate per-column statistics.
        for row in data['data']:
            for idx, legend in enumerate(data['legend'][1:], start=1):
                value = row[idx]
                # Skip None (and, where configured, zero) samples.
                if value is None or (self.skip_zero_values_in_aggregation and value == 0):
                    continue
                entry = running[legend]
                # Manual min/max comparisons: the builtin calls were measured to be
                # noticeably slower when charting very wide datasets (e.g. 1200 disks).
                if 'min' in final_aggregated_values and entry['min'] > value:
                    entry['min'] = value
                if 'max' in final_aggregated_values and entry['max'] < value:
                    entry['max'] = value
                if 'mean' in final_aggregated_values:
                    entry['mean'] += value
                entry['total_points'] += 1
        # Finalize in one cheap pass over the legends. (The previous in-loop
        # finalization only ran while processing the LAST row and was skipped by the
        # `continue` above whenever that row's sample was None/zero, silently
        # dropping the column from the aggregations.)
        for legend, entry in running.items():
            if not entry['total_points']:
                continue  # no usable samples at all for this column
            if 'min' in final_aggregated_values:
                final_aggregated_values['min'][legend] = entry['min']
            if 'max' in final_aggregated_values:
                final_aggregated_values['max'][legend] = entry['max']
            if 'mean' in final_aggregated_values:
                final_aggregated_values['mean'][legend] = entry['mean'] / entry['total_points']
        data['aggregations'] = final_aggregated_values
        return data

    def query_parameters(self) -> dict:
        return {
            'format': 'json',
            'options': 'flip|null2zero|natural-points',
            'points': 2999,  # max supported points are 3000 in UI; netdata accounts for index 0
            'group': 'average',
            'gtime': 0,
        }

    def process_chart_metrics(self, responses: list, query_params: dict, aggregate: bool) -> list:
        """Normalize each (identifier, metrics) response and optionally aggregate it."""
        results = []
        for identifier, chart_metrics in responses:
            data = {
                'name': self.name,
                'identifier': identifier or self.name,
                **self.normalize_metrics(chart_metrics),
                'start': query_params['after'],
                'end': query_params['before'],
                'aggregations': dict(),
            }
            if self.aggregations and aggregate:
                data = self.aggregate_metrics(data)
            results.append(data)
        return results

    async def export_multiple_identifiers(
        self, query_params: dict, identifiers: list, aggregate: bool = True
    ) -> typing.List[dict]:
        """Fetch chart data for every identifier concurrently and normalize the results."""
        responses = await Netdata.get_charts_metrics({
            identifier: self.get_chart_name(identifier) for identifier in identifiers
        }, self.query_parameters() | query_params)
        # Normalization/aggregation is CPU-bound; keep it off the event loop.
        return await self.middleware.run_in_thread(self.process_chart_metrics, responses, query_params, aggregate)
| 7,037 | Python | .py | 148 | 35.939189 | 117 | 0.584537 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,621 | factory.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/exporters/factory.py | import errno
from middlewared.service_exception import CallError
from .graphite import GraphiteExporter
class ExportFactory:
    """Registry mapping exporter names (always upper-cased) to exporter classes."""
    def __init__(self):
        # name -> exporter class; keys are normalized to upper case on insert.
        self._creators = {}
    def register(self, exporter):
        """Record `exporter` under its upper-cased `NAME` attribute."""
        key = exporter.NAME.upper()
        self._creators[key] = exporter
    def exporter(self, name):
        """Return the exporter class registered for `name` (case-insensitive)."""
        key = name.upper()
        if key in self._creators:
            return self._creators[key]
        raise CallError(f'Unable to locate {key!r} exporter', errno=errno.ENOENT)
    def get_exporters(self):
        """Return the complete name -> exporter-class mapping."""
        return self._creators
# Module-level singleton registry, populated with every known exporter class.
export_factory = ExportFactory()
for _exporter_cls in (GraphiteExporter,):
    export_factory.register(_exporter_cls)
| 681 | Python | .py | 20 | 28.35 | 86 | 0.700461 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,622 | graphite.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/exporters/graphite.py | from middlewared.schema import accepts, Bool, Dict, Int, Str
from middlewared.validators import Port, Range
from .base import Export
class GraphiteExporter(Export):
    """Graphite exporter definition: accepted configuration schema plus validation."""
    NAME = 'graphite'
    SCHEMA = Dict(
        'graphite',
        Str('destination_ip', required=True),
        Int('destination_port', required=True, validators=[Port()]),
        Str('prefix', default='scale'),
        Str('namespace', required=True),
        # presumably the push interval in seconds -- TODO confirm against the consumer
        Int('update_every', validators=[Range(min_=1)], default=1),
        # presumably how many failed pushes may be buffered before dropping data -- TODO confirm
        Int('buffer_on_failures', validators=[Range(min_=1)], default=10),
        Bool('send_names_instead_of_ids', default=True),
        Str('matching_charts', default='*'),
    )
    @staticmethod
    @accepts(SCHEMA)
    async def validate_config(data):
        # Schema validation via @accepts suffices; no cross-field checks are performed.
        return data
| 774 | Python | .py | 20 | 32.25 | 74 | 0.655541 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,623 | base.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/exporters/base.py | class Export:
NAME = NotImplementedError()
SCHEMA = NotImplementedError()
    @staticmethod
    async def validate_config(data):
        """Validate exporter-specific configuration; concrete exporters must override this."""
        raise NotImplementedError()
| 175 | Python | .py | 6 | 23.833333 | 36 | 0.724551 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,624 | cgroup.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/cgroup.py | import collections
import typing
def get_cgroup_stats(netdata_metrics: dict, cgroups: typing.List[str]) -> dict[str, dict]:
    """
    Collect netdata cgroup charts for the requested cgroup names.

    Returns cgroup name -> chart suffix -> flattened metrics, where each metric
    key is '<suffix>_<dimension>_<unit>' with spaces and '/' normalized to '_'.
    """
    results = collections.defaultdict(dict)
    # cgroup charts are exposed as 'cgroup_<name>.<suffix>'.
    cgroup_charts = [key for key in netdata_metrics if key.startswith('cgroup_')]
    for cgroup in cgroups:
        prefix = f'cgroup_{cgroup}.'
        for chart in cgroup_charts:
            if not chart.startswith(prefix):
                continue
            metric = netdata_metrics[chart]
            suffix = chart.split('.', 1)[-1]
            unit = metric['units'].lower().replace('/', '_')
            results[cgroup][suffix] = {
                f'{suffix}_{dimension.replace(" ", "_")}_{unit}': info['value']
                for dimension, info in metric['dimensions'].items()
            }
    return results
| 788 | Python | .py | 16 | 40 | 90 | 0.595052 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,625 | memory.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/memory.py | import humanfriendly
from .utils import normalize_value, safely_retrieve_dimension
def get_memory_info(netdata_metrics: dict) -> dict:
    """
    Build the realtime memory payload from /proc/meminfo plus netdata charts.

    Returns {'classes': ..., 'extra': ...}. The netdata 'mem.*'/'system.ram'/
    'zfs.arc_size' charts are scaled with multiplier=1024 * 1024 here, i.e. they
    are assumed to be reported in MiB -- TODO confirm against the collector.
    """
    # Parse /proc/meminfo ('MemTotal:  16384 kB') into {field: parsed size}.
    # NOTE(review): parse_size already converts the 'kB' suffix into bytes, yet the
    # meminfo values below are scaled again with multiplier=1024 -- this looks like
    # double scaling; confirm the intended units.
    with open('/proc/meminfo') as f:
        meminfo = {
            s[0]: humanfriendly.parse_size(s[1], binary=True)
            for s in [
                line.split(':', 1)
                for line in f.readlines()
            ]
        }
    classes = {
        'page_tables': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'mem.kernel', 'PageTables', 0), multiplier=1024 * 1024,
        ),
        'slab_cache': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'mem.kernel', 'Slab', 0), multiplier=1024 * 1024,
        ),
        'cache': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'system.ram', 'cached', 0), multiplier=1024 * 1024,
        ),
        'buffers': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'system.ram', 'buffers', 0), multiplier=1024 * 1024,
        ),
        'unused': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'system.ram', 'free', 0), multiplier=1024 * 1024,
        ),
        'arc': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'zfs.arc_size', 'size', 0), multiplier=1024 * 1024,
        ),
        'apps': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'system.ram', 'used', 0), multiplier=1024 * 1024,
        ),
    }
    extra = {
        'inactive': normalize_value(meminfo['Inactive'], multiplier=1024),
        'committed': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'mem.committed', 'Committed_AS', 0), multiplier=1024 * 1024,
        ),
        'active': normalize_value(meminfo['Active'], multiplier=1024),
        'vmalloc_used': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'mem.kernel', 'VmallocUsed', 0), multiplier=1024 * 1024,
        ),
        'mapped': normalize_value(meminfo['Mapped'], multiplier=1024),
    }
    return {
        'classes': classes,
        'extra': extra,
    }
| 2,117 | Python | .py | 49 | 33.836735 | 115 | 0.599418 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,626 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/utils.py | import contextlib
import typing
def safely_retrieve_dimension(
    all_metrics: dict, chart: str, dimension: typing.Optional[str] = None, default: typing.Optional[typing.Any] = None
) -> typing.Any:
    """
    Safely retrieve a dimension from a chart. If the dimension is not found, return the default value
    and if no dimension is explicitly provided, return all the dimensions found for the chart.
    """
    try:
        dimensions = all_metrics[chart]['dimensions']
        if not dimension:
            # No specific dimension requested: expose every dimension's value.
            return {dimension_name: entry['value'] for dimension_name, entry in dimensions.items()}
        value = dimensions[dimension]['value']
    except KeyError:
        return default
    # A stored None also falls back to the default.
    return value if value is not None else default
def normalize_value(
    value: int, multiplier: int = 1, divisor: int = 1, absolute: bool = True, round_value: bool = True,
) -> typing.Union[int, float]:
    """
    Scale `value` by multiplier/divisor, optionally take the absolute value, and
    optionally round to the nearest integer (round() semantics, i.e. banker's rounding).
    """
    scaled = (value / divisor) * multiplier
    if absolute:
        scaled = abs(scaled)
    return round(scaled) if round_value else scaled
| 1,151 | Python | .py | 28 | 34.214286 | 118 | 0.666369 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,627 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/__init__.py | from .arcstat import get_arc_stats # noqa
from .cpu import get_cpu_stats # noqa
from .ifstat import get_interface_stats # noqa
from .iostat import get_disk_stats # noqa
from .memory import get_memory_info # noqa
| 217 | Python | .py | 5 | 42.4 | 47 | 0.764151 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,628 | iostat.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/iostat.py | from .utils import normalize_value, safely_retrieve_dimension
def get_disk_stats(netdata_metrics: dict, disks: list[str], disk_mapping: dict[str, str]) -> dict:
    """
    Aggregate read/write ops, read/write bytes and busy time across `disks`.

    `disk_mapping` maps a disk name to the identifier used in the netdata
    chart names. IO byte counters are scaled by 1024 (netdata presumably
    reports KiB — the result is bytes). `busy` is averaged over the number
    of disks (0 when `disks` is empty).
    """
    total_disks = len(disks)
    read_ops = read_bytes = write_ops = write_bytes = busy = 0
    for disk in disks:
        mapped_key = disk_mapping.get(disk)
        # Normalize read ops exactly like write ops below (absolute, rounded)
        # so both aggregates are consistent integers.
        read_ops += normalize_value(safely_retrieve_dimension(
            netdata_metrics, f'truenas_disk_stats.ops.{mapped_key}', f'{mapped_key}.read_ops', 0
        ))
        read_bytes += normalize_value(
            safely_retrieve_dimension(
                netdata_metrics, f'truenas_disk_stats.io.{mapped_key}', f'{mapped_key}.reads', 0
            ), multiplier=1024,
        )
        write_ops += normalize_value(safely_retrieve_dimension(
            netdata_metrics, f'truenas_disk_stats.ops.{mapped_key}', f'{mapped_key}.write_ops', 0
        ))
        write_bytes += normalize_value(
            safely_retrieve_dimension(
                netdata_metrics, f'truenas_disk_stats.io.{mapped_key}', f'{mapped_key}.writes', 0
            ), multiplier=1024,
        )
        busy += safely_retrieve_dimension(
            netdata_metrics, f'truenas_disk_stats.busy.{mapped_key}', f'{mapped_key}.busy', 0
        )
    return {
        'read_ops': read_ops,
        'read_bytes': read_bytes,
        'write_ops': write_ops,
        'write_bytes': write_bytes,
        # guard against ZeroDivisionError when no disks were supplied
        'busy': busy / total_disks if total_disks else 0,
    }
| 1,442 | Python | .py | 32 | 35.71875 | 98 | 0.604833 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,629 | ifstat.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/ifstat.py | import collections
import typing
from .utils import normalize_value, safely_retrieve_dimension
def get_interface_stats(netdata_metrics: dict, interfaces: typing.List[str]) -> dict:
    """
    Build per-interface link state, link speed and traffic-rate statistics
    from netdata metrics. Returns a mapping of interface name -> stats dict.
    """
    stats = collections.defaultdict(dict)
    for nic in interfaces:
        up = bool(safely_retrieve_dimension(netdata_metrics, f'net_operstate.{nic}', 'up', 0))
        entry = stats[nic]
        entry['link_state'] = 'LINK_STATE_UP' if up else 'LINK_STATE_DOWN'
        entry['speed'] = normalize_value(
            safely_retrieve_dimension(netdata_metrics, f'net_speed.{nic}', 'speed', 0), divisor=1000
        )
        if up:
            # Netdata reports traffic in kilobits per second; multiply by 1000
            # (bits/s) and divide by 8 to get bytes per second.
            # In Bluefin, `received_bytes` and `sent_bytes` represented bytes per
            # interval while the *_rate keys were bytes per second. We have
            # removed received_bytes/sent_bytes here to avoid the extra
            # computation; only the per-second rates are reported now. See
            # https://github.com/truenas/middleware/blob/30dbedbe170b750775e58e7d9c86cfcd00f52730/src/middlewared/
            # middlewared/plugins/reporting/ifstat.py#L73C17-L73C25
            entry['received_bytes_rate'] = normalize_value(
                safely_retrieve_dimension(netdata_metrics, f'net.{nic}', 'received', 0),
                multiplier=1000, divisor=8
            )
            entry['sent_bytes_rate'] = normalize_value(
                safely_retrieve_dimension(netdata_metrics, f'net.{nic}', 'sent', 0),
                multiplier=1000, divisor=8
            )
        else:
            entry.update(
                received_bytes=0,
                sent_bytes=0,
                received_bytes_rate=0,
                sent_bytes_rate=0,
            )
    return stats
| 2,331 | Python | .py | 41 | 44.512195 | 115 | 0.627133 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,630 | cpu.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/cpu.py | from .utils import safely_retrieve_dimension
def get_cpu_core_stats(netdata_metrics: dict, core_number: int = None, chart_name: str = None) -> dict:
    """
    Return the time breakdown for one CPU chart plus a computed `usage`
    percentage. Either pass `core_number` (chart `cpu.cpuN`) or an explicit
    `chart_name` (e.g. 'system.cpu' for the aggregate).
    """
    chart = chart_name if core_number is None else f'cpu.cpu{core_number}'
    fields = (
        'user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq',
        'steal', 'guest', 'guest_nice',
    )
    stats = {field: safely_retrieve_dimension(netdata_metrics, chart, field, 0) for field in fields}
    stats['usage'] = 0
    total = sum(stats.values())
    if total:
        # usage is the sum of all but idle and iowait, as a percentage of total
        stats['usage'] = ((total - stats['idle'] - stats['iowait']) / total) * 100
    return stats
def get_all_cores_stats(netdata_metrics: dict, cores: int) -> dict:
    """Return per-core cpu stats keyed by the core number as a string ('0'..)."""
    return {str(core): get_cpu_core_stats(netdata_metrics, core) for core in range(cores)}
def get_cpu_stats(netdata_metrics: dict, cores: int) -> dict:
    """
    Return per-core cpu stats (keys '0'..str(cores - 1)) plus the system-wide
    breakdown under the 'average' key.
    """
    # Delegate to get_all_cores_stats instead of duplicating its comprehension here
    return {
        **get_all_cores_stats(netdata_metrics, cores),
        'average': get_cpu_core_stats(netdata_metrics, chart_name='system.cpu'),
    }
| 1,752 | Python | .py | 30 | 51.566667 | 104 | 0.658892 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,631 | arcstat.py | truenas_middleware/src/middlewared/middlewared/plugins/reporting/realtime_reporting/arcstat.py | from .utils import normalize_value, safely_retrieve_dimension
def get_arc_stats(netdata_metrics: dict) -> dict:
    """
    Return ZFS ARC current/max size and the overall cache hit ratio.

    Sizes are scaled by 1024 * 1024 (netdata presumably reports MiB — the
    result is bytes). The hit ratio is 0.0 when no accesses were recorded.
    """
    mib = 1024 * 1024
    hits = safely_retrieve_dimension(netdata_metrics, 'zfs.hits', 'hits', 0)
    misses = safely_retrieve_dimension(netdata_metrics, 'zfs.hits', 'misses', 0)
    total_accesses = hits + misses
    return {
        'arc_max_size': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'zfs.arc_size', 'max', 0), multiplier=mib,
        ),
        'arc_size': normalize_value(
            safely_retrieve_dimension(netdata_metrics, 'zfs.arc_size', 'size', 0), multiplier=mib,
        ),
        'cache_hit_ratio': hits / total_accesses if total_accesses else 0.0,
    }
| 734 | Python | .py | 16 | 38.625 | 106 | 0.635854 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,632 | bridge.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/bridge.py | from middlewared.service import private, Service
from .netif import netif
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    def bridge_setup(self, bridge, parent_interfaces):
        """
        Create/sync the OS bridge described by `bridge` (a dict carrying the
        'interface' datastore row and a 'members' list).

        Side effects: creates the bridge if missing, reconciles MTU, member
        set, per-member learning flag and STP, brings members and the bridge
        up. `parent_interfaces` is mutated in place: every database member is
        appended so callers know which interfaces the bridge consumes.
        """
        name = bridge['interface']['int_interface']
        # fall back to the conventional 1500 default when no MTU is configured
        bridge_mtu = bridge['interface']['int_mtu'] or 1500
        try:
            iface = netif.get_interface(name)
        except KeyError:
            # bridge does not exist on the OS side yet; create it first
            netif.create_interface(name)
            iface = netif.get_interface(name)
        self.logger.info('Setting up %r', name)
        if iface.mtu != bridge_mtu:
            self.logger.info('Setting %r MTU to %d', name, bridge_mtu)
            iface.mtu = bridge_mtu
        db_members = set(bridge['members'])
        os_members = set(iface.members)
        for member in os_members - db_members:
            # We do not remove vnetX interfaces from bridge as they would be consumed by libvirt
            if member.startswith('vnet'):
                continue
            # remove members from the bridge that aren't in the db
            self.logger.info('Removing member interface %r from %r', member, name)
            iface.delete_member(member)
        for member in db_members - os_members:
            # now add members that are written in db but do not exist in
            # the bridge on OS side
            try:
                self.logger.info('Adding member interface %r to %r', member, name)
                iface.add_member(member)
            except FileNotFoundError:
                # the member interface disappeared; log and skip rather than fail
                self.logger.error('Bridge member %r not found', member)
                continue
            # now make sure the bridge member is up
            member_iface = netif.get_interface(member)
            if netif.InterfaceFlags.UP not in member_iface.flags:
                self.logger.info('Bringing up member interface %r in %r', member_iface.name, name)
                member_iface.up()
        for member in db_members:
            parent_interfaces.append(member)
            # learning defaults to on unless the db row explicitly disables it
            iface.set_learning(member, bridge.get('enable_learning', True))
        if iface.stp != bridge['stp']:
            verb = 'off' if not bridge['stp'] else 'on'
            self.logger.info(f'Turning STP {verb} for {name!r}')
            iface.toggle_stp(name, int(bridge['stp']))
        # finally we need to up the main bridge if it's not already up
        if netif.InterfaceFlags.UP not in iface.flags:
            self.logger.info('Bringing up %r', name)
            iface.up()
| 2,513 | Python | .py | 52 | 36.846154 | 98 | 0.605392 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,633 | interface_types.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/interface_types.py | from enum import Enum
from middlewared.service import private, Service
class InterfaceType(Enum):
    """Broad classification of a network interface, derived from its name."""
    BRIDGE = 'BRIDGE'
    LINK_AGGREGATION = 'LINK_AGGREGATION'
    PHYSICAL = 'PHYSICAL'
    UNKNOWN = 'UNKNOWN'
    VLAN = 'VLAN'
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    async def type(self, iface_state):
        """Classify an interface state dict into an `InterfaceType` by its name."""
        name = iface_state['name']
        if name.startswith('br'):
            return InterfaceType.BRIDGE
        if name.startswith('bond'):
            return InterfaceType.LINK_AGGREGATION
        if name.startswith('vlan'):
            return InterfaceType.VLAN
        if not iface_state['cloned']:
            return InterfaceType.PHYSICAL
        return InterfaceType.UNKNOWN

    @private
    async def validate_name(self, type_, name):
        """Raise `ValueError` when `name` does not match the naming convention for `type_`."""
        # type -> (required prefix, error message); the prefix must be
        # followed by digits only
        conventions = {
            InterfaceType.BRIDGE: (
                'br', 'Bridge interface must start with "br" followed by an unique number.'
            ),
            InterfaceType.LINK_AGGREGATION: (
                'bond', 'Link aggregation interface must start with "bond" followed by an unique number.'
            ),
            InterfaceType.VLAN: (
                'vlan', 'VLAN interface must start with "vlan" followed by an unique number.'
            ),
        }
        if type_ in conventions:
            prefix, error = conventions[type_]
            if not (name.startswith(prefix) and name[len(prefix):].isdigit()):
                raise ValueError(error)
| 1,529 | Python | .py | 34 | 36.147059 | 115 | 0.648485 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,634 | vlan.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/vlan.py | from middlewared.service import private, Service
from .netif import netif
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    def vlan_setup(self, vlan, parent_interfaces):
        """
        Create/sync the OS vlan interface described by `vlan` (a datastore row
        with 'vlan_vint', 'vlan_pint', 'vlan_tag' and 'vlan_pcp' keys).

        Side effects: creates the vlan if missing, re-configures it when
        parent/tag/pcp drifted from the database, brings the parent and the
        vlan up, and appends the parent name to `parent_interfaces` (mutated
        in place). A missing parent interface is logged and skipped.
        """
        self.logger.info('Setting up %r', vlan['vlan_vint'])
        try:
            iface = netif.get_interface(vlan['vlan_vint'])
        except KeyError:
            try:
                netif.create_vlan(vlan['vlan_vint'], vlan['vlan_pint'], vlan['vlan_tag'])
            except FileNotFoundError:
                self.logger.warning(
                    'VLAN %r parent interface %r not found, skipping.', vlan['vlan_vint'], vlan['vlan_pint']
                )
                return
            iface = netif.get_interface(vlan['vlan_vint'])
        if iface.parent != vlan['vlan_pint'] or iface.tag != vlan['vlan_tag'] or iface.pcp != vlan['vlan_pcp']:
            # OS-side configuration drifted from the database; rebuild it
            iface.unconfigure()
            try:
                iface.configure(vlan['vlan_pint'], vlan['vlan_tag'], vlan['vlan_pcp'])
            except FileNotFoundError:
                self.logger.warning(
                    'VLAN %r parent interface %r not found, skipping.', vlan['vlan_vint'], vlan['vlan_pint']
                )
                return
        try:
            parent_iface = netif.get_interface(iface.parent)
        except KeyError:
            self.logger.warning('Could not find %r from %r', iface.parent, vlan['vlan_vint'])
            return
        parent_interfaces.append(iface.parent)
        parent_iface.up()
        iface.up()
| 1,562 | Python | .py | 36 | 31.611111 | 111 | 0.568511 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,635 | configure.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/configure.py | import ipaddress
import os
import signal
import re
from .netif import netif
from .interface_types import InterfaceType
from middlewared.service import private, Service
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    def configure(self, data, aliases, options):
        """
        Apply the database interface configuration `data` and its `aliases`
        to the matching OS interface: reconcile addresses, dhclient state,
        IPv6 autoconf, keepalived VIP handling, MTU, description, and bring
        the interface up.

        Returns True when the caller should start dhclient for this interface
        (dhcp enabled in the db but dhclient not currently running).
        """
        options = options or {}
        name = data['int_interface']
        self.logger.info('Configuring interface %r', name)
        iface = netif.get_interface(name)
        # everything except the LINK (mac) address family
        addrs_configured = set([a for a in iface.addresses if a.af != netif.AddressFamily.LINK])
        has_ipv6 = (
            data['int_version'] == 6 or
            data['int_ipv6auto'] or
            any(alias['alias_version'] == 6 for alias in aliases)
        )
        # On an HA pair each controller stores its addresses in its own column
        if self.middleware.call_sync('failover.node') == 'B':
            addr_key = 'int_address_b'
            alias_key = 'alias_address_b'
        else:
            addr_key = 'int_address'
            alias_key = 'alias_address'
        addrs_database = set()
        dhclient_run, dhclient_pid = self.middleware.call_sync('interface.dhclient_status', name)
        if dhclient_run and not data['int_dhcp']:
            # dhclient is running on the interface but is marked to not have dhcp configure the interface
            self.logger.debug('Killing dhclient for %r', name)
            os.kill(dhclient_pid, signal.SIGTERM)
        elif dhclient_run and data['int_dhcp'] and (i := self.middleware.call_sync('interface.dhclient_leases', name)):
            # dhclient is running on the interface and is marked for dhcp AND we have a lease file for it
            _addr = re.search(r'fixed-address\s+(.+);', i)
            _net = re.search(r'option subnet-mask\s+(.+);', i)
            if (_addr and (_addr := _addr.group(1))) and (_net and (_net := _net.group(1))):
                # treat the leased address as part of the desired configuration
                addrs_database.add(self.alias_to_addr({'address': _addr, 'netmask': _net}))
            else:
                self.logger.info('Unable to get address from dhclient lease file for %r', name)
        if data[addr_key] and not data['int_dhcp']:
            addrs_database.add(self.alias_to_addr({'address': data[addr_key], 'netmask': data['int_netmask']}))
        if vip := data.get('int_vip', ''):
            # VIPs are always host addresses (/32 or /128)
            netmask = '32' if data['int_version'] == 4 else '128'
            addrs_database.add(self.alias_to_addr({'address': vip, 'netmask': netmask}))
        alias_vips = []
        for alias in aliases:
            addrs_database.add(self.alias_to_addr({'address': alias[alias_key], 'netmask': alias['alias_netmask']}))
            if alias['alias_vip']:
                alias_vip = alias['alias_vip']
                alias_vips.append(alias_vip)
                addrs_database.add(self.alias_to_addr(
                    {'address': alias_vip, 'netmask': '32' if alias['alias_version'] == 4 else '128'}
                ))
        for addr in addrs_configured:
            address = str(addr.address)
            if address.startswith('fe80::'):
                # having a link-local address causes no harm and is a
                # pre-requisite for IPv6 working in general. Just ignore it.
                continue
            elif address == vip or address in alias_vips:
                # keepalived service is responsible for deleting the VIP(s)
                continue
            elif addr not in addrs_database:
                # Remove addresses configured and not in database
                self.logger.debug('%s: removing %s', name, addr)
                iface.remove_address(addr)
            elif not data['int_dhcp']:
                self.logger.debug('%s: removing possible valid_lft and preferred_lft on %s', name, addr)
                iface.replace_address(addr)
        autoconf = '1' if has_ipv6 else '0'
        self.middleware.call_sync('tunable.set_sysctl', f'net.ipv6.conf.{name}.autoconf', autoconf)
        if vip or alias_vips:
            # keepalived owns the VIP addresses; (re)start it so they get applied
            if not self.middleware.call_sync('service.started', 'keepalived'):
                self.middleware.call_sync('service.start', 'keepalived')
            else:
                self.middleware.call_sync('service.reload', 'keepalived')
        # Add addresses in database and not configured
        for addr in (addrs_database - addrs_configured):
            address = str(addr.address)
            if address == vip or address in alias_vips:
                # keepalived service is responsible for adding the VIP(s)
                continue
            self.logger.debug('%s: adding %s', name, addr)
            iface.add_address(addr)
        # In case there is no MTU in interface and it is currently
        # different than the default of 1500, revert it
        if not options.get('skip_mtu'):
            if data['int_mtu']:
                if iface.mtu != data['int_mtu']:
                    iface.mtu = data['int_mtu']
            elif iface.mtu != 1500:
                iface.mtu = 1500
        if data['int_name'] and iface.description != data['int_name']:
            try:
                iface.description = data['int_name']
            except Exception:
                # description is cosmetic; do not fail configuration over it
                self.logger.warning('Failed to set interface description on %s', name, exc_info=True)
        if netif.InterfaceFlags.UP not in iface.flags:
            iface.up()
        # If dhclient is not running and dhcp is configured, the caller should
        # start it based on what we return here
        # TODO: what are we doing with ipv6auto??
        return not dhclient_run and data['int_dhcp']

    @private
    def autoconfigure(self, iface, wait_dhcp):
        """Start dhclient for `iface` if it is not already running and return its result."""
        dhclient_running = self.middleware.call_sync('interface.dhclient_status', iface.name)[0]
        if not dhclient_running:
            # Make sure interface is UP before starting dhclient
            # NAS-103577
            if netif.InterfaceFlags.UP not in iface.flags:
                iface.up()
            return self.middleware.call_sync('interface.dhclient_start', iface.name, wait_dhcp)

    @private
    def unconfigure(self, iface, cloned_interfaces, parent_interfaces):
        """
        Tear down an interface that is no longer in the database: flush its
        addresses, stop dhclient, then destroy it (cloned bridge/lagg/vlan
        types not listed in `cloned_interfaces`) or just bring it down
        (unless it is still a parent of another interface).
        """
        name = iface.name
        self.logger.info('Unconfiguring interface %r', name)
        # Interface not in database lose addresses
        iface.flush()
        dhclient_running, dhclient_pid = self.middleware.call_sync('interface.dhclient_status', name)
        # Kill dhclient if its running for this interface
        if dhclient_running:
            os.kill(dhclient_pid, signal.SIGTERM)
        # If we have bridge/vlan/lagg not in the database at all
        # it gets destroy, otherwise just bring it down.
        if (name not in cloned_interfaces and
            self.middleware.call_sync('interface.type', iface.asdict()) in [
                InterfaceType.BRIDGE, InterfaceType.LINK_AGGREGATION, InterfaceType.VLAN,
            ]):
            netif.destroy_interface(name)
        elif name not in parent_interfaces:
            iface.down()

    @private
    def alias_to_addr(self, alias):
        """Convert an {'address', 'netmask'[, 'vhid']} dict into a netif.InterfaceAddress."""
        addr = netif.InterfaceAddress()
        ip = ipaddress.ip_interface(f'{alias["address"]}/{alias["netmask"]}')
        addr.af = getattr(netif.AddressFamily, 'INET6' if ip.version == 6 else 'INET')
        addr.address = ip.ip
        addr.netmask = ip.netmask
        addr.broadcast = ip.network.broadcast_address
        if 'vhid' in alias:
            addr.vhid = alias['vhid']
        return addr

    @private
    async def get_configured_interfaces(self):
        """
        Return a list of configured interfaces.

        This will include names of regular interfaces that have been configured,
        plus any higher-order interfaces and their constituents."""
        ds = await self.middleware.call('interface.get_datastores')
        # Interfaces
        result = set([i['int_interface'] for i in ds['interfaces']])
        # Bridges
        for bridge in ds['bridge']:
            result.update(bridge['members'])
        # VLAN
        for vlan in ds['vlan']:
            result.add(vlan['vlan_pint'])
        # Link Aggregation
        for lag in ds['laggmembers']:
            result.add(lag['lagg_physnic'])
        return list(result)
| 8,152 | Python | .py | 164 | 38.530488 | 119 | 0.604044 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,636 | dhclient.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/dhclient.py | import os
from contextlib import suppress
from subprocess import run, PIPE, STDOUT
from middlewared.service import private, Service
from middlewared.utils.cgroups import move_to_root_cgroups
# Per-interface dhclient lease and pid file paths ({} -> interface name)
LEASEFILE_TEMPLATE = '/var/lib/dhcp/dhclient.leases.{}'
PIDFILE_TEMPLATE = '/var/run/dhclient.{}.pid'
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    def dhclient_start(self, interface, wait=False):
        """
        Launch dhclient for `interface` (in the background unless `wait` is
        True) and move the daemon into the root cgroup.
        """
        pidfile = PIDFILE_TEMPLATE.format(interface)
        cmd = ['dhclient']
        if not wait:
            # -nw: daemonize immediately instead of waiting for a lease
            cmd.append('-nw')
        cmd += ['-lf', LEASEFILE_TEMPLATE.format(interface), '-pf', pidfile, interface]
        proc = run(cmd, stdout=PIPE, stderr=STDOUT)
        if proc.returncode != 0:
            self.logger.error('Failed to run dhclient on %r: %r', interface, proc.stdout.decode())
            return
        try:
            with open(pidfile) as f:
                pid = int(f.read().strip())
            move_to_root_cgroups(pid)
        except Exception:
            # best-effort only; dhclient itself started fine
            self.logger.warning('Failed to move dhclient to root cgroups', exc_info=True)

    @private
    def dhclient_status(self, interface):
        """
        Get the current status of dhclient for a given `interface`.

        Args:
            interface (str): name of the interface

        Returns:
            tuple(bool, pid): whether dhclient is running and its pid (the pid
            may be set even when `running` is False, e.g. for a stale pidfile).
        """
        pid = None
        running = False
        try:
            with open(PIDFILE_TEMPLATE.format(interface)) as f:
                pid = int(f.read().strip())
            # signal 0 just probes whether the process exists
            os.kill(pid, 0)
        except (FileNotFoundError, ValueError, OSError):
            pass
        else:
            running = True
        return running, pid

    @private
    def dhclient_leases(self, interface):
        """
        Read the leases file for `interface` and return its content, or None
        when no leases file exists.

        Args:
            interface (str): name of the interface.
        """
        try:
            with open(LEASEFILE_TEMPLATE.format(interface)) as f:
                return f.read()
        except FileNotFoundError:
            return None
| 2,243 | Python | .py | 57 | 29.842105 | 98 | 0.610676 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,637 | lag_protocols.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/lag_protocols.py | from middlewared.service import no_authz_required, private, Service
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    @no_authz_required
    async def lag_supported_protocols(self):
        """Return the link aggregation protocols supported by this platform."""
        return ['LACP', 'FAILOVER', 'LOADBALANCE']
| 294 | Python | .py | 8 | 31.25 | 67 | 0.723404 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,638 | internal_ifaces.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/internal_ifaces.py | from middlewared.service import private, Service
from .netif import netif
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    async def internal_interfaces(self):
        """
        Return names of interfaces that are internal to the system (static
        internal names, failover/RDMA/virt interfaces, platform-specific NICs
        and docker network interfaces) and should be hidden from general
        network configuration.
        """
        # explicit call to list() is important here: we extend `result` below
        # and must not mutate netif.INTERNAL_INTERFACES itself
        result = list(netif.INTERNAL_INTERFACES)
        result.extend(await self.middleware.call('failover.internal_interface.detect'))
        result.extend(await self.middleware.call('rdma.interface.internal_interfaces'))
        result.extend(await self.middleware.call('virt.global.internal_interfaces'))
        if (await self.middleware.call('truenas.get_chassis_hardware')).startswith('TRUENAS-F'):
            # The eno1 interface needs to be masked on the f-series platform because
            # this interface is shared with the BMC. Details for why this is done
            # can be obtained from platform team.
            result.append('eno1')
        return result + await self.middleware.call('docker.network.interfaces_mapping')
| 1,021 | Python | .py | 18 | 48.5 | 96 | 0.714142 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,639 | listen.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/listen.py | from collections import namedtuple
from itertools import zip_longest
from middlewared.schema import Dict, List, returns, Str
from middlewared.service import accepts, Service, private
# Snapshot for one delegate: the delegate object, its opaque listen state, and
# the (removed) addresses it was listening on.
PreparedDelegate = namedtuple("PreparedDelegate", ["delegate", "state", "addresses"])
# Filled before an interface sync and consumed after sync / after rollback.
PRE_SYNC_LISTEN_DELEGATES = []
POST_ROLLBACK_LISTEN_DELEGATES = []
class InterfaceService(Service):

    # Registered listen delegates (services bound to specific listen IPs)
    delegates = []

    class Config:
        namespace_alias = "interfaces"

    @private
    def register_listen_delegate(self, delegate):
        """Register a delegate whose listen IPs must be reset when addresses are removed."""
        self.delegates.append(delegate)

    @accepts()
    @returns(List('services_to_be_restarted', items=[Dict(
        'service_restart',
        Str('type', required=True),
        Str('service', required=True),
        List('ips', required=True, items=[Str('ip')]),
    )]))
    async def services_restarted_on_sync(self):
        """
        Returns which services will be set to listen on 0.0.0.0 (and, thus, restarted) on sync.

        Example result:
        [
            // Samba service will be set ot listen on 0.0.0.0 and restarted because it was set up to listen on
            // 192.168.0.1 which is being removed.
            {"type": "SYSTEM_SERVICE", "service": "cifs", "ips": ["192.168.0.1"]},
        ]
        """
        return [dict(await pd.delegate.repr(pd.state), ips=pd.addresses)
                for pd in await self.listen_delegates_prepare()]

    @private
    async def listen_delegates_prepare(self):
        """
        Return a `PreparedDelegate` for every registered delegate that listens
        on an address present in the original datastores but absent from the
        pending ones (i.e. about to be removed by the sync).
        """
        original_datastores = await self.middleware.call("interface.get_original_datastores")
        if not original_datastores:
            # no sync in progress, nothing to prepare
            return []
        datastores = await self.middleware.call("interface.get_datastores")
        old_addresses = self._collect_addresses(original_datastores)
        addresses = self._collect_addresses(datastores)
        gone_addresses = old_addresses - addresses
        result = []
        for delegate in self.delegates:
            state = await delegate.get_listen_state(gone_addresses)
            delegate_addresses = [address
                                  for address in gone_addresses
                                  if await delegate.listens_on(state, address)]
            if delegate_addresses:
                result.append(PreparedDelegate(delegate, state, delegate_addresses))
        return result

    def _collect_addresses(self, datastores):
        """
        Collect every IP address present in a datastores snapshot: both
        controllers' addresses plus VIPs, from interface rows and alias rows.
        """
        addresses = set()
        # zip_longest is just a way to walk both lists in one loop; the pairing
        # of a particular interface row with a particular alias row is meaningless.
        for iface, alias in zip_longest(datastores["interfaces"], datastores["alias"], fillvalue={}):
            addresses.add(iface.get("int_address", ""))
            addresses.add(iface.get("int_address_b", ""))
            addresses.add(iface.get("int_vip", ""))
            addresses.add(alias.get("alias_address", ""))
            addresses.add(alias.get("alias_address_b", ""))
            addresses.add(alias.get("alias_vip", ""))
        addresses.discard("")
        return addresses
async def interface_pre_sync(middleware):
    """Snapshot, before a sync, which delegates listen on addresses being removed."""
    PRE_SYNC_LISTEN_DELEGATES[:] = await middleware.call("interface.listen_delegates_prepare")
async def interface_post_sync(middleware):
    """
    After a sync: if a rollback just happened, restore each delegate's saved
    listen state; otherwise reset listen IPs of delegates whose addresses
    were removed.
    """
    if POST_ROLLBACK_LISTEN_DELEGATES:
        # a rollback happened: restore the original listen state and stop
        for pd in POST_ROLLBACK_LISTEN_DELEGATES:
            middleware.logger.info("Restoring listen IPs on delegate %r: %r", pd.delegate, pd.state)
            middleware.create_task(pd.delegate.set_listen_state(pd.state))
        POST_ROLLBACK_LISTEN_DELEGATES[:] = []
        return
    for pd in PRE_SYNC_LISTEN_DELEGATES:
        middleware.logger.info("Resetting listen IPs on delegate %r because %r addresses were removed", pd.delegate,
                               pd.addresses)
        middleware.create_task(pd.delegate.reset_listens(pd.state))
async def interface_post_rollback(middleware):
    """Save the pre-sync listen snapshot so interface_post_sync can restore it."""
    POST_ROLLBACK_LISTEN_DELEGATES[:] = PRE_SYNC_LISTEN_DELEGATES.copy()
async def setup(middleware):
    """Wire the listen-delegate bookkeeping into the interface sync lifecycle."""
    middleware.register_hook("interface.pre_sync", interface_pre_sync, sync=True)
    middleware.register_hook("interface.post_sync", interface_post_sync, sync=True)
    middleware.register_hook("interface.post_rollback", interface_post_rollback, sync=True)
| 4,080 | Python | .py | 81 | 41.259259 | 116 | 0.659955 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,640 | capabilities.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/capabilities.py | from middlewared.service_exception import ValidationErrors
from middlewared.service import Service, private
from middlewared.schema import Dict, Str, List, accepts, returns
from middlewared.plugins.interface.netif_linux import ethernet_settings
# Short alias used throughout this service
EHS = ethernet_settings.EthernetHardwareSettings
class InterfaceCapabilitiesService(Service):

    class Config:
        namespace = 'interface.capabilities'
        namespace_alias = 'interface.features'
        cli_namespace = 'network.interface.capabilities'

    @private
    async def validate(self, data, dev):
        """Raise validation errors if any requested capability is unsupported (or fixed) on `dev`."""
        verrors = ValidationErrors()
        unavail = [i for i in data['capabilities'] if i not in dev.supported_capabilities]
        if unavail:
            # gave us a capability that isn't supported on the device
            # or is "fixed" (meaning it can't be changed)
            verrors.add(
                f'capabilities_set.{data["action"]}',
                f'"{data["name"]}" does not support "{", ".join(unavail)}"'
            )
        verrors.check()

    @accepts(Str('name', required=True), roles=['NETWORK_INTERFACE_READ'])
    @returns(Dict(
        'capabilities',
        List('enabled', items=[Str('capability')], required=True),
        List('disabled', items=[Str('capability')], required=True),
        List('supported', items=[Str('capability')], required=True),
    ))
    def get(self, name):
        """
        Return enabled, disabled and supported capabilities (also known as features)
        on a given interface.

        `name` String representing name of the interface
        """
        with EHS(name) as dev:
            return dev._caps

    @accepts(Dict(
        'capabilities_set',
        Str('name', required=True),
        # NOTE: field was previously misspelled 'capabilties' while the method
        # body reads data['capabilities'], which could never have worked
        List('capabilities', required=True),
        Str('action', enum=['ENABLE', 'DISABLE'], required=True),
    ), roles=['NETWORK_INTERFACE_WRITE'])
    @returns(List('capabilities', items=[Str('capability')], required=True))
    def set(self, data):
        """
        Enable or Disable capabilities (also known as features) on a given interface.

        `name` String representing name of the interface
        `capabilities` List representing capabilities to be acted upon
        `action` String when set to 'ENABLE' will enable `capabilities` else if set
        to `DISABLE` will disable `capabilities`.
        """
        with EHS(data['name']) as dev:
            self.middleware.call_sync('interface.capabilities.validate', data, dev)
            # `action` is constrained to uppercase 'ENABLE'/'DISABLE'; the previous
            # comparison against lowercase 'enable' was unreachable, so ENABLE
            # requests fell through and disabled the capabilities instead
            if data['action'] == 'ENABLE':
                dev.enabled_capabilities = data['capabilities']
            else:
                dev.disabled_capabilities = data['capabilities']
            caps = self.middleware.call_sync('interface.capabilities.get', data['name'])
            return caps['enabled'] if data['action'] == 'ENABLE' else caps['disabled']
| 2,851 | Python | .py | 60 | 38.583333 | 90 | 0.643397 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,641 | control.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/control.py | from middlewared.service import Service, private
from .netif import netif
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    def destroy(self, name):
        """Destroy (delete) the OS network interface called `name`."""
        netif.destroy_interface(name)
| 249 | Python | .py | 8 | 26 | 48 | 0.741525 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,642 | lag.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/lag.py | from middlewared.service import private, Service
from .netif import netif
class InterfaceService(Service):

    class Config:
        namespace_alias = 'interfaces'

    @private
    def lag_setup(self, lagg, members, parent_interfaces, sync_interface_opts):
        """
        Create/sync the OS bond described by `lagg` with the member ports in
        `members` (datastore rows; the first row is treated as the primary
        port for FAILOVER).

        Side effects: (re)creates the bond when its first port changed,
        reconciles protocol / xmit hash policy / lacpdu rate / primary
        interface (downing and re-upping the bond around such changes),
        reconciles member ports and brings them up. Mutates
        `parent_interfaces` (appends every port) and `sync_interface_opts`
        (sets skip_mtu for each member, since MTU lives on the bond itself).
        """
        name = lagg['lagg_interface']['int_interface']
        self.logger.info('Setting up %s', name)
        try:
            iface = netif.get_interface(name)
        except KeyError:
            iface = None
        else:
            first_port = next(iter(iface.ports), None)
            if first_port is None or first_port[0] != members[0]['lagg_physnic']:
                self.logger.info('Destroying %s because its first port has changed', name)
                netif.destroy_interface(name)
                iface = None
        if iface is None:
            netif.create_interface(name)
            iface = netif.get_interface(name)
        info = {'protocol': None, 'xmit_hash_policy': None, 'lacpdu_rate': None, 'primary_interface': None}
        protocol = getattr(netif.AggregationProtocol, lagg['lagg_protocol'].upper())
        if iface.protocol != protocol:
            info['protocol'] = protocol
        if protocol.name == 'FAILOVER':
            # the first member row is used as the failover primary; index it
            # directly instead of building a throwaway list of all members
            db_primary = members[0]['lagg_physnic']
            curr_primary = iface.primary_interface
            if curr_primary != db_primary:
                info['primary_interface'] = db_primary
        if lagg['lagg_xmit_hash_policy']:
            # passing the xmit_hash_policy value needs to be lower-case
            # or `ip-link` will error with invalid argument
            xmit_hash = lagg['lagg_xmit_hash_policy'].lower()
            if iface.xmit_hash_policy != xmit_hash:
                info['xmit_hash_policy'] = xmit_hash
        # passing the lacp_rate value needs to be lower-case
        # or `ip-link` will error with invalid argument
        if lagg['lagg_lacpdu_rate']:
            lacpdu_rate = lagg['lagg_lacpdu_rate'].lower()
            if iface.lacpdu_rate != lacpdu_rate:
                info['lacpdu_rate'] = lacpdu_rate
        if any(i is not None for i in info.values()):
            # means one of the lagg options changed or is being
            # setup for the first time so we have to down the
            # interface before performing any of the actions
            iface.down()
            if info['protocol'] is not None:
                # we _always_ have to start with the protocol
                # information first since it deletes members
                # (if any) of the current lagg and then changes
                # the protocol
                self.logger.info('Changing protocol on %r to %s', name, info['protocol'].name)
                iface.protocol = info['protocol']
            if info['xmit_hash_policy'] is not None:
                self.logger.info('Changing xmit_hash_policy on %r to %s', name, info['xmit_hash_policy'])
                iface.xmit_hash_policy = info['xmit_hash_policy']
            if info['lacpdu_rate'] is not None:
                self.logger.info('Changing lacpdu_rate on %r to %s', name, info['lacpdu_rate'])
                iface.lacpdu_rate = info['lacpdu_rate']
            if info['primary_interface'] is not None:
                self.logger.info('Changing primary interface on %r to %s', name, info['primary_interface'])
                iface.primary_interface = info['primary_interface']
            # be sure and bring the lagg back up after making changes
            iface.up()
        members_database = []
        members_configured = {p[0] for p in iface.ports}
        for member in members:
            # For Link Aggregation MTU is configured in parent, not ports
            sync_interface_opts[member['lagg_physnic']]['skip_mtu'] = True
            members_database.append(member['lagg_physnic'])
        # Remove member ports configured in bond but do not exist in database
        iface.delete_ports(list(members_configured - set(members_database)))
        # Add member ports that exist in db but not configured in bond
        iface.add_ports([i for i in members_database if i not in members_configured])
        for port in iface.ports:
            try:
                port_iface = netif.get_interface(port[0])
            except KeyError:
                self.logger.warning('Could not find %s from %s', port[0], name)
                continue
            parent_interfaces.append(port[0])
            port_iface.up()
| 4,477 | Python | .py | 84 | 40.892857 | 107 | 0.598948 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,643 | lag_options.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/lag_options.py | import enum
class XmitHashChoices(enum.Enum):
    """Valid transmit hash policies for a LAGG (bond) interface.

    Values match the kernel bonding driver's ``xmit_hash_policy`` option.
    """
    LAYER2 = 'LAYER2'
    LAYER23 = 'LAYER2+3'
    LAYER34 = 'LAYER3+4'
class LacpduRateChoices(enum.Enum):
    """Valid LACPDU transmission rates for an 802.3ad (LACP) bond."""
    SLOW = 'SLOW'
    FAST = 'FAST'
| 194 | Python | .py | 8 | 20.25 | 35 | 0.681319 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,644 | link_address.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/link_address.py | import re
from sqlalchemy.exc import IntegrityError
from middlewared.service import private, Service
INTERFACE_FILTERS = [["type", "=", "PHYSICAL"]]
RE_FREEBSD_BRIDGE = re.compile(r"bridge([0-9]+)$")
RE_FREEBSD_LAGG = re.compile(r"lagg([0-9]+)$")
class DuplicateHardwareInterfaceLinkAddresses(Exception):
    """Raised when two network interfaces report the same hardware link (MAC) address."""

    def __init__(self, name1, name2, link_address):
        super().__init__(name1, name2, link_address)
        self.name1 = name1
        self.name2 = name2
        self.link_address = link_address

    def __str__(self):
        return (
            f"Interfaces {self.name1!r} and {self.name2!r} "
            f"have the same hardware link address {self.link_address!r}"
        )
class InterfaceService(Service):
    """Persists the mapping between interface names and hardware MAC addresses."""

    class Config:
        namespace_alias = "interfaces"

    @private
    async def persist_link_addresses(self):
        """Record each physical interface's hardware link address in the database.

        On HA systems both controllers' addresses are stored: the local node's
        address goes into one column and, when this node is MASTER, the peer's
        address is fetched over failover.call_remote and stored in the other.
        All errors are logged and swallowed — this is best-effort bookkeeping.
        """
        try:
            # Column selection depends on which HA node we are.
            if await self.middleware.call("failover.node") == "B":
                local_key = "link_address_b"
                remote_key = "link_address"
            else:
                local_key = "link_address"
                remote_key = "link_address_b"

            real_interfaces = RealInterfaceCollection(
                await self.middleware.call("interface.query", INTERFACE_FILTERS),
            )

            # Only the MASTER can reach the standby node; failure to query it
            # is non-fatal (we just skip the remote column).
            real_interfaces_remote = None
            if await self.middleware.call("failover.status") == "MASTER":
                try:
                    real_interfaces_remote = RealInterfaceCollection(
                        await self.middleware.call("failover.call_remote", "interface.query", [INTERFACE_FILTERS]),
                    )
                except Exception as e:
                    self.middleware.logger.warning(f"Exception while retrieving remote network interfaces: {e!r}")

            db_interfaces = DatabaseInterfaceCollection(
                await self.middleware.call("datastore.query", "network.interface_link_address"),
            )

            for real_interface in real_interfaces:
                name = real_interfaces.get_name(real_interface)
                await self.__handle_interface(db_interfaces, name, local_key,
                                              real_interface["state"]["hardware_link_address"])

                if real_interfaces_remote is not None:
                    real_interface_remote = real_interfaces_remote.by_name.get(name)
                    if real_interface_remote is None:
                        self.middleware.logger.warning(f"Interface {name!r} is only present on the local system")
                    else:
                        try:
                            remote_hardware_link_address = real_interface_remote["state"]["hardware_link_address"]
                        except KeyError:
                            # remote record has no hardware address; nothing to store
                            pass
                        else:
                            await self.__handle_interface(db_interfaces, name, remote_key, remote_hardware_link_address)
        except DuplicateHardwareInterfaceLinkAddresses as e:
            self.middleware.logger.error(f"Not persisting network interfaces link addresses: {e}")
        except Exception:
            self.middleware.logger.error("Unhandled exception while persisting network interfaces link addresses",
                                         exc_info=True)

    async def __handle_interface(self, db_interfaces, name, key, link_address):
        """Insert or update the row for *name*, setting column *key* to *link_address*.

        Newly inserted rows are appended to *db_interfaces* so later calls in the
        same run see them.
        """
        interface = db_interfaces.by_name.get(name)
        if interface is None:
            self.middleware.logger.debug(f"Creating interface {name!r} {key} = {link_address!r}")
            interface = {
                "interface": name,
                "link_address": None,
                "link_address_b": None,
                key: link_address,
            }
            interface["id"] = await self.middleware.call("datastore.insert", "network.interface_link_address",
                                                         interface)
            db_interfaces.interfaces.append(interface)
        elif interface[key] != link_address:
            self.middleware.logger.debug(f"Updating interface {name!r} {key} = {link_address!r}")
            await self.middleware.call("datastore.update", "network.interface_link_address", interface["id"],
                                       {key: link_address})
class InterfaceCollection:
    """Base wrapper over a list of interface records, indexable by name.

    Subclasses define `get_name` to extract the name from one record.
    """

    def __init__(self, interfaces):
        self.interfaces = interfaces

    @property
    def by_name(self):
        """Map of interface name -> record (built on every access)."""
        result = {}
        for record in self.interfaces:
            result[self.get_name(record)] = record
        return result

    def __iter__(self):
        yield from self.interfaces

    def get_name(self, i):
        raise NotImplementedError
class DatabaseInterfaceCollection(InterfaceCollection):
    """Interface rows from the `network.interface_link_address` datastore table."""

    def get_name(self, i):
        # database rows store the name under the "interface" key
        return i["interface"]
class RealInterfaceCollection(InterfaceCollection):
    """Live interfaces from `interface.query`, also indexed by hardware MAC.

    Raises `DuplicateHardwareInterfaceLinkAddresses` if two interfaces report
    the same hardware link address.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # hardware link address -> interface record; duplicates are fatal
        self.by_link_address = {}
        for i in self.interfaces:
            link_address = i["state"]["hardware_link_address"]
            if link_address in self.by_link_address:
                raise DuplicateHardwareInterfaceLinkAddresses(self.by_link_address[link_address]["name"], i["name"],
                                                              link_address)

            self.by_link_address[link_address] = i

    def get_name(self, i):
        # live records store the name under the "name" key
        return i["name"]
class InterfaceRenamer:
    """Accumulates old-name -> new-name mappings and rewrites every database
    table that references interfaces by name when `commit` is called.

    All datastore writes are done with ``ha_sync=False`` because interface
    names are node-local on HA systems.
    """

    def __init__(self, middleware):
        self.middleware = middleware
        # old interface name -> new interface name
        self.mapping = {}

    def rename(self, old_name, new_name):
        """Record that *old_name* should become *new_name* on commit."""
        self.middleware.logger.info("Renaming interface %r to %r", old_name, new_name)
        self.mapping[old_name] = new_name

    async def commit(self):
        """Apply the recorded renames to every table that stores interface names."""
        # 1. Hardware link-address bookkeeping table. A rename can collide with
        # an existing row; in that case the stale row is dropped.
        for interface in await self.middleware.call("datastore.query", "network.interface_link_address"):
            if new_name := self.mapping.get(interface["interface"]):
                self.middleware.logger.info("Renaming hardware interface %r to %r", interface["interface"], new_name)
                try:
                    await self.middleware.call(
                        "datastore.update", "network.interface_link_address", interface["id"], {"interface": new_name},
                        {"ha_sync": False},
                    )
                except IntegrityError:
                    self.middleware.logger.warning(
                        f"Already had configuration for hardware interface {new_name!r}, removing old entry"
                    )
                    await self.middleware.call(
                        "datastore.delete", "network.interface_link_address", interface["id"], {"ha_sync": False},
                    )

        # 2. Main interface configuration table, same collision handling.
        for interface in await self.middleware.call("datastore.query", "network.interfaces", [], {"prefix": "int_"}):
            if new_name := self.mapping.get(interface["interface"]):
                self.middleware.logger.info("Renaming interface configuration %r to %r", interface["interface"],
                                            new_name)
                try:
                    await self.middleware.call(
                        "datastore.update", "network.interfaces", interface["id"], {"interface": new_name},
                        {"prefix": "int_", "ha_sync": False},
                    )
                except IntegrityError:
                    self.middleware.logger.warning(
                        f"Already had configuration for interface {new_name!r}, removing old entry"
                    )
                    await self.middleware.call(
                        "datastore.delete", "network.interfaces", interface["id"], {"ha_sync": False},
                    )

        # 3. Bridge member lists (stored as a list column).
        for bridge in await self.middleware.call("datastore.query", "network.bridge"):
            updated = False
            for i, member in enumerate(bridge["members"]):
                if new_name := self.mapping.get(member):
                    self.middleware.logger.info("Changing bridge %r member %r to %r", bridge["id"], member, new_name)
                    bridge["members"][i] = new_name
                    updated = True

            if updated:
                await self.middleware.call(
                    "datastore.update", "network.bridge", bridge["id"], {"members": bridge["members"]},
                    {"ha_sync": False},
                )

        # 4. LAGG members (separate helper: renames can merge members).
        await self._commit_laggs()

        # 5. VLAN parent interfaces.
        for vlan in await self.middleware.call("datastore.query", "network.vlan"):
            if new_name := self.mapping.get(vlan["vlan_pint"]):
                self.middleware.logger.info("Changing VLAN %r parent NIC from %r to %r", vlan["vlan_vint"],
                                            vlan["vlan_pint"], new_name)
                await self.middleware.call(
                    "datastore.update", "network.vlan", vlan["id"], {"vlan_pint": new_name}, {"ha_sync": False},
                )

        # 6. VM NIC device attachments (stored inside the attributes JSON).
        for vm_device in await self.middleware.call("datastore.query", "vm.device", [["dtype", "=", "NIC"]]):
            if new_name := self.mapping.get(vm_device["attributes"].get("nic_attach")):
                self.middleware.logger.info("Changing VM NIC device %r from %r to %r", vm_device["id"],
                                            vm_device["attributes"]["nic_attach"], new_name)
                await self.middleware.call("datastore.update", "vm.device", vm_device["id"], {
                    "attributes": {**vm_device["attributes"], "nic_attach": new_name},
                }, {"ha_sync": False})

    async def _commit_laggs(self):
        """Rename LAGG member NICs; if a rename makes two members point at the
        same physical NIC, the duplicate member is dropped and the table is
        rewritten with fresh ordering."""
        lagg_members = await self.middleware.call("datastore.query", "network.lagginterfacemembers", [],
                                                  {"prefix": "lagg_"})
        lagg_members_changed = False
        for lagg_member in lagg_members:
            if new_name := self.mapping.get(lagg_member["physnic"]):
                self.middleware.logger.info("Changing LAGG member %r physical NIC from %r to %r", lagg_member["id"],
                                            lagg_member["physnic"], new_name)
                lagg_member["physnic"] = new_name
                # the renamed member wins; clear any stale delete flag on it
                lagg_member.pop("delete", None)
                lagg_members_changed = True

                # flag any OTHER member now using the same physical NIC for removal
                for other_lagg_member in lagg_members:
                    if other_lagg_member["id"] != lagg_member["id"]:
                        if other_lagg_member["physnic"] == new_name:
                            other_lagg_member["delete"] = True

        for lagg_member in lagg_members:
            if lagg_member.get("delete"):
                self.middleware.logger.info(
                    "Deleting LAGG member %r as it uses physical NIC which is now also used in another LAGG member",
                    lagg_member["physnic"],
                )

        if lagg_members_changed:
            # rewrite the whole table so ordernum stays dense and duplicates vanish
            for lagg_member in lagg_members:
                await self.middleware.call(
                    "datastore.delete", "network.lagginterfacemembers", lagg_member["id"], {"ha_sync": False},
                )

            for order, lagg_member in enumerate(lagg_members):
                if "delete" not in lagg_member:
                    await self.middleware.call("datastore.insert", "network.lagginterfacemembers", {
                        "interfacegroup": lagg_member["interfacegroup"]["id"],
                        "physnic": lagg_member["physnic"],
                        "ordernum": order,
                    }, {"prefix": "lagg_", "ha_sync": False})
async def setup(middleware):
    """Plugin setup hook: migrate interface names after OS/hardware changes.

    Two migrations are handled: legacy FreeBSD bridge/lagg names to their
    Linux equivalents (brN/bondN), and interfaces whose kernel name changed
    but whose hardware MAC address still matches a stored record. Finishes
    by re-persisting the current name -> MAC mapping. Errors are logged,
    never raised.
    """
    try:
        interface_renamer = InterfaceRenamer(middleware)

        # which DB column holds THIS node's MAC address (HA-aware)
        if await middleware.call("failover.node") == "B":
            link_address_key = "link_address_b"
        else:
            link_address_key = "link_address"

        real_interfaces = RealInterfaceCollection(await middleware.call("interface.query", INTERFACE_FILTERS))

        # Migrate BSD network interfaces to Linux
        for db_interface in await middleware.call("datastore.query", "network.interfaces", [], {"prefix": "int_"}):
            if m := RE_FREEBSD_BRIDGE.match(db_interface["interface"]):
                interface_renamer.rename(db_interface["interface"], f"br{m.group(1)}")
            if m := RE_FREEBSD_LAGG.match(db_interface["interface"]):
                interface_renamer.rename(db_interface["interface"], f"bond{m.group(1)}")

        # Rename interfaces whose stored MAC matches a live interface with a
        # different name (e.g. the kernel enumerated NICs differently).
        db_interfaces = DatabaseInterfaceCollection(
            await middleware.call("datastore.query", "network.interface_link_address"),
        )
        for db_interface in db_interfaces:
            if db_interface[link_address_key] is not None:
                real_interface_by_link_address = real_interfaces.by_link_address.get(db_interface[link_address_key])
                if real_interface_by_link_address is None:
                    middleware.logger.warning(
                        "Interface with link address %r does not exist anymore (its name was %r)",
                        db_interface[link_address_key], db_interface["interface"],
                    )
                    continue

                if real_interface_by_link_address["name"] == db_interface["interface"]:
                    # name unchanged, nothing to do
                    continue

                middleware.logger.info(
                    "Interface %r is now %r (matched by link address %r)",
                    db_interface["interface"], real_interface_by_link_address["name"], db_interface[link_address_key],
                )
                interface_renamer.rename(db_interface["interface"], real_interface_by_link_address["name"])

        await interface_renamer.commit()
    except DuplicateHardwareInterfaceLinkAddresses as e:
        middleware.logger.error(f"Not migrating network interfaces: {e}")
    except Exception:
        middleware.logger.error("Unhandled exception while migrating network interfaces", exc_info=True)

    await middleware.call("interface.persist_link_addresses")
| 13,801 | Python | .py | 240 | 41.7375 | 120 | 0.568478 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,645 | bits.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/bits.py | from enum import IntEnum
__all__ = ["InterfaceFlags", "InterfaceV6Flags", "InterfaceLinkState", "NeighborDiscoveryFlags"]
class InterfaceFlags(IntEnum):
    """Network interface flags (IFF_*) as a bitmask.

    Mirrors the kernel definitions; the trailing comments note whether a flag
    is user-settable via sysfs or volatile (kernel-managed).
    """
    # include/uapi/linux/if.h
    UP = 1 << 0  # sysfs
    BROADCAST = 1 << 1  # volatile
    DEBUG = 1 << 2  # sysfs
    LOOPBACK = 1 << 3  # volatile
    POINTOPOINT = 1 << 4  # volatile
    NOTRAILERS = 1 << 5  # sysfs
    RUNNING = 1 << 6  # volatile
    NOARP = 1 << 7  # sysfs
    PROMISC = 1 << 8  # sysfs
    ALLMULTI = 1 << 9  # sysfs
    MASTER = 1 << 10  # volatile
    SLAVE = 1 << 11  # volatile
    MULTICAST = 1 << 12  # sysfs
    PORTSEL = 1 << 13  # sysfs
    AUTOMEDIA = 1 << 14  # sysfs
    DYNAMIC = 1 << 15  # sysfs
    LOWER_UP = 1 << 16
    DORMANT = 1 << 17
    ECHO = 1 << 18
class InterfaceV6Flags(IntEnum):
    """IPv6 address flags (IFA_F_*) as a bitmask."""
    # include/uapi/linux/if_addr.h
    TEMPORARY = 0x01
    NODAD = 0x02
    OPTIMISTIC = 0x04
    DADFAILED = 0x08
    HOMEADDRESS = 0x10
    DEPRECATED = 0x20
    TENTATIVE = 0x40
    PERMANENT = 0x80
    MANAGETEMPADDR = 0x100
    NOPREFIXROUTE = 0x200
    MCAUTOJOIN = 0x400
    STABLE_PRIVACY = 0x800
class InterfaceLinkState(IntEnum):
    """Coarse link state of an interface (unknown / down / up)."""
    LINK_STATE_UNKNOWN = 0
    LINK_STATE_DOWN = 1
    LINK_STATE_UP = 2
class NeighborDiscoveryFlags(IntEnum):
    """Neighbor-discovery flag names kept for API compatibility.

    All values are 0, so none of these ever set a bit in a mask.  NOTE: with
    identical values, Python enum semantics make every later member an alias
    of PERFORMNUD (iteration yields only the first member).
    """
    PERFORMNUD = 0
    ACCEPT_RTADV = 0
    PREFER_SOURCE = 0
    IFDISABLED = 0
    DONT_SET_IFROUTE = 0
    AUTO_LINKLOCAL = 0
    NO_RADR = 0
    NO_PREFER_IFACE = 0
| 1,418 | Python | .py | 50 | 23.66 | 96 | 0.622517 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,646 | bridge.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/bridge.py | import json
import logging
from pyroute2 import NDB
from .utils import run
logger = logging.getLogger(__name__)
__all__ = ["create_bridge", "BridgeMixin"]
def create_bridge(name):
    """Create a bridge interface called *name* and bring it up."""
    with NDB(log="off") as ndb:
        bridge = ndb.interfaces.create(ifname=name, kind="bridge")
        bridge.set("state", "up").commit()
class BridgeMixin:
    """Mixin implementing bridge operations for `Interface`.

    Relies on `self.name` being the bridge interface's name.
    """

    def add_member(self, name):
        """Enslave interface *name* to this bridge."""
        run(["ip", "link", "set", name, "master", self.name])

    def set_learning(self, name, enable):
        """Turn MAC address learning on/off for bridge port *name*."""
        run(["bridge", "link", "set", "dev", name, "learning", "on" if enable else "off"])

    def delete_member(self, name):
        """Detach interface *name* from its bridge."""
        run(["ip", "link", "set", name, "nomaster"])

    @property
    def members(self):
        """Names of the ports currently enslaved to this bridge."""
        return [
            link["ifname"]
            for link in json.loads(run(["bridge", "-json", "link"]).stdout)
            if link.get("master") == self.name
        ]

    @property
    def stp(self):
        """Whether spanning tree protocol is enabled on this bridge."""
        with NDB(log="off") as ndb:
            with ndb.interfaces[self.name] as br:
                return bool(br['br_stp_state'])

    def toggle_stp(self, name, value):
        """Set the STP state of bridge *name*."""
        # 0 is off > 0 is on
        with NDB(log="off") as ndb:
            with ndb.interfaces[name] as br:
                br['br_stp_state'] = value
| 1,217 | Python | .py | 33 | 29.454545 | 90 | 0.580205 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,647 | vlan.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/vlan.py | import glob
import logging
import os
import re
from pyroute2 import NDB
from .utils import run
logger = logging.getLogger(__name__)
__all__ = ["create_vlan", "VlanMixin"]
def create_vlan(name, parent, tag):
    """Create VLAN interface *name* with VLAN id *tag* on top of *parent*.

    The parent link is brought up first, then the new VLAN is brought up.
    """
    with NDB(log="off") as ndb:
        # the parent must be up for the vlan to pass traffic
        ndb.interfaces[parent].set("state", "up").commit()
        vlan = ndb.interfaces.create(ifname=name, link=parent, vlan_id=tag, kind="vlan")
        vlan.set("state", "up").commit()
class VlanMixin:
    """Mixin implementing VLAN operations for `Interface`."""

    @property
    def parent(self):
        # resolve the "lower_*" sysfs symlink to find the parent interface name
        return os.path.basename(os.readlink(glob.glob(f"/sys/devices/virtual/net/{self.name}/lower_*")[0]))

    @property
    def tag(self):
        """VLAN id parsed from /proc/net/vlan/<name>."""
        with open(f"/proc/net/vlan/{self.name}") as f:
            return int(re.search(r"VID: ([0-9]+)", f.read()).group(1))

    @property
    def pcp(self):
        # priority code point is not tracked on Linux; kept for API compatibility
        return None

    def configure(self, parent, tag, pcp):
        # pcp is accepted but unused (see the `pcp` property)
        create_vlan(self.name, parent, tag)

    def unconfigure(self):
        """Delete this VLAN interface."""
        run(["ip", "link", "delete", self.name])
| 981 | Python | .py | 27 | 31.074074 | 109 | 0.646872 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,648 | interface.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/interface.py | from pyroute2 import NDB
from .address import AddressMixin
from .bridge import BridgeMixin
from .bits import InterfaceFlags, InterfaceV6Flags
from .lagg import LaggMixin
from .utils import bitmask_to_set, INTERNAL_INTERFACES
from .vlan import VlanMixin
from .vrrp import VrrpMixin
from .ethernet_settings import EthernetHardwareSettings
__all__ = ["Interface", "CLONED_PREFIXES"]
# Keep this as an immutable type since this
# is used all over the place, and we don't want
# the contents to change
CLONED_PREFIXES = ("br", "vlan", "bond")
class Interface(AddressMixin, BridgeMixin, LaggMixin, VlanMixin, VrrpMixin):
    """Snapshot of one network interface built from a pyroute2 netlink message.

    Attribute values are cached at construction time and do NOT refresh when
    kernel state changes (the `mtu` setter updates its own cache as a special
    case).  Mixins provide address, bridge, bond, vlan and vrrp operations.
    """

    def __init__(self, dev):
        # `dev` is a netlink link message as returned by IPRoute.get_links()
        self.name = dev.get_attr('IFLA_IFNAME')
        self._mtu = dev.get_attr('IFLA_MTU') or 0
        self._flags = dev['flags'] or 0
        self._nd6_flags = dev.get_attr('IFLA_AF_SPEC').get_attr('AF_INET6').get_attr('IFLA_INET6_FLAGS') or 0
        self._link_state = f'LINK_STATE_{dev.get_attr("IFLA_OPERSTATE")}'
        self._link_address = dev.get_attr('IFLA_ADDRESS')
        self._permanent_link_address = dev.get_attr('IFLA_PERM_ADDRESS')
        # "cloned" == virtual interface (bridge/vlan/bond) or internal one
        self._cloned = any((
            (self.name.startswith(CLONED_PREFIXES)),
            (self.name.startswith(INTERNAL_INTERFACES))
        ))
        self._rxq = dev.get_attr('IFLA_NUM_RX_QUEUES') or 1
        self._txq = dev.get_attr('IFLA_NUM_TX_QUEUES') or 1
        self._bus = dev.get_attr('IFLA_PARENT_DEV_BUS_NAME')

    def _read(self, name, type_=str):
        """Read /sys/class/net/<iface>/<name>, coerced with *type_*."""
        return self._sysfs_read(f"/sys/class/net/{self.name}/{name}", type_)

    def _sysfs_read(self, path, type_=str):
        """Read a sysfs file, strip whitespace and coerce with *type_*."""
        with open(path, "r") as f:
            value = f.read().strip()

        return type_(value)

    @property
    def bus(self):
        # parent device bus name (e.g. for physical NICs); None when absent
        return self._bus

    @property
    def orig_name(self):
        return self.name

    @property
    def description(self):
        # descriptions are not stored on Linux; kept for API compatibility
        return self.name

    @description.setter
    def description(self, value):
        # intentionally a no-op (see `description`)
        pass

    @property
    def mtu(self):
        return self._mtu

    @mtu.setter
    def mtu(self, value):
        with NDB(log='off') as ndb:
            with ndb.interfaces[self.orig_name] as dev:
                dev['mtu'] = value

        # NDB() synchronizes state but the instantiation
        # of this class won't reflect the changed MTU
        # unless a new instance is created. This is a
        # cheap way of updating the "state".
        self._mtu = value

    @property
    def cloned(self):
        """True for virtual/internal interfaces (bridge, vlan, bond, etc.)."""
        return self._cloned

    @property
    def flags(self):
        return bitmask_to_set(self._flags, InterfaceFlags)

    @property
    def nd6_flags(self):
        return bitmask_to_set(self._nd6_flags, InterfaceV6Flags)

    @property
    def link_state(self):
        return self._link_state

    @property
    def link_address(self):
        return self._link_address

    @property
    def permanent_link_address(self):
        # burned-in MAC as reported by IFLA_PERM_ADDRESS; may be None
        return self._permanent_link_address

    @property
    def rx_queues(self):
        return self._rxq

    @property
    def tx_queues(self):
        return self._txq

    def asdict(self, address_stats=False, vrrp_config=None):
        """Serialize this interface (plus hardware/bond/vlan details) to a dict.

        `address_stats` is forwarded to each address's asdict(); `vrrp_config`
        is embedded verbatim.
        """
        state = {
            'name': self.name,
            'orig_name': self.orig_name,
            'description': self.description,
            'mtu': self.mtu,
            'cloned': self.cloned,
            'flags': [i.name for i in self.flags],
            'nd6_flags': [i.name for i in self.nd6_flags],
            'capabilities': [],
            'link_state': self.link_state,
            'media_type': '',
            'media_subtype': '',
            'active_media_type': '',
            'active_media_subtype': '',
            'supported_media': [],
            'media_options': None,
            'link_address': self.link_address or '',
            'permanent_link_address': self.permanent_link_address,
            'hardware_link_address': self.permanent_link_address or self.link_address or '',
            'aliases': [i.asdict(stats=address_stats) for i in self.addresses],
            'vrrp_config': vrrp_config,
            'rx_queues': self.rx_queues,
            'tx_queues': self.tx_queues,
        }

        # fill in ethtool-derived capability/media information
        with EthernetHardwareSettings(self.name) as dev:
            state.update({
                'capabilities': dev.enabled_capabilities,
                'supported_media': dev.supported_media,
                'media_type': dev.media_type,
                'media_subtype': dev.media_subtype,
                'active_media_type': dev.active_media_type,
                'active_media_subtype': dev.active_media_subtype,
            })

        if self.name.startswith('bond'):
            # bond-specific details from LaggMixin
            state.update({
                'protocol': self.protocol.name if self.protocol is not None else self.protocol,
                'ports': [{'name': p, 'flags': [x.name for x in f]} for p, f in self.ports],
                'xmit_hash_policy': self.xmit_hash_policy,
                'lacpdu_rate': self.lacpdu_rate,
            })

        if self.name.startswith('vlan'):
            # vlan-specific details from VlanMixin
            state.update({
                'parent': self.parent,
                'tag': self.tag,
                'pcp': self.pcp,
            })

        return state

    def up(self):
        """Bring the interface administratively up."""
        with NDB(log='off') as ndb:
            with ndb.interfaces[self.name] as dev:
                # this context manager waits until the interface
                # is up and "ready" before exiting
                dev['state'] = 'up'

    def down(self):
        """Bring the interface administratively down."""
        with NDB(log='off') as ndb:
            with ndb.interfaces[self.name] as dev:
                dev['state'] = 'down'
| 5,580 | Python | .py | 143 | 29.643357 | 109 | 0.589236 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,649 | lagg.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/lagg.py | import enum
import logging
import pathlib
from pyroute2 import NDB
from .utils import run
logger = logging.getLogger(__name__)
__all__ = ["AggregationProtocol", "create_lagg"]
class AggregationProtocol(enum.Enum):
    """Supported bond modes; values are the kernel bonding driver mode names."""
    LACP = "802.3ad"
    FAILOVER = "active-backup"
    LOADBALANCE = "balance-xor"
def create_lagg(name):
    """Create a bond (link aggregation) interface called *name* and bring it up."""
    with NDB(log="off") as ndb:
        bond = ndb.interfaces.create(ifname=name, kind="bond")
        bond.set("state", "up").commit()
class LaggMixin:
    """Mixin implementing bond (link aggregation) operations for `Interface`.

    Relies on `self.name` being the bond's name and on `self._sysfs_read`
    (provided by `Interface`) for reading bonding sysfs files.
    """

    @property
    def protocol(self):
        """Return this bond's AggregationProtocol, or None if the mode is unknown."""
        value = self._sysfs_read(f"/sys/devices/virtual/net/{self.name}/bonding/mode").split()[0]
        for protocol in AggregationProtocol:
            if protocol.value == value:
                return protocol

    @protocol.setter
    def protocol(self, value):
        # the bond mode can only change while the bond has no member ports,
        # so detach them all first; the caller is expected to re-add members
        for port in self.ports:
            self.delete_port(port[0])
        run(["ip", "link", "set", self.name, "type", "bond", "mode", value.value])

    @property
    def xmit_hash_policy(self):
        if self.protocol in (AggregationProtocol.LACP, AggregationProtocol.LOADBALANCE):
            # this option only applies to 802.3ad and/or balance-xor
            return self._sysfs_read(self.get_options_path("xmit_hash_policy")).split()[0]

    @xmit_hash_policy.setter
    def xmit_hash_policy(self, value):
        run(["ip", "link", "set", self.name, "type", "bond", "xmit_hash_policy", value])

    @property
    def lacpdu_rate(self):
        if self.protocol == AggregationProtocol.LACP:
            # this option only applies to 802.3ad
            return self._sysfs_read(self.get_options_path("lacp_rate")).split()[0]

    @lacpdu_rate.setter
    def lacpdu_rate(self, value):
        run(["ip", "link", "set", self.name, "type", "bond", "lacp_rate", value])

    @property
    def primary_interface(self):
        if self.protocol == AggregationProtocol.FAILOVER:
            # "primary" only applies to active-backup mode; empty file -> None
            return self._sysfs_read(self.get_options_path("primary")).strip() or None

    @primary_interface.setter
    def primary_interface(self, value):
        run(["ip", "link", "set", self.name, "type", "bond", "primary", value])

    @property
    def ports(self):
        """Return [(member_name, flag_set), ...]; flag sets are always empty on Linux."""
        ports = []
        for port in self._sysfs_read(f"/sys/devices/virtual/net/{self.name}/bonding/slaves").split():
            ports.append((port, set()))
        return ports

    def get_options_path(self, value):
        """Return the sysfs path of the bonding option named *value*."""
        return str(pathlib.Path(f"/sys/class/net/{self.name}/bonding/").joinpath(value))

    def add_port(self, member_port):
        self.add_ports([member_port])

    def add_ports(self, member_ports):
        """Add *member_ports* to this bond, downing each member first.

        Missing members and enslaving failures are logged and skipped.
        """
        with NDB(log='off') as ndb:
            for member in member_ports:
                try:
                    with ndb.interfaces[member] as mp:
                        if mp['state'] == 'up':
                            # caller of this method will up() the interfaces after
                            # the parent bond interface has been fully configured
                            mp['state'] = 'down'
                except KeyError:
                    # interface was added to bond but maybe it no longer exists,
                    # for example, after a reboot
                    logger.warning('Failed adding %r to %r. Interface not found', member, self.name)
                    continue
                else:
                    with ndb.interfaces[self.name] as bond:
                        try:
                            bond.add_port(member)
                        except Exception:
                            # BUG FIX: was `exc_inf=True`, an invalid keyword that
                            # made logger.warning() raise TypeError inside this
                            # exception handler
                            logger.warning('Failed adding %r to %r', member, self.name, exc_info=True)

    def delete_port(self, member_port):
        return self.delete_ports([member_port])

    def delete_ports(self, member_ports):
        """Remove *member_ports* from this bond; missing members are logged and skipped."""
        with NDB(log='off') as ndb:
            for to_delete in member_ports:
                if not ndb.interfaces.get(to_delete):
                    logger.warning('Failed removing %r from %r. Interface not found', to_delete, self.name)
                else:
                    try:
                        with ndb.interfaces[self.name] as bond:
                            bond.del_port(to_delete)
                    except Exception:
                        logger.warning('Failed removing %r from %r.', to_delete, self.name, exc_info=True)
                        continue
| 4,280 | Python | .py | 93 | 34.365591 | 107 | 0.579011 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,650 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/utils.py | import subprocess
__all__ = ["bitmask_to_set", "INTERNAL_INTERFACES", "run"]
# Keep this as an immutable type since this
# is used all over the place and we don't want
# the contents to change
INTERNAL_INTERFACES = (
"wg", "lo", "tun", "tap", "docker", "veth", "vnet", "macvtap", "ix", "tailscale",
)
def bitmask_to_set(n, enumeration):
    """Expand integer bitmask *n* into the set of *enumeration* members whose bits are set."""
    members = set()
    for member in enumeration:
        if n & member.value:
            members.add(member)
    return members
def run(*args, **kwargs):
    """subprocess.run wrapper: check=True, captured utf-8 stdout/stderr by default.

    Any of the defaults can be overridden by passing the keyword explicitly.
    """
    defaults = {
        "check": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE,
        "encoding": "utf-8",
        "errors": "ignore",
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return subprocess.run(*args, **kwargs)
| 688 | Python | .py | 17 | 37.176471 | 85 | 0.688253 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,651 | routing.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/routing.py | # -*- coding=utf-8 -*-
import collections
import enum
import ipaddress
import logging
import os
import socket
import bidict
from pyroute2 import IPRoute
from pyroute2.netlink.exceptions import NetlinkDumpInterrupted
from .address.ipv6 import ipv6_netmask_to_prefixlen
from .address.types import AddressFamily
logger = logging.getLogger(__name__)
__all__ = ["Route", "RouteFlags", "RoutingTable", "RouteTable", "IPRoute", "RuleTable"]
DEFAULT_TABLE_ID = 254 # This is the default table named as "main" and most of what we do happens here
ip = IPRoute()
class Route:
    """One routing table entry.

    network/netmask/gateway are stored as `ipaddress` objects; gateway and
    interface may be None.  Equality and hashing consider only the
    (network, netmask, gateway) triple.
    """

    def __init__(
        self, network, netmask, gateway=None, interface=None, flags=None,
        table_id=None, preferred_source=None, scope=None,
    ):
        self.network = ipaddress.ip_address(network)
        self.netmask = ipaddress.ip_address(netmask)
        self.gateway = ipaddress.ip_address(gateway) if gateway else None
        self.interface = interface or None
        self.flags = flags or set()
        self.table_id = table_id
        self.scope = scope
        self.preferred_source = preferred_source

    def asdict(self):
        """JSON-friendly representation of this route."""
        gateway = str(self.gateway) if self.gateway else None
        return {
            'network': str(self.network),
            'netmask': str(self.netmask),
            'gateway': gateway,
            'interface': self.interface,
            'flags': [flag.name for flag in self.flags],
            'table_id': self.table_id,
            'scope': self.scope,
            'preferred_source': self.preferred_source,
        }

    @property
    def af(self):
        """Address family of this route (AddressFamily.INET/INET6), or None."""
        if self.network.version == 4:
            return AddressFamily.INET
        if self.network.version == 6:
            return AddressFamily.INET6

        return None

    def __eq__(self, other):
        if not isinstance(other, Route):
            return False

        left = (self.network, self.netmask, self.gateway)
        right = (other.network, other.netmask, other.gateway)
        return left == right

    def __hash__(self):
        return hash((self.network, self.netmask, self.gateway))
class RouteTable:
    """One named routing table from /etc/iproute2/rt_tables.

    NOTE(review): defining __eq__ without __hash__ makes instances unhashable,
    and __eq__ assumes *other* has a table_id attribute — confirm callers never
    compare against arbitrary objects or use instances as dict keys.
    """

    def __init__(self, table_id, table_name):
        self.table_id = table_id
        self.table_name = table_name

    def create(self):
        """Append this table's id/name mapping to /etc/iproute2/rt_tables."""
        with open("/etc/iproute2/rt_tables", "a+") as f:
            f.write(f'{self.table_id} {self.table_name}\n')

    @property
    def exists(self):
        """True if a table with this name is declared in rt_tables."""
        return self.table_name in RoutingTable().routing_tables

    @property
    def is_reserved(self):
        # 255/254/253/0 are the kernel's local/main/default/unspec tables
        return self.table_id in (255, 254, 253, 0)

    @property
    def routes(self):
        """Routes currently installed in this table."""
        return RoutingTable().routes_internal(self.table_id)

    def flush_routes(self):
        """Remove every route from this table."""
        ip.flush_routes(table=self.table_id)

    def flush_rules(self):
        """Remove every policy rule pointing at this table."""
        ip.flush_rules(table=self.table_id)

    def __eq__(self, other):
        return self.table_id == other.table_id

    def asdict(self):
        """JSON-friendly representation including the table's routes."""
        return {
            "id": self.table_id,
            "name": self.table_name,
            "routes": [r.asdict() for r in self.routes],
        }
class RouteFlags(enum.IntEnum):
    """Route flags (RTF_*); commented-out members are FreeBSD flags with no
    Linux equivalent, kept for reference."""
    # include/uapi/linux/route.h
    UP = 0x0001
    GATEWAY = 0x0002
    HOST = 0x0004
    REJECT = 0x0200
    DYNAMIC = 0x0010
    MODIFIED = 0x0020
    # DONE = defs.RTF_DONE
    # XRESOLVE = defs.RTF_XRESOLVE
    # LLINFO = defs.RTF_LLINFO
    # LLDATA = defs.RTF_LLDATA
    STATIC = 0x8000  # no-op
    # BLACKHOLE = defs.RTF_BLACKHOLE
    # PROTO1 = defs.RTF_PROTO1
    # PROTO2 = defs.RTF_PROTO2
    # PROTO3 = defs.RTF_PROTO3
    # PINNED = defs.RTF_PINNED
    # LOCAL = defs.RTF_LOCAL
    # BROADCAST = defs.RTF_BROADCAST
    # MULTICAST = defs.RTF_MULTICAST
    # STICKY = defs.RTF_STICKY
RTM_F_CLONED = 0x200
class RoutingTable:
    """Read and mutate kernel routing tables via rtnetlink."""

    @property
    def routes(self):
        """All routes from all tables."""
        return self.routes_internal()

    def routes_internal(self, table_filter=None):
        """Return the kernel's routes as `Route` objects.

        `table_filter` optionally restricts the dump to one table id.
        Kernel-cloned (cache) routes are skipped.
        """
        interfaces = self._interfaces()

        result = []
        for r in ip.get_routes(table=table_filter):
            if r["flags"] & RTM_F_CLONED:
                # kernel-generated cache entry, not a configured route
                continue

            attrs = dict(r["attrs"])
            if "RTA_DST" in attrs:
                network = ipaddress.ip_address(attrs["RTA_DST"])
                netmask = ipaddress.ip_network(f"{attrs['RTA_DST']}/{r['dst_len']}").netmask
            else:
                # no RTA_DST means the all-zeros destination (default route)
                network, netmask = {
                    socket.AF_INET: (ipaddress.IPv4Address(0), ipaddress.IPv4Address(0)),
                    socket.AF_INET6: (ipaddress.IPv6Address(0), ipaddress.IPv6Address(0)),
                }[r["family"]]

            result.append(Route(
                network,
                netmask,
                ipaddress.ip_address(attrs["RTA_GATEWAY"]) if "RTA_GATEWAY" in attrs else None,
                interfaces[attrs["RTA_OIF"]] if "RTA_OIF" in attrs and attrs["RTA_OIF"] in interfaces else None,
                table_id=attrs["RTA_TABLE"],
                preferred_source=attrs.get("RTA_PREFSRC"),
                scope=r["scope"],
            ))

        return result

    @property
    def routing_tables(self):
        """Parse /etc/iproute2/rt_tables into {name: RouteTable}; {} if absent."""
        if not os.path.exists("/etc/iproute2/rt_tables"):
            return {}

        with open("/etc/iproute2/rt_tables", "r") as f:
            return {
                t["name"]: RouteTable(t["id"], t["name"])
                for t in map(lambda v: {"id": int(v.split()[0].strip()), "name": v.split()[1].strip()}, filter(
                    lambda v: v.strip() and not v.startswith("#") and v.split()[0].strip().isdigit(),
                    f.readlines()
                ))
            }

    @property
    def default_route_ipv4(self):
        """The IPv4 default route from the main table, or None."""
        f = list(filter(lambda r: int(r.network) == 0 and int(r.netmask) == 0 and r.af == AddressFamily.INET,
                        self.routes_internal(DEFAULT_TABLE_ID)))
        return f[0] if len(f) > 0 else None

    @property
    def default_route_ipv6(self):
        """The IPv6 default route from the main table, or None."""
        f = list(filter(lambda r: int(r.network) == 0 and int(r.netmask) == 0 and r.af == AddressFamily.INET6,
                        self.routes_internal(DEFAULT_TABLE_ID)))
        return f[0] if len(f) > 0 else None

    def add(self, route):
        self._op("add", route)

    def change(self, route):
        self._op("set", route)

    def delete(self, route):
        self._op("delete", route)

    def _interfaces(self):
        """Bidirectional map of kernel ifindex <-> interface name."""
        return bidict.bidict({i["index"]: dict(i["attrs"]).get("IFLA_IFNAME") for i in self._ip_links()})

    def _ip_links(self):
        """ip.get_links() with retries: the dump is re-requested when the kernel
        reports it was interrupted by concurrent changes (NLM_F_DUMP_INTR)."""
        retries = 5
        while True:
            try:
                return ip.get_links()
            except NetlinkDumpInterrupted:
                retries -= 1
                if retries <= 0:
                    raise

    def _op(self, op, route):
        """Perform rtnetlink operation *op* ("add"/"set"/"delete") for *route*."""
        if route.netmask.version == 4:
            prefixlen = ipaddress.ip_network(f"{route.network}/{route.netmask}").prefixlen
        elif route.netmask.version == 6:
            prefixlen = ipv6_netmask_to_prefixlen(str(route.netmask))
        else:
            raise RuntimeError()

        kwargs = dict(dst=f"{route.network}/{prefixlen}", gateway=str(route.gateway) if route.gateway else None)
        # Optional arguments, only passed when set on the route.
        # (Previously built through an opaque map/filter/lambda pipeline with
        # identical semantics.)
        if route.interface is not None:
            kwargs["oif"] = self._interfaces().inv[route.interface]
        if route.table_id:
            kwargs["table"] = route.table_id
        if route.scope:
            kwargs["scope"] = route.scope
        if route.preferred_source:
            kwargs["prefsrc"] = route.preferred_source

        ip.route(op, **kwargs)
class RuleTable:
    """Inspect and manage policy routing rules (ip rule)."""

    @property
    def rules(self):
        """Rules whose table is declared in /etc/iproute2/rt_tables.

        Each entry is {'table': RouteTable, 'priority': int, 'source_addr': str|None}.
        """
        known_tables = {
            table.table_id: table
            for table in RoutingTable().routing_tables.values()
        }
        result = []
        for raw_rule in ip.get_rules():
            if not raw_rule.get('attrs'):
                continue
            attrs = dict(raw_rule['attrs'])
            if 'FRA_TABLE' not in attrs or 'FRA_PRIORITY' not in attrs:
                continue
            if attrs['FRA_TABLE'] not in known_tables:
                continue
            result.append({
                'table': known_tables[attrs['FRA_TABLE']],
                'priority': attrs['FRA_PRIORITY'],
                'source_addr': attrs.get('FRA_SRC'),
            })
        return result

    def add_rule(self, table_id, priority, source_addr=None):
        """Install a rule at *priority* pointing at *table_id*, optionally
        matching packets from *source_addr*."""
        rule_args = {'table': table_id, 'priority': priority}
        if source_addr:
            rule_args['src'] = source_addr
        ip.rule('add', **rule_args)

    def delete_rule(self, priority):
        """Remove the rule installed at *priority*."""
        ip.rule('delete', priority=priority)

    def rule_exists(self, priority):
        """True if any known rule uses *priority*."""
        for rule in self.rules:
            if rule['priority'] == priority:
                return True
        return False
24,652 | netif.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/netif.py | import logging
from pyroute2 import IPRoute, NetlinkDumpInterrupted
from .bridge import create_bridge
from .interface import Interface, CLONED_PREFIXES
from .lagg import AggregationProtocol, create_lagg
from .utils import run
from .vlan import create_vlan
logger = logging.getLogger(__name__)
__all__ = ["AggregationProtocol", "create_vlan", "create_interface", "destroy_interface", "get_interface",
"list_interfaces", "CLONED_PREFIXES"]
def create_interface(name):
    """Create a cloned interface by name prefix ("br..." or "bond...").

    Returns the name on success; raises ValueError for any other prefix.
    """
    if name.startswith("br"):
        create_bridge(name)
    elif name.startswith("bond"):
        create_lagg(name)
    else:
        raise ValueError(f"Invalid interface name: {name!r}")
    return name
def destroy_interface(name):
    """Delete a cloned interface (bond/bridge/vlan); other interfaces are only downed."""
    if name.startswith(("bond", "br", "vlan")):
        run(["ip", "link", "delete", name])
        return
    run(["ip", "link", "set", name, "down"])
def get_interface(name, safe_retrieval=False):
    """Return the `Interface` named *name*.

    With safe_retrieval=True a missing name yields None instead of KeyError.
    """
    interfaces = list_interfaces()
    if safe_retrieval:
        return interfaces.get(name)
    return interfaces[name]
def list_interfaces():
    """Return ``{ifname: Interface}`` for every link known to the kernel.

    When the kernel structure being dumped changes mid-dump, the dump is
    flagged inconsistent (NLM_F_DUMP_INTR, see nl_dump_check_consistent() in
    include/net/netlink.h) and must simply be re-requested; pyroute2 surfaces
    that as NetlinkDumpInterrupted, so we retry up to 3 times before raising.
    """
    attempts_left = 3
    while True:
        try:
            with IPRoute() as ipr:
                return {link.get_attr('IFLA_IFNAME'): Interface(link) for link in ipr.get_links()}
        except NetlinkDumpInterrupted:
            attempts_left -= 1
            if attempts_left == 0:
                raise
| 1,993 | Python | .py | 45 | 35.822222 | 106 | 0.644961 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,653 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/__init__.py | from .address import * # noqa
from .bits import * # noqa
from .interface import * # noqa
from .netif import * # noqa
from .routing import * # noqa
from .utils import * # noqa
| 181 | Python | .py | 6 | 29.166667 | 32 | 0.691429 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,654 | ethernet_settings.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/ethernet_settings.py | from logging import getLogger
from pyroute2.ethtool import Ethtool
from pyroute2.ethtool.ioctl import NotSupportedError, NoSuchDevice
logger = getLogger(__name__)
class EthernetHardwareSettings:
    """Read/modify ethtool-level settings (feature flags and media info) for a
    single network interface. Usable as a context manager; `close()` releases
    the underlying Ethtool handle."""
    def __init__(self, interface):
        # interface: name of the NIC (e.g. "eth0"); capabilities and media
        # info are snapshotted once at construction time
        self._name = interface
        self._eth = Ethtool()
        self._caps = self.__capabilities__()
        self._media = self.__mediainfo__()
    def __capabilities__(self):
        """Snapshot the interface's changeable feature flags into
        {'enabled': [...], 'disabled': [...], 'supported': [...]}.
        Returns empty lists on any failure (error is logged)."""
        result = {'enabled': [], 'disabled': [], 'supported': []}
        try:
            for i in self._eth.get_features(self._name):
                for name, feature in i.items():
                    if not name.strip() or not feature.available:
                        # testing shows that there are features
                        # without a name so make sure we ignore
                        # those as well as ignore the feature if
                        # it's not "available" to be changed
                        continue
                    if feature.enable:
                        result['enabled'].append(name)
                    else:
                        result['disabled'].append(name)
                    result['supported'].append(name)
        except Exception:
            logger.error('Failed to get capabilities for %s', self._name, exc_info=True)
        return result
    def __set_features__(self, action, capabilities):
        """Enable or disable the given feature names (action: 'enable'|'disable').
        Only features whose current state differs from the requested one are
        sent to the kernel; unknown feature names are logged and skipped."""
        features = []
        for cap in capabilities:
            if action == 'enable' and cap in self.disabled_capabilities:
                # means the feature(s) being requested to be enabled is currently disabled
                features.append(cap)
            elif action == 'disable' and cap in self.enabled_capabilities:
                # means the feature(s) being requested to be disabled is currently enabled
                features.append(cap)
        if features:
            changed_features = self._eth.get_features(self._name)
            set_features = False
            for feature in features:
                try:
                    changed_features.features[feature].enable = True if action == 'enable' else False
                    set_features = True
                except KeyError:
                    logger.error('Feature "%s" not found on interface "%s"', feature, self._name)
                    continue
            if set_features:
                # actually send the request to the kernel to enable/disable the feature(s)
                self._eth.set_features(self._name, changed_features)
    @property
    def enabled_capabilities(self):
        # feature names currently enabled (from the construction-time snapshot)
        return self._caps['enabled']
    @enabled_capabilities.setter
    def enabled_capabilities(self, capabilities):
        self.__set_features__('enable', capabilities)
    @property
    def disabled_capabilities(self):
        # feature names currently disabled (from the construction-time snapshot)
        return self._caps['disabled']
    @disabled_capabilities.setter
    def disabled_capabilities(self, capabilities):
        self.__set_features__('disable', capabilities)
    @property
    def supported_capabilities(self):
        # all changeable feature names, regardless of state
        return self._caps['supported']
    def __mediainfo__(self):
        """Query link-mode/media information via ethtool; returns a dict with
        empty-string/empty-list values when the driver can't report it."""
        result = {
            'media_type': '',
            'media_subtype': '',
            'active_media_type': '',
            'active_media_subtype': '',
            'supported_media': [],
        }
        # We use the undocumented `with_netlink=False` kwargs because
        # it was noticed that we were getting log spam on machines in
        # the lab.
        # Log message looks like:
        # "pyroute2.ethtool.ethtool.from_netlink():224 - Bit name is not the same as the target: FEC_NONE <> None"
        # This looks like a bug upstream but the keyword arg works around
        # the problem.
        try:
            attrs = self._eth.get_link_mode(self._name, with_netlink=False)
            mst = 'Unknown'
            if attrs.speed is not None:
                # looks like 1000Mb/s, 10000Mb/s, etc
                mst = f'{attrs.speed}Mb/s'
            # looks like ("Unknown Twisted Pair" OR "1000Mb/s Twisted Pair" etc
            mst = f'{mst} {self._eth.get_link_info(self._name, with_netlink=False).port}'
            # fill out the results
            result['media_type'] = 'Ethernet'
            result['media_subtype'] = 'autoselect' if attrs.autoneg else mst
            result['active_media_type'] = 'Ethernet'
            result['active_media_subtype'] = mst  # just matches media_subtype...gross
            result['supported_media'].extend(attrs.supported_modes)
        except (NotSupportedError, NoSuchDevice):
            # NotSupportedError:
            # ----saw this on a VM running inside xen where the
            # ----nic driver being used doesnt report any type
            # ----of media info (ethtool binary didnt report anything either)
            # ----so ignore these errors
            # NoSuchDevice:
            # ----udevd will rename interfaces from "old" names (eth0) to new names (enp5s0)
            # ----[2.283069] r8169 0000:05:00.0 enp5s0: renamed from eth0 (this is from dmesg)
            # ----For whatever, reason, ethtool will barf and say "No Such Device" even though
            # ----it clearly exists. By the time this method is called, the device exists but
            # ----we're failing because of a driver problem and/or because the device has been
            # ----renamed. Instead of spamming logs, just pass and return empty information
            # ----The situation I saw on real hardware is a Realtek card using the 2.5Gbps driver
            # ----[4205.447000] RTL8226 2.5Gbps PHY r8169-500:00: attached PHY driver
            # ----[RTL8226 2.5Gbps PHY] (mii_bus:phy_addr=r8169-500:00, irq=IGNORE)
            pass
        except Exception:
            logger.error('Failed to get media info for %s', self._name, exc_info=True)
        return result
    @property
    def media_type(self):
        return self._media['media_type']
    @property
    def media_subtype(self):
        return self._media['media_subtype']
    @property
    def active_media_type(self):
        return self._media['active_media_type']
    @property
    def active_media_subtype(self):
        return self._media['active_media_subtype']
    @property
    def supported_media(self):
        return self._media['supported_media']
    def close(self):
        # release the ethtool handle; instance must not be used afterwards
        self._eth.close()
    def __enter__(self):
        return self
    def __exit__(self, typ, value, traceback):
        self.close()
| 6,456 | Python | .py | 136 | 35.941176 | 114 | 0.591285 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,655 | vrrp.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/vrrp.py | # -*- coding=utf-8 -*-
__all__ = ['VrrpMixin']
class VrrpMixin:
    """Give SCALE interfaces a ``vrrp_config`` property so the interface API
    matches the freeBSD/CARP shape as closely as possible."""
    @property
    def vrrp_config(self):
        # Interface doesn't initialize this mixin on inheritance, so the
        # backing attribute may not exist yet; report None until a value has
        # been explicitly assigned.
        return getattr(self, 'data', None)
    @vrrp_config.setter
    def vrrp_config(self, data):
        # Keepalived (the VRRP daemon) on SCALE adds/deletes the VIP on the
        # interface itself depending on MASTER/BACKUP state, so we simply
        # record whatever the interface/configure.py plugin hands us.
        self.data = data
| 946 | Python | .py | 24 | 30.958333 | 69 | 0.643716 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,656 | mixin.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/address/mixin.py | import ipaddress
import time
from pyroute2 import IPRoute
from pyroute2.netlink.exceptions import NetlinkDumpInterrupted
from middlewared.plugins.interface.netif_linux.utils import run
from .ipv6 import ipv6_netmask_to_prefixlen
from .types import AddressFamily, InterfaceAddress, LinkAddress
__all__ = ["AddressMixin"]
class AddressMixin:
    """Mixin giving an interface object (which provides ``self.name``) the
    ability to add/remove/list IP and link-layer addresses via ``ip(8)`` and
    netlink. Address arguments are expected to be InterfaceAddress objects."""
    def add_address(self, address):
        """Add *address* to this interface."""
        self._address_op("add", address)
    def flush(self):
        # Remove all configured ip addresses
        run(['ip', 'addr', 'flush', 'dev', self.name, 'scope', 'global'])
    def remove_address(self, address):
        """Remove *address* from this interface."""
        self._address_op("del", address)
    def replace_address(self, address):
        """Replace (add-or-update) *address* on this interface."""
        self._address_op("replace", address)
    def _address_op(self, op, address):
        """Run ``ip addr <op>`` for *address*; link-layer addresses are a no-op."""
        if isinstance(address.address, LinkAddress):
            return
        netmask = str(address.netmask)
        if isinstance(address.address, ipaddress.IPv6Address):
            # ip(8) wants a prefix length for IPv6, not a dotted/hex mask
            netmask = ipv6_netmask_to_prefixlen(netmask)
        cmd = ["ip", "addr", op, f"{address.address}/{netmask}"]
        if op == 'add':
            # make sure we tell linux to assign proper broadcast address
            # when adding an IPv4 address to an interface
            # (doesn't apply to IPv6)
            cmd.extend(["brd", "+"]) if ':' not in f'{address.address}' else None
        cmd.extend(["dev", self.name])
        run(cmd)
    def _get_addresses(self):
        """Collect INET/INET6/LINK addresses for this interface via netlink."""
        addresses = []
        with IPRoute(strict_check=True) as ipr:
            # strict_check forces kernel to do the filtering increasing performance
            if index := (ipr.link_lookup(ifname=self.name) or [None])[0]:
                # The kernel doesn't return IFA_LABEL for IPv6 addresses so we have
                # to lookup the index for a given interface
                for addr in ipr.addr('dump', index=index):
                    if addr['family'] == AddressFamily.INET.value:
                        addresses.append(InterfaceAddress(
                            AddressFamily.INET,
                            ipaddress.IPv4Interface(f'{addr.get_attr("IFA_ADDRESS")}/{addr["prefixlen"]}'),
                        ))
                    elif addr['family'] == AddressFamily.INET6.value:
                        addresses.append(InterfaceAddress(
                            AddressFamily.INET6,
                            ipaddress.IPv6Interface(f'{addr.get_attr("IFA_ADDRESS")}/{addr["prefixlen"]}'),
                        ))
                for mac in ipr.link('dump', index=index):
                    if mac_addr := mac.get_attr('IFLA_ADDRESS'):
                        addresses.append(InterfaceAddress(AddressFamily.LINK, LinkAddress(self.name, mac_addr)))
        return addresses
    @property
    def addresses(self):
        """All addresses on this interface, retrying interrupted netlink dumps."""
        retries = 5
        while True:
            try:
                return self._get_addresses()
            except NetlinkDumpInterrupted:
                # low-grade hardware can produce this which
                # isn't necessarily fatal and the request
                # should be retried
                retries -= 1
                if retries == 0:
                    raise
                time.sleep(0.2)
| 3,209 | Python | .py | 68 | 34.529412 | 112 | 0.581626 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,657 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/address/__init__.py | from .mixin import * # noqa
from .types import * # noqa
| 58 | Python | .py | 2 | 28 | 28 | 0.678571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,658 | ipv6.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/address/ipv6.py | # -*- coding=utf-8 -*-
import ipaddress
import logging
logger = logging.getLogger(__name__)
__all__ = ["ipv6_netmask_to_prefixlen"]
def ipv6_netmask_to_prefixlen(netmask):
    """Convert an IPv6 netmask string (e.g. ``"ffff:ffff::"``) to its prefix length.

    Raises:
        ValueError: if *netmask* is not a valid IPv6 address string or is not
            a contiguous run of leading one-bits.
    """
    # Use the public IPv6Address constructor instead of the private
    # _ip_int_from_string helper; both raise ValueError on malformed input.
    value = int(ipaddress.IPv6Address(netmask))
    # Zero-pad to the full 128 bits: bin() drops leading zero bits, which
    # would make a non-contiguous mask such as "0:ffff::" or "::1" appear
    # to be a valid run of ones.
    bits = format(value, '0128b').rstrip("0")
    if "0" in bits:
        # also fixes the original logging-style ValueError("... %r", netmask)
        # whose message was never interpolated
        raise ValueError(f"Invalid IPv6 netmask {netmask!r}")
    return len(bits)
| 382 | Python | .py | 10 | 34.7 | 82 | 0.686649 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,659 | types.py | truenas_middleware/src/middlewared/middlewared/plugins/interface/netif_linux/address/types.py | # -*- coding=utf-8 -*-
import enum
import ipaddress
import logging
logger = logging.getLogger(__name__)
__all__ = ['AddressFamily', 'LinkAddress', 'InterfaceAddress']
class AddressFamily(enum.IntEnum):
    """Address-family constants used to tag interface addresses.

    Only UNIX/INET/INET6/LINK are active; the commented-out entries reference
    a ``defs`` module that is not imported here — presumably carried over from
    a BSD implementation this enum originally mirrored (verify before reviving
    any of them).
    """
    UNIX = 1
    INET = 2
    # IMPLINK = defs.AF_IMPLINK
    # PUP = defs.AF_PUP
    # CHAOS = defs.AF_CHAOS
    # NETBIOS = defs.AF_NETBIOS
    # ISO = defs.AF_ISO
    # OSI = defs.AF_OSI
    # ECMA = defs.AF_ECMA
    # DATAKIT = defs.AF_DATAKIT
    # CCITT = defs.AF_CCITT
    # SNA = defs.AF_SNA
    # DECnet = defs.AF_DECnet
    # DLI = defs.AF_DLI
    # LAT = defs.AF_LAT
    # HYLINK = defs.AF_HYLINK
    # APPLETALK = defs.AF_APPLETALK
    # ROUTE = defs.AF_ROUTE
    LINK = 17  # NOTE(review): 17 is AF_PACKET on Linux — confirm this mapping is intended
    # COIP = defs.AF_COIP
    # CNT = defs.AF_CNT
    # IPX = defs.AF_IPX
    # SIP = defs.AF_SIP
    # ISDN = defs.AF_ISDN
    # E164 = defs.AF_E164
    INET6 = 10
    # NATM = defs.AF_NATM
    # ATM = defs.AF_ATM
    # NETGRAPH = defs.AF_NETGRAPH
    # SLOW = defs.AF_SLOW
    # SCLUSTER = defs.AF_SCLUSTER
    # ARP = defs.AF_ARP
    # BLUETOOTH = defs.AF_BLUETOOTH
    # IEEE80211 = defs.AF_IEEE80211
    # INET_SDP = defs.AF_INET_SDP
    # INET6_SDP = defs.AF_INET6_SDP
class LinkAddress(object):
    """Link-layer (MAC) address bound to an interface name."""
    def __init__(self, ifname=None, address=None):
        self.ifname = ifname
        self.address = address
    def __str__(self):
        return self.address
    def __repr__(self):
        return f'{type(self).__name__}(ifname={self.ifname!r}, address={self.address!r})'
    def asdict(self):
        """Return a JSON-friendly dict representation."""
        return {
            'ifname': self.ifname,
            'address': self.address
        }
    def __hash__(self):
        return hash((self.ifname, self.address))
    def __eq__(self, other):
        # Robustness fix: comparing against a non-LinkAddress used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # its default (unequal) handling instead.
        if not isinstance(other, LinkAddress):
            return NotImplemented
        return self.ifname == other.ifname and self.address == other.address
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class InterfaceAddress(object):
    """A single address bound to an interface: either a link-layer
    LinkAddress or an IPv4/IPv6 address with its netmask/broadcast, plus
    optional traffic counters filled in externally."""
    def __init__(self, af=None, address=None):
        self.af = af
        if isinstance(address, (ipaddress.IPv4Interface, ipaddress.IPv6Interface)):
            self.address = address.ip
            self.netmask = address.netmask
            self.broadcast = address.network.broadcast_address
        else:
            # LinkAddress (or None) — no layer-3 attributes apply
            self.address = address
            self.netmask = None
            self.broadcast = None
        self.dest_address = None
        self.scope = None
        self.ipv6_flags = None
        self.vhid = None
        # traffic counters; remain None until stats are gathered
        self.received_packets = None
        self.received_errors = None
        self.received_dropped_packets = None
        self.received_bytes = None
        self.sent_packets = None
        self.sent_errors = None
        self.sent_bytes = None
        self.collisions = None
        self.sent_dropped_packets = None
    def __str__(self):
        return f'{self.address}/{self.netmask}'
    def __hash__(self):
        return hash((self.af, self.address, self.netmask, self.broadcast, self.dest_address))
    def asdict(self, stats=False):
        """Return a JSON-friendly dict; include traffic counters when *stats* is True."""
        if type(self.address) is LinkAddress:
            address = self.address.address
        else:
            address = str(self.address)
        ret = {
            'type': self.af.name,
            'address': address
        }
        if stats:
            ret['stats'] = {
                counter: getattr(self, counter)
                for counter in (
                    'received_packets', 'received_errors', 'received_dropped_packets',
                    'received_bytes', 'sent_packets', 'sent_errors', 'sent_bytes',
                    'collisions', 'sent_dropped_packets',
                )
            }
        if self.netmask:
            # express the netmask as a prefix length (count of set bits)
            ret['netmask'] = format(int(self.netmask), 'b').count('1')
        if self.broadcast:
            ret['broadcast'] = str(self.broadcast)
        return ret
    def __eq__(self, other):
        return (
            (self.af, self.address, self.netmask, self.broadcast, self.dest_address, self.vhid) ==
            (other.af, other.address, other.netmask, other.broadcast, other.dest_address, other.vhid)
        )
    def __ne__(self, other):
        return not self == other
| 4,121 | Python | .py | 116 | 27.137931 | 119 | 0.584171 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,660 | download.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/download.py | # -*- coding=utf-8 -*-
import errno
import itertools
import os
import subprocess
import time
import requests
import requests.exceptions
from middlewared.service import CallError, private, Service
from middlewared.utils.size import format_size
from .utils import DOWNLOAD_UPDATE_FILE, scale_update_server
class UpdateService(Service):
    @private
    def download_impl_scale(self, job, train, location, progress_proportion):
        """Download the SCALE update file for *train* into *location*.

        Returns True when an update file is in place (either freshly
        downloaded or an already-valid cached copy), False when the train
        reports no update available. Progress is reported on *job*, scaled by
        *progress_proportion* so this can run as one phase of a larger job.
        """
        job.set_progress(0, "Retrieving update manifest")
        train_check = self.middleware.call_sync("update.check_train", train)
        if train_check["status"] == "AVAILABLE":
            dst = os.path.join(location, DOWNLOAD_UPDATE_FILE)
            if os.path.exists(dst):
                # Reuse a previously downloaded file if its checksum still
                # matches the manifest; otherwise discard and re-download.
                job.set_progress(0, "Verifying existing update")
                checksum = subprocess.run(
                    ["sha256sum", dst], stdout=subprocess.PIPE, encoding="utf-8"
                ).stdout.split()[0]
                if checksum == train_check["checksum"]:
                    return True
                self.middleware.logger.warning("Invalid update file checksum %r, re-downloading", checksum)
                os.unlink(dst)
            st = os.statvfs(location)
            avail = st.f_bavail * st.f_frsize
            # make sure we have at least as the filesize plus 500MiB
            required_size = train_check["filesize"] + 500 * 1024**2
            if required_size > avail:
                raise CallError(
                    f"{location}: insufficient available space: {format_size(avail)}, "
                    f"required: {format_size(required_size)}", errno.ENOSPC
                )
            for i in itertools.count(1):
                with open(dst, "ab") as f:
                    download_start = time.monotonic()
                    progress = None
                    try:
                        # Resume from whatever was already written: "ab" mode
                        # plus an HTTP Range header, so retries don't restart.
                        start = os.path.getsize(dst)
                        with requests.get(
                            f"{scale_update_server()}/{train}/{train_check['filename']}",
                            stream=True,
                            timeout=30,
                            headers={"Range": f"bytes={start}-"}
                        ) as r:
                            r.raise_for_status()
                            total = start + int(r.headers["Content-Length"])
                            for chunk in r.iter_content(chunk_size=8 * 1024 * 1024):
                                progress = f.tell()
                                job.set_progress(
                                    progress / total * progress_proportion,
                                    f'Downloading update: {format_size(total)} at '
                                    f'{format_size(progress / (time.monotonic() - download_start))}/s'
                                )
                                f.write(chunk)
                            break
                    except Exception as e:
                        # Retry (up to 5 attempts) only when some progress was
                        # made and the failure looks like a transient network
                        # error; anything else propagates immediately.
                        if i < 5 and progress and any(ee in str(e) for ee in ("ECONNRESET", "ETIMEDOUT")):
                            self.middleware.logger.warning("Recoverable update download error: %r", e)
                            time.sleep(2)
                            continue
                        raise
            size = os.path.getsize(dst)
            if size != total:
                os.unlink(dst)
                raise CallError(f'Downloaded update file mismatch ({size} != {total})')
            return True
        return False
| 3,500 | Python | .py | 71 | 31.56338 | 107 | 0.50249 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,661 | upload_location_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/upload_location_linux.py | # -*- coding=utf-8 -*-
import contextlib
import os
import shutil
import subprocess
from middlewared.service import private, Service
from .utils import UPLOAD_LOCATION
from .utils_linux import run_kw
class UpdateService(Service):
    @private
    def get_upload_location(self):
        """Return the directory used for manually uploaded update images."""
        return UPLOAD_LOCATION
    @private
    def create_upload_location(self):
        """Create the upload directory, back it with a tmpfs, and empty it.

        Any pre-existing contents are removed best-effort; ownership is handed
        to www-data so the web server can write the uploaded image.
        """
        os.makedirs(UPLOAD_LOCATION, exist_ok=True)
        if not os.path.ismount(UPLOAD_LOCATION):
            subprocess.run(["mount", "-o", "size=2800M", "-t", "tmpfs", "none", UPLOAD_LOCATION], **run_kw)
        for entry in os.listdir(UPLOAD_LOCATION):
            full_path = os.path.join(UPLOAD_LOCATION, entry)
            with contextlib.suppress(Exception):
                if os.path.isdir(full_path):
                    shutil.rmtree(full_path, ignore_errors=True)
                else:
                    os.unlink(full_path)
        shutil.chown(UPLOAD_LOCATION, "www-data", "www-data")
        os.chmod(UPLOAD_LOCATION, 0o755)
        return UPLOAD_LOCATION
    @private
    def destroy_upload_location(self):
        """Unmount the upload tmpfs if it is currently mounted."""
        if os.path.ismount(UPLOAD_LOCATION):
            subprocess.run(["umount", UPLOAD_LOCATION], **run_kw)
| 1,184 | Python | .py | 31 | 30.225806 | 107 | 0.645415 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,662 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/utils.py | # -*- coding=utf-8 -*-
import configparser
import itertools
import os
import re
from middlewared.utils import MIDDLEWARE_RUN_DIR
DEFAULT_SCALE_UPDATE_SERVER = "https://update.ixsystems.com/scale"
SCALE_MANIFEST_FILE = "/data/manifest.json"
DOWNLOAD_UPDATE_FILE = "update.sqsh"
UPLOAD_LOCATION = os.path.join(MIDDLEWARE_RUN_DIR, "upload_image")
SEP = re.compile(r"[-.]")
def can_update(old_version, new_version):
    """Return True when *new_version* is an upgrade relative to *old_version*.

    Versions are compared token-by-token after splitting on "." and "-";
    when one version has fewer tokens, the missing ones compare as ''.
    """
    old_tokens = re.split(r"[-.]", old_version)
    new_tokens = re.split(r"[-.]", new_version)
    for old, new in itertools.zip_longest(old_tokens, new_tokens, fillvalue=''):
        # "U<digits>" tokens (e.g. U1, U2) compare by their numeric part
        if old.startswith('U') and old[1:].isdigit():
            old = old[1:]
        if new.startswith('U') and new[1:].isdigit():
            new = new[1:]
        # CUSTOM builds: never upgrade away from one, always upgrade onto one
        if old == 'CUSTOM' and new != 'CUSTOM':
            return False
        if old != 'CUSTOM' and new == 'CUSTOM':
            return True
        # a non-numeric token counts as older than a numeric (or absent) one
        if not old.isdigit() and (new.isdigit() or new == ''):
            return True
        if (old.isdigit() or old == '') and not new.isdigit():
            return False
        # among non-numeric tokens, MASTER sorts above the rest
        if old == 'MASTER' and new != 'MASTER':
            return False
        if old != 'MASTER' and new == 'MASTER':
            return True
        # any transition into or out of INTERNAL counts as updatable
        if (old == 'INTERNAL') != (new == 'INTERNAL'):
            return True
        if old.isdigit() and new.isdigit():
            if int(old) < int(new):
                return True
            if int(old) > int(new):
                return False
    return False
def scale_update_server():
    """Return the update server URL from /data/update.conf, or the default.

    A missing config file (or a file without [Defaults]/url) silently falls
    back to DEFAULT_SCALE_UPDATE_SERVER.
    """
    config = configparser.ConfigParser()
    config.read("/data/update.conf")
    return config.get("Defaults", "url", fallback=DEFAULT_SCALE_UPDATE_SERVER)
| 1,596 | Python | .py | 44 | 28.25 | 100 | 0.573099 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,663 | utils_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/utils_linux.py | # -*- coding=utf-8 -*-
import contextlib
import logging
import subprocess
import tempfile
from middlewared.service import CallError
logger = logging.getLogger(__name__)
run_kw = dict(check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", errors="ignore")
@contextlib.contextmanager
def mount_update(path):
    """Loop-mount the squashfs update image at *path* and yield the mountpoint.

    The image is unmounted (and the temporary mountpoint removed) on exit;
    a failed mount is reported as a CallError asking for a re-download.
    """
    with tempfile.TemporaryDirectory() as mountpoint:
        try:
            subprocess.run(["mount", "-t", "squashfs", "-o", "loop", path, mountpoint], **run_kw)
        except subprocess.CalledProcessError as e:
            raise CallError(f"Invalid update image file. Please, re-download update. Error: {e.stdout}")
        try:
            yield mountpoint
        finally:
            subprocess.run(["umount", mountpoint], **run_kw)
| 765 | Python | .py | 19 | 34.578947 | 110 | 0.68691 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,664 | trains.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/trains.py | import json
from aiohttp import ClientResponseError, ClientSession, ClientTimeout
from middlewared.service import CallError, private, Service
from middlewared.utils.network import INTERNET_TIMEOUT
from middlewared.utils.functools_ import cache
from .utils import can_update, scale_update_server, SCALE_MANIFEST_FILE
class UpdateService(Service):
    # aiohttp client options shared by every fetch; the timeout bounds each request
    opts = {'raise_for_status': True, 'trust_env': True, 'timeout': ClientTimeout(INTERNET_TIMEOUT)}
    # the update server URL is resolved once, at class-definition time
    update_srv = scale_update_server()
    @private
    @cache
    def get_manifest_file(self):
        """Return the locally installed release manifest (cached after first read)."""
        # NOTE(review): @cache on an instance method keys on `self` and keeps
        # the instance alive; presumably fine because middleware services are
        # long-lived singletons — confirm.
        with open(SCALE_MANIFEST_FILE) as f:
            return json.load(f)
    @private
    async def fetch(self, url):
        """GET *url* and return the decoded JSON body, wrapping HTTP errors in CallError."""
        async with ClientSession(**self.opts) as client:
            try:
                async with client.get(url) as resp:
                    return await resp.json()
            except ClientResponseError as e:
                raise CallError(f'Error while fetching update manifest: {e}')
    @private
    async def get_scale_update(self, train, current_version):
        """Compare *current_version* against *train*'s remote manifest.

        Returns {"status": "UNAVAILABLE"} when no upgrade applies, otherwise
        an "AVAILABLE" payload describing the version change plus the download
        metadata (filename, filesize, checksum, changelog, release notes URL).
        """
        new_manifest = await self.fetch(f"{self.update_srv}/{train}/manifest.json")
        if not can_update(current_version, new_manifest["version"]):
            return {"status": "UNAVAILABLE"}
        return {
            "status": "AVAILABLE",
            "changes": [{
                "operation": "upgrade",
                "old": {
                    "name": "TrueNAS",
                    "version": current_version,
                },
                "new": {
                    "name": "TrueNAS",
                    "version": new_manifest["version"],
                }
            }],
            "notice": None,
            "notes": None,
            "release_notes_url": await self.middleware.call("system.release_notes_url", new_manifest["version"]),
            "changelog": new_manifest["changelog"],
            "version": new_manifest["version"],
            "filename": new_manifest["filename"],
            "filesize": new_manifest["filesize"],
            "checksum": new_manifest["checksum"],
        }
    @private
    async def get_trains_data(self):
        """Return the remote trains listing merged with the locally configured train."""
        return {
            "current_train": (await self.middleware.call("update.get_manifest_file"))["train"],
            **(await self.fetch(f"{self.update_srv}/trains.json"))
        }
    @private
    async def check_train(self, train):
        """Check whether *train* offers an update over the installed version."""
        old_vers = (await self.middleware.call("update.get_manifest_file"))["version"]
        return await self.middleware.call("update.get_scale_update", train, old_vers)
| 2,540 | Python | .py | 59 | 32.59322 | 113 | 0.59571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,665 | download_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/download_linux.py | from middlewared.service import private, Service
class UpdateService(Service):
    @private
    async def download_impl(self, job, train, location, progress_proportion):
        """Delegate the update download to the SCALE-specific implementation."""
        return await self.middleware.call(
            'update.download_impl_scale', job, train, location, progress_proportion,
        )
| 287 | Python | .py | 5 | 52.8 | 114 | 0.778571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,666 | install_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/install_linux.py | import json
import logging
import os
from middlewared.plugins.config import UPLOADED_DB_PATH
from middlewared.service import CallError, private, Service
from middlewared.utils import sw_info
from .utils import can_update
from .utils_linux import mount_update
logger = logging.getLogger(__name__)
class UpdateService(Service):
    @private
    def install(self, job, path, options):
        """Validate preconditions and install the update image at *path*.

        Raises CallError when an uploaded-but-unapplied config DB exists, when
        the boot pool is mid-resilver, when the image version equals the
        running version, or when installing it would be a downgrade.
        """
        if os.path.exists(UPLOADED_DB_PATH):
            raise CallError(
                "An unapplied uploaded configuration exists. Please, reboot the system to apply this configuration "
                "before running upgrade."
            )
        state = self.middleware.call_sync("boot.get_state")
        if (
            state["scan"] and
            state["scan"]["function"] == "RESILVER" and
            state["scan"]["state"] == "SCANNING"
        ):
            raise CallError(
                "One or more boot pool devices are currently being resilvered. The upgrade cannot continue "
                "until the resilvering operation is finished."
            )
        # This method covers the second half of the overall job's progress
        # range (the first half being the download), hence the 0.5 offset/scale.
        def progress_callback(progress, description):
            job.set_progress((0.5 + 0.5 * progress) * 100, description)
        progress_callback(0, "Reading update file")
        with mount_update(path) as mounted:
            with open(os.path.join(mounted, "manifest.json")) as f:
                manifest = json.load(f)
            old_version = sw_info()['version']
            new_version = manifest["version"]
            if old_version == new_version:
                raise CallError(f'You already are using {new_version}')
            if not can_update(old_version, new_version):
                raise CallError(f'Unable to downgrade from {old_version} to {new_version}')
            self.middleware.call_sync("update.install_scale", mounted, progress_callback, options)
| 1,859 | Python | .py | 40 | 36.625 | 116 | 0.63571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,667 | pending_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/pending_linux.py | # -*- coding=utf-8 -*-
import json
import os
import subprocess
from middlewared.service import private, Service
from .utils import SCALE_MANIFEST_FILE, DOWNLOAD_UPDATE_FILE
from .utils_linux import mount_update
run_kw = dict(check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", errors="ignore")
class UpdateService(Service):
    @private
    def get_pending_in_path(self, path):
        """Describe the update staged under *path*, or [] when none is usable.

        Reads the staged image's manifest (by loop-mounting it) and the local
        manifest, returning a single "upgrade" change entry; any failure to
        read the staged image is logged and reported as no pending update.
        """
        update_file = os.path.join(path, DOWNLOAD_UPDATE_FILE)
        if not os.path.exists(update_file):
            return []
        with open(SCALE_MANIFEST_FILE) as f:
            current_manifest = json.load(f)
        try:
            with mount_update(update_file) as mounted:
                with open(os.path.join(mounted, "manifest.json")) as f:
                    staged_manifest = json.load(f)
        except Exception:
            self.middleware.logger.error("Failed reading update", exc_info=True)
            return []
        return [{
            "operation": "upgrade",
            "old": {
                "name": "TrueNAS",
                "version": current_manifest["version"],
            },
            "new": {
                "name": "TrueNAS",
                "version": staged_manifest["version"],
            }
        }]
| 1,310 | Python | .py | 35 | 26.6 | 108 | 0.557222 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,668 | install.py | truenas_middleware/src/middlewared/middlewared/plugins/update_/install.py | import errno
import json
import logging
import os
import subprocess
import time
from middlewared.service import CallError, private, Service
from middlewared.utils.size import format_size
logger = logging.getLogger(__name__)
run_kw = dict(check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", errors="ignore")
STARTING_INSTALLER = "Starting installer"
class UpdateService(Service):
    @private
    def install_scale(self, mounted, progress_callback, options):
        """Install the update whose squashfs image is mounted at *mounted*.

        Verifies per-file checksums from the image manifest, runs the
        installer's precheck pass (raising its warning as EAGAIN unless
        options['raise_warnings'] is False), then performs the actual install.
        """
        raise_warnings = options.pop("raise_warnings", True)
        with open(os.path.join(mounted, "manifest.json")) as f:
            manifest = json.load(f)
        boot_pool_name = self.middleware.call_sync("boot.pool_name")
        self.middleware.call_sync("update.ensure_free_space", boot_pool_name, manifest["size"])
        for file, checksum in manifest["checksums"].items():
            progress_callback(0, f"Verifying {file}")
            our_checksum = subprocess.run(["sha1sum", os.path.join(mounted, file)], **run_kw).stdout.split()[0]
            if our_checksum != checksum:
                raise CallError(f"Checksum mismatch for {file!r}: {our_checksum} != {checksum}")
        progress_callback(0, "Running pre-checks")
        warning = self._execute_truenas_install(mounted, {
            "json": True,
            "old_root": "/",
            "precheck": True,
        }, progress_callback)
        if warning and raise_warnings:
            raise CallError(warning, errno.EAGAIN)
        progress_callback(0, STARTING_INSTALLER)
        command = {
            "disks": self.middleware.call_sync("boot.get_disks"),
            "json": True,
            "old_root": "/",
            "pool_name": boot_pool_name,
            "src": mounted,
            **options,
        }
        self._execute_truenas_install(mounted, command, progress_callback)
    def _execute_truenas_install(self, cwd, command, progress_callback):
        """Run `python3 -m truenas_install` in *cwd*, feeding *command* as JSON.

        The installer emits JSON lines: {"progress", "message"} pairs are
        relayed to *progress_callback*, {"error"} is captured; any non-JSON
        output is collected as stderr text. Returns the error/stderr text on
        success, raises CallError on non-zero exit.
        """
        p = subprocess.Popen(
            ["python3", "-m", "truenas_install"], cwd=cwd, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", errors="ignore",
        )
        p.stdin.write(json.dumps(command))
        p.stdin.close()
        stderr = ""
        error = None
        for line in iter(p.stdout.readline, ""):
            try:
                data = json.loads(line)
            except ValueError:
                stderr += line
            else:
                if "progress" in data and "message" in data:
                    progress_callback(data["progress"], data["message"])
                elif "error" in data:
                    error = data["error"]
                else:
                    raise ValueError(f"Invalid truenas_install JSON: {data!r}")
        p.wait()
        if error is not None:
            result = error
        else:
            result = stderr
        if p.returncode != 0:
            raise CallError(result or f"Abnormal installer process termination with code {p.returncode}")
        else:
            return result
    @private
    def ensure_free_space(self, pool_name, size):
        """Ensure *pool_name* has more than *size* bytes free, pruning old
        boot environments (unkept, unmounted, inactive — oldest-last order
        reversed) until the requirement is met; raises ENOSPC otherwise."""
        space_left = self._space_left(pool_name)
        if space_left > size:
            return
        for bootenv in reversed(self.middleware.call_sync(
            "bootenv.query",
            [
                ["keep", "=", False],
                ["mountpoint", "=", "-"],
                ["activated", "=", False],
            ],
            {"order_by": ["created"]},
        )):
            space_left_before_prune = space_left
            logger.info("Pruning %r", bootenv["id"])
            self.middleware.call_sync("bootenv.delete", bootenv["id"])
            be_size = bootenv["rawspace"]
            if be_size is None:
                be_size = 0
            # ZFS frees space asynchronously; poll for up to 10 seconds for
            # the deletion to take effect.
            for i in range(10):
                space_left = self._space_left(pool_name)
                if space_left > size:
                    return
                freed_space = space_left - space_left_before_prune
                if freed_space >= be_size * 0.5:
                    # NOTE(review): returning here gives up even though space
                    # is still insufficient — looks like it should `break` to
                    # prune the next boot environment; confirm intent.
                    return
                logger.debug("Only freed %d bytes of %d, waiting for deferred operation to complete...", freed_space,
                             be_size)
                time.sleep(1)
        raise CallError(
            f"Insufficient disk space available on {pool_name} ({format_size(space_left)}). "
            f"Need {format_size(size)}",
            errno.ENOSPC,
        )
    def _space_left(self, pool_name):
        """Return the pool's available space in bytes (zfs `available` property)."""
        filters = [["name", "=", pool_name]]
        options = {"get": True, "extra": {"flat": False, "retrieve_children": False, "properties": ["available"]}}
        return self.middleware.call_sync('zfs.dataset.query', filters, options)['properties']['available']['parsed']
| 4,796 | Python | .py | 111 | 31.855856 | 117 | 0.566617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,669 | ui.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/ui.py | import asyncio
from middlewared.schema import accepts, Dict, Int, returns, Str
from middlewared.service import CallError, rest_api_metadata, private, Service
from middlewared.validators import Range
from .utils import HTTPS_PROTOCOLS
class SystemGeneralService(Service):
ui_allowlist = []
    class Config:
        # middlewared service metadata: the API namespace and the CLI
        # namespace under which this service's methods are exposed
        namespace = 'system.general'
        cli_namespace = 'system.general'
@accepts()
@returns(Dict('available_ui_address_choices', additional_attrs=True, title='Available UI IPv4 Address Choices'))
async def ui_address_choices(self):
"""
Returns UI ipv4 address choices.
"""
return {
d['address']: d['address'] for d in await self.middleware.call(
'interface.ip_in_use', {'ipv4': True, 'ipv6': False, 'any': True, 'static': True}
)
}
@accepts()
@returns(Dict('available_ui_v6address_choices', additional_attrs=True, title='Available UI IPv6 Address Choices'))
async def ui_v6address_choices(self):
"""
Returns UI ipv6 address choices.
"""
return {
d['address']: d['address'] for d in await self.middleware.call(
'interface.ip_in_use', {'ipv4': False, 'ipv6': True, 'any': True, 'static': True}
)
}
@accepts()
@returns(Dict(
'ui_https_protocols',
*[Str(k, enum=[k]) for k in HTTPS_PROTOCOLS],
title='UI HTTPS Protocol Choices'
))
def ui_httpsprotocols_choices(self):
"""
Returns available HTTPS protocols.
"""
return dict(zip(HTTPS_PROTOCOLS, HTTPS_PROTOCOLS))
@accepts()
@returns(Dict('ui_certificate_choices', additional_attrs=True, title='UI Certificate Choices'))
async def ui_certificate_choices(self):
"""
Return choices of certificates which can be used for `ui_certificate`.
"""
return {
i['id']: i['name']
for i in await self.middleware.call('certificate.query', [
('cert_type_CSR', '=', False)
])
}
@rest_api_metadata(extra_methods=['GET'])
@accepts(Int('delay', default=3, validators=[Range(min_=0)]), roles=['SYSTEM_GENERAL_WRITE'])
async def ui_restart(self, delay):
"""
Restart HTTP server to use latest UI settings.
HTTP server will be restarted after `delay` seconds.
"""
event_loop = asyncio.get_event_loop()
event_loop.call_later(delay, lambda: self.middleware.create_task(self.middleware.call('service.restart', 'http')))
@accepts(roles=['SYSTEM_GENERAL_READ'])
@returns(Str('local_url'))
async def local_url(self):
"""
Returns configured local url in the format of protocol://host:port
"""
config = await self.middleware.call('system.general.config')
if config['ui_certificate']:
protocol = 'https'
port = config['ui_httpsport']
else:
protocol = 'http'
port = config['ui_port']
if '0.0.0.0' in config['ui_address'] or '127.0.0.1' in config['ui_address']:
hosts = ['127.0.0.1']
else:
hosts = config['ui_address']
errors = []
for host in hosts:
try:
reader, writer = await asyncio.wait_for(self.middleware.create_task(asyncio.open_connection(
host,
port=port,
)), timeout=5)
writer.close()
return f'{protocol}://{host}:{port}'
except Exception as e:
errors.append(f'{host}: {e}')
raise CallError('Unable to connect to any of the specified UI addresses:\n' + '\n'.join(errors))
@private
async def get_ui_urls(self):
config = await self.middleware.call('system.general.config')
kwargs = {'static': True} if await self.middleware.call('failover.licensed') else {}
# http is always used
http_proto = 'http://'
http_port = config['ui_port']
# populate https data if necessary
https_proto = https_port = None
if config['ui_certificate']:
https_proto = 'https://'
https_port = config['ui_httpsport']
all_ip4 = '0.0.0.0' in config['ui_address']
all_ip6 = '::' in config['ui_v6address']
urls = set()
for i in await self.middleware.call('interface.ip_in_use', kwargs):
http_url = http_proto + (i["address"] if i['type'] == 'INET' else f'[{i["address"]}]')
if http_port != 80:
http_url += f':{http_port}'
https_url = None
if https_proto is not None:
https_url = https_proto + (i["address"] if i['type'] == 'INET' else f'[{i["address"]}]')
if https_port != 443:
https_url += f':{https_port}'
if (i['type'] == 'INET' and all_ip4) or (i['type'] == 'INET6' and all_ip6):
urls.add(http_url)
if https_url:
urls.add(https_url)
elif i['address'] in config['ui_address'] or i['address'] in config['ui_v6address']:
urls.add(http_url)
if https_url:
urls.add(https_url)
return sorted(urls)
@private
async def get_ui_allowlist(self):
"""
We store this in a state and not read this configuration variable directly from the database so it is
synchronized with HTTP service restarts and HTTP configuration commit/rollback works properly.
Otherwise, changing `ui_allowlist` would immediately block/unblock new connections (we want to block/unblock
them only after explicit HTTP service restart).
"""
return self.ui_allowlist
@private
async def update_ui_allowlist(self):
self.ui_allowlist = (await self.middleware.call('system.general.config'))['ui_allowlist']
async def setup(middleware):
await middleware.call('system.general.update_ui_allowlist')
| 6,086 | Python | .py | 140 | 33.5 | 122 | 0.587589 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,670 | language.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/language.py | from middlewared.i18n import set_language
from middlewared.schema import accepts, Dict, returns
from middlewared.service import private, Service
# Mapping of ISO language code -> human-readable language name, used to
# populate the UI language selector. Insertion order (alphabetical by code)
# is preserved by the dict literal.
LANGUAGES = {
    'af': 'Afrikaans',
    'ar': 'Arabic',
    'ast': 'Asturian',
    'az': 'Azerbaijani',
    'bg': 'Bulgarian',
    'be': 'Belarusian',
    'bn': 'Bengali',
    'br': 'Breton',
    'bs': 'Bosnian',
    'ca': 'Catalan',
    'cs': 'Czech',
    'cy': 'Welsh',
    'da': 'Danish',
    'de': 'German',
    'dsb': 'Lower Sorbian',
    'el': 'Greek',
    'en': 'English',
    'en-au': 'Australian English',
    'en-gb': 'British English',
    'eo': 'Esperanto',
    'es': 'Spanish',
    'es-ar': 'Argentinian Spanish',
    'es-co': 'Colombian Spanish',
    'es-mx': 'Mexican Spanish',
    'es-ni': 'Nicaraguan Spanish',
    'es-ve': 'Venezuelan Spanish',
    'et': 'Estonian',
    'eu': 'Basque',
    'fa': 'Persian',
    'fi': 'Finnish',
    'fr': 'French',
    'fy': 'Frisian',
    'ga': 'Irish',
    'gd': 'Scottish Gaelic',
    'gl': 'Galician',
    'he': 'Hebrew',
    'hi': 'Hindi',
    'hr': 'Croatian',
    'hsb': 'Upper Sorbian',
    'hu': 'Hungarian',
    'ia': 'Interlingua',
    'id': 'Indonesian',
    'io': 'Ido',
    'is': 'Icelandic',
    'it': 'Italian',
    'ja': 'Japanese',
    'ka': 'Georgian',
    'kab': 'Kabyle',
    'kk': 'Kazakh',
    'km': 'Khmer',
    'kn': 'Kannada',
    'ko': 'Korean',
    'lb': 'Luxembourgish',
    'lt': 'Lithuanian',
    'lv': 'Latvian',
    'mk': 'Macedonian',
    'ml': 'Malayalam',
    'mn': 'Mongolian',
    'mr': 'Marathi',
    'my': 'Burmese',
    'nb': 'Norwegian Bokmål',
    'ne': 'Nepali',
    'nl': 'Dutch',
    'nn': 'Norwegian Nynorsk',
    'os': 'Ossetic',
    'pa': 'Punjabi',
    'pl': 'Polish',
    'pt': 'Portuguese',
    'pt-br': 'Brazilian Portuguese',
    'ro': 'Romanian',
    'ru': 'Russian',
    'sk': 'Slovak',
    'sl': 'Slovenian',
    'sq': 'Albanian',
    'sr': 'Serbian',
    'sr-latn': 'Serbian Latin',
    'sv': 'Swedish',
    'sw': 'Swahili',
    'ta': 'Tamil',
    'te': 'Telugu',
    'th': 'Thai',
    'tr': 'Turkish',
    'tt': 'Tatar',
    'udm': 'Udmurt',
    'uk': 'Ukrainian',
    'ur': 'Urdu',
    'vi': 'Vietnamese',
    'zh-hans': 'Simplified Chinese',
    'zh-hant': 'Traditional Chinese',
}
class SystemGeneralService(Service):
    """Expose language choices and apply the configured system language."""

    class Config:
        namespace = 'system.general'
        cli_namespace = 'system.general'

    @accepts()
    @returns(Dict('system_language_choices', additional_attrs=True, title='System Language Choices'))
    async def language_choices(self):
        """
        Returns language choices.
        """
        return LANGUAGES

    @private
    def set_language(self):
        """Apply the persisted `language` setting to the i18n subsystem."""
        set_language(self.middleware.call_sync('system.general.config')['language'])
| 2,958 | Python | .py | 109 | 22 | 101 | 0.513371 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,671 | timezone.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/timezone.py | from subprocess import run
from middlewared.schema import accepts, Dict, returns
from middlewared.service import Service
from middlewared.utils.functools_ import cache
class SystemGeneralService(Service):
    """Expose the system timezone choices known to systemd."""

    class Config:
        namespace = 'system.general'
        cli_namespace = 'system.general'

    @accepts()
    @returns(Dict('system_timezone_choices', additional_attrs=True, title='System Timezone Choices'))
    @cache
    def timezone_choices(self):
        """Returns available timezones"""
        # `timedatectl list-timezones` prints one timezone per line; build a
        # mapping of each timezone to itself, skipping blank lines. Cached for
        # the process lifetime since the list never changes at runtime.
        output = run(['timedatectl', 'list-timezones'], capture_output=True).stdout
        return {
            zone: zone
            for line in output.decode().split('\n')
            if (zone := line.strip())
        }
| 741 | Python | .py | 18 | 34.555556 | 105 | 0.688022 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,672 | keymap.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/keymap.py | import collections
import re
from middlewared.schema import accepts, Dict, returns
from middlewared.service import private, Service
class SystemGeneralService(Service):

    # Lazily-populated, class-level cache of keyboard layout choices; the
    # underlying xorg.lst file never changes at runtime.
    KBDMAP_CHOICES = None

    class Config:
        namespace = 'system.general'
        cli_namespace = 'system.general'

    @accepts()
    @returns(Dict('kbdmap_choices', additional_attrs=True))
    async def kbdmap_choices(self):
        """
        Returns kbdmap choices.
        """
        if not self.KBDMAP_CHOICES:
            self.KBDMAP_CHOICES = await self.middleware.call('system.general.read_kbdmap_choices')
        return self.KBDMAP_CHOICES

    @private
    def read_kbdmap_choices(self):
        """Parse X11's xorg.lst and return a mapping of layout (``us``) and
        layout-variant (``us.dvorak``) codes to their human-readable titles,
        sorted by title."""
        with open('/usr/share/X11/xkb/rules/xorg.lst', 'r') as f:
            key = None
            items = collections.defaultdict(list)
            for line in f.readlines():
                line = line.rstrip()
                # Section headers look like "! layout" / "! variant".
                if line.startswith('! '):
                    key = line[2:]
                # Entries are indented: "<code>   <description>".
                if line.startswith(' '):
                    # FIX: pass maxsplit by keyword — the positional form of
                    # re.split's maxsplit is deprecated since Python 3.13.
                    items[key].append(re.split(r'\s+', line.lstrip(), maxsplit=1))
            choices = dict(items['layout'])
            for variant, desc in items['variant']:
                # Variant descriptions look like "<lang>: <title>".
                lang, title = desc.split(': ', 1)
                choices[f'{lang}.{variant}'] = title

            return dict(sorted(choices.items(), key=lambda t: t[1]))
| 1,340 | Python | .py | 34 | 30.029412 | 98 | 0.598304 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,673 | country.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/country.py | import csv
from middlewared.schema import accepts, Dict, returns
from middlewared.service import private, Service
class SystemGeneralService(Service):
    """Expose ISO-3166 country choices parsed from the bundled CSV file."""

    # Lazily-populated, class-level cache of country choices.
    COUNTRY_CHOICES = None

    class Config:
        namespace = 'system.general'
        cli_namespace = 'system.general'

    @accepts()
    @returns(Dict('country_choices', additional_attrs=True, register=True))
    async def country_choices(self):
        """
        Returns country choices.
        """
        if not self.COUNTRY_CHOICES:
            self.COUNTRY_CHOICES = await self.middleware.call('system.general.get_country_choices')
        return self.COUNTRY_CHOICES

    @private
    def get_country_choices(self):
        """Parse /etc/iso_3166_2_countries.csv into {two-letter-code: name}."""

        def _get_index(columns, wanted):
            # Case-insensitive scan for a header column; -1 when absent.
            for position, header in enumerate(columns):
                if header.lower() == wanted.lower():
                    return position
            return -1

        name_idx = two_letter_idx = None
        country_choices = {}
        with open('/etc/iso_3166_2_countries.csv', 'r', encoding='utf-8') as csvfile:
            for row_number, row in enumerate(csv.reader(csvfile)):
                if row_number == 0:
                    # Header row: locate the only two columns we consume.
                    name_idx = _get_index(row, 'Common Name')
                    two_letter_idx = _get_index(row, 'ISO 3166-1 2 Letter Code')
                elif row[name_idx] and row[two_letter_idx]:
                    code = row[two_letter_idx]
                    if code in country_choices:
                        # If two countries in the iso file have the same key, we concatenate their names
                        country_choices[code] += f' + {row[name_idx]}'
                    else:
                        country_choices[code] = row[name_idx]

        return country_choices
| 1,928 | Python | .py | 46 | 29.086957 | 108 | 0.547059 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,674 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/attachments.py | from middlewared.common.ports import ServicePortDelegate
class SystemGeneralServicePortDelegate(ServicePortDelegate):
    """Reports the (address, port) pairs the WebUI binds, so the port
    conflict checker can account for them."""

    bind_address_field = 'ui_address'
    name = 'webui'
    namespace = 'system.general'
    port_fields = ['ui_port', 'ui_httpsport']
    title = 'WebUI Service'

    def bind_address(self, config):
        """Return the effective bind addresses for both address families."""
        addresses = []
        for wildcard, field in (('0.0.0.0', 'ui_address'), ('::', 'ui_v6address')):
            selected = config[field]
            if selected and wildcard not in selected:
                addresses.extend(selected)
            else:
                # Nothing explicitly selected (or wildcard chosen): the UI
                # binds every address of this family.
                addresses.append(wildcard)
        return addresses

    async def get_ports_internal(self):
        """Return (bind_ip, port) tuples for every configured UI port."""
        await self.basic_checks()
        config = await self.config()
        bind_ips = self.bind_address(config)
        return [
            (bind_ip, config[field])
            for field in self.port_fields
            if config.get(field)
            for bind_ip in bind_ips
        ]
async def setup(middleware):
    """Plugin entry point: register the WebUI port delegate with the
    port-conflict checker."""
    delegate = SystemGeneralServicePortDelegate(middleware)
    await middleware.call('port.register_attachment_delegate', delegate)
| 1,208 | Python | .py | 29 | 32.758621 | 108 | 0.639316 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,675 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/system_general/update.py | import asyncio
import syslog
import middlewared.sqlalchemy as sa
from middlewared.async_validators import validate_port
from middlewared.schema import accepts, Bool, Datetime, Dict, Int, IPAddr, List, Patch, returns, Str
from middlewared.service import ConfigService, private, ValidationErrors
from middlewared.utils import run
from middlewared.validators import Range
from .utils import HTTPS_PROTOCOLS
class SystemGeneralModel(sa.Model):
    """ORM mapping for the `system_settings` table backing system.general.

    Column names keep the legacy ``stg_gui*`` prefix; the service layer
    renames them to ``ui_*`` for the public API (see general_system_extend).
    """
    __tablename__ = 'system_settings'

    id = sa.Column(sa.Integer(), primary_key=True)
    stg_guiaddress = sa.Column(sa.JSON(list), default=['0.0.0.0'])  # IPv4 UI bind addresses
    stg_guiv6address = sa.Column(sa.JSON(list), default=['::'])  # IPv6 UI bind addresses
    stg_guiallowlist = sa.Column(sa.JSON(list), default=[])  # networks allowed to reach UI/API; empty = all
    stg_guiport = sa.Column(sa.Integer(), default=80)
    stg_guihttpsport = sa.Column(sa.Integer(), default=443)
    stg_guihttpsredirect = sa.Column(sa.Boolean(), default=False)  # force HTTP -> HTTPS redirect
    stg_guihttpsprotocols = sa.Column(sa.JSON(list), default=['TLSv1', 'TLSv1.1', 'TLSv1.2', 'TLSv1.3'])
    stg_guix_frame_options = sa.Column(sa.String(120), default='SAMEORIGIN')  # X-Frame-Options header value
    stg_guiconsolemsg = sa.Column(sa.Boolean(), default=True)
    stg_language = sa.Column(sa.String(120), default='en')
    stg_kbdmap = sa.Column(sa.String(120), default='us')
    stg_timezone = sa.Column(sa.String(120), default='America/Los_Angeles')
    stg_wizardshown = sa.Column(sa.Boolean(), default=False)
    stg_pwenc_check = sa.Column(sa.String(100))  # internal secret-encryption sanity value; not exposed via API
    stg_guicertificate_id = sa.Column(sa.ForeignKey('system_certificate.id'), index=True, nullable=True)
    stg_usage_collection = sa.Column(sa.Boolean(), nullable=True)  # None = user has not chosen yet
    stg_ds_auth = sa.Column(sa.Boolean(), default=False)  # allow directory-service users to log in to UI/API
class SystemGeneralService(ConfigService):
    """Config service for the single-row `system.settings` table: UI bind
    addresses/ports/certificate, language, keymap, timezone, usage collection
    and directory-service auth, including commit/rollback support for risky
    UI changes."""

    class Config:
        namespace = 'system.general'
        datastore = 'system.settings'
        datastore_prefix = 'stg_'
        datastore_extend = 'system.general.general_system_extend'
        cli_namespace = 'system.general'
        role_prefix = 'SYSTEM_GENERAL'

    ENTRY = Dict(
        'system_general_entry',
        Patch(
            'certificate_entry', 'ui_certificate',
            ('attr', {'null': True, 'required': True, 'private': True}),
        ),
        Int('ui_httpsport', validators=[Range(min_=1, max_=65535)], required=True),
        Bool('ui_httpsredirect', required=True),
        List(
            'ui_httpsprotocols', items=[Str('protocol', enum=HTTPS_PROTOCOLS)],
            empty=False, unique=True, required=True
        ),
        Int('ui_port', validators=[Range(min_=1, max_=65535)], required=True),
        List('ui_address', items=[IPAddr('addr')], empty=False, required=True),
        List('ui_v6address', items=[IPAddr('addr')], empty=False, required=True),
        List('ui_allowlist', items=[IPAddr('addr', network=True, network_strict=True)], required=True),
        Bool('ui_consolemsg', required=True),
        Str('ui_x_frame_options', enum=['SAMEORIGIN', 'DENY', 'ALLOW_ALL'], required=True),
        Str('kbdmap', required=True),
        Str('language', empty=False, required=True),
        Str('timezone', empty=False, required=True),
        Bool('usage_collection', null=True, required=True),
        Bool('wizardshown', required=True),
        Bool('usage_collection_is_set', required=True),
        Bool('ds_auth', required=True),
        Int('id', required=True),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # State backing the `rollback_timeout` feature: snapshot of the
        # datastore row to restore, plus the scheduled rollback timer handle.
        self._original_datastore = {}
        self._rollback_timer = None

    @private
    async def general_system_extend(self, data):
        """Datastore-extend hook: rename legacy gui* fields to ui_*, expand
        the certificate, and normalize `usage_collection`."""
        for key in list(data.keys()):
            if key.startswith('gui'):
                data['ui_' + key[3:]] = data.pop(key)

        if data['ui_certificate']:
            data['ui_certificate'] = await self.middleware.call(
                'certificate.get_instance', data['ui_certificate']['id']
            )

        # `usage_collection` is nullable in the DB (None = user never chose);
        # expose the tri-state via a separate boolean and default to True.
        data['usage_collection_is_set'] = data['usage_collection'] is not None
        if data['usage_collection'] is None:
            data['usage_collection'] = True

        # Internal secret-encryption sanity value; never exposed via the API.
        data.pop('pwenc_check')

        return data

    @private
    async def validate_general_settings(self, data, schema):
        """Validate a prospective configuration. Returns (does not raise) a
        ValidationErrors collection for the caller to `.check()`."""
        verrors = ValidationErrors()

        # Ports must not already be bound by another service on any of the
        # selected addresses.
        for k in ('ui_port', 'ui_httpsport'):
            for ui_address in data['ui_address']:
                verrors.extend(await validate_port(
                    self.middleware, f'{schema}.{k}', data[k], 'system.general', ui_address
                ))

        if data['ui_port'] == data['ui_httpsport']:
            verrors.add(f'{schema}.ui_port', 'Must be different from "ui_httpsport"')

        if data['ds_auth'] and not await self.middleware.call('system.is_enterprise'):
            verrors.add(
                f'{schema}.ds_auth',
                'Directory services authentication for UI and API access requires an Enterprise license.'
            )

        language = data.get('language')
        system_languages = await self.middleware.call('system.general.language_choices')
        if language not in system_languages.keys():
            verrors.add(
                f'{schema}.language',
                f'Specified "{language}" language unknown. Please select a valid language.'
            )

        if data['kbdmap'] not in await self.middleware.call('system.general.kbdmap_choices'):
            verrors.add(
                f'{schema}.kbdmap',
                'Please enter a valid keyboard layout'
            )

        timezone = data.get('timezone')
        timezones = await self.middleware.call('system.general.timezone_choices')
        if timezone not in timezones:
            verrors.add(
                f'{schema}.timezone',
                'Timezone not known. Please select a valid timezone.'
            )

        # Every selected bind address must actually exist on this machine.
        ip4_addresses_list = await self.middleware.call('system.general.ui_address_choices')
        ip6_addresses_list = await self.middleware.call('system.general.ui_v6address_choices')

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if ip4_address not in ip4_addresses_list:
                verrors.add(
                    f'{schema}.ui_address',
                    f'{ip4_address} ipv4 address is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if ip6_address not in ip6_addresses_list:
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'{ip6_address} ipv6 address is not associated with this machine'
                )

        # A wildcard bind is mutually exclusive with specific addresses.
        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses), ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call(
            'certificate.query',
            [["id", "=", certificate_id]]
        )
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(
                await self.middleware.call(
                    'certificate.cert_services_validation', certificate_id, f'{schema}.ui_certificate', False
                )
            )

            if cert['fingerprint']:
                # Record the active UI certificate fingerprint in syslog for
                # audit purposes.
                syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
                syslog.syslog(syslog.LOG_ERR, 'Fingerprint of the certificate used in UI : ' + cert['fingerprint'])
                syslog.closelog()

        return verrors

    @accepts(
        Patch(
            'system_general_entry', 'general_settings',
            ('rm', {'name': 'usage_collection_is_set'}),
            ('rm', {'name': 'wizardshown'}),
            ('rm', {'name': 'id'}),
            ('replace', Int('ui_certificate', null=True)),
            ('add', Int('rollback_timeout', null=True)),
            ('add', Int('ui_restart_delay', null=True)),
            ('attr', {'update': True}),
        ),
        audit='System general update'
    )
    async def do_update(self, data):
        """
        Update System General Service Configuration.

        `ui_certificate` is used to enable HTTPS access to the system. If `ui_certificate` is not configured on boot,
        it is automatically created by the system.

        `ui_httpsredirect` when set, makes sure that all HTTP requests are converted to HTTPS requests to better
        enhance security.

        `ui_address` and `ui_v6address` are a list of valid ipv4/ipv6 addresses respectively which the system will
        listen on.

        `ui_allowlist` is a list of IP addresses and networks that are allow to use API and UI. If this list is empty,
        then all IP addresses are allowed to use API and UI.

        `ds_auth` controls whether configured Directory Service users that are granted with Privileges are allowed to
        log in to the Web UI or use TrueNAS API.

        UI configuration is not applied automatically. Call `system.general.ui_restart` to apply new UI settings (all
        HTTP connections will be aborted) or specify `ui_restart_delay` (in seconds) to automatically apply them after
        some small amount of time necessary you might need to receive the response for your settings update request.

        If incorrect UI configuration is applied, you might loss API connectivity and won't be able to fix the settings.
        To avoid that, specify `rollback_timeout` (in seconds). It will automatically roll back UI configuration to the
        previously working settings after `rollback_timeout` passes unless you call `system.general.checkin` in case
        the new settings were correct and no rollback is necessary.
        """
        # These two are API-only knobs, not persisted fields.
        rollback_timeout = data.pop('rollback_timeout', None)
        ui_restart_delay = data.pop('ui_restart_delay', None)

        # Snapshot the raw datastore row so a later rollback can restore it;
        # flatten the certificate relation down to its id.
        original_datastore = await self.middleware.call('datastore.config', self._config.datastore)
        original_datastore['stg_guicertificate'] = (
            original_datastore['stg_guicertificate']['id']
            if original_datastore['stg_guicertificate']
            else None
        )

        config = await self.config()
        config['ui_certificate'] = config['ui_certificate']['id'] if config['ui_certificate'] else None
        # Restore the DB's tri-state: None means "user never chose".
        if not config.pop('usage_collection_is_set'):
            config['usage_collection'] = None

        new_config = config.copy()
        new_config.update(data)

        verrors = await self.validate_general_settings(new_config, 'general_settings_update')
        verrors.check()

        # Map public ui_* names back to the legacy gui* column names.
        db_config = new_config.copy()
        for key in list(new_config.keys()):
            if key.startswith('ui_'):
                db_config['gui' + key[3:]] = db_config.pop(key)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            config['id'],
            db_config,
            {'prefix': 'stg_'}
        )

        # Apply side effects only for the settings that actually changed.
        if config['kbdmap'] != new_config['kbdmap']:
            await self.set_kbdlayout(new_config['kbdmap'])

        if config['timezone'] != new_config['timezone']:
            await self.middleware.call('zettarepl.update_config', {'timezone': new_config['timezone']})
            await self.middleware.call('service.reload', 'timeservices')
            await self.middleware.call('service.restart', 'cron')

        if config['language'] != new_config['language']:
            await self.middleware.call('system.general.set_language')

        if config['ds_auth'] != new_config['ds_auth']:
            await self.middleware.call('etc.generate', 'pam_middleware')

        await self.middleware.call('service.start', 'ssl')

        if rollback_timeout is not None:
            # Arm the automatic rollback; system.general.checkin cancels it.
            self._original_datastore = original_datastore
            self._rollback_timer = asyncio.get_event_loop().call_later(
                rollback_timeout,
                lambda: self.middleware.create_task(self.rollback()),
            )

        if ui_restart_delay is not None:
            await self.middleware.call('system.general.ui_restart', ui_restart_delay)

        # CLI caches UI endpoint info; reload it if any of these changed.
        for key in ('ui_port', 'ui_httpsport', 'ui_httpsredirect', 'ui_address', 'ui_v6address'):
            if config[key] != new_config[key]:
                await self.middleware.call('system.reload_cli')
                break

        return await self.config()

    @private
    async def set_kbdlayout(self, kbdmap='us'):
        # Regenerate console keymap config, apply it, and rebuild initramfs so
        # early boot uses the same layout.
        # NOTE(review): the `kbdmap` parameter is not referenced in this body;
        # presumably etc.generate reads the layout from the datastore — confirm.
        await self.middleware.call('etc.generate', 'keyboard')
        await run(['setupcon'], check=False)
        await self.middleware.call('boot.update_initramfs', {'force': True})

    @accepts()
    @returns(Int('remaining_seconds', null=True))
    async def checkin_waiting(self):
        """
        Determines whether or not we are waiting user to check-in the applied UI settings changes before they are rolled
        back. Returns a number of seconds before the automatic rollback or null if there are no changes pending.
        """
        if self._rollback_timer:
            remaining = self._rollback_timer.when() - asyncio.get_event_loop().time()
            if remaining > 0:
                return int(remaining)

    @accepts()
    @returns()
    async def checkin(self):
        """
        After UI settings are saved with `rollback_timeout` this method needs to be called within that timeout limit
        to prevent reverting the changes.

        This is to ensure user verifies the changes went as planned and its working.
        """
        if self._rollback_timer:
            self._rollback_timer.cancel()

        self._rollback_timer = None
        self._original_datastore = {}

    @private
    async def rollback(self):
        """Restore the pre-update UI settings snapshot and restart the UI."""
        if self._original_datastore:
            # Only the stg_gui* columns are restored; other settings (language,
            # timezone, ...) cannot break API connectivity.
            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                self._original_datastore['id'],
                {k: v for k, v in self._original_datastore.items() if k.startswith('stg_gui')},
            )
            await self.middleware.call('system.general.ui_restart', 0)

            self._rollback_timer = None
            self._original_datastore = {}
| 14,503 | Python | .py | 285 | 40.308772 | 120 | 0.618405 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,676 | map2.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/map2.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import logging
from .constants import HEAD_UNIT_DISK_SLOT_START_NUMBER
from .enums import ControllerModels, JbofModels
logger = logging.getLogger(__name__)
def to_ignore(enclosure):
    """Return True when `enclosure` should NOT be merged into the head-unit
    enclosure object by combine_enclosures()."""
    if not enclosure['controller']:
        # JBODs stand alone and are never combined into another object.
        return True

    all_flash_prefixes = (
        ControllerModels.F60.value,
        ControllerModels.F100.value,
        ControllerModels.F130.value,
        ControllerModels.R30.value,
    )
    if enclosure['model'].startswith(all_flash_prefixes):
        # All-NVMe flash controllers are reported as-is.
        return True

    # All-NVMe flash JBOF enclosures are also reported as-is.
    return enclosure['model'] in (member.name for member in JbofModels)
def combine_enclosures(enclosures):
    """Purpose of this function is to combine certain enclosures
    Array Device Slot elements into 1. For example, the MINIs/R20s
    have their disk drives spread across multiple enclosures. We
    need to map them all into 1 unit. Another example is that we
    have platforms (M50/60, R50B) that have rear nvme drive bays.
    NVMe doesn't get exposed via a traditional SES device because,
    well, it's nvme. So we create a "fake" nvme "enclosure" that
    mimics the drive slot information that a traditional enclosure
    would do. We take these enclosure devices and simply add them
    to the head-unit enclosure object.

    NOTE: The array device slots have already been mapped to their
    human-readable slot numbers. That logic is in the `Enclosure`
    class in "enclosure_/enclosure_class.py"
    """
    head_unit_idx, to_combine, to_remove = None, dict(), list()
    # (sas_address, index-into-`enclosures`) tuples for R40 expanders
    r40_sas_ids = list()
    for idx, enclosure in enumerate(enclosures):
        if to_ignore(enclosure):
            continue
        elif enclosure['model'] == ControllerModels.R40.value:
            r40_sas_ids.append((int(f'0x{enclosure["id"]}', 16), idx))
            if len(r40_sas_ids) == 2:
                # we've got no choice but to do this hack. The R40 has 2x HBAs
                # in the head. One of those HBAs is disks 1-24, the other for 25-48.
                # Unfortunately, however, this platform was shipped with both of
                # those expanders flashed with the same firmware so there is no way
                # to uniquely identify which expander gets mapped to 1-24 and the
                # other to get mapped for 25-48. (Like we do with the R50)
                #
                # Instead, we take the sas address of the ses devices and check which
                # enclosure device has the smaller value. The one with the smaller
                # gets mapped as the "head-unit" (1-24) while the larger one gets
                # mapped to drive slots 25-48.
                if r40_sas_ids[0][0] < r40_sas_ids[1][0]:
                    head_unit_idx = r40_sas_ids[0][1]
                    _update_idx = r40_sas_ids[1][1]
                else:
                    # BUGFIX: element [1] of each tuple is the enclosure's index in
                    # `enclosures`; element [0] is its SAS address. This branch
                    # previously assigned `r40_sas_ids[1][0]` (the SAS address)
                    # to head_unit_idx, which would index the wrong (or a
                    # non-existent) list element below.
                    head_unit_idx = r40_sas_ids[1][1]
                    _update_idx = r40_sas_ids[0][1]

                # we know which enclosure has the larger sas address so we'll update
                # the array device slots so that they're 25-48.
                for origslot, newslot in zip(range(1, 25), range(25, 49)):
                    orig_info = enclosures[_update_idx]['elements']['Array Device Slot'].pop(origslot)
                    enclosures[_update_idx]['elements']['Array Device Slot'][newslot] = orig_info

                to_combine.update(enclosures[_update_idx]['elements'].pop('Array Device Slot'))
                to_remove.append(_update_idx)
        elif enclosure['elements']['Array Device Slot'].get(HEAD_UNIT_DISK_SLOT_START_NUMBER):
            # the enclosure object whose disk slot has number 1
            # will always be the head-unit
            head_unit_idx = idx
        else:
            to_combine.update(enclosure['elements'].pop('Array Device Slot', dict()))
            to_remove.append(idx)

    if head_unit_idx is not None:
        # Merge all collected slots into the head-unit and re-sort by slot number.
        enclosures[head_unit_idx]['elements']['Array Device Slot'].update(to_combine)
        enclosures[head_unit_idx]['elements']['Array Device Slot'] = {
            k: v for k, v in sorted(enclosures[head_unit_idx]['elements']['Array Device Slot'].items())
        }

    for idx in reversed(to_remove):
        # we've combined the enclosures into the
        # main "head-unit" enclosure object so let's
        # remove the objects we combined from
        enclosures.pop(idx)
| 4,770 | Python | .py | 92 | 41.684783 | 103 | 0.632312 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,677 | enums.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/enums.py | from enum import Enum, unique
@unique
class ElementStatusesToIgnore(Enum):
    """SES element status values skipped when building enclosure views."""
    UNSUPPORTED = 'unsupported'
@unique
class ElementDescriptorsToIgnore(Enum):
    """SES element descriptor strings that carry no useful per-slot
    information and are skipped during parsing."""
    ADISE0 = 'arraydevicesinsubenclsr0'
    ADS = 'array device slot'
    EMPTY = '<empty>'
    AD = 'arraydevices'
    DS = 'drive slots'
@unique
class ControllerModels(Enum):
    """TrueNAS head-unit (controller) model names."""
    F60 = 'F60'  # all nvme flash
    F100 = 'F100'  # all nvme flash
    F130 = 'F130'  # all nvme flash
    H10 = 'H10'
    H20 = 'H20'
    H30 = 'H30'
    M30 = 'M30'
    M40 = 'M40'
    M50 = 'M50'
    M60 = 'M60'
    MINI3E = 'MINI-3.0-E'
    MINI3EP = 'MINI-3.0-E+'
    MINI3X = 'MINI-3.0-X'
    MINI3XP = 'MINI-3.0-X+'
    MINI3XLP = 'MINI-3.0-XL+'
    MINIR = 'MINI-R'
    R10 = 'R10'
    R20 = 'R20'
    R20A = 'R20A'
    R20B = 'R20B'
    R30 = 'R30'  # all nvme flash
    R40 = 'R40'
    R50 = 'R50'
    R50B = 'R50B'
    R50BM = 'R50BM'
    X10 = 'X10'
    X20 = 'X20'
@unique
class JbodModels(Enum):
    """SAS JBOD expansion-shelf model names."""
    ES12 = 'ES12'
    ES24 = 'ES24'
    ES24F = 'ES24F'
    ES60 = 'ES60'
    ES60G2 = 'ES60G2'
    ES60G3 = 'ES60G3'
    ES102 = 'ES102'
    ES102G2 = 'ES102G2'
@unique
class JbofModels(Enum):
    """NVMe-oF (JBOF) expansion-shelf models."""
    # name is iX's model (ES24N)
    # while the value (VDS2249R2) is the OEM's model
    ES24N = 'VDS2249R2'
# See SES-4 7.2.3 Status element format, Table 74 — ELEMENT STATUS CODE field
@unique
class ElementStatus(Enum):
    """Human-readable SES element status codes (SES-4 Table 74)."""
    UNSUPPORTED = 'Unsupported'
    OK = 'OK'
    CRITICAL = 'Critical'
    NONCRITICAL = 'Noncritical'
    UNRECOVERABLE = 'Unrecoverable'
    NOT_INSTALLED = 'Not installed'
    UNKNOWN = 'Unknown'
    NOT_AVAILABLE = 'Not available'
    NO_ACCESS_ALLOWED = 'No access allowed'
# See SES-4 7.1 Element definitions overview, Table 71 — Element type codes
@unique
class ElementType(Enum):
    """Human-readable SES element type codes (SES-4 Table 71)."""
    UNSPECIFIED = 'Unspecified'
    DEVICE_SLOT = 'Device Slot'
    POWER_SUPPLY = 'Power Supply'
    COOLING = 'Cooling'
    TEMPERATURE_SENSORS = 'Temperature Sensors'
    DOOR_LOCK = 'Door Lock'
    AUDIBLE_ALARM = 'Audible Alarm'
    ENCLOSURE_SERVICES_CONTROLLER_ELECTRONICS = 'Enclosure Services Controller Electronics'
    SCC_CONTROLLER_ELECTRONICS = 'SCC Controller Electronics'
    NONVOLATILE_CACHE = 'Nonvolatile Cache'
    INVALID_OPERATION_REASON = 'Invalid Operation Reason'
    UNINTERRUPTIBLE_POWER_SUPPLY = 'Uninterruptible Power Supply'
    DISPLAY = 'Display'
    KEY_PAD_ENTRY = 'Key Pad Entry'
    ENCLOSURE = 'Enclosure'
    SCSI_PORT_TRANSCEIVER = 'SCSI Port/Transciever'
    LANGUAGE = 'Language'
    COMMUNICATION_PORT = 'Communication Port'
    VOLTAGE_SENSOR = 'Voltage Sensor'
    CURRENT_SENSOR = 'Current Sensor'
    SCSI_TARGET_PORT = 'SCSI Target Port'
    SCSI_INITIATOR_PORT = 'SCSI Initiator Port'
    SIMPLE_SUBENCLOSURE = 'Simple Subenclosure'
    ARRAY_DEVICE_SLOT = 'Array Device Slot'
    SAS_EXPANDER = 'SAS Expander'
    SAS_CONNECTOR = 'SAS Connector'
# See DSP0268_2023.1 4.16.3.1 Health (https://www.dmtf.org/dsp/DSP0268)
@unique
class RedfishStatusHealth(Enum):
    """Redfish resource Health values (DSP0268 4.16.3.1)."""
    CRITICAL = 'Critical'
    OK = 'OK'
    WARNING = 'Warning'
# See DSP0268_2023.1 4.16.3.4 State (https://www.dmtf.org/dsp/DSP0268)
@unique
class RedfishStatusState(Enum):
    """Redfish resource State values (DSP0268 4.16.3.4)."""
    ABSENT = 'Absent'
    DEFERRING = 'Deferring'
    DISABLED = 'Disabled'
    ENABLED = 'Enabled'
    INTEST = 'InTest'
    QUALIFIED = 'Qualified'
    QUIESCED = 'Quiesced'
    STANDBY_OFFLINE = 'StandbyOffline'
    STANDBY_SPARE = 'StandbySpare'
    STARTING = 'Starting'
    UNAVAILABLE_OFFLINE = 'UnavailableOffline'
    UPDATING = 'Updating'
| 3,506 | Python | .py | 117 | 25.564103 | 91 | 0.672112 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,678 | sysfs_disks.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/sysfs_disks.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from dataclasses import dataclass
from os import scandir
from pathlib import Path
from .enums import ControllerModels
@dataclass(slots=True, frozen=True, kw_only=True)
class BaseDev:
    """Minimal description of whatever occupies an enclosure drive slot."""
    name: str | None = None  # block device name (e.g. "sda"); None when the slot is empty
    locate: str | None = None  # identify-LED state: "ON"/"OFF"; None when unreadable
def map_disks_to_enclosure_slots(enc) -> dict[int, BaseDev]:
    """Return a mapping of drive-slot number -> ``BaseDev`` for ``enc``.

    The sysfs directory layout under ``/sys/class/enclosure/<pci>/`` varies
    wildly between enclosures (e.g. ``SLOT_001``, ``Disk #00``, ``slot00 ``,
    plain integers, ...), so the only safe test for "is this a drive slot"
    is the presence of a ``slot`` file inside the directory. That file
    always holds a plain integer slot number.

    Args:
        enc: An instance of class Enclosure

    Returns:
        dict keyed by integer slot number with ``BaseDev`` values.
    """
    result = dict()
    with scandir(f"/sys/class/enclosure/{enc.pci}") as entries:
        for entry in entries:
            if not entry.is_dir():
                continue
            if enc.is_hseries and entry.name in ("4", "5", "6", "7"):
                # these directories on h-series are not drive bays
                continue
            slot_dir = Path(entry)
            try:
                slot_num = int((slot_dir / "slot").read_text().strip())
            except (NotADirectoryError, FileNotFoundError, ValueError):
                # no readable "slot" file means this isn't a drive slot
                continue
            try:
                dev_name = next((slot_dir / "device/block").iterdir(), None).name
            except (AttributeError, FileNotFoundError):
                # AttributeError: iterator exhausted (no disk in this slot)
                dev_name = None
            try:
                led = "ON" if (slot_dir / "locate").read_text().strip() == "1" else "OFF"
            except (ValueError, FileNotFoundError):
                led = None
            result[slot_num] = BaseDev(name=dev_name, locate=led)
    return result
def toggle_enclosure_slot_identifier(
    sysfs_path, slot, action, by_dirname=False, model=None
):
    """Use sysfs to toggle the enclosure light indicator for a disk
    slot.

    Args:
        sysfs_path: string (i.e. /sys/clas/enclosure/0:0:0:0)
        slot: string | int
        action: string, one of "CLEAR", "OFF" or "ON"
        by_dirname: bool defaults to False, when set to True will treat the
            parent directory _NAME_ as the drive "slot". For example,
            /sys/class/enclosure/0:0:0:0/1 will be treated as slot "1".
            Otherwise, the slot _FILE_ inside the parent directory will be
            read and treated as the slot. For example,
            cat /sys/class/enclosure/0:0:0:0/1/slot == 9. "9" is treated
            as the slot.
        model: string | None, the controller model; used to work around a
            kernel bug on H10/H20 platforms (see below)

    Raises:
        FileNotFoundError: when the enclosure path or requested slot
            cannot be found.
        ValueError: when ``action`` is not one of the supported values.

    Returns:
        None
    """
    pathobj = Path(sysfs_path)
    if not pathobj.exists():
        raise FileNotFoundError(f"Enclosure path: {sysfs_path!r} not found")

    # build the error message before `slot` is remapped below so it always
    # reports the slot number the caller asked for
    slot_errmsg = f"Slot: {slot!r} not found"
    slot = str(slot)
    if model in (ControllerModels.H10.value, ControllerModels.H20.value):
        # kernel bug for hseries where the slot files report duplicate numbers
        # between the array device slots so until that can be fixed, we have to
        # use the directory name where the slot file exists. Only applies to 4
        # slots on the HBA
        match slot:
            case "4":
                slot = "12"
                by_dirname = True
            case "5":
                slot = "13"
                by_dirname = True
            case "6":
                slot = "14"
                by_dirname = True
            case "7":
                slot = "15"
                by_dirname = True

    if by_dirname:
        # the directory name itself is the slot number
        pathobj = Path(f"{sysfs_path}/{slot}")
        if not pathobj.exists():
            raise FileNotFoundError(slot_errmsg)
    else:
        # scan every child directory for a "slot" file whose contents match
        for i in pathobj.iterdir():
            slot_path = i / "slot"
            if slot_path.exists() and slot_path.read_text().strip() == slot:
                pathobj = i
                break
        else:
            # for/else: no directory matched the requested slot
            raise FileNotFoundError(slot_errmsg)

    match action:
        case "CLEAR" | "OFF":
            value = "0"
        case "ON":
            value = "1"
        case _:
            raise ValueError(f"Invalid action ({action!r})")

    # writing "1"/"0" to the locate file turns the identify LED on/off
    (pathobj / "locate").write_text(value)
| 5,039 | Python | .py | 121 | 31.710744 | 87 | 0.590974 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,679 | jbof_enclosures.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/jbof_enclosures.py | import asyncio
from logging import getLogger
from middlewared.plugins.enclosure_.enums import JbofModels
from middlewared.plugins.enclosure_.jbof.es24n import (is_this_an_es24n,
map_es24n)
from middlewared.plugins.jbof.redfish import (AsyncRedfishClient,
InvalidCredentialsError)
LOGGER = getLogger(__name__)
JBOF_MODEL_ATTR = 'model'
JBOF_URI_ATTR = 'uri'
async def get_redfish_clients(jbofs):
    """Build a ``{jbof uuid: AsyncRedfishClient}`` mapping.

    JBOFs whose credentials are rejected, or that fail unexpectedly, are
    logged and omitted from the result rather than raising.
    """
    result = dict()
    for entry in jbofs:
        try:
            result[entry['uuid']] = await AsyncRedfishClient.cache_get(entry['uuid'], jbofs)
        except InvalidCredentialsError:
            LOGGER.error('Failed to login to redfish ip %r %r', entry['mgmt_ip1'], entry['mgmt_ip2'])
        except Exception:
            LOGGER.error('Unexpected failure creating redfish client object', exc_info=True)
    return result
async def get_enclosure_model(rclient):
    """Determine the JBOF model behind ``rclient`` and the chassis URI
    where it was found.

    Args:
        rclient: an ``AsyncRedfishClient`` instance.

    Returns:
        A ``(model, uri)`` tuple. Both entries are None when the model
        cannot be determined.
    """
    model = uri = None
    try:
        chassis = await rclient.chassis()
    except Exception:
        LOGGER.error('Unexpected failure enumerating chassis info', exc_info=True)
        return model, uri

    # the ES24N has its own dedicated probe
    model, uri = await is_this_an_es24n(rclient)
    if all((model, uri)):
        return model, uri

    try:
        # BUGFIX: use a dedicated loop variable instead of re-binding `uri`;
        # previously a failed lookup returned (None, <last chassis uri>)
        # instead of (None, None).
        for chassis_uri in chassis.values():
            info = await rclient.get(chassis_uri)
            if info.ok:
                try:
                    model = JbofModels(info.json().get('Model', '')).name
                    return model, chassis_uri
                except ValueError:
                    # Using parenthesis on the enum checks the string BY VALUE
                    # and NOT BY NAME. If you were to use square brackets [],
                    # then a KeyError will be raised.
                    continue
    except Exception:
        LOGGER.error('Unexpected failure determing enclosure model', exc_info=True)
    return model, uri
async def map_jbof(jbof_query):
    """Map every known JBOF's enclosure/drive information concurrently.

    Model/URI lookups are cached on each redfish client object so the
    (slow) discovery only happens on the first call per JBOF.
    """
    tasks = list()
    clients = await get_redfish_clients(jbof_query)
    for rclient in clients.values():
        model = rclient.get_attribute(JBOF_MODEL_ATTR)
        uri = rclient.get_attribute(JBOF_URI_ATTR)
        if not model or not uri:
            # first time we've seen this JBOF: discover and cache
            model, uri = await get_enclosure_model(rclient)
            rclient.set_attribute(JBOF_MODEL_ATTR, model)
            rclient.set_attribute(JBOF_URI_ATTR, uri)
        if model == JbofModels.ES24N.name:
            tasks.append(map_es24n(model, rclient, uri))

    # query all JBOFs in parallel; drop failures and empty results
    mapped = list()
    for outcome in await asyncio.gather(*tasks, return_exceptions=True):
        if outcome and not isinstance(outcome, Exception):
            mapped.extend(outcome)
    return mapped
async def set_slot_status(ident, slot, status):
    """Toggle the identify LED for drive ``slot`` on JBOF ``ident``.

    For the time being we assume every model uses the same redfish
    mechanism; add a model parameter and dispatch here if that changes.
    """
    rclient = await AsyncRedfishClient.cache_get(ident)
    uri = rclient.get_attribute(JBOF_URI_ATTR)
    if not uri:
        _, uri = await get_enclosure_model(rclient)

    if status in ('CLEAR', 'OFF'):
        active = False
    elif status in ('ON', 'IDENT', 'IDENTIFY'):
        active = True
    else:
        raise ValueError('Unsupported slot status', status)

    await rclient.post(f'{uri}/Drives/{slot}', data={'LocationIndicatorActive': active})
| 3,653 | Python | .py | 80 | 36.25 | 99 | 0.640023 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,680 | nvme2.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/nvme2.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import pathlib
import re
from pyudev import Context, Devices, DeviceNotFoundAtPathError
from ixhardware import parse_dmi
from .constants import (
DISK_FRONT_KEY,
DISK_REAR_KEY,
DISK_TOP_KEY,
DISK_INTERNAL_KEY,
DRIVE_BAY_LIGHT_STATUS,
SUPPORTS_IDENTIFY_KEY,
SUPPORTS_IDENTIFY_STATUS_KEY,
)
from .enums import ControllerModels
from .slot_mappings import get_nvme_slot_info
RE_SLOT = re.compile(r'^0-([0-9]+)$')
def fake_nvme_enclosure(model, num_of_nvme_slots, mapped, ui_info=None):
    """This function takes the nvme devices that been mapped
    to their respective slots and then creates a "fake" enclosure
    device that matches (similarly) to what our real enclosure
    mapping code does (map_enclosures()). It's _VERY_ important
    that the keys in the `fake_enclosure` dictionary exist because
    our generic enclosure mapping logic expects certain top-level
    keys.

    Furthermore, we generate DMI (SMBIOS) information for this
    "fake" enclosure because our enclosure mapping logic has to have
    a guaranteed unique key for each enclosure so it can properly
    map the disks accordingly

    Args:
        model: controller model string (e.g. 'R50BM')
        num_of_nvme_slots: total number of NVMe bays to report
        mapped: dict of slot number -> block device name
        ui_info: optional dict of extra top-level keys for the webUI

    Returns:
        A single-element list containing the enclosure dict.
    """
    # TODO: The `fake_enclosure` object should be removed from this
    # function and should be generated by the
    # `plugins.enclosure_/enclosure_class.py:Enclosure` class so we
    # can get rid of duplicate logic in this module and in that class
    dmi = f'{model.lower()}_nvme_enclosure'
    fake_enclosure = {
        'id': dmi,
        'dmi': dmi,
        'model': model,
        'should_ignore': False,
        'sg': None,
        'bsg': None,
        'name': f'{model} NVMe Enclosure',
        'controller': True,
        'status': ['OK'],
        'elements': {'Array Device Slot': {}}
    }
    if ui_info is not None:
        fake_enclosure.update(ui_info)

    disks_map = get_nvme_slot_info(model)
    if not disks_map:
        fake_enclosure['should_ignore'] = True
        return [fake_enclosure]

    # hoist the deeply nested per-slot mapping table out of the loop
    # instead of re-walking the same 4 dict levels for every key
    slot_table = disks_map['versions']['DEFAULT']['id'][dmi]
    for slot in range(1, num_of_nvme_slots + 1):
        device = mapped.get(slot)
        # the `value_raw` variables represent the
        # value they would have if a device was
        # inserted into a proper SES device (or not).
        # Since this is NVMe (which deals with PCIe)
        # that paradigm doesn't exist per se but we're
        # "faking" a SES device, hence the hex values.
        # The `status` variables use same logic.
        if device is not None:
            status, value_raw = 'OK', 0x1000000
        else:
            status, value_raw = 'Not installed', 0x5000000

        sinfo = slot_table[slot]
        mapped_slot = sinfo['mapped_slot']
        light = sinfo[SUPPORTS_IDENTIFY_KEY]
        dfk = sinfo[DISK_FRONT_KEY]
        drk = sinfo[DISK_REAR_KEY]
        dtk = sinfo[DISK_TOP_KEY]
        dik = sinfo[DISK_INTERNAL_KEY]
        # light_status will follow light unless we explicitedly override
        light_status = sinfo.get(SUPPORTS_IDENTIFY_STATUS_KEY, light)
        if light_status:
            # Currently do not have an nvme platform that supports retrieving IDENT status
            raise NotImplementedError
        else:
            led = None

        fake_enclosure['elements']['Array Device Slot'][mapped_slot] = {
            'descriptor': f'Disk #{slot}',
            'status': status,
            'value': None,
            'value_raw': value_raw,
            'dev': device,
            SUPPORTS_IDENTIFY_KEY: light,
            DRIVE_BAY_LIGHT_STATUS: led,
            DISK_FRONT_KEY: dfk,
            DISK_REAR_KEY: drk,
            DISK_TOP_KEY: dtk,
            DISK_INTERNAL_KEY: dik,
            'original': {
                'enclosure_id': dmi,
                'enclosure_sg': None,
                'enclosure_bsg': None,
                'descriptor': f'slot{slot}',
                'slot': slot,
            }
        }

    return [fake_enclosure]
def map_plx_nvme(model, ctx):
    """Map the 4 rear NVMe bays behind the PLX PCIe bridge (M50/M60/R50BM).

    Walks the ACPI devices whose path matches the PLX bridge handle, finds
    the block devices beneath them and resolves each one's PCI address to a
    physical slot number via /sys/bus/pci/slots.

    Args:
        model: controller model string (e.g. 'M60', 'R50BM')
        ctx: a pyudev Context

    Returns:
        The single-element fake-enclosure list from fake_nvme_enclosure().
    """
    num_of_nvme_slots = 4  # nvme plx bridge used on m50/60 and r50bm have 4 nvme drive bays
    # pci address (e.g. '0000:80:40') -> sysfs slot directory name (e.g. '0-4')
    addresses_to_slots = {
        (slot / 'address').read_text().strip(): slot.name
        for slot in pathlib.Path('/sys/bus/pci/slots').iterdir()
    }
    mapped = dict()
    for i in filter(lambda x: x.attributes.get('path') == b'\\_SB_.PC03.BR3A', ctx.list_devices(subsystem='acpi')):
        try:
            physical_node = Devices.from_path(ctx, f'{i.sys_path}/physical_node')
        except DeviceNotFoundAtPathError:
            # happens when there are no rear-nvme drives plugged in
            pass
        else:
            for child in physical_node.children:
                if child.properties.get('SUBSYSTEM') != 'block':
                    continue
                try:
                    # grandparent is the NVMe controller's PCI device
                    controller_sys_name = child.parent.parent.sys_name
                except AttributeError:
                    continue
                if (slot := addresses_to_slots.get(controller_sys_name.split('.')[0])) is None:
                    continue
                if not (m := re.match(RE_SLOT, slot)):
                    continue
                # slot dir names look like '0-4'; the number after the dash is the slot
                slot = int(m.group(1))
                if model == 'R50BM':
                    # When adding this code and testing on internal R50BM, the starting slot
                    # number for the rear nvme drive bays starts at 2 and goes to 5. This means
                    # we're always off by 1. The easiest solution is to just check for this
                    # specific platform and subtract 1 from the slot number to keep everything
                    # in check.
                    # To make things event more complicated, we found (by testing on internal hardware)
                    # that slot 2 on OS is actually slot 3 and vice versa. This means we need to swap
                    # those 2 numbers with each other to keep the webUI lined up with reality.
                    slot -= 1
                    if slot == 2:
                        slot = 3
                    elif slot == 3:
                        slot = 2

                mapped[slot] = child.sys_name

    return fake_nvme_enclosure(model, num_of_nvme_slots, mapped)
def map_r50_or_r50b(model, ctx):
    """Map the rear NVMe bays on R50 (3 bays) and R50B (2 bays) platforms.

    Each rear bay sits behind a fixed ACPI handle, so the handle -> slot
    mapping is hard-coded per platform.

    Args:
        model: 'R50' or 'R50B'
        ctx: a pyudev Context

    Returns:
        The single-element fake-enclosure list from fake_nvme_enclosure().
    """
    num_of_nvme_slots = 3 if model == 'R50' else 2  # r50 has 3 rear nvme slots, r50b has 2
    if model == 'R50':
        acpihandles = {b'\\_SB_.PC00.RP01.PXSX': 3, b'\\_SB_.PC01.BR1A.OCL0': 1, b'\\_SB_.PC01.BR1B.OCL1': 2}
    else:
        acpihandles = {b'\\_SB_.PC03.BR3A': 2, b'\\_SB_.PC00.RP01.PXSX': 1}

    mapped = dict()
    for i in filter(lambda x: x.attributes.get('path') in acpihandles, ctx.list_devices(subsystem='acpi')):
        acpi_handle = i.attributes.get('path')
        try:
            phys_node = Devices.from_path(ctx, f'{i.sys_path}/physical_node')
        except DeviceNotFoundAtPathError:
            # NOTE(review): this `break` aborts mapping of ALL remaining
            # slots when one handle has no physical node — `continue` looks
            # like the intent; confirm before changing.
            break
        slot = acpihandles[acpi_handle]
        for nvme in filter(lambda x: x.sys_name.startswith('nvme') and x.subsystem == 'block', phys_node.children):
            mapped[slot] = nvme.sys_name
            break
        else:
            # for/else: no block device under this handle, bay is empty
            mapped[slot] = None

        if len(mapped) == num_of_nvme_slots:
            # there can be (and often is) TONS of acpi devices on
            # any given system so once we've mapped the total # of
            # nvme drives, we break out early as to be as efficient
            # as possible
            break

    return fake_nvme_enclosure(model, num_of_nvme_slots, mapped)
def map_r30_or_fseries(model, ctx):
    """Map every NVMe bay on the all-NVMe platforms (R30 and F-series).

    Builds a PCI-address -> block-device table from udev, then resolves
    each /sys/bus/pci/slots entry through a per-platform webUI slot map.

    Args:
        model: controller model string (e.g. 'R30', 'F100')
        ctx: a pyudev Context

    Returns:
        The single-element fake-enclosure list from fake_nvme_enclosure().
    """
    nvmes = {}
    for i in ctx.list_devices(subsystem='nvme'):
        for namespace_dev in i.children:
            if namespace_dev.device_type != 'disk':
                continue
            try:
                # i.parent.sys_name looks like 0000:80:40.0
                # namespace_dev.sys_name looks like nvme1n1
                nvmes[i.parent.sys_name[:-2]] = namespace_dev.sys_name
            except (IndexError, AttributeError):
                continue

    # the keys in this dictionary are the physical pcie slot ids
    # and the values are the slots that the webUI uses to map them
    # to their physical locations in a human manageable way
    if model == ControllerModels.R30.value:
        webui_map = {
            '27': 1, '26': 7, '25': 2, '24': 8,
            '37': 3, '36': 9, '35': 4, '34': 10,
            '45': 5, '47': 11, '40': 6, '41': 12,
            '38': 14, '39': 16, '43': 13, '44': 15,
        }
        num_of_nvme_slots = len(webui_map)
    else:
        # f-series vendor is nice to us and nvme phys slots start at 1
        # and increment in a human readable way already
        webui_map = {
            '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6,
            '7': 7, '8': 8, '9': 9, '10': 10, '11': 11, '12': 12,
            '13': 13, '14': 14, '15': 15, '16': 16, '17': 17, '18': 18,
            '19': 19, '20': 20, '21': 21, '22': 22, '23': 23, '24': 24,
        }
        num_of_nvme_slots = len(webui_map)

    mapped = {}
    for i in pathlib.Path('/sys/bus/pci/slots').iterdir():
        addr = (i / 'address').read_text().strip()
        if (nvme := nvmes.get(addr, None)) and (mapped_slot := webui_map.get(i.name, None)):
            mapped[mapped_slot] = nvme

    # extra top-level keys the webUI wants on these platforms
    ui_info = {
        'rackmount': True,
        'top_loaded': False,
        'front_slots': num_of_nvme_slots,
        'rear_slots': 0,
        'internal_slots': 0
    }

    return fake_nvme_enclosure(model, num_of_nvme_slots, mapped, ui_info)
def map_nvme():
    """Dispatch to the platform-specific NVMe bay mapping routine.

    Reads the platform model from SMBIOS and returns the fake-enclosure
    list for it, or an empty list when the platform has no mapped NVMe
    bays.
    """
    model = parse_dmi().system_product_name.removeprefix('TRUENAS-')
    model = model.removesuffix('-S').removesuffix('-HA')
    ctx = Context()

    r50_family = (
        ControllerModels.R50.value,
        ControllerModels.R50B.value,
    )
    all_nvme_platforms = (
        ControllerModels.R30.value,
        ControllerModels.F60.value,
        ControllerModels.F100.value,
        ControllerModels.F130.value,
    )
    plx_platforms = (
        ControllerModels.M30.value,
        ControllerModels.M40.value,
        ControllerModels.M50.value,
        ControllerModels.M60.value,
        ControllerModels.R50BM.value,
    )

    if model in r50_family:
        return map_r50_or_r50b(model, ctx)
    if model in all_nvme_platforms:
        # all nvme systems which we need to handle separately
        return map_r30_or_fseries(model, ctx)
    if model in plx_platforms:
        return map_plx_nvme(model, ctx)
    return []
| 10,748 | Python | .py | 246 | 33.947154 | 115 | 0.587083 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,681 | enclosure_class.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/enclosure_class.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import logging
from middlewared.utils.scsi_generic import inquiry
from ixhardware import parse_dmi
from .constants import (
MINI_MODEL_BASE,
MINIR_MODEL_BASE,
SYSFS_SLOT_KEY,
MAPPED_SLOT_KEY,
SUPPORTS_IDENTIFY_KEY,
SUPPORTS_IDENTIFY_STATUS_KEY,
DISK_FRONT_KEY,
DISK_REAR_KEY,
DISK_TOP_KEY,
DISK_INTERNAL_KEY,
DRIVE_BAY_LIGHT_STATUS,
)
from .element_types import ELEMENT_TYPES, ELEMENT_DESC
from .enums import ControllerModels, ElementDescriptorsToIgnore, ElementStatusesToIgnore, JbodModels
from .sysfs_disks import map_disks_to_enclosure_slots
from .slot_mappings import get_slot_info
logger = logging.getLogger(__name__)
class Enclosure:
    def __init__(self, bsg, sg, enc_stat):
        """Parse an enclosure's SES status into this object.

        Args:
            bsg: block SCSI generic device path (e.g. '/dev/bsg/0:0:0:0')
            sg: SCSI generic device path (e.g. '/dev/sg0')
            enc_stat: dict with 'id', 'status' and 'elements' keys
        """
        self.dmi = parse_dmi()
        self.bsg, self.sg, self.pci, = bsg, sg, bsg.removeprefix('/dev/bsg/')
        self.encid, self.status = enc_stat['id'], list(enc_stat['status'])
        # INQUIRY data must be gathered before model detection since the
        # t10 vendor/product strings drive _get_model_and_controller()
        self.vendor, self.product, self.revision, self.encname = self._get_vendor_product_revision_and_encname()
        self._get_model_and_controller()
        self._should_ignore_enclosure()
        self.sysfs_map, self.disks_map, self.elements = dict(), dict(), dict()
        if not self.should_ignore:
            # only pay the cost of sysfs walking / element parsing for
            # enclosures we actually care about
            self.sysfs_map = map_disks_to_enclosure_slots(self)
            self.disks_map = self._get_array_device_mapping_info()
            self.elements = self._parse_elements(enc_stat['elements'])
def asdict(self):
"""This method is what is returned in enclosure2.query"""
return {
'should_ignore': self.should_ignore, # enclosure device we dont need or expect
'name': self.encname, # vendor, product and revision joined by whitespace
'model': self.model, # M60, F100, MINI-R, etc
'controller': self.controller, # if True, represents the "head-unit"
'dmi': self.dmi.system_product_name,
'status': self.status, # the overall status reported by the enclosure
'id': self.encid,
'vendor': self.vendor, # t10 vendor from INQUIRY
'product': self.product, # product from INQUIRY
'revision': self.revision, # revision from INQUIRY
'bsg': self.bsg, # the path for which this maps to a bsg device (/dev/bsg/0:0:0:0)
'sg': self.sg, # the scsi generic device (/dev/sg0)
'pci': self.pci, # the pci info (0:0:0:0)
'rackmount': self.rackmount, # requested by UI team
'top_loaded': self.top_loaded, # requested by UI team
'top_slots': self.top_slots, # requested by UI team
'front_loaded': self.front_loaded, # requested by UI team
'front_slots': self.front_slots, # requested by UI team
'rear_slots': self.rear_slots, # requested by UI team
'internal_slots': self.internal_slots, # requested by UI team
'elements': self.elements # dictionary with all element types and their relevant information
}
def _should_ignore_enclosure(self):
if not self.model:
# being unable to determine the model means many other things will not work
self.should_ignore = True
elif all((
(not any((self.is_r20_series, self.is_mini))),
self.vendor == 'AHCI',
self.product == 'SGPIOEnclosure',
)):
# if this isn't an R20 or MINI platform and this is the Virtual AHCI
# enclosure, then we can ignore them
self.should_ignore = True
elif self.encid == '3000000000000002' and any((
self.is_r20_series,
(self.model in (
ControllerModels.MINI3XP.value,
ControllerModels.MINI3E.value,
)),
)):
# If this platform is a R20*, a MINI-3.0-X+, or MINI-3.0-E, there are
# 2x Virtual AHCI enclosure devices. However, the physical drive slots
# only get mapped to the Virtual AHCI enclosure of the 1st one. (i.e.
# the one whose enclosure id is "3000000000000001"). So we ignore the
# other enclosure device otherwise.
self.should_ignore = True
else:
self.should_ignore = False
def _get_vendor_product_revision_and_encname(self):
"""Sends a standard INQUIRY command to the enclosure device
so we can parse the vendor/prodcut/revision(and /serial if we ever wanted
to use that information) for the enclosure device. It's important
that we parse this information into their own top-level keys since we
base some of our drive mappings (potentially) on the "revision" (aka firmware)
for the enclosure
"""
inq = inquiry(self.sg)
data = [inq['vendor'], inq['product'], inq['revision']]
data.append(' '.join(data))
return data
def _get_model_and_controller(self):
"""This determines the model and whether or not this a controller enclosure.
The term "controller" refers to the enclosure device where the TrueNAS OS
is installed (sometimes referred to as the head-unit). We check 2 different
values to determine the model/controller.
1. We check SMBIOS DMI type "system" buffer, specifically the product name
2. We check the t10 vendor and product strings returned from the enclosure
using a standard inquiry command
"""
spn = self.dmi.system_product_name
model = spn.removeprefix('TRUENAS-').removeprefix('FREENAS-')
model = model.removesuffix('-HA').removesuffix('-S')
try:
dmi_model = ControllerModels[model]
except KeyError:
try:
# the member names of this enum just so happen to line
# up with the string we get from DMI, however, the MINIs
# get flashed with strings that have invalid characters
# for members of an enum. If we get here, then we change
# to using the parenthesis approach because that matches
# an entry in the enum by value
dmi_model = ControllerModels(model)
except ValueError:
# this shouldn't ever happen because the instantiator of this class
# checks DMI before we even get here but better safe than sorry
logger.warning('Unexpected model: %r from dmi: %r', model, spn)
self.model = ''
self.controller = False
return
t10vendor_product = f'{self.vendor}_{self.product}'
match t10vendor_product:
case 'ECStream_4024Sp' | 'ECStream_4024Ss' | 'iX_4024Sp' | 'iX_4024Ss':
# M series
self.model = dmi_model.value
self.controller = True
case 'CELESTIC_P3215-O' | 'CELESTIC_P3217-B':
# X series
self.model = dmi_model.value
self.controller = True
case 'BROADCOM_VirtualSES':
# H series
self.model = dmi_model.value
self.controller = True
case 'ECStream_FS1' | 'ECStream_FS2' | 'ECStream_DSS212Sp' | 'ECStream_DSS212Ss':
# R series
self.model = dmi_model.value
self.controller = True
case 'iX_FS1L' | 'iX_FS2' | 'iX_DSS212Sp' | 'iX_DSS212Ss':
# more R series
self.model = dmi_model.value
self.controller = True
case 'iX_TrueNASR20p' | 'iX_2012Sp' | 'iX_TrueNASSMCSC826-P':
# R20
self.model = dmi_model.value
self.controller = True
case 'AHCI_SGPIOEnclosure':
# R20 variants or MINIs
self.model = dmi_model.value
self.controller = True
case 'iX_eDrawer4048S1' | 'iX_eDrawer4048S2':
# R50
self.model = dmi_model.value
self.controller = True
case 'CELESTIC_X2012' | 'CELESTIC_X2012-MT':
self.model = JbodModels.ES12.value
self.controller = False
case 'ECStream_4024J' | 'iX_4024J':
self.model = JbodModels.ES24.value
self.controller = False
case 'ECStream_2024Jp' | 'ECStream_2024Js' | 'iX_2024Jp' | 'iX_2024Js':
self.model = JbodModels.ES24F.value
self.controller = False
case 'CELESTIC_R0904-F0001-01':
self.model = JbodModels.ES60.value
self.controller = False
case 'HGST_H4060-J':
self.model = JbodModels.ES60G2.value
self.controller = False
case 'WDC_UData60':
self.model = JbodModels.ES60G3.value
self.controller = False
case 'HGST_H4102-J':
self.model = JbodModels.ES102.value
self.controller = False
case 'VikingES_NDS-41022-BB' | 'VikingES_VDS-41022-BB':
self.model = JbodModels.ES102G2.value
self.controller = False
case _:
logger.warning(
'Unexpected t10 vendor: %r and product: %r combination',
self.vendor, self.product
)
self.model = ''
self.controller = False
def _ignore_element(self, parsed_element_status, element):
"""We ignore certain elements reported by the enclosure, for example,
elements that report as unsupported. Our alert system polls enclosures
for elements that report "bad" statuses and these elements need to be
ignored. NOTE: every hardware platform is different for knowing which
elements are to be ignored"""
desc = element['descriptor'].lower()
return any((
(parsed_element_status.lower() == ElementStatusesToIgnore.UNSUPPORTED.value),
(self.is_xseries and desc == ElementDescriptorsToIgnore.ADISE0.value),
(self.model == JbodModels.ES60.value and desc == ElementDescriptorsToIgnore.ADS.value),
(not self.is_hseries and desc in (
ElementDescriptorsToIgnore.EMPTY.value,
ElementDescriptorsToIgnore.AD.value,
ElementDescriptorsToIgnore.DS.value,
)),
))
    def _get_array_device_mapping_info(self):
        """Look up this enclosure's drive-slot mapping table.

        Returns the slot mapping dict for this specific enclosure (keyed
        by slot number), or None when no mapping information exists.
        """
        mapped_info = get_slot_info(self)
        if not mapped_info:
            return

        # we've gotten the disk mapping information based on the
        # enclosure but we need to check if this enclosure has
        # different revisions
        vers_key = 'DEFAULT'
        if not mapped_info['any_version']:
            for key, vers in mapped_info['versions'].items():
                if self.dmi.system_version == key:
                    # NOTE(review): this binds the dict VALUE (`vers`), not the
                    # key, and is later used to index mapped_info['versions']
                    # again — this only works if non-DEFAULT entries are string
                    # aliases to other version keys; confirm in slot_mappings.
                    vers_key = vers
                    break

        # Now we need to check this specific enclosure's disk slot
        # mapping information
        idkey, idvalue = 'model', self.model
        if all((
            self.vendor == 'AHCI',
            self.product == 'SGPIOEnclosure',
            any((self.is_mini, self.is_r20_series))
        )):
            # MINI/R20 Virtual AHCI enclosures are distinguished by id
            idkey, idvalue = 'id', self.encid
        elif self.is_r50_series:
            # R50 variants are distinguished by the t10 product string
            idkey, idvalue = 'product', self.product

        # Now we know the specific enclosure we're on and the specific
        # key we need to use to pull out the drive slot mapping
        for mapkey, mapslots in mapped_info['versions'][vers_key].items():
            if mapkey == idkey and (found := mapslots.get(idvalue)):
                return found
    def _parse_elements(self, elements):
        """Parse raw SES elements into the final per-type dict.

        Args:
            elements: dict of slot -> raw element info ('type', 'status',
                'descriptor') as produced by the enclosure status query.

        Returns:
            dict keyed by element-type name (e.g. 'Array Device Slot'),
            each value a dict of mapped slot -> parsed element info.
        """
        final = {}
        disk_position_mapping = self.determine_disk_slot_positions()
        for slot, element in elements.items():
            try:
                element_type = ELEMENT_TYPES[element['type']]
            except KeyError:
                # means the element type that's being
                # reported to us is unknown so log it
                # and continue on
                # NOTE(review): `self.devname` is not assigned anywhere in the
                # visible code (only `encname`) — this log call looks like it
                # would raise AttributeError; confirm.
                logger.warning('Unknown element type: %r for %r', element['type'], self.devname)
                continue

            try:
                element_status = ELEMENT_DESC[element['status'][0]]
            except KeyError:
                # means the elements status reported by the enclosure
                # is not mapped so just report unknown
                element_status = 'UNKNOWN'

            if self._ignore_element(element_status, element):
                continue

            if element_type[0] not in final:
                # first time seeing this element type so add it
                final[element_type[0]] = {}

            # convert list of integers representing the elements
            # raw status to an integer so it can be converted
            # appropriately based on the element type
            value_raw = 0
            for val in element['status']:
                value_raw = (value_raw << 8) + val

            mapped_slot = slot
            parsed = {
                'descriptor': element['descriptor'].strip(),
                'status': element_status,
                'value': element_type[1](value_raw),
                'value_raw': value_raw,
            }
            if element_type[0] == 'Array Device Slot' and self.disks_map:
                try:
                    dinfo = self.disks_map[slot]
                    sysfs_slot = dinfo[SYSFS_SLOT_KEY]
                    parsed['dev'] = self.sysfs_map[sysfs_slot].name
                except KeyError:
                    # this happens on some of the MINI platforms, for example,
                    # the MINI-3.0-XL+ because we map the 1st drive and only
                    # the 1st drive from the Virtual AHCI controller with id
                    # that ends with 002. However, we send a standard enclosure
                    # diagnostics command so all the other elements will return
                    continue

                # does this enclosure slot support identification? (i.e. lighting up LED)
                parsed[SUPPORTS_IDENTIFY_KEY] = dinfo[SUPPORTS_IDENTIFY_KEY]

                # does this enclosure slot support reporting identification status?
                # (i.e. whether the LED is currently lit up)
                if dinfo.get(SUPPORTS_IDENTIFY_STATUS_KEY, parsed[SUPPORTS_IDENTIFY_KEY]):
                    parsed[DRIVE_BAY_LIGHT_STATUS] = self.sysfs_map[sysfs_slot].locate
                else:
                    parsed[DRIVE_BAY_LIGHT_STATUS] = None

                mapped_slot = dinfo[MAPPED_SLOT_KEY]
                # is this a front, rear or internal slot?
                parsed.update(disk_position_mapping.get(mapped_slot, dict()))
                # keep the pre-mapping identifiers around for callers that
                # need to talk to the physical enclosure directly
                parsed['original'] = {
                    'enclosure_id': self.encid,
                    'enclosure_sg': self.sg,
                    'enclosure_bsg': self.bsg,
                    'descriptor': f'slot{sysfs_slot}',
                    'slot': sysfs_slot,
                }

            final[element_type[0]].update({mapped_slot: parsed})

        return final
    @property
    def model(self):
        # detected platform model string (e.g. 'M60', 'ES24'); '' when unknown
        return self.__model
    @model.setter
    def model(self, val):
        # set by _get_model_and_controller() during __init__
        self.__model = val
    @property
    def controller(self):
        # True when this enclosure is the head-unit (where TrueNAS is installed)
        return self.__controller
    @controller.setter
    def controller(self, val):
        # set by _get_model_and_controller() during __init__
        self.__controller = val
    @property
    def should_ignore(self):
        """True when this enclosure device should be skipped entirely.

        Set by ``_should_ignore_enclosure()`` during ``__init__``. Used to
        short-circuit the more expensive parsing logic and to let callers
        of this class filter out irrelevant enclosure devices.
        """
        return self.__ignore
    @should_ignore.setter
    def should_ignore(self, val):
        # set by _should_ignore_enclosure() during __init__
        self.__ignore = val
@property
def is_jbod(self):
"""Determine if the enclosure device is a JBOD
(just a bunch of disks) unit.
Args:
Returns: bool
"""
return self.model in (i.value for i in JbodModels)
@property
def is_rseries(self):
"""Determine if the enclosure device is a r-series controller.
Args:
Returns: bool
"""
return all((self.controller, self.model and self.model[0] == 'R'))
@property
def is_r10(self):
"""Determine if the enclosure device is a r10 controller.
Args:
Returns: bool
"""
return all((
self.is_rseries,
(self.model == ControllerModels.R10.value),
))
@property
def is_r20_series(self):
"""Determine if the enclosure device is a r20-series controller.
Args:
Returns: bool
"""
return all((
self.is_rseries,
self.model.startswith((
ControllerModels.R20.value,
ControllerModels.R20A.value,
ControllerModels.R20B.value,
))
))
@property
def is_r30(self):
"""Determine if the enclosure device is a r30 controller.
Args:
Returns: bool
"""
return all((
self.is_rseries,
(self.model == ControllerModels.R30.value),
))
@property
def is_r40(self):
"""Determine if the enclosure device is a r40 controller.
Args:
Returns: bool
"""
return all((
self.is_rseries,
(self.model == ControllerModels.R40.value),
))
@property
def is_r50_series(self):
"""Determine if the enclosure device is a r50-series controller.
Args:
Returns: bool
"""
return all((
self.is_rseries,
self.model.startswith((
ControllerModels.R50.value,
ControllerModels.R50B.value,
ControllerModels.R50BM.value,
))
))
@property
def is_fseries(self):
"""Determine if the enclosure device is a f-series controller.
Args:
Returns: bool
"""
return all((self.controller, self.model and self.model[0] == 'F'))
@property
def is_hseries(self):
"""Determine if the enclosure device is a h-series controller.
Args:
Returns: bool
"""
return all((self.controller, self.model and self.model[0] == 'H'))
@property
def is_mseries(self):
"""Determine if the enclosure device is a m-series controller.
Args:
Returns: bool
"""
return all((
self.controller, not self.is_mini, self.model and self.model[0] == 'M'
))
@property
def is_xseries(self):
"""Determine if the enclosure device is a x-series controller.
Args:
Returns: bool
"""
return all((
self.controller, self.model and self.model[0] == 'X'
))
@property
def is_mini(self):
"""Determine if the enclosure device is a mini-series controller.
Args:
Returns: bool
"""
return all((
self.controller, self.model.startswith(MINI_MODEL_BASE)
))
@property
def is_mini_3e(self):
"""Determine if the enclosure device is a MINI-3.0-E.
Args:
Returns: bool
"""
return all((
self.is_mini,
self.model == ControllerModels.MINI3E.value
))
@property
def is_mini_3e_plus(self):
"""Determine if the enclosure device is a MINI-3.0-E+.
Args:
Returns: bool
"""
return all((
self.is_mini,
self.model == ControllerModels.MINI3EP.value
))
@property
def is_mini_3_x(self):
"""Determine if the enclosure device is a MINI-3.0-X.
Args:
Returns: bool
"""
return all((
self.is_mini,
self.model == ControllerModels.MINI3X.value
))
@property
def is_mini_3_x_plus(self):
"""Determine if the enclosure device is a MINI-3.0-X+.
Args:
Returns: bool
"""
return all((
self.is_mini,
self.model == ControllerModels.MINI3XP.value
))
@property
def is_mini_3_xl_plus(self):
"""Determine if the enclosure device is a MINI-3.0-XL+.
Args:
Returns: bool
"""
return all((
self.is_mini,
self.model == ControllerModels.MINI3XLP.value
))
@property
def is_mini_r(self):
"""Determine if the enclosure device is a mini-r-series controller.
Args:
Returns: bool
"""
return all((self.is_mini, self.model.startswith(MINIR_MODEL_BASE)))
@property
def is_12_bay_jbod(self):
"""Determine if the enclosure device is a 12 bay JBOD.
Args:
Returns: bool
"""
return all((
self.is_jbod,
(self.model == JbodModels.ES12.value),
))
@property
def is_24_bay_jbod(self):
    """True when this is a 24 bay JBOD (ES24 or ES24F)."""
    models_24 = (JbodModels.ES24.value, JbodModels.ES24F.value)
    in_models = self.model in models_24
    return bool(self.is_jbod) and in_models
@property
def is_60_bay_jbod(self):
    """True when this is a 60 bay JBOD (ES60, ES60G2 or ES60G3)."""
    models_60 = (
        JbodModels.ES60.value,
        JbodModels.ES60G2.value,
        JbodModels.ES60G3.value,
    )
    in_models = self.model in models_60
    return bool(self.is_jbod) and in_models
@property
def is_102_bay_jbod(self):
    """True when this is a 102 bay JBOD (ES102 or ES102G2)."""
    models_102 = (JbodModels.ES102.value, JbodModels.ES102G2.value)
    in_models = self.model in models_102
    return bool(self.is_jbod) and in_models
@property
def rackmount(self):
    """True when this enclosure mounts in a rack (any JBOD or rackmount head unit)."""
    flags = [
        self.is_jbod,
        self.is_mini_r,
        self.is_rseries,
        self.is_fseries,
        self.is_hseries,
        self.is_mseries,
        self.is_xseries,
    ]
    return any(flags)
@property
def top_loaded(self):
    """True when the disk slots load from the top of the chassis."""
    flags = [
        self.is_r50_series,
        self.is_60_bay_jbod,
        self.is_102_bay_jbod,
    ]
    return any(flags)
@property
def top_slots(self):
    """Total number of top-facing drive bays (0 for non top-loaded platforms)."""
    if not self.top_loaded:
        return 0
    if self.is_r50_series:
        return 48
    if self.is_60_bay_jbod:
        return 60
    if self.is_102_bay_jbod:
        return 102
    # top_loaded but no known platform matched; shouldn't happen
    return 0
@property
def front_loaded(self):
    """True when the disk slots load from the front of the chassis."""
    flags = [
        self.is_xseries,
        self.is_r30,
        self.is_r40,
        self.is_12_bay_jbod,
        self.is_r20_series,
        self.is_hseries,
        self.is_r10,
        self.is_fseries,
        self.is_mseries,
        self.is_mini,
        self.is_24_bay_jbod,
    ]
    return any(flags)
@property
def front_slots(self):
    """Total number of front-facing drive bays (0 for non front-loaded platforms)."""
    if not self.front_loaded:
        return 0
    if self.is_mini_3e or self.is_mini_3e_plus:
        return 6
    if self.is_mini_3_x or self.is_mini_3_x_plus:
        return 7
    if self.is_mini_3_xl_plus:
        return 10
    if (self.is_mini_r or self.is_hseries or self.is_xseries
            or self.is_r30 or self.is_12_bay_jbod):
        return 12
    if self.is_r20_series:
        return 14
    if self.is_r10:
        return 16
    if self.is_fseries or self.is_mseries or self.is_24_bay_jbod:
        return 24
    if self.is_rseries:
        return 48
    # front_loaded but no known platform matched; shouldn't happen
    return 0
@property
def rear_slots(self):
    """Total number of rear drive bays, keyed off the exact controller model."""
    if not self.model:
        return 0
    rear_bay_count = {
        ControllerModels.R50B.value: 2,
        ControllerModels.R50.value: 3,
        ControllerModels.M50.value: 4,
        ControllerModels.M60.value: 4,
        ControllerModels.R50BM.value: 4,
    }
    return rear_bay_count.get(self.model, 0)
@property
def internal_slots(self):
    """Total number of internal drive bays; only the R30 has any (4)."""
    if self.is_r30:
        return 4
    return 0
def determine_disk_slot_positions(self):
    """Map each overall slot number to front/top/rear/internal booleans.

    Requested by the UI team so that clicking a slot can switch the
    rendered picture to the rear/internal/front view of the machine.

    Returns: dict keyed by 1-based slot number; each value holds the
    DISK_FRONT_KEY/DISK_TOP_KEY/DISK_REAR_KEY/DISK_INTERNAL_KEY booleans.
    """
    front, rear, internal, top = (
        self.front_slots, self.rear_slots, self.internal_slots, self.top_slots
    )
    if front:
        primary, is_front, is_top = front, True, False
    elif top:
        primary, is_front, is_top = top, False, True
    else:
        # no front or top bays at all; shouldn't happen
        return {}
    has_rear = has_internal = False
    total = primary
    if rear:
        has_rear = True
        total += rear
    elif internal:
        # NOTE: only 1 platform has internal slots and it DOES NOT have
        # rear slots. If a future platform ever has both, this logic
        # won't work and will need fixing.
        has_internal = True
        total += internal
    positions = {}
    for slot in range(1, total + 1):
        in_primary = slot <= primary
        positions[slot] = {
            DISK_FRONT_KEY: is_front and in_primary,
            DISK_TOP_KEY: is_top and in_primary,
            DISK_REAR_KEY: has_rear and not in_primary,
            DISK_INTERNAL_KEY: has_internal and not in_primary,
        }
    return positions
| 27,577 | Python | .py | 714 | 27.372549 | 112 | 0.562323 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,682 | fseries_drive_identify.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/fseries_drive_identify.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from subprocess import run, PIPE, STDOUT
from middlewared.service_exception import CallError
class InsufficientPrivilege(Exception):
    """Raised when the BMC rejects an ipmi-raw LED command with response
    status byte 'd4' (insufficient privilege level)."""
    pass
def get_cmd(slot, status):
    """Build the ipmi-raw command list(s) that toggle an F-Series drive bay LED.

    Args:
        slot: hex string of the drive bay to act upon (e.g. '0x5')
        status: 'OFF'/'CLEAR' clears both the identify (blue, 0x22) and
            fault (yellow, 0x39) LEDs; 'ON'/'IDENT'/'IDENTIFY' turns the
            identify LED on; 'FAULT' turns the fault LED on.

    Returns: list of argv lists, one per ipmi-raw invocation.

    Raises:
        ValueError: unrecognized status string.
    """
    def build(cmd_byte, action_byte):
        # argv layout: ipmi-raw LUN NETFN CMD SUBCMD SLOT ACTION
        return ['ipmi-raw', '0x0', '0x3c', cmd_byte, '0x01', slot, action_byte]

    if status in ('OFF', 'CLEAR'):
        # clear identify (blue) first, then fault (yellow)
        return [build('0x22', '0x00'), build('0x39', '0x00')]
    if status in ('ON', 'IDENT', 'IDENTIFY'):
        return [build('0x22', '0x01')]
    if status == 'FAULT':
        return [build('0x39', '0x01')]
    raise ValueError(f'Invalid status: {status!r}')
def set_slot_status(slot, status):
    """Send the ipmi-raw command(s) toggling the LED for drive bay `slot`.

    Args:
        slot: int drive bay number, 1..24 (F-Series has 24 bays)
        status: LED action string passed through to `get_cmd`

    Raises:
        ValueError: invalid slot number or status string.
        CallError: an ipmi-raw command exited non-zero.
        InsufficientPrivilege: BMC responded with status byte 'd4'.
    """
    if slot < 1 or slot > 24:
        raise ValueError(f'Invalid slot: {slot!r}')
    for cmd in get_cmd(hex(slot), status):
        ret = run(cmd, stdout=PIPE, stderr=STDOUT)
        if ret.returncode != 0:
            # BUGFIX: with stderr=STDOUT, `ret.stderr` is always None so the
            # previous `ret.stderr.decode()` raised AttributeError instead of
            # CallError -- the merged output lives in `ret.stdout`
            raise CallError(f'Failed to run {cmd!r}: {ret.stdout.decode()}')
        elif ret.stdout.decode().strip().split()[-1].lower() == 'd4':
            raise InsufficientPrivilege()
| 1,908 | Python | .py | 55 | 27.527273 | 85 | 0.585776 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,683 | constants.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/constants.py | SYSFS_SLOT_KEY = 'sysfs_slot'
# Keys used in the per-slot dictionaries produced by the slot-mapping helpers.
MAPPED_SLOT_KEY = 'mapped_slot'
# Whether a slot's identify light can be driven, and whether its state can
# also be read back.
SUPPORTS_IDENTIFY_KEY = 'supports_identify_light'
SUPPORTS_IDENTIFY_STATUS_KEY = 'supports_identify_light_status'
DRIVE_BAY_LIGHT_STATUS = 'drive_bay_light_status'
# Model-name prefixes identifying the TrueNAS MINI family (and the rackmount
# MINI-R variant).
MINI_MODEL_BASE = 'MINI'
MINIR_MODEL_BASE = f'{MINI_MODEL_BASE}-R'
# Head-unit disk slots are numbered starting from 1.
HEAD_UNIT_DISK_SLOT_START_NUMBER = 1
# Keys describing the physical position of a disk slot within the chassis.
DISK_FRONT_KEY = 'is_front'
DISK_REAR_KEY = 'is_rear'
DISK_TOP_KEY = 'is_top'
DISK_INTERNAL_KEY = 'is_internal'
24,684 | element_types.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/element_types.py | from .enums import ElementStatus, ElementType
def alarm(value_raw):
    """Decode an Audible Alarm status element (SES-4 7.3.8, Table 98).

    Returns a comma-separated string of the alarm bits that are set, or
    None when none are.
    """
    bits = (
        ('Identify on', (value_raw >> 16) & 0x80),
        ('Fail on', (value_raw >> 16) & 0x40),
        ('RQST mute', value_raw & 0x80),
        ('Muted', value_raw & 0x40),
        ('Remind', value_raw & 0x10),
        ('INFO', value_raw & 0x08),
        ('NON-CRIT', value_raw & 0x04),
        ('CRIT', value_raw & 0x02),
        ('UNRECOV', value_raw & 0x01),
    )
    set_bits = [label for label, mask in bits if mask]
    if set_bits:
        return ', '.join(set_bits)
def comm(value_raw):
    """Decode a Communication Port status element (SES-4 7.3.19, Table 140).

    Returns a comma-separated string of the bits that are set, or None.
    """
    bits = (
        ('Identify on', (value_raw >> 16) & 0x80),
        ('Fail on', (value_raw >> 16) & 0x40),
        ('Disabled', value_raw & 0x01),
    )
    set_bits = [label for label, mask in bits if mask]
    if set_bits:
        return ', '.join(set_bits)
def current(value_raw):
    """Decode a Current Sensor status element (SES-4 7.3.21, Table 148).

    Always returns the measured current in amps (hundredths in the low 16
    bits), followed by any warning/critical overcurrent flags that are set.
    """
    bits = (
        ('Identify on', (value_raw >> 16) & 0x80),
        ('Fail on', (value_raw >> 16) & 0x40),
        ('Warn over', (value_raw >> 16) & 0x8),
        ('Crit over', (value_raw >> 16) & 0x2),
    )
    amps = f'{(value_raw & 0xffff) / 100}A'
    return ', '.join([amps] + [label for label, mask in bits if mask])
def enclosure(value_raw):
    """Decode an Enclosure status element (SES-4 7.3.16, Table 130).

    Returns a comma-separated string of the status bits that are set plus,
    when a power cycle is scheduled, the time until power cycle and the
    requested power-off duration. Returns None when nothing is set.
    """
    bits = (
        ('Identify on', (value_raw >> 16) & 0x80),
        ('Fail on', (value_raw >> 8) & 0x02),
        ('Warn on', (value_raw >> 8) & 0x01),
        ('RQST fail', value_raw & 0x02),
        ('RQST warn', value_raw & 0x01),
    )
    parts = [label for label, mask in bits if mask]
    cycle_mins = (value_raw >> 10) & 0x3f
    if cycle_mins:
        off_mins = (value_raw >> 2) & 0x3f
        if off_mins == 0:
            # a requested duration of 0 means "stay off until restored"
            suffix = ', power off until manually restored'
        else:
            suffix = f', power off for {off_mins}min'
        parts.append(f'Power cycle {cycle_mins}min{suffix}')
    return ', '.join(parts) or None
def volt(value_raw):
    """Decode a Voltage Sensor status element (SES-4 7.3.20, Table 144).

    Always returns the measured voltage in volts (hundredths in the low 16
    bits), followed by any warning/critical over/under-voltage flags set.
    """
    bits = (
        ('Identify on', (value_raw >> 16) & 0x80),
        ('Fail on', (value_raw >> 16) & 0x40),
        ('Warn over', (value_raw >> 16) & 0x8),
        ('Warn under', (value_raw >> 16) & 0x4),
        ('Crit over', (value_raw >> 16) & 0x2),
        ('Crit under', (value_raw >> 16) & 0x1),
    )
    volts = f'{((value_raw & 0xffff) / 100)}V'
    return ', '.join([volts] + [label for label, mask in bits if mask])
def cooling(value_raw):
    """Decode a Cooling element (SES-4 7.3.5, Table 89) into a fan speed.

    Only the 11-bit actual-fan-speed field is decoded; it is expressed in
    units of 10 RPM.
    """
    rpm = ((value_raw & 0x7ff00) >> 8) * 10
    return f'{rpm} RPM'
def temp(value_raw):
    """Decode a Temperature Sensor element (SES-4 7.3.6, Table 94).

    The 8-bit field encodes -19C..+235C with a -20 offset; a raw value of
    0 would imply -20C, so None is returned instead.
    """
    raw = (value_raw & 0xff00) >> 8
    if raw:
        return f'{raw - 20}C'
def psu(value_raw):
    """Decode a Power Supply status element (SES-4 7.3.4, Table 86).

    Returns a comma-separated string of the bits that are set, or None.
    """
    bits = (
        ('Identify on', (value_raw >> 16) & 0x80),
        ('Do not remove', (value_raw >> 16) & 0x40),
        ('DC overvoltage', (value_raw >> 8) & 0x8),
        ('DC undervoltage', (value_raw >> 8) & 0x4),
        ('DC overcurrent', (value_raw >> 8) & 0x2),
        ('Hot swap', value_raw & 0x80),
        ('Fail on', value_raw & 0x40),
        ('RQST on', value_raw & 0x20),
        ('Off', value_raw & 0x10),
        ('Overtemp fail', value_raw & 0x8),
        ('Overtemp warn', value_raw & 0x4),
        ('AC fail', value_raw & 0x2),
        ('DC fail', value_raw & 0x1),
    )
    set_bits = [label for label, mask in bits if mask]
    return ', '.join(set_bits) or None
def array_dev(value_raw):
    """Decode an Array Device Slot status element (SES-4 7.3.3, Table 84).

    Only the IDENT and FAULT REQSTD bits are decoded; all other bits the
    spec defines are deliberately ignored by this implementation.
    """
    set_bits = []
    if (value_raw >> 8) & 0x2:
        set_bits.append('Identify on')
    if value_raw & 0x20:
        set_bits.append('Fault on')
    return ', '.join(set_bits) or None
def sas_conn(value_raw):
    """Decode a SAS Connector status element (SES-4 7.3.26, Tables 158/159).

    Returns the connector type description for the CONNECTOR TYPE field,
    followed by 'Fail on' when the FAIL bit is set.
    """
    descriptions = {
        0x0: 'No information',
        0x1: 'SAS 4x receptacle (SFF-8470) [max 4 phys]',
        0x2: 'Mini SAS 4x receptacle (SFF-8088) [max 4 phys]',
        0x3: 'QSFP+ receptacle (SFF-8436) [max 4 phys]',
        0x4: 'Mini SAS 4x active receptacle (SFF-8088) [max 4 phys]',
        0x5: 'Mini SAS HD 4x receptacle (SFF-8644) [max 4 phys]',
        0x6: 'Mini SAS HD 8x receptacle (SFF-8644) [max 8 phys]',
        0x7: 'Mini SAS HD 16x receptacle (SFF-8644) [max 16 phys]',
        0xf: 'Vendor specific external connector',
        0x10: 'SAS 4i plug (SFF-8484) [max 4 phys]',
        0x11: 'Mini SAS 4i receptacle (SFF-8087) [max 4 phys]',
        0x12: 'Mini SAS HD 4i receptacle (SFF-8643) [max 4 phys]',
        0x13: 'Mini SAS HD 8i receptacle (SFF-8643) [max 8 phys]',
        0x14: 'Mini SAS HD 16i receptacle (SFF-8643) [max 16 phys]',
        0x15: 'SlimSAS 4i (SFF-8654) [max 4 phys]',
        0x16: 'SlimSAS 8i (SFF-8654) [max 8 phys]',
        0x17: 'SAS MiniLink 4i (SFF-8612) [max 4 phys]',
        0x18: 'SAS MiniLink 8i (SFF-8612) [max 8 phys]',
        0x19: 'unknown internal wide connector type: 0x19',
        0x20: 'SAS Drive backplane receptacle (SFF-8482) [max 2 phys]',
        0x21: 'SATA host plug [max 1 phy]',
        0x22: 'SAS Drive plug (SFF-8482) [max 2 phys]',
        0x23: 'SATA device plug [max 1 phy]',
        0x24: 'Micro SAS receptacle [max 2 phys]',
        0x25: 'Micro SATA device plug [max 1 phy]',
        0x26: 'Micro SAS plug (SFF-8486) [max 2 phys]',
        0x27: 'Micro SAS/SATA plug (SFF-8486) [max 2 phys]',
        0x28: '12 Gbit/s SAS Drive backplane receptacle (SFF-8680) [max 2 phys]',
        0x29: '12 Gbit/s SAS Drive Plug (SFF-8680) [max 2 phys]',
        0x2a: 'Multifunction 12 Gbit/s 6x Unshielded receptacle connector receptacle (SFF-8639) [max 6 phys]',
        0x2b: 'Multifunction 12 Gbit/s 6x Unshielded receptacle connector plug (SFF-8639) [max 6 phys]',
        0x2c: 'SAS Multilink Drive backplane receptacle (SFF-8630) [max 4 phys]',
        0x2d: 'SAS Multilink Drive backplane plug (SFF-8630) [max 4 phys]',
        0x2e: 'unknown internal connector to end device type: 0x2e',
        0x2f: 'SAS virtual connector [max 1 phy]',
        0x3f: 'Vendor specific internal connector',
        0x40: 'SAS High Density Drive backplane receptacle (SFF-8631) [max 8 phys]',
        0x41: 'SAS High Density Drive backplane plug (SFF-8631) [max 8 phys]',
    }
    # fill the reserved/vendor-specific ranges with generated descriptions
    descriptions.update({i: f'unknown external connector type: {hex(i)}' for i in range(0x8, 0xf)})
    descriptions.update({i: f'reserved for internal connector type: {hex(i)}' for i in range(0x30, 0x3f)})
    descriptions.update({i: f'reserved connector type: {hex(i)}' for i in range(0x42, 0x70)})
    descriptions.update({i: f'vendor specific connector type: {hex(i)}' for i in range(0x70, 0x80)})
    ctype = (value_raw >> 16) & 0x7f
    parts = [descriptions.get(ctype, f'unexpected connector type: {hex(ctype)}')]
    if value_raw & 0x40:
        parts.append('Fail on')
    return ', '.join(parts)
def sas_exp(value_raw):
    """Decode a SAS Expander status element (SES-4 7.3.25, Table 156).

    Returns a comma-separated string of the bits that are set, or None.
    """
    set_bits = []
    if (value_raw >> 16) & 0x80:
        set_bits.append('Identify on')
    if (value_raw >> 16) & 0x40:
        set_bits.append('Fail on')
    return ', '.join(set_bits) or None
# See SES-4 7.2.3 Status element format, Table 74 — ELEMENT STATUS CODE field
# Maps the ELEMENT STATUS CODE to a human-readable description string.
ELEMENT_DESC = {
    0: ElementStatus.UNSUPPORTED.value,
    1: ElementStatus.OK.value,
    2: ElementStatus.CRITICAL.value,
    3: ElementStatus.NONCRITICAL.value,
    4: ElementStatus.UNRECOVERABLE.value,
    5: ElementStatus.NOT_INSTALLED.value,
    6: ElementStatus.UNKNOWN.value,
    7: ElementStatus.NOT_AVAILABLE.value,
    8: ElementStatus.NO_ACCESS_ALLOWED.value,
    9: 'reserved [9]',
    10: 'reserved [10]',
    11: 'reserved [11]',
    12: 'reserved [12]',
    13: 'reserved [13]',
    14: 'reserved [14]',
    15: 'reserved [15]',
    # getencstat on CORE reports these last 2 statuses on the X-series enclosure
    # so while it's not in the spec, we'll just maintain backwards compatible
    # behavior
    17: 'OK, Swapped',
    21: 'Not Installed, Swapped',
}
# See SES-4 7.1 Element definitions overview, Table 71 — Element type codes
# Maps each ELEMENT TYPE code to a (name, decoder) tuple; the decoder takes
# the raw status value and returns a human-readable string (or None when
# that element type has no decoder implemented here).
ELEMENT_TYPES = {
    0: (ElementType.UNSPECIFIED.value, lambda *args: None),
    1: (ElementType.DEVICE_SLOT.value, lambda *args: None),
    2: (ElementType.POWER_SUPPLY.value, psu),
    3: (ElementType.COOLING.value, cooling),
    4: (ElementType.TEMPERATURE_SENSORS.value, temp),
    5: (ElementType.DOOR_LOCK.value, lambda *args: None),
    6: (ElementType.AUDIBLE_ALARM.value, alarm),
    7: (ElementType.ENCLOSURE_SERVICES_CONTROLLER_ELECTRONICS.value, lambda *args: None),
    8: (ElementType.SCC_CONTROLLER_ELECTRONICS.value, lambda *args: None),
    9: (ElementType.NONVOLATILE_CACHE.value, lambda *args: None),
    10: (ElementType.INVALID_OPERATION_REASON.value, lambda *args: None),
    11: (ElementType.UNINTERRUPTIBLE_POWER_SUPPLY.value, lambda *args: None),
    12: (ElementType.DISPLAY.value, lambda *args: None),
    13: (ElementType.KEY_PAD_ENTRY.value, lambda *args: None),
    14: (ElementType.ENCLOSURE.value, enclosure),
    15: (ElementType.SCSI_PORT_TRANSCEIVER.value, lambda *args: None),
    16: (ElementType.LANGUAGE.value, lambda *args: None),
    17: (ElementType.COMMUNICATION_PORT.value, comm),
    18: (ElementType.VOLTAGE_SENSOR.value, volt),
    19: (ElementType.CURRENT_SENSOR.value, current),
    20: (ElementType.SCSI_TARGET_PORT.value, lambda *args: None),
    21: (ElementType.SCSI_INITIATOR_PORT.value, lambda *args: None),
    22: (ElementType.SIMPLE_SUBENCLOSURE.value, lambda *args: None),
    23: (ElementType.ARRAY_DEVICE_SLOT.value, array_dev),
    24: (ElementType.SAS_EXPANDER.value, sas_exp),
    25: (ElementType.SAS_CONNECTOR.value, sas_conn),
}
| 11,744 | Python | .py | 241 | 42.136929 | 110 | 0.637866 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,685 | slot_mappings.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/slot_mappings.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from .constants import (
DISK_FRONT_KEY,
DISK_TOP_KEY,
DISK_REAR_KEY,
DISK_INTERNAL_KEY,
SYSFS_SLOT_KEY,
MAPPED_SLOT_KEY,
SUPPORTS_IDENTIFY_KEY,
SUPPORTS_IDENTIFY_STATUS_KEY
)
from .enums import ControllerModels, JbodModels, JbofModels
# If SUPPORTS_IDENTIFY_STATUS_KEY is absent from a slot mapping then
# its value will be the same as SUPPORTS_IDENTIFY_KEY for that slot.
# (Usually, if IDENT is enabled, then its status can also be read.)
def get_jbof_slot_info(model):
    """Return the slot-mapping dictionary for NVMe-over-Fabrics JBOFs.

    Only the ES24N is mapped; any other model returns None.
    """
    if model != JbofModels.ES24N.name:
        return None
    slots = {
        i: {
            SYSFS_SLOT_KEY: i,
            MAPPED_SLOT_KEY: i,
            SUPPORTS_IDENTIFY_KEY: True,
            DISK_FRONT_KEY: True,
            DISK_TOP_KEY: False,
            DISK_REAR_KEY: False,
            DISK_INTERNAL_KEY: False,
        } for i in range(1, 25)
    }
    return {
        'any_version': True,
        'versions': {
            'DEFAULT': {
                'model': {model: slots},
            },
        },
    }
def get_nvme_slot_info(model):
    """Return the slot-mapping dictionary for platforms with NVMe drive bays.

    We sell all-NVMe-flash systems as well as systems with a handful of
    NVMe drive bays, so those slots need mapping just like traditional SES
    enclosures. They are handled separately because, well, it's NVMe.

    NOTE: SYSFS_SLOT_KEY mirrors the 1-based slot number purely for
    readability; NVMe devices don't obtain slot information the way SES
    enclosures do.
    """
    supported = (
        ControllerModels.F60.value,
        ControllerModels.F100.value,
        ControllerModels.F130.value,
        ControllerModels.M30.value,
        ControllerModels.M40.value,
        ControllerModels.M50.value,
        ControllerModels.M60.value,
        ControllerModels.R30.value,
        ControllerModels.R50.value,
        ControllerModels.R50B.value,
        ControllerModels.R50BM.value,
    )
    if model not in supported:
        return None

    def front_bays(count):
        # front-loaded bays: identify light can be set but not read back
        return {
            i: {
                SYSFS_SLOT_KEY: i,
                MAPPED_SLOT_KEY: i,
                SUPPORTS_IDENTIFY_KEY: True,
                SUPPORTS_IDENTIFY_STATUS_KEY: False,
                DISK_FRONT_KEY: True,
                DISK_TOP_KEY: False,
                DISK_REAR_KEY: False,
                DISK_INTERNAL_KEY: False,
            } for i in range(1, count + 1)
        }

    def rear_bays(count, mapped_start, is_rear=True):
        # rear bays: sysfs slots 1..count map onto mapped_start.. and
        # have no controllable identify light
        return {
            i: {
                SYSFS_SLOT_KEY: i,
                MAPPED_SLOT_KEY: mapped_start + i - 1,
                SUPPORTS_IDENTIFY_KEY: False,
                DISK_FRONT_KEY: False,
                DISK_TOP_KEY: False,
                DISK_REAR_KEY: is_rear,
                DISK_INTERNAL_KEY: False,
            } for i in range(1, count + 1)
        }

    # at time of writing, the R30 is the only platform with "internal"
    # drive slots (slots 13-16); the first 12 are front-loaded
    r30_bays = {
        i: {
            SYSFS_SLOT_KEY: i,
            MAPPED_SLOT_KEY: i,
            SUPPORTS_IDENTIFY_KEY: True,
            SUPPORTS_IDENTIFY_STATUS_KEY: False,
            DISK_FRONT_KEY: i <= 12,
            DISK_TOP_KEY: False,
            DISK_REAR_KEY: False,
            DISK_INTERNAL_KEY: i > 12,
        } for i in range(1, 17)
    }
    # ALL m-series platforms have 4 rear nvme drive bays. The R50BM does as
    # well and uses the same plx nvme bridge. HOWEVER, the m30 and the
    # gen1/2 m40's do NOT have the pcie switch that connects those 4 drive
    # bays but the gen3 M40 with 192GB RAM does. Since we don't have
    # (at time of writing) a proper way to track generational m40's, we
    # always return 4 nvme drive bays for all m-series platforms and they
    # will simply be empty on platforms that can't physically support them.
    # (The m30's rear flag is False in the original mapping and is kept so.)
    return {
        'any_version': True,
        'versions': {
            'DEFAULT': {
                'id': {
                    'f60_nvme_enclosure': front_bays(24),
                    'f100_nvme_enclosure': front_bays(24),
                    'f130_nvme_enclosure': front_bays(24),
                    'm30_nvme_enclosure': rear_bays(4, 25, is_rear=False),
                    'm40_nvme_enclosure': rear_bays(4, 25),
                    'm50_nvme_enclosure': rear_bays(4, 25),
                    'm60_nvme_enclosure': rear_bays(4, 25),
                    'r30_nvme_enclosure': r30_bays,
                    'r50_nvme_enclosure': rear_bays(3, 49),
                    'r50b_nvme_enclosure': rear_bays(2, 49),
                    'r50bm_nvme_enclosure': rear_bays(4, 49),
                },
            },
        },
    }
def get_slot_info(enc):
"""This function returns a dictionary that maps
drives from their original slots to their mapped slots. This
is done solely for the purpose of displaying the enclosure
information to the end-user in a logical way.
(i.e. /dev/sda is cabled to slot 5 at OS level, so we need
to map it back to slot 1, etc).
The keys of the dictionary serve a very particular purpose
and will be described as follows:
`any_versions When set to `True`, it means that versions of the
platform DO NOT MATTER and all versions (there may only be 1)
ship with the same drive mapping.
`versions` is a dictionary with many nested keys that
represent different versions of the same platform.
Sometimes (not often) we have to make a change to a
platform because, for example, a particlar part is
no longer available. We keep the same platform, but
instead ship with a different piece of hardware.
Completely transparent to the end-user but, obviously,
needs to be tracked on our side.
`versions->[vers_key]` is a dictionary that represents the
version. So, for example, if we ship an R20 and the
`any_versions` key is True, then we will access the
`versions->DEFAULT` key by "default". However, if
`any_versions` is False, then there should be another
top-level key that represents the identifier for that
version on the platform.
(i.e. {'versions':
'DEFAULT': ...
'1.0': ...
'2.0': ...
etc ...
}
)
NOTE: the version key has to be obtained via SMBIOS
since we need a value that isn't dynamic and gurantees
uniqueness. There are exceptions, of course, but this
is the preferred way of determining the version.
`versions->[vers_key]->[unique_identifier]` is a top-level
key that represents a non-changing, guaranteed unique
identifier for the enclosure that needs to be mapped.
For example:
{'versions': {
'DEFAULT': {
'product': {}
}
}}
The `product` key up above represents the top-level key
that we can use to access the dictionary that is returned
from `map_enclosures` function. In this example, the
`product` key represents the "product" string that is returned
from a standard INQUIRY command sent to the enclosure device.
It is VERY important that the key placed here is using some
identifier that is _GUARANTEED_ to be unique for the enclosure
that you're trying to map. If this is not unique, then the
entire mapping process will NOT work. It's almost a necessity
to use a key that is from the hardware (INQUIRY or SMBIOS).
There is 1 exception to this and that's when we're mapping
the systems that we sell that utilize the virtual AHCI enclosure
driver. This enumerates the disks using an `id` that is
hard-coded in the kernel module which guarantees its uniqueness.
`versions->[vers_key]->[unique_identifier]->[unique_id_value]` is a
top-level key that represents the value that is returned by accessing
the object from the `map_enclosures` function via the unique id
key that was discussed up above. For example:
{'versions': {
'DEFAULT': {
'product': {'eDrawer4048S1' : {}}
}
}}
In this example the `eDrawer4048S1` is the value expected to be returned
from the `product` key from the dictionary returned in the `map_enclosures`
function. Again, the `product` key is found via an INQUIRY response
and the `eDrawer4048S1` is the value that is returned from said INQUIRY.
`versions->[vers_key]->[unique_identifier]->[unique_id_value]->[slot_mapping] is
a dictionary that is used to map the original drive slots to their mapped
slots. For example:
{'versions': {
'DEFAULT': {
'product': {'eDrawer4048S1' : {
1: {'sysfs_slot': 1, 'mapped_slot': 1, 'supports_identify_light': True},
5: {'sysfs_slot': 5, 'mapped_slot': 2, 'supports_identify_light': True},
}}
}
}}
The `1` key represents what we get from libsg3.ses.EnclosureDevice().status().
The values returned from that function are _INDEX_ values from sg3_utils api.
These are _NOT_ the device slot numbers that the HBA reports. Most of the time,
the index values map 1 to 1 with the `/sys/class/enclosure/*/slot00/slot` value
from sysfs. NOTE: sysfs reports the drive slot numbers BY DEFAULT which means
they have the possibility of NOT mapping to the response we get from the sg3_utils
api function that we wrote. Of course, if sysfs can't determine the drive slot
number from the HBA (this happens with Virtual AHCI device) then sysfs will just
enumerate the `slot` files starting at 0 (mimicking what sg3_utils does).
For example, a returned response from `EnclosureDevice().status()` looks like this
>>> pprint(EnclosureDevice('/dev/bsg/0:0:0:0').status())
{'elements': {
0: {'descriptor': '<empty>', 'status': [0, 0, 0, 0], 'type': 23},
1: {'descriptor': '<empty>', 'status': [5, 0, 0, 0], 'type': 23},
2: {'descriptor': '<empty>', 'status': [5, 0, 0, 0], 'type': 23},
3: {'descriptor': '<empty>', 'status': [1, 0, 0, 0], 'type': 23},
4: {'descriptor': '<empty>', 'status': [5, 0, 0, 0], 'type': 23},
}}
        If we take a look at the elements[1] key we might think that the device slot is 1 but
it's not guaranteed. If we compare what sysfs gives us for slot 1 we see something
like this:
root@truenas[~]# cat /sys/class/enclosure/0:0:0:0/9/slot
root@truenas[~]# 1
If we compare the sysfs output with the dictionary response, we can see that the directory
name actually is `9` which represents the drive slot reported by the HBA. This means
what we get from sg3_utils does not match 1 to 1 with sysfs. So how we determine how to
"map" the drives to their "original slots" is doing 2 things:
1. platform team needs to give us a sysfs `slot` mapping (i.e. what `slot` maps to
what physical slot in the enclosure) (i.e. `slot` 0 is physical slot 8, etc)
2. take the `EnclosureDevice().status()` output and map the index values to their
respective sysfs `slot` files
The `supports_identify_light` key represents whether or not the enclosure slot can be lit
up to "identify" the drive slot. Most often the user doesn't really care about the enclosure
slot being lit up, they want to "light up slot with disk sda".
NOTE: Knowing whether or not a slot in a particular system can
be lit up MUST BE provided by the platform team. It is not something
the developer can assume to know.
We use a complex nested dictionary for a couple reasons.
1. performance is good when accessing the top-level keys
2. flexibility is also good since we're able to essentially
add any type of "key" at any point in the nested object
to represent a particular change in any of our platforms
that need it.
3. necessity because the logic that is required to map all of
our enclosures is quite complex and this was the best mix
            of performance/maintainability.
NOTE: 99% of all HBAs for the platforms we sell report their drive slot numbers
starting at 0 which is what sysfs uses for the `slot` file in sysfs.
"""
if enc.model == ControllerModels.R40.value:
# The astute reader might notice that the R40 has 48 drives while
# this is only mapping 1-24. Please see `combine_enclosures()`
# for why this is.
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 25)
},
}
}
}
}
elif enc.is_r50_series:
# these platforms share same enclosure and mapping
# but it's important to always map the eDrawer4048S1
# enclosure device to drives 1 - 24
return {
'any_version': True,
'versions': {
'DEFAULT': {
'product': {
'eDrawer4048S1': {
# 1 - 24
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 25)
},
'eDrawer4048S2': {
# 25 - 48
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: j, SUPPORTS_IDENTIFY_KEY: False}
for i, j in zip(range(1, 25), range(25, 49))
}
},
}
}
}
elif enc.model == ControllerModels.R10.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: False},
5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: False},
9: {SYSFS_SLOT_KEY: 8, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: False},
13: {SYSFS_SLOT_KEY: 12, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: False},
6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: False},
10: {SYSFS_SLOT_KEY: 9, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: False},
14: {SYSFS_SLOT_KEY: 13, MAPPED_SLOT_KEY: 8, SUPPORTS_IDENTIFY_KEY: False},
3: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: False},
7: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: False},
11: {SYSFS_SLOT_KEY: 10, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: False},
15: {SYSFS_SLOT_KEY: 14, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: False},
4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 13, SUPPORTS_IDENTIFY_KEY: False},
8: {SYSFS_SLOT_KEY: 7, MAPPED_SLOT_KEY: 14, SUPPORTS_IDENTIFY_KEY: False},
12: {SYSFS_SLOT_KEY: 11, MAPPED_SLOT_KEY: 15, SUPPORTS_IDENTIFY_KEY: False},
16: {SYSFS_SLOT_KEY: 15, MAPPED_SLOT_KEY: 16, SUPPORTS_IDENTIFY_KEY: False}
}
}
}
}
}
elif enc.model in (ControllerModels.R20.value, ControllerModels.R20B.value):
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: True},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: True},
3: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: True},
4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: True},
5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: True},
6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: True},
7: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: True},
8: {SYSFS_SLOT_KEY: 7, MAPPED_SLOT_KEY: 8, SUPPORTS_IDENTIFY_KEY: True},
9: {SYSFS_SLOT_KEY: 8, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: True},
10: {SYSFS_SLOT_KEY: 9, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: True},
11: {SYSFS_SLOT_KEY: 10, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: True},
12: {SYSFS_SLOT_KEY: 11, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: True}
}
},
'id': {
# on the rear of the system (do not support identify lights)
'3000000000000001': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 13, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 14, SUPPORTS_IDENTIFY_KEY: False}
}
}
}
}
}
elif enc.model == ControllerModels.R20A.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
3: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: False},
6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: False},
9: {SYSFS_SLOT_KEY: 8, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: False},
12: {SYSFS_SLOT_KEY: 11, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: False},
5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: False},
8: {SYSFS_SLOT_KEY: 7, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: False},
11: {SYSFS_SLOT_KEY: 10, MAPPED_SLOT_KEY: 8, SUPPORTS_IDENTIFY_KEY: False},
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: False},
4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: False},
7: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: False},
10: {SYSFS_SLOT_KEY: 9, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: False}
}
},
'id': {
'3000000000000001': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 13, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 14, SUPPORTS_IDENTIFY_KEY: False}
}
}
}
}
}
elif enc.model == ControllerModels.MINI3E.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 7)
}
}
}
}
}
elif enc.model == ControllerModels.MINI3EP.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 5)
},
'3000000000000002': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: False}
}
}
}
}
}
elif enc.model == ControllerModels.MINI3X.value:
return {
'any_version': False,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 5)
},
'3000000000000002': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: False},
4: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: False}
}
}
},
'1.0': {
'id': {
'3000000000000002': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: False},
4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: False},
5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: False}
},
'3000000000000001': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: False},
3: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: False}
}
}
}
}
}
elif enc.model == ControllerModels.MINI3XP.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 8)
}
}
}
}
}
elif enc.model == ControllerModels.MINI3XLP.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000002': {
6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: False},
5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: False},
},
'3000000000000001': {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i + 1, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 9)
}
}
}
}
}
elif enc.model == ControllerModels.MINIR.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'id': {
'3000000000000001': {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: False}
for i in range(1, 9)
},
'3000000000000002': {
4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: False},
5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: False},
6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: False},
7: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: False}
}
}
}
}
}
elif enc.is_hseries:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
1: {SYSFS_SLOT_KEY: 8, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: True},
2: {SYSFS_SLOT_KEY: 9, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: True},
3: {SYSFS_SLOT_KEY: 10, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: True},
4: {SYSFS_SLOT_KEY: 11, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: True},
# 5, 6, 7, 8 unused/unsupported
9: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: True},
10: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: True},
11: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: True},
12: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: True},
13: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: True},
14: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: True},
15: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: True},
16: {SYSFS_SLOT_KEY: 7, MAPPED_SLOT_KEY: 8, SUPPORTS_IDENTIFY_KEY: True},
},
}
}
}
}
elif enc.is_mseries:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
# 1 - 24
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: True}
for i in range(1, 25)
},
},
}
}
}
elif enc.model == JbodModels.ES12.value or enc.is_xseries:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: True}
for i in range(1, 13)
},
}
}
}
}
elif enc.is_24_bay_jbod:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: True}
for i in range(1, 25)
},
}
}
}
}
elif enc.is_60_bay_jbod:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: True}
for i in range(1, 61)
},
}
}
}
}
elif enc.model == JbodModels.ES102.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
i: {SYSFS_SLOT_KEY: i - 1, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: True}
for i in range(1, 103)
},
}
}
}
}
elif enc.model == JbodModels.ES102G2.value:
return {
'any_version': True,
'versions': {
'DEFAULT': {
'model': {
enc.model: {
# drives actually start at index 1 (not 0)
i: {SYSFS_SLOT_KEY: i, MAPPED_SLOT_KEY: i, SUPPORTS_IDENTIFY_KEY: True}
for i in range(1, 103)
},
}
}
}
}
else:
return
| 35,514 | Python | .py | 707 | 30.363508 | 108 | 0.452028 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,686 | nvme.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/nvme.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import re
from pathlib import Path
from pyudev import Context, Devices, DeviceNotFoundAtPathError
from middlewared.service import Service, private
class EnclosureService(Service):
    # PCI slot directory names under /sys/bus/pci/slots look like "0-<n>";
    # this captures the numeric slot portion.
    RE_SLOT = re.compile(r"^0-([0-9]+)$")
    @private
    def fake_nvme_enclosure(self, id_, name, model, count, slot_to_nvme):
        """Fabricate an SES-style enclosure dict for NVMe drive bays.

        NVMe bays are not behind a real SES enclosure device, so this builds
        a structure shaped like the one reported for genuine enclosures.

        `id_`: unique identifier for the fake enclosure
        `name`: human readable enclosure name
        `model`: enclosure model string
        `count`: total number of NVMe drive bays to report
        `slot_to_nvme`: dict mapping 1-based slot number -> nvme block device
            name (e.g. "nvme0n1"); slots missing from the dict are reported
            as "Not Installed"

        Returns a single-element list containing the enclosure dict.
        """
        elements = []
        for slot in range(1, 1 + count):
            device = slot_to_nvme.get(slot, None)
            if device is not None:
                status = "OK"
                value_raw = "0x1000000"
            else:
                status = "Not Installed"
                value_raw = "0x05000000"
            elements.append({
                "slot": slot,
                "data": {
                    "Descriptor": f"Disk #{slot}",
                    "Status": status,
                    "Value": "None",
                    "Device": device,
                },
                "name": "Array Device Slot",
                "descriptor": f"Disk #{slot}",
                "status": status,
                "value": "None",
                "value_raw": value_raw,
            })
        return [
            {
                "id": id_,
                # no real SES device backs this enclosure, hence no bsg node
                "bsg": None,
                "name": name,
                "model": model,
                "controller": True,
                "elements": [
                    {
                        "name": "Array Device Slot",
                        "descriptor": "Drive Slots",
                        "header": ["Descriptor", "Status", "Value", "Device"],
                        "elements": elements,
                        "has_slot_status": False,
                    },
                ],
            }
        ]
    @private
    def map_plx_nvme_impl(self, prod):
        """Gather fake_nvme_enclosure() arguments for the rear NVMe bays
        behind the PLX PCIe bridge (used on M50/M60 and R50BM platforms).
        """
        enc_name = prod
        enclosure_id = f"{enc_name.lower()}_plx_enclosure"
        enclosure_model = f"{enc_name} Series"
        # pci address -> name of its /sys/bus/pci/slots directory (e.g. "0-4")
        addresses_to_slots = {
            (slot / "address").read_text().strip(): slot.name
            for slot in Path("/sys/bus/pci/slots").iterdir()
        }
        slot_to_nvme = {}
        ctx = Context()
        # NOTE(review): rear-nvme bays are assumed to live behind this fixed
        # ACPI handle on all plx-bridge platforms -- confirm per platform.
        for i in filter(lambda x: x.attributes.get("path") == b"\\_SB_.PC03.BR3A", ctx.list_devices(subsystem="acpi")):
            try:
                physical_node = Devices.from_path(ctx, f"{i.sys_path}/physical_node")
            except DeviceNotFoundAtPathError:
                # happens when there are no rear-nvme drives plugged in
                pass
            else:
                for child in physical_node.children:
                    # only interested in block devices (the nvme namespaces)
                    if child.properties.get("SUBSYSTEM") != "block":
                        continue
                    try:
                        controller_sys_name = child.parent.parent.sys_name
                    except AttributeError:
                        continue
                    # strip the function suffix from the pci address before
                    # looking up the physical slot directory
                    if (slot := addresses_to_slots.get(controller_sys_name.split(".")[0])) is None:
                        continue
                    if not (m := re.match(self.RE_SLOT, slot)):
                        continue
                    slot = int(m.group(1))
                    if enc_name == 'R50BM':
                        # When adding this code and testing on internal R50BM, the starting slot
                        # number for the rear nvme drive bays starts at 2 and goes to 5. This means
                        # we're always off by 1. The easiest solution is to just check for this
                        # specific platform and subtract 1 from the slot number to keep everything
                        # in check.
                        # To make things event more complicated, we found (by testing on internal hardware)
                        # that slot 2 on OS is actually slot 3 and vice versa. This means we need to swap
                        # those 2 numbers with each other to keep the webUI lined up with reality.
                        slot -= 1
                        if slot == 2:
                            slot = 3
                        elif slot == 3:
                            slot = 2
                    slot_to_nvme[slot] = child.sys_name
        return [
            enclosure_id,
            'Rear NVME U.2 Hotswap Bays',
            enclosure_model,
            4,  # nvme plx bridge used on m50/60 and r50bm have 4 nvme drive bays
            slot_to_nvme
        ]
    @private
    def map_plx_nvme(self, prod):
        """Return the fake enclosure for the rear NVMe bays behind the PLX bridge."""
        return self.fake_nvme_enclosure(*self.map_plx_nvme_impl(prod))
    @private
    def map_r50_or_r50b_impl(self, info, acpihandles):
        """Fill in `info`'s slot -> nvme mapping by resolving fixed ACPI handles.

        `info`: argument list for fake_nvme_enclosure(); its last element is
            the slot -> nvme dict (mutated in place), the second to last the
            number of rear NVMe slots on the platform
        `acpihandles`: dict of ACPI handle (bytes) -> 1-based slot number

        Returns the same `info` list.
        """
        mapped = info[-1]
        num_of_nvme_slots = info[-2]
        ctx = Context()
        for i in filter(lambda x: x.attributes.get('path') in acpihandles, ctx.list_devices(subsystem='acpi')):
            acpi_handle = i.attributes.get('path')
            try:
                phys_node = Devices.from_path(ctx, f'{i.sys_path}/physical_node')
            except DeviceNotFoundAtPathError:
                return info
            slot = acpihandles[acpi_handle]
            for nvme in filter(lambda x: x.sys_name.startswith('nvme') and x.subsystem == 'block', phys_node.children):
                mapped[slot] = nvme.sys_name
                break
            else:
                # no nvme block device behind this handle -> slot is empty
                mapped[slot] = None
            # stop early once every expected slot has been resolved
            if len(mapped) == num_of_nvme_slots:
                return info
        return info
    @private
    def map_r50_or_r50b(self, prod):
        """Return the fake enclosure for the rear NVMe bays of an R50 or R50B."""
        if prod == 'R50':
            info = [
                'r50_nvme_enclosure',
                'R50 NVMe Enclosure',
                'R50, Drawer #3',
                3,  # r50 has 3 rear nvme
                {},
            ]
            acpihandles = {b'\\_SB_.PC00.RP01.PXSX': 3, b'\\_SB_.PC01.BR1A.OCL0': 1, b'\\_SB_.PC01.BR1B.OCL1': 2}
        else:
            info = [
                'r50b_nvme_enclosure',
                'R50B NVMe Enclosure',
                'R50B, Drawer #3',
                2,  # r50b has 2 rear nvme
                {},
            ]
            acpihandles = {b'\\_SB_.PC03.BR3A': 2, b'\\_SB_.PC00.RP01.PXSX': 1}
        return self.fake_nvme_enclosure(*self.map_r50_or_r50b_impl(info, acpihandles))
    @private
    def map_r30_or_fseries(self, prod):
        """Return the fake enclosure for all-NVMe head units (R30 and F-series)."""
        if prod == 'R30':
            _id = 'r30_nvme_enclosure'
            name = 'R30 NVMe Enclosure'
            model = 'R30'
            count = 16  # r30 has 16 nvme drive bays in head-unit (all nvme flash system)
        else:
            _id = f'{prod.lower()}_nvme_enclosure'
            name = f'{prod} NVMe Enclosure'
            model = prod
            count = 24  # f-series has 24 nvme drive bays in head-unit (all nvme flash system)
        ctx = Context()
        # pci controller address -> nvme namespace block device name
        nvmes = {}
        for i in ctx.list_devices(subsystem='nvme'):
            for namespace_dev in i.children:
                if namespace_dev.device_type != 'disk':
                    continue
                try:
                    # i.parent.sys_name looks like 0000:80:40.0
                    # namespace_dev.sys_name looks like nvme1n1
                    nvmes[i.parent.sys_name[:-2]] = namespace_dev.sys_name
                except (IndexError, AttributeError):
                    continue
        # the keys in this dictionary are the physical pcie slot ids
        # and the values are the slots that the webUI uses to map them
        # to their physical locations in a human manageable way
        if prod == 'R30':
            webui_map = {
                '27': 1, '26': 7, '25': 2, '24': 8,
                '37': 3, '36': 9, '35': 4, '34': 10,
                '45': 5, '47': 11, '40': 6, '41': 12,
                '38': 14, '39': 16, '43': 13, '44': 15,
            }
        else:
            # f-series vendor is nice to us and nvme phys slots start at 1
            # and increment in a human readable way already
            webui_map = {
                '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6,
                '7': 7, '8': 8, '9': 9, '10': 10, '11': 11, '12': 12,
                '13': 13, '14': 14, '15': 15, '16': 16, '17': 17, '18': 18,
                '19': 19, '20': 20, '21': 21, '22': 22, '23': 23, '24': 24,
            }
        mapped = {}
        for i in Path('/sys/bus/pci/slots').iterdir():
            addr = (i / 'address').read_text().strip()
            if (nvme := nvmes.get(addr, None)) and (mapped_slot := webui_map.get(i.name, None)):
                mapped[mapped_slot] = nvme
        return self.fake_nvme_enclosure(_id, name, model, count, mapped)
    @private
    def valid_hardware(self, prod):
        """Return the model portion of the DMI product name (e.g. 'R50B') when
        it is a platform with NVMe bay mapping support, otherwise None.
        """
        prefix = 'TRUENAS-'
        models = ['R30', 'R50', 'R50B', 'R50BM', 'M50', 'M60', 'F60', 'F100', 'F130']
        if prod != 'TRUENAS-' and any((j in prod for j in [f'{prefix}{i}' for i in models])):
            return prod.split('-')[1]
    @private
    def map_nvme(self):
        """Return the fake NVMe enclosure(s) for this platform, or [] when the
        platform has no mappable NVMe bays.
        """
        prod = self.valid_hardware(self.middleware.call_sync('system.dmidecode_info')['system-product-name'])
        if not prod:
            return []
        if prod in ('R50', 'R50B'):
            return self.map_r50_or_r50b(prod)
        elif prod in ('R30', 'F60', 'F100', 'F130'):
            # all nvme systems which we need to handle separately
            return self.map_r30_or_fseries(prod)
        else:
            # M50/60 and R50BM use same plx nvme bridge
            return self.map_plx_nvme(prod)
| 9,496 | Python | .py | 217 | 29.599078 | 119 | 0.490321 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,687 | ses_enclosure.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/ses_enclosure.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from pyudev import Context
from libsg3.ses import EnclosureDevice
from middlewared.service import private, Service
class EnclosureService(Service):

    @private
    def list_ses_enclosures(self):
        """Return the /dev/bsg path of every SES enclosure udev knows about."""
        enclosure_devices = Context().list_devices(subsystem='enclosure')
        return [f'/dev/bsg/{dev.sys_name}' for dev in enclosure_devices]

    @private
    def get_ses_enclosures(self):
        """Read the configuration and status diagnostic pages of each enclosure.

        Returns a dict keyed by enumeration index whose values are
        ``(bsg_name, (configuration_page, enclosure_status_page))`` tuples.
        Enclosures whose pages cannot be read are logged and skipped.
        """
        results = {}
        for idx, path in enumerate(self.list_ses_enclosures()):
            enclosure = EnclosureDevice(path)
            try:
                config_page = enclosure.get_configuration()
            except OSError:
                self.logger.warning('Error querying configuration page for %r', path, exc_info=True)
                continue
            try:
                status_page = enclosure.get_enclosure_status()
            except OSError:
                self.logger.warning('Error querying enclosure status page for %r', path, exc_info=True)
                continue
            results[idx] = (path.removeprefix('/dev/'), (config_page, status_page))
        return results
| 1,178 | Python | .py | 29 | 31.586207 | 103 | 0.633333 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,688 | ses_enclosures2.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/ses_enclosures2.py | from contextlib import suppress
from logging import getLogger
from pathlib import Path
from libsg3.ses import EnclosureDevice
from .enclosure_class import Enclosure
logger = getLogger(__name__)
def get_ses_enclosure_status(bsg_path):
    """Return the SES status page for `bsg_path`, or None when the query fails."""
    try:
        status = EnclosureDevice(bsg_path).status()
    except OSError:
        logger.error('Error querying enclosure status for %r', bsg_path, exc_info=True)
        return None
    return status
def get_ses_enclosures(asdict=True):
    """Enumerate the SES enclosures found under /sys/class/enclosure.

    Returns a list of Enclosure objects, or their dict representation when
    `asdict` is true. Enclosures whose status page cannot be read are skipped.
    """
    enclosures = []
    with suppress(FileNotFoundError):
        for entry in Path('/sys/class/enclosure').iterdir():
            bsg_path = f'/dev/bsg/{entry.name}'
            status = get_ses_enclosure_status(bsg_path)
            if not status:
                continue
            sg_entry = next((entry / 'device/scsi_generic').iterdir())
            enclosure = Enclosure(bsg_path, f'/dev/{sg_entry.name}', status)
            enclosures.append(enclosure.asdict() if asdict else enclosure)
    return enclosures
| 924 | Python | .py | 24 | 30.041667 | 87 | 0.62486 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,689 | map.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/map.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from collections import namedtuple
import re
from middlewared.service import Service, private
# A product (matched by regex against the DMI product name) maps to one or
# more version-specific slot layouts; layouts are tried in order.
ProductMapping = namedtuple("ProductMapping", ["product_re", "mappings"])
# A hardware version (matched by regex against the DMI system version) maps
# to an ordered list of slots.
VersionMapping = namedtuple("VersionMapping", ["version_re", "slots"])
# `num` is the index of the physical enclosure, `slot` the 0-based slot
# within it, `identify` whether the slot supports an identify light.
MappingSlot = namedtuple("MappingSlot", ["num", "slot", "identify"])


def _bays(enclosure_num, slot_indices):
    """One MappingSlot (identify light unsupported) per index in `slot_indices`."""
    return [MappingSlot(enclosure_num, idx, False) for idx in slot_indices]


_ANY_VERSION = re.compile(".*")

MAPPINGS = [
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-3.0-E$"), [
        VersionMapping(_ANY_VERSION, _bays(0, (0, 1, 2, 3, 5, 4))),
    ]),
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-3.0-E\+$"), [
        VersionMapping(_ANY_VERSION, _bays(0, (0, 1, 2, 3, 0, 1))),
    ]),
    # version 1.0 of the MINI-3.0-X wires its bays differently and must be
    # matched ahead of the catch-all entry below
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-3.0-X$"), [
        VersionMapping(re.compile(r"1\.0"), _bays(1, (0, 1, 3, 4)) + _bays(0, (0, 1, 2))),
    ]),
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-3.0-X$"), [
        VersionMapping(_ANY_VERSION, _bays(0, (0, 1, 2, 3)) + _bays(1, (0, 1, 3))),
    ]),
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-3.0-X\+$"), [
        VersionMapping(_ANY_VERSION, _bays(0, range(7))),
    ]),
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-3.0-XL\+$"), [
        VersionMapping(_ANY_VERSION, _bays(1, (5,)) + _bays(0, range(8)) + _bays(1, (4,))),
    ]),
    ProductMapping(re.compile(r"(TRUE|FREE)NAS-MINI-R"), [
        VersionMapping(_ANY_VERSION, _bays(0, range(8)) + _bays(1, (3, 4, 5, 6))),
    ]),
    ProductMapping(re.compile(r"TRUENAS-R10$"), [
        VersionMapping(_ANY_VERSION, _bays(0, (0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15))),
    ]),
    # R20 and R20B share chassis and mapping
    ProductMapping(re.compile(r"TRUENAS-R20B?$"), [
        VersionMapping(_ANY_VERSION, _bays(2, range(12)) + _bays(0, (0, 1))),
    ]),
    ProductMapping(re.compile(r"TRUENAS-R20A$"), [
        VersionMapping(_ANY_VERSION, _bays(2, (2, 5, 8, 11, 1, 4, 7, 10, 0, 3, 6, 9)) + _bays(0, (0, 1))),
    ]),
    ProductMapping(re.compile(r"TRUENAS-R40$"), [
        VersionMapping(_ANY_VERSION, _bays(0, range(24)) + _bays(1, range(24))),
    ]),
    # R50 has 3 rear nvme drives
    ProductMapping(re.compile(r"TRUENAS-R50$"), [
        VersionMapping(_ANY_VERSION, _bays(0, range(24)) + _bays(1, range(24)) + _bays(2, range(3))),
    ]),
    # R50b has 2 rear nvme drives
    ProductMapping(re.compile(r"TRUENAS-R50B$"), [
        VersionMapping(_ANY_VERSION, _bays(0, range(24)) + _bays(1, range(24)) + _bays(2, range(2))),
    ]),
    # R50BM has 4 rear nvme drives (uses same plx bridge as m50/60 series)
    ProductMapping(re.compile(r"TRUENAS-R50BM$"), [
        VersionMapping(_ANY_VERSION, _bays(0, range(24)) + _bays(1, range(24)) + _bays(2, range(4))),
    ]),
]
class EnclosureService(Service):
    @private
    async def map_enclosures(self, enclosures):
        """Apply a product/version specific slot mapping to `enclosures`.

        The DMI product name and version are matched against the static
        MAPPINGS table; when a match is found the enclosures are rewritten
        into a single synthetic "mapped" enclosure via `_map_enclosures`,
        otherwise the input is returned untouched.
        """
        info = await self.middleware.call("system.dmidecode_info")
        if info["system-product-name"] is not None:
            for product_mapping in MAPPINGS:
                if product_mapping.product_re.match(info["system-product-name"]):
                    for version_mapping in product_mapping.mappings:
                        if version_mapping.version_re.match(info["system-version"]):
                            return await self._map_enclosures(enclosures, version_mapping.slots)
        # no mapping applies to this platform; hand back the raw enclosures
        return enclosures

    async def _map_enclosures(self, enclosures, slots):
        """Build a single "mapped" enclosure from `slots` (a list of
        MappingSlot(num, slot, identify) entries) and re-append any
        non-controller (JBOD) enclosures unchanged.

        Returns [] when the mapping references an enclosure or slot that is
        not present on this system.
        """
        # Ensure JBODs don't affect ordering by filtering them out
        controller_enclosures = list(filter(lambda x: x['controller'], enclosures))
        elements = []
        has_slot_status = False
        # NOTE(review): `bsg` is never reassigned below, so the mapped
        # enclosure always reports bsg=None — presumably intentional since
        # this is a synthetic enclosure; confirm if a real bsg is wanted.
        model = bsg = None
        # mapped slots are 1-based (Disk #1, Disk #2, ...)
        for slot, mapping in enumerate(slots, 1):
            try:
                original_enclosure = controller_enclosures[mapping.num]
            except IndexError:
                self.logger.error("Mapping referenced enclosure %d but it is not present on this system",
                                  mapping.num)
                return []
            original_slots = list(filter(lambda element: element["name"] == "Array Device Slot",
                                         original_enclosure["elements"]))[0]["elements"]
            try:
                original_slot = original_slots[mapping.slot]
            except IndexError:
                self.logger.error("Mapping referenced slot %d in enclosure %d but it is not present on this system",
                                  mapping.slot, mapping.num)
                return []
            # re-number the slot but remember where it originally came from
            # (the "original" key is what drive identification uses later)
            element = {
                "slot": slot,
                "data": dict(original_slot["data"], **{
                    "Descriptor": f"Disk #{slot}",
                }),
                "name": "Array Device Slot",
                "descriptor": f"Disk #{slot}",
                "status": original_slot["status"],
                "value": original_slot["value"],
                "value_raw": original_slot["value_raw"],
                "original": {
                    "enclosure_id": original_enclosure["id"],
                    "enclosure_bsg": original_enclosure["bsg"],
                    "slot": original_slot["slot"],
                },
            }
            if mapping.identify:
                has_slot_status = True
                for k in ["fault", "identify"]:
                    if k in original_slot:
                        element[k] = original_slot[k]
                    else:
                        # the mapping claims this slot is identifiable but the
                        # hardware didn't report the element; disable slot
                        # status for the whole mapped enclosure
                        self.logger.warning("Mapping referenced slot %d in enclosure %d as identifiable but key %r "
                                            "is not present on this system", mapping.slot, mapping.num, k)
                        has_slot_status = False
            # take the model from the first "real" enclosure (skip synthetic
            # plx/nvme enclosures whose ids end with these suffixes)
            if model is None and not original_enclosure["id"].endswith(("plx_enclosure", "nvme_enclosure")):
                model = original_enclosure["model"]
            elements.append(element)
        mapped = [
            {
                "id": "mapped_enclosure_0",
                "bsg": bsg,
                "name": "Drive Bays",
                "model": model,
                "controller": True,
                "elements": [
                    {
                        "name": "Array Device Slot",
                        "descriptor": "Drive Slots",
                        "header": ["Descriptor", "Status", "Value", "Device"],
                        "elements": elements,
                        "has_slot_status": has_slot_status,
                    },
                ],
            }
        ]
        # if we have future products that need to be mapped and/or have the
        # ability to support expansion shelves, then we need to add them
        # back in here so drive identification works
        for enclosure in enclosures:
            if not enclosure["controller"]:
                mapped.append(enclosure)
        return mapped
| 18,125 | Python | .py | 457 | 27.059081 | 116 | 0.50898 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,690 | enclosure2.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/enclosure2.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import errno
from middlewared.schema import Dict, Int, Str, accepts
from middlewared.service import Service, filterable
from middlewared.service_exception import CallError, MatchNotFound, ValidationError
from middlewared.utils import filter_list
from .constants import SUPPORTS_IDENTIFY_KEY
from .enums import JbofModels
from .fseries_drive_identify import InsufficientPrivilege, set_slot_status as fseries_set_slot_status
from .jbof_enclosures import map_jbof, set_slot_status as _jbof_set_slot_status
from .map2 import combine_enclosures
from .nvme2 import map_nvme
from .r30_drive_identify import set_slot_status as r30_set_slot_status
from .ses_enclosures2 import get_ses_enclosures
from .sysfs_disks import toggle_enclosure_slot_identifier
class Enclosure2Service(Service):

    class Config:
        cli_namespace = 'storage.enclosure2'
        private = True

    def get_ses_enclosures(self):
        """This generates the "raw" list of enclosures detected on the system. It
        serves as the "entry" point to "enclosure2.query" and is foundational in
        how all of the structuring of the final data object is returned. We use
        SCSI commands (issued directly to the enclosure) to generate an object of
        all elements and the information associated to each element. The `Enclosure`
        class is where all the magic happens wrt to taking in all the raw data and
        formatting it into a structured object that will be consumed by the webUI
        team as well as on the backend (alerts, drive identification, etc).
        """
        return get_ses_enclosures()

    async def map_jbof(self, jbof_qry=None):
        """This method serves as an endpoint to easily be able to test
        the JBOF mapping logic specifically without having to call enclosure2.query
        which includes the head-unit and all other attached JBO{D/F}s.
        """
        if jbof_qry is None:
            jbof_qry = await self.middleware.call('jbof.query')
        return await map_jbof(jbof_qry)

    def map_nvme(self):
        """This method serves as an endpoint to easily be able to test
        the nvme mapping logic specifically without having to call enclosure2.query
        which includes the head-unit and all attached JBODs.
        """
        return map_nvme()

    def get_original_disk_slot(self, slot, enc_info):
        """Get the original slot based on the `slot` passed to us via the end-user.

        NOTE: Most drives original slot will match their "mapped" slot because there
        is no need to map them. We always include an "original" slot key for all
        enclosures as to keep this for loop as simple as possible and it also allows
        more flexibility when we do get an enclosure that maps drives differently.
        (i.e. the ES102G2 is a prime example of this (enumerates drives at 1 instead of 0))

        Returns a (original_slot, supports_identify) tuple; (None, False) when
        `slot` isn't found in the enclosure's Array Device Slot elements.
        """
        origslot, supports_identify = None, False
        # slot keys are unique, so the filter matches at most one item
        for encslot, devinfo in filter(lambda x: x[0] == slot, enc_info['elements']['Array Device Slot'].items()):
            origslot = devinfo['original']['slot']
            supports_identify = devinfo[SUPPORTS_IDENTIFY_KEY]
        return origslot, supports_identify

    @accepts(Dict(
        Str('enclosure_id', required=True),
        Int('slot', required=True),
        Str('status', required=True, enum=['CLEAR', 'ON', 'OFF'])
    ))
    def set_slot_status(self, data):
        """Set enclosure bay number `slot` to `status` for `enclosure_id`.

        `enclosure_id` str: represents the enclosure logical identifier of the enclosure
        `slot` int: the enclosure drive bay number to send the status command
        `status` str: the status for which to send to the command.

        Raises ValidationError when the enclosure/slot can't be resolved and
        CallError (ENOENT) when the sysfs slot path doesn't exist.
        """
        try:
            enc_info = self.middleware.call_sync(
                'enclosure2.query', [['id', '=', data['enclosure_id']]], {'get': True}
            )
        except MatchNotFound:
            raise ValidationError('enclosure2.set_slot_status', f'Enclosure with id: {data["enclosure_id"]} not found')

        if enc_info['id'].endswith('_nvme_enclosure'):
            if enc_info['id'].startswith('r30'):
                # an all nvme flash system so drive identification is handled
                # in a completely different way than sata/scsi
                return r30_set_slot_status(data['slot'], data['status'])
            elif enc_info['id'].startswith(('f60', 'f100', 'f130')):
                try:
                    return fseries_set_slot_status(data['slot'], data['status'])
                except InsufficientPrivilege:
                    # the local controller can't drive the LED; on an HA
                    # system retry on the remote node. NOTE(review): on a
                    # non-licensed system the exception is swallowed and
                    # None is returned (best-effort behavior).
                    if self.middleware.call_sync('failover.licensed'):
                        opts = {'raise_connect_error': False}
                        return self.middleware.call_sync(
                            'failover.call_remote', 'enclosure2.set_slot_status', [data], opts
                        )
            else:
                # mseries, and some rseries have mapped nvme enclosures but they
                # don't support drive LED identification
                return
        elif enc_info['model'] == JbofModels.ES24N.name:
            # JBOF identification goes over redfish, not sysfs
            return self.middleware.call_sync(
                'enclosure2.jbof_set_slot_status', data['enclosure_id'], data['slot'], data['status']
            )

        if enc_info['pci'] is None:
            raise ValidationError('enclosure2.set_slot_status', 'Unable to determine PCI address for enclosure')
        else:
            # translate the user-visible (mapped) slot back to the physical one
            origslot, supported = self.get_original_disk_slot(data['slot'], enc_info)
            if origslot is None:
                raise ValidationError('enclosure2.set_slot_status', f'Slot {data["slot"]} not found in enclosure')
            elif not supported:
                raise ValidationError(
                    'enclosure2.set_slot_status', f'Slot {data["slot"]} does not support identification'
                )
            else:
                try:
                    toggle_enclosure_slot_identifier(
                        f'/sys/class/enclosure/{enc_info["pci"]}', origslot, data['status'], False, enc_info['model']
                    )
                except FileNotFoundError:
                    raise CallError(f'Slot: {data["slot"]!r} not found', errno.ENOENT)

    async def jbof_set_slot_status(self, ident, slot, status):
        # thin wrapper so the JBOF implementation can be called as a middleware method
        return await _jbof_set_slot_status(ident, slot, status)

    @filterable
    def query(self, filters, options):
        """Return all enclosures (head-unit SES, mapped NVMe and JBOFs) with
        user labels applied, controllers sorted first."""
        enclosures = []
        if not self.middleware.call_sync('truenas.is_ix_hardware'):
            # this feature is only available on hardware that ix sells
            return enclosures

        labels = {
            label['encid']: label['label']
            for label in self.middleware.call_sync('datastore.query', 'truenas.enclosurelabel')
        }
        for i in self.get_ses_enclosures() + self.map_nvme() + self.middleware.call_sync('enclosure2.map_jbof'):
            if i.pop('should_ignore'):
                continue

            # this is a user-provided string to label the enclosures so we'll add it at as a
            # top-level dictionary key "label", if the user hasn't provided a label then we'll
            # fill in the info with whatever is in the "name" key. The "name" key is the
            # t10 vendor, product and revision information combined as a single space separated
            # string reported by the enclosure itself via a standard inquiry command
            i['label'] = labels.get(i['id']) or i['name']
            enclosures.append(i)

        combine_enclosures(enclosures)
        # controllers (head-units) first, then by id for a stable ordering
        enclosures = sorted(enclosures, key=lambda enclosure: (0 if enclosure["controller"] else 1, enclosure['id']))
        return filter_list(enclosures, filters, options)
| 7,901 | Python | .py | 141 | 45.468085 | 119 | 0.64582 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,691 | r30_drive_identify.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/r30_drive_identify.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from subprocess import run
NVME_CONTROLLERS = ('0xc0', '0xc2', '0xc4')

# Static translation of a 1-based drive bay number to the (nvme controller
# address, bay bitmask) pair used by the ipmitool raw commands.
_SLOT_TO_CTRL_AND_BAY = {
    # bays 1-8
    1: (NVME_CONTROLLERS[0], '0x01'),
    2: (NVME_CONTROLLERS[0], '0x04'),
    3: (NVME_CONTROLLERS[0], '0x10'),
    4: (NVME_CONTROLLERS[0], '0x40'),
    5: (NVME_CONTROLLERS[1], '0x01'),
    6: (NVME_CONTROLLERS[1], '0x04'),
    7: (NVME_CONTROLLERS[0], '0x02'),
    8: (NVME_CONTROLLERS[0], '0x08'),
    # bays 9-12
    9: (NVME_CONTROLLERS[0], '0x20'),
    10: (NVME_CONTROLLERS[0], '0x80'),
    11: (NVME_CONTROLLERS[1], '0x02'),
    12: (NVME_CONTROLLERS[1], '0x08'),
    # bays 13-16
    13: (NVME_CONTROLLERS[2], '0x04'),
    14: (NVME_CONTROLLERS[2], '0x01'),
    15: (NVME_CONTROLLERS[2], '0x08'),
    16: (NVME_CONTROLLERS[2], '0x02'),
}


def slot_to_controller_and_bay_mapping(slot):
    """Return the (controller, bay bitmask) pair for drive bay `slot` (1-16).

    Raises ValueError for any slot number outside the known mapping.
    """
    if slot not in _SLOT_TO_CTRL_AND_BAY:
        raise ValueError(f'{slot!r} is invalid')
    return _SLOT_TO_CTRL_AND_BAY[slot]
def led_status_mapping(status):
    """Translate an LED status keyword into the ipmitool command byte.

    'OFF' and 'CLEAR' both turn the red LED off; 'IDENTIFY' and 'ON' both
    make the red and green LEDs blink fast. Raises ValueError otherwise.
    """
    if status in ('OFF', 'CLEAR'):
        return '0x00'
    if status in ('IDENTIFY', 'ON'):
        return '0x42'
    raise ValueError(f'{status!r} is invalid')
def set_slot_status(slot, status):
    """Drive the identification LED for R30 drive bay `slot` via ipmitool.

    `slot` int: 1-based drive bay number (raises ValueError when unknown)
    `status` str: one of 'OFF'/'CLEAR' (turn off) or 'ON'/'IDENTIFY' (blink)

    Unfortunately, there is no way to query current drive identification status.
    Also, there is no way to turn off a singular LED bay, you have to clear the
    controller (nvme bank) of drives.
    """
    ctrl, bay = slot_to_controller_and_bay_mapping(slot)
    led_status_mapping(status)  # will crash if invalid status is passed to us
    # always disable BMC sensor scan
    run('ipmitool raw 0x30 0x02 0x00', check=False, shell=True)
    # always switch to SMBUS
    run('ipmitool raw 0x06 0x52 0x07 0xe6 0x0 0x4 0x4', check=False, shell=True)
    if status in ('OFF', 'CLEAR'):
        # use the module-level constant instead of re-hardcoding the
        # controller addresses (they must stay in sync with the slot mapping)
        for controller in NVME_CONTROLLERS:
            # set to manual mode for the nvme controller
            run(f'ipmitool raw 0x06 0x52 0x07 {controller} 0x00 0x3c 0xff', check=False, shell=True)
            # clear all the bank of LEDs on the controller (no way to turn off specific drive)
            run(f'ipmitool raw 0x06 0x52 0x07 {controller} 0x00 {led_status_mapping("ON")} 0x00', check=False, shell=True)
    else:
        # set to manual mode for the nvme controller
        run(f'ipmitool raw 0x06 0x52 0x07 {ctrl} 0x00 0x3c 0xff', check=False, shell=True)
        # light up the slot
        run(f'ipmitool raw 0x06 0x52 0x07 {ctrl} 0x00 {led_status_mapping("ON")} {bay}', check=False, shell=True)
24,692 | es24n.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/jbof/es24n.py | from logging import getLogger
from middlewared.plugins.enclosure_.enums import ElementType, JbofModels
from middlewared.plugins.enclosure_.jbof.utils import (fake_jbof_enclosure,
map_cooling,
map_power_supplies,
map_temperature_sensors,
map_voltage_sensors)
from middlewared.plugins.jbof.functions import get_sys_class_nvme
ES24N_EXPECTED_URI = '/redfish/v1/Chassis/2U24'
LOGGER = getLogger(__name__)
async def map_es24n(model, rclient, uri):
    """Fetch the redfish resources needed to map an ES24N enclosure and hand
    them to `do_map_es24n`. Returns None when any fetch fails."""
    endpoints = (
        ('Drives', f'{uri}/Drives?$expand=*'),
        ('PowerSubsystem', f'{uri}/PowerSubsystem?$expand=*($levels=2)'),
        ('Sensors', f'{uri}/Sensors?$expand=*'),
        ('ThermalSubsystem', f'{uri}/ThermalSubsystem?$expand=*($levels=2)'),
    )
    collected = {}
    # Unfortunately the ES24n response doesn't lend itself it issuing a single query.
    #
    # Furthermore, experiments have shown that executing the queries in series
    # is just as fast as executing in parallel, so we'll do the former here for
    # simplicity.
    try:
        for name, endpoint in endpoints:
            response = await rclient.get(endpoint)
            if not response:
                LOGGER.error('Unexpected failure fetching %r info', name)
                return
            collected[name] = response
    except Exception:
        LOGGER.error('Unexpected failure enumerating all enclosure info', exc_info=True)
        return
    return do_map_es24n(model, rclient.uuid, collected)
def do_map_es24n(model, uuid, data):
    """Build the fake JBOF enclosure object for an ES24N from the redfish
    `data` fetched by `map_es24n` (keys: Drives, PowerSubsystem, Sensors,
    ThermalSubsystem). Returns None on malformed data.
    """
    #
    # Drives
    #
    try:
        all_disks = data['Drives']
    except KeyError:
        LOGGER.error('Unexpected failure extracting all disk info', exc_info=True)
        return
    num_of_slots = len(all_disks['Members'])
    ui_info = {
        'rackmount': True,
        'top_loaded': False,
        'front_slots': num_of_slots,
        'rear_slots': 0,
        'internal_slots': 0
    }
    # index locally attached rdma nvme devices by serial number so each
    # redfish drive can be matched to its /dev node
    mounted_disks = {
        v['serial']: (k, v) for k, v in get_sys_class_nvme().items()
        if v['serial'] and v['transport_protocol'] == 'rdma'
    }
    mapped = dict()  # slot number -> device name (or None when empty/unmatched)
    drive_bay_light_status = dict()  # slot number -> 'ON'/'OFF'/None
    for disk in all_disks['Members']:
        slot = disk.get('Id', '')
        if not slot or not slot.isdigit():
            # shouldn't happen but need to catch edge-case
            continue
        else:
            slot = int(slot)

        # Check the LocationIndicatorActive before other items as we want the value
        # even if no disk is present.
        match disk.get('LocationIndicatorActive'):
            case True:
                drive_bay_light_status[slot] = 'ON'
            case False:
                drive_bay_light_status[slot] = 'OFF'
            case None:
                drive_bay_light_status[slot] = None
            case _:
                LOGGER.error('Unexpected drive bay light status')
                drive_bay_light_status[slot] = None

        state = disk.get('Status', {}).get('State')
        if not state or state == 'Absent':
            # empty bay
            mapped[slot] = None
            continue
        sn = disk.get('SerialNumber')
        if not sn:
            # can't correlate to a local device without a serial
            mapped[slot] = None
            continue
        if found := mounted_disks.get(sn):
            try:
                # we expect namespace 1 for the device (i.e. nvme1n1)
                idx = found[1]['namespaces'].index(f'{found[0]}n1')
                mapped[slot] = found[1]['namespaces'][idx]
            except ValueError:
                mapped[slot] = None
        else:
            mapped[slot] = None

    # non-slot elements (PSUs, fans, sensors) are optional; only include
    # categories that actually produced entries
    elements = {}
    if psus := map_power_supplies(data):
        elements[ElementType.POWER_SUPPLY.value] = psus
    if cooling := map_cooling(data):
        elements[ElementType.COOLING.value] = cooling
    if temperature := map_temperature_sensors(data):
        elements[ElementType.TEMPERATURE_SENSORS.value] = temperature
    if voltage := map_voltage_sensors(data):
        elements[ElementType.VOLTAGE_SENSOR.value] = voltage
    # No Current Sensors reported
    return fake_jbof_enclosure(model, uuid, num_of_slots, mapped, ui_info, elements, drive_bay_light_status)
async def is_this_an_es24n(rclient):
    """Best-effort detection of an ES24N enclosure.

    At time of writing, we've discovered that the OEM of the ES24N does not
    give us predictable model names. Seems to be random which is unfortunate
    but there isn't much we can do about it at the moment. We know what the
    URI _should_ be for this platform and we _thought_ we knew what the model
    should be, so we hard-code these values, query the specific URI and then
    check whether the model found there at least has some semblance of an
    ES24N. Returns (JbofModels.ES24N.name, uri) on a match, (None, None)
    otherwise.
    """
    # FIXME: This function shouldn't exist and the OEM should fix
    # this at some point. When they do (hopefully) fix the model,
    # remove this function
    try:
        info = await rclient.get(ES24N_EXPECTED_URI)
        if info:
            found_model = info.get('Model', '').lower()
            expected = JbofModels.ES24N.value.lower()
            # Accept when:
            # 1. the expected model string appears inside the reported model, or
            # 2. the reported model starts with the expected string, or
            # 3. the reported model starts with the expected string minus its
            #    last character (internal conversation concluded the last
            #    digit correlates to "generation", so be extra lenient and
            #    ignore it)
            if (
                expected in found_model
                or found_model.startswith(expected)
                or found_model.startswith(expected[:-1])
            ):
                return JbofModels.ES24N.name, ES24N_EXPECTED_URI
    except Exception:
        LOGGER.error('Unexpected failure determining if this is an ES24N', exc_info=True)
    return None, None
| 6,111 | Python | .py | 139 | 33.151079 | 108 | 0.593015 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,693 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure_/jbof/utils.py | from logging import getLogger
from middlewared.plugins.enclosure_.constants import (
DISK_FRONT_KEY,
DISK_REAR_KEY,
DISK_TOP_KEY,
DISK_INTERNAL_KEY,
DRIVE_BAY_LIGHT_STATUS,
SUPPORTS_IDENTIFY_KEY,
SUPPORTS_IDENTIFY_STATUS_KEY,
)
from middlewared.plugins.enclosure_.enums import (
ElementStatus,
RedfishStatusHealth,
RedfishStatusState
)
from middlewared.plugins.enclosure_.slot_mappings import get_jbof_slot_info
LOGGER = getLogger(__name__)
def fake_jbof_enclosure(model, uuid, num_of_slots, mapped, ui_info, elements=None, drive_bay_light_status=None):
    """This function takes the nvme devices that been mapped
    to their respective slots and then creates a "fake" enclosure
    device that matches (similarly) to what our real enclosure
    mapping code does (get_ses_enclosures()). It's _VERY_ important
    that the keys in the `fake_enclosure` dictionary exist because
    our generic enclosure mapping logic expects certain top-level
    keys.

    Furthermore, we generate DMI (SMBIOS) information for this
    "fake" enclosure because our enclosure mapping logic has to have
    a guaranteed unique key for each enclosure so it can properly
    map the disks accordingly

    `mapped` dict: slot number -> device name (or None for an empty bay)
    `elements` dict | None: optional non-slot element categories to merge in
    `drive_bay_light_status` dict | None: slot number -> LED state
    """
    # The original signature used mutable default arguments ({}), the classic
    # shared-default pitfall; None sentinels keep calls backward-compatible
    # without that risk.
    if elements is None:
        elements = {}
    if drive_bay_light_status is None:
        drive_bay_light_status = {}
    # TODO: The `fake_enclosure` object should be removed from this
    # function and should be generated by the
    # `plugins.enclosure_/enclosure_class.py:Enclosure` class so we
    # can get rid of duplicate logic in this module and in that class
    fake_enclosure = {
        'id': uuid,
        'dmi': uuid,
        'model': model,
        'should_ignore': False,
        'sg': None,
        'bsg': None,
        'name': f'{model} JBoF Enclosure',
        'controller': False,
        'status': ['OK'],
        'elements': {'Array Device Slot': {}}
    }
    disks_map = get_jbof_slot_info(model)
    if not disks_map:
        # unknown model; flag it so callers skip this enclosure
        fake_enclosure['should_ignore'] = True
        return [fake_enclosure]

    fake_enclosure.update(ui_info)
    # every slot definition for this model lives under the single DEFAULT
    # version entry; hoist the deep lookup out of the loop
    slot_defs = disks_map['versions']['DEFAULT']['model'][model]
    for slot in range(1, num_of_slots + 1):
        device = mapped.get(slot)
        # the `value_raw` variables represent the
        # value they would have if a device was
        # inserted into a proper SES device (or not).
        # Since this is NVMe (which deals with PCIe)
        # that paradigm doesn't exist per se but we're
        # "faking" a SES device, hence the hex values.
        # The `status` variables use same logic.
        if device is not None:
            status = 'OK'
            value_raw = 0x1000000
        else:
            status = 'Not installed'
            value_raw = 0x5000000

        slot_def = slot_defs[slot]
        mapped_slot = slot_def['mapped_slot']
        light = slot_def[SUPPORTS_IDENTIFY_KEY]
        # light_status will follow light unless explicitly overridden
        light_status = slot_def.get(SUPPORTS_IDENTIFY_STATUS_KEY, light)
        led = drive_bay_light_status.get(slot) if light_status else None

        fake_enclosure['elements']['Array Device Slot'][mapped_slot] = {
            'descriptor': f'Disk #{slot}',
            'status': status,
            'value': None,
            'value_raw': value_raw,
            'dev': device,
            SUPPORTS_IDENTIFY_KEY: light,
            DISK_FRONT_KEY: slot_def[DISK_FRONT_KEY],
            DISK_REAR_KEY: slot_def[DISK_REAR_KEY],
            DISK_TOP_KEY: slot_def[DISK_TOP_KEY],
            DISK_INTERNAL_KEY: slot_def[DISK_INTERNAL_KEY],
            DRIVE_BAY_LIGHT_STATUS: led,
            'original': {
                'enclosure_id': uuid,
                'enclosure_sg': None,
                'enclosure_bsg': None,
                'descriptor': f'slot{slot}',
                'slot': slot,
            }
        }

    # merge in the optional non-slot element categories (PSUs, fans, sensors)
    for element_type, element_data in elements.items():
        if element_data:
            fake_enclosure['elements'][element_type] = element_data

    return [fake_enclosure]
def map_redfish_status_to_status(status):
    """Return an element status string derived from a Redfish Status object."""
    # an Absent state wins over whatever health may be reported
    if status.get('State') == RedfishStatusState.ABSENT.value:
        return ElementStatus.NOT_INSTALLED.value
    health = status.get('Health')
    if not health:
        return ElementStatus.UNKNOWN.value
    translation = {
        RedfishStatusHealth.CRITICAL.value: ElementStatus.CRITICAL.value,
        RedfishStatusHealth.OK.value: ElementStatus.OK.value,
        RedfishStatusHealth.WARNING.value: ElementStatus.NONCRITICAL.value,
    }
    return translation.get(health, ElementStatus.UNKNOWN.value)
def map_redfish_to_value(data, keys):
    """Return a comma separated string of the truthy values found in `data`
    for `keys`, or None when no such values are present."""
    # It was decided NOT to try to map these to SES-like values, as this
    # would introduce an impedance mismatch when we circle back to the
    # Redfish provider again.
    present = [value for value in (data.get(key) for key in keys) if value]
    return ', '.join(present) or None
def map_redfish_psu_to_value(psu):
    """Return the value string for a PSU element.

    Just use LineInputStatus (DSP0268_2024.1 6.103.5.2 LineInputStatus).
    """
    return map_redfish_to_value(psu, ('LineInputStatus',))
def map_redfish_psu(psu):
    """Map a Redfish PowerSupply member to the enclosure services element format.

    Redfish Data Model Specification: https://www.dmtf.org/dsp/DSP0268
    (PowerSupply resource; the ES24n implements #PowerSupply.v1_5_1.PowerSupply).
    Fields consumed: Name, Model, SerialNumber, FirmwareVersion, Manufacturer,
    PowerCapacityWatts, LineInputStatus and Status, e.g.:
        {'Id': 'PSU1', 'Name': 'PSU1', 'Model': 'YSEF1600EM-2A01P10',
         'SerialNumber': 'S0A00A3032029000265', 'FirmwareVersion': 'A00',
         'Manufacturer': '3Y POWER', 'PowerCapacityWatts': 1600,
         'LineInputStatus': 'Normal', 'Status': {'Health': 'OK', 'State': 'Enabled'}}
    """
    parts = [
        psu.get(field, '')
        for field in ('Name', 'Model', 'SerialNumber', 'FirmwareVersion', 'Manufacturer')
    ]
    watts = psu.get('PowerCapacityWatts')
    if watts:
        parts.append(f'{watts}W')
    return {
        'descriptor': ','.join(parts),
        'status': map_redfish_status_to_status(psu['Status']),
        'value': map_redfish_psu_to_value(psu),
        'value_raw': None,
    }
def map_power_supplies(data):
    """Return {Id: psu element} for every PowerSupply member reporting an Id."""
    members = data['PowerSubsystem']['PowerSupplies']['Members']
    return {
        member['Id']: map_redfish_psu(member)
        for member in members
        if member.get('Id')
    }
def map_redfish_fan_to_value(data):
    """Return a human-readable value string for a Redfish Fan member.

    Includes the fan speed (when reported under SpeedPercent.SpeedRPM) and
    whether the locate/identify indicator is active; returns None when
    neither datum is present.
    """
    values = []
    if speedpercent := data.get('SpeedPercent'):
        # SpeedRPM is nested inside the SpeedPercent excerpt, e.g.
        # {'DataSourceUri': ..., 'SpeedRPM': 9920.0}
        if speedrpm := speedpercent.get('SpeedRPM'):
            values.append(f'SpeedRPM={speedrpm}')
    # the walrus form in the original double-checked truthiness; a single
    # check is equivalent
    if data.get('LocationIndicatorActive'):
        values.append('LocationIndicatorActive')
    return ', '.join(values) or None
def map_redfish_fan(data):
    """Map a Redfish Fan member to the enclosure services element format.

    Example member:
        {'Id': 'Fan1', 'Name': 'Fan1', 'LocationIndicatorActive': False,
         'SpeedPercent': {'DataSourceUri': '...', 'SpeedRPM': 9920.0},
         'Status': {'Health': 'OK', 'State': 'Enabled'}}
    """
    return {
        'descriptor': data.get('Name'),
        'status': map_redfish_status_to_status(data['Status']),
        'value': map_redfish_fan_to_value(data),
        'value_raw': None,
    }
def map_cooling(data):
    """Return {Id: fan element} for every Fan member reporting an Id."""
    members = data['ThermalSubsystem']['Fans']['Members']
    return {
        member['Id']: map_redfish_fan(member)
        for member in members
        if member.get('Id')
    }
def map_redfish_sensor_to_value(data):
    """Render a Redfish sensor reading as '<reading> <units>' (or just
    '<reading>' when no units are reported); None when there is no truthy
    Reading."""
    reading = data.get('Reading')
    if not reading:
        return None
    units = data.get('ReadingUnits')
    # coerce to str either way so callers always receive text
    return f'{reading} {units}' if units else f'{reading}'
def map_redfish_temperature_sensor(data):
    """Map a Redfish temperature Sensor member to the enclosure services
    element format.

    Example member:
        {'Id': 'TempDrive1', 'Name': 'Temperature Sensor Drive 1',
         'Reading': 26.0, 'ReadingType': 'Temperature', 'ReadingUnits': 'C',
         'Status': {'Health': 'OK', 'State': 'Enabled'}}
    """
    return {
        'descriptor': data.get('Name'),
        'status': map_redfish_status_to_status(data['Status']),
        'value': map_redfish_sensor_to_value(data),
        'value_raw': None,
    }
def map_temperature_sensors(data):
    """Return {Id: sensor element} for every Sensor member whose ReadingType
    is Temperature and that reports an Id."""
    return {
        member['Id']: map_redfish_temperature_sensor(member)
        for member in data['Sensors']['Members']
        if member.get('Id') and member.get('ReadingType') == 'Temperature'
    }
def map_redfish_voltage_sensor(data):
    """Map a Redfish voltage Sensor member to the enclosure services element
    format.

    Example member:
        {'Id': 'VoltPS1Vin', 'Name': 'VoltPS1Vin', 'Reading': 206.0,
         'ReadingType': 'Voltage', 'Status': {'Health': 'OK', 'State': 'Enabled'}}
    """
    return {
        'descriptor': data.get('Name'),
        'status': map_redfish_status_to_status(data['Status']),
        'value': map_redfish_sensor_to_value(data),
        'value_raw': None,
    }
def map_voltage_sensors(data):
    """Return {Id: sensor element} for every Sensor member whose ReadingType
    is Voltage and that reports an Id."""
    return {
        member['Id']: map_redfish_voltage_sensor(member)
        for member in data['Sensors']['Members']
        if member.get('Id') and member.get('ReadingType') == 'Voltage'
    }
24,694 | enclosure.py | truenas_middleware/src/middlewared/middlewared/plugins/webui/enclosure.py | from middlewared.schema import accepts
from middlewared.service import Service
class WebUIEnclosureService(Service):

    class Config:
        namespace = 'webui.enclosure'
        private = True
        cli_private = True
        role_prefix = 'ENCLOSURE'

    def disk_detail_dict(self):
        """Template of per-disk detail keys (all None) that every slot gets
        before real disk details are merged in."""
        return {
            'size': None,
            'model': None,
            'serial': None,
            'type': None,
            'rotationrate': None,
        }

    def map_disk_details(self, slot_info, disk_deets):
        """Fill `slot_info`'s detail keys from `disk_deets` (device.get_disks
        output keyed by device name); missing devices/keys become None."""
        for key in self.disk_detail_dict():
            slot_info[key] = disk_deets.get(slot_info['dev'], {}).get(key)

    def map_zpool_info(self, enc_id, disk_slot, dev, pool_info):
        """Replace the bare device name in `pool_info['vdev_disks']` with an
        {enclosure_id, slot, dev} dict so the UI can locate the disk."""
        info = {'enclosure_id': enc_id, 'slot': int(disk_slot), 'dev': dev}
        try:
            index = pool_info['vdev_disks'].index(dev)
            pool_info['vdev_disks'][index] = info
        except ValueError:
            # it means the disk's status in zfs land != ONLINE
            # (i.e. it could be OFFLINE) and so it won't show
            # up in the `vdev_disks` key, so it's best to catch
            # this error and still append the disk to the list
            # The `pool_info['disk_status']` key will be added
            # which will give more insight into what's going on
            pool_info['vdev_disks'].append(info)

    def dashboard_impl(self):
        """Assemble the enclosure dashboard payload: enclosure2.query output
        enriched in-place with disk details and zpool membership per slot."""
        enclosures = self.middleware.call_sync('enclosure2.query')
        if enclosures:
            disk_deets = self.middleware.call_sync('device.get_disks')
            disks_to_pools = self.middleware.call_sync('zpool.status', {'real_paths': True})
            for enc in enclosures:
                for disk_slot, slot_info in enc['elements']['Array Device Slot'].items():
                    for to_pop in ('original', 'value', 'value_raw'):
                        # remove some values that webUI doesn't use
                        slot_info.pop(to_pop)

                    pool_info = None
                    slot_info.update({'drive_bay_number': int(disk_slot), **self.disk_detail_dict()})
                    if slot_info['dev']:
                        # map disk details
                        # NOTE: some of these fields need to be removed
                        # work with UI to remove unnecessary ones
                        self.map_disk_details(slot_info, disk_deets)
                        if pool_info := disks_to_pools['disks'].get(slot_info['dev']):
                            # now map zpool info
                            self.map_zpool_info(enc['id'], disk_slot, slot_info['dev'], pool_info)
                        # NOTE(review): empty bays never receive a 'pool_info'
                        # key at all (this update only runs when a dev is
                        # present) — presumably intentional; confirm with UI
                        slot_info.update({'pool_info': pool_info})
        return enclosures

    @accepts(roles=['ENCLOSURE_READ'])
    def dashboard(self):
        """This endpoint is used exclusively by the webUI team for
        the enclosure dashboard page for iX sold hardware.

        An example of what this returns looks like the following:
        (NOTE: some redundant information cut out for brevity)
        [{
            "name": "iX 4024Sp c205",
            "model": "M40",
            "controller": true,
            "dmi": "TRUENAS-M40-HA",
            "status": ["OK"],
            "id": "5b0bd6d1a30714bf",
            "vendor": "iX",
            "product": "4024Sp",
            "revision": "c205",
            "bsg": "/dev/bsg/0:0:23:0",
            "sg": "/dev/sg25",
            "pci": "0:0:23:0",
            "rackmount": true,
            "top_loaded": false,
            "front_slots": 24,
            "rear_slots": 0,
            "internal_slots": 0,
            "elements": {
                "Array Device Slot": {
                    "1": {
                        "descriptor": "slot00",
                        "status": "OK",
                        "dev": "sda",
                        "supports_identify_light": true,
                        "name": "sda",
                        "size": 12000138625024,
                        "model": "HUH721212AL4200",
                        "serial": "XXXXX",
                        "advpowermgmt": "DISABLED",
                        "togglesmart": true,
                        "smartoptions": "",
                        "transfermode": "Auto",
                        "hddstandby": "ALWAYS ON",
                        "description": "",
                        "rotationrate": 7200,
                        "pool_info": {
                            "pool_name": "test",
                            "disk_status": "ONLINE",
                            "disk_read_errors": 0,
                            "disk_write_errors": 0,
                            "disk_checksum_errors": 0,
                            "vdev_name": "mirror-0",
                            "vdev_type": "data",
                            "vdev_disks": [
                                {
                                    "enclosure_id": "5b0bd6d1a30714bf",
                                    "slot": 1,
                                    "dev": "sda"
                                },
                                {
                                    "enclosure_id": "5b0bd6d1a30714bf",
                                    "slot": 2,
                                    "dev": "sdb"
                                },
                                {
                                    "enclosure_id": "5b0bd6d1a30714bf",
                                    "slot": 3,
                                    "dev": "sdc"
                                }
                            ]
                        }
                    }
                }
            }
        }]
        """
        return self.dashboard_impl()
| 5,704 | Python | .py | 128 | 26.03125 | 101 | 0.427211 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,695 | main_dashboard.py | truenas_middleware/src/middlewared/middlewared/plugins/webui/main_dashboard.py | from datetime import datetime, timezone
from time import clock_gettime, CLOCK_MONOTONIC_RAW, time
from middlewared.schema import accepts
from middlewared.service import Service
from middlewared.utils import sw_version, sw_codename
class WebUIMainDashboardService(Service):

    class Config:
        namespace = 'webui.main.dashboard'
        cli_private = True
        private = True

    def sys_info_impl(self):
        """Collect the local node's system information for the dashboard's
        System Information card (platform, version, license, hostname,
        uptime and current UTC time)."""
        dmi = self.middleware.call_sync('system.dmidecode_info')
        platform = 'Generic'
        if dmi['system-product-name'].startswith(('FREENAS-', 'TRUENAS-')):
            platform = dmi['system-product-name']

        # we query database table directly because using the standard
        # `network.configuration.config` is just too slow and too heavy
        # for simply determining the hostname.
        # NOTE: we show what is written to the database which doesn't
        # necessarily mean that's what the hostname is on OS side
        nc = self.middleware.call_sync('datastore.query', 'network.globalconfiguration')
        if self.middleware.call_sync('failover.node') in ('A', 'MANUAL'):
            hostname = nc[0]['gc_hostname']
        else:
            # node B of an HA pair has its own hostname column
            hostname = nc[0]['gc_hostname_b']

        return {
            'platform': platform,
            'version': sw_version(),
            'codename': sw_codename(),
            'license': self.middleware.call_sync('system.license'),
            'system_serial': dmi['system-serial-number'],
            'hostname': hostname,
            # raw monotonic clock reading — on Linux this is effectively
            # seconds since boot, unaffected by NTP slewing (TODO confirm
            # that's the intent vs. CLOCK_BOOTTIME)
            'uptime_seconds': clock_gettime(CLOCK_MONOTONIC_RAW),
            'datetime': datetime.fromtimestamp(time(), timezone.utc),
        }

    @accepts(roles=['READONLY_ADMIN'])
    def sys_info(self):
        """This endpoint was designed to be exclusively
        consumed by the webUI team. This is what makes
        up the System Information card on the main
        dashboard after a user logs in.
        """
        info = self.sys_info_impl()
        try:
            info['remote_info'] = self.middleware.call_sync(
                'failover.call_remote', 'webui.main.dashboard.sys_info_impl'
            )
        except Exception:
            # could be ENOMETHOD (fresh upgrade) or we could
            # be on a non-HA system. Either way, doesn't matter
            # we just need to try and get the information and
            # set the key to None if we fail
            info['remote_info'] = None
        return info
24,696 | crypto.py | truenas_middleware/src/middlewared/middlewared/plugins/webui/crypto.py | from middlewared.schema import accepts, Int
from middlewared.service import Service
class WebUICryptoService(Service):

    class Config:
        namespace = 'webui.crypto'
        private = True
        cli_private = True

    @accepts(roles=['READONLY_ADMIN'])
    async def certificate_profiles(self):
        """Passthrough to `certificate.profiles` for the webUI."""
        profiles = await self.middleware.call('certificate.profiles')
        return profiles

    @accepts(roles=['READONLY_ADMIN'])
    async def certificateauthority_profiles(self):
        """Passthrough to `certificateauthority.profiles` for the webUI."""
        profiles = await self.middleware.call('certificateauthority.profiles')
        return profiles

    @accepts(Int('cert_id'), roles=['READONLY_ADMIN'])
    async def get_certificate_domain_names(self, cert_id):
        """Passthrough to `certificate.get_domain_names` for the given cert id."""
        domains = await self.middleware.call('certificate.get_domain_names', cert_id)
        return domains

    @accepts(roles=['READONLY_ADMIN'])
    async def csr_profiles(self):
        """Passthrough to `certificate.certificate_signing_requests_profiles`."""
        profiles = await self.middleware.call('certificate.certificate_signing_requests_profiles')
        return profiles
24,697 | journal_exceptions.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/journal_exceptions.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
class UnableToDetermineOSVersion(Exception):
    """Raised in the JournalSync thread when we're unable to detect the
    remote node's OS version (i.e. if the remote node goes down for an
    upgrade/reboot, etc.).
    """
class OSVersionMismatch(Exception):
    """Raised in the JournalSync thread when the remote node's OS version
    does not match the local node's OS version.
    """
| 565 | Python | .py | 17 | 29.470588 | 70 | 0.737615 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,698 | detect_enclosure.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/detect_enclosure.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import pathlib
import subprocess
from pyudev import Context
from ixhardware import PLATFORM_PREFIXES
from middlewared.service import Service
from middlewared.utils.functools_ import cache
from middlewared.plugins.enclosure_.ses_enclosures2 import get_ses_enclosures
class EnclosureDetectionService(Service):

    class Config:
        namespace = 'failover.enclosure'
        private = True

    # cached because the underlying hardware cannot change at runtime
    @cache
    def detect(self):
        """Detect the HA platform and this controller's position.

        Returns a 2-tuple `(HARDWARE, NODE)` where HARDWARE is a platform
        identifier (e.g. 'IXKVM', 'BHYVE', 'LAJOLLA2', 'SUBLIGHT',
        'ECHOWARP', 'PUMA') and NODE is 'A' or 'B'. Both default to
        'MANUAL' when the platform/position cannot be determined.
        """
        HARDWARE = NODE = 'MANUAL'
        dmi = self.middleware.call_sync('system.dmidecode_info')
        product = dmi['system-product-name']
        if not product:
            # no reason to continue since we've got no path forward
            return HARDWARE, NODE
        elif dmi['system-manufacturer'] == 'QEMU':
            serial = dmi['system-serial-number']
            if not serial.startswith('ha') and not serial.endswith(('_c1', '_c2')):
                # truenas is often installed in KVM so we need to check our specific
                # strings in DMI and bail out early here
                return HARDWARE, NODE
            else:
                HARDWARE = 'IXKVM'
                # serial ends with '1' for controller A, anything else is B
                NODE = 'A' if serial[-1] == '1' else 'B'
                return HARDWARE, NODE
        elif product == 'BHYVE':
            # bhyve host configures a scsi_generic device that when sent an inquiry will
            # respond with a string that we use to determine the position of the node
            ctx = Context()
            for i in ctx.list_devices(subsystem='scsi_generic'):
                if (model := i.attributes.get('device/model')) is not None:
                    # pyudev may hand back bytes or str depending on version
                    model = model.decode().strip() if isinstance(model, bytes) else model.strip()
                    if model == 'TrueNAS_A':
                        NODE = 'A'
                        HARDWARE = 'BHYVE'
                        break
                    elif model == 'TrueNAS_B':
                        NODE = 'B'
                        HARDWARE = 'BHYVE'
                        break
            return HARDWARE, NODE
        elif product.startswith('TRUENAS-F'):
            HARDWARE = 'LAJOLLA2'
            rv = subprocess.run(['ipmi-raw', '0', '3c', '0e'], stdout=subprocess.PIPE)
            if rv.stdout:
                # Viking info via VSS2249RQ Management Over IPMI document Section 5.5 page 15
                NODE = 'A' if rv.stdout.decode().strip()[-1] == '0' else 'B'
            return HARDWARE, NODE
        elif product.startswith('TRUENAS-H'):
            HARDWARE = 'SUBLIGHT'
            rv = subprocess.run(['ipmi-raw', '0', '6', '52', 'b', 'b2', '9', '0'], stdout=subprocess.PIPE)
            if rv.stdout:
                # mask bit 1 of the last byte returned by the MCU
                if (val := (int(rv.stdout.decode().strip()[-2:], base=16) & 1)) not in (0, 1):
                    # h-series is a unique platform so best to have messages like these for ease of
                    # troubleshooting if we're to hit something unexpected
                    self.logger.error('Unexpected value returned from MCU: %d (expected 0 or 1)', val)
                    return HARDWARE, NODE
                # (platform team has documentation if needed)
                # Bit 1 of 10th byte is 1 when "primary" controller from MCU 0xb2
                NODE = 'A' if val == 1 else 'B'
            return HARDWARE, NODE
        elif not product.startswith(PLATFORM_PREFIXES):
            # users run TrueNAS on all kinds of exotic hardware. Most of the time, the
            # exotic hardware doesn't respond to standards conforming requests. Furthermore,
            # the enclosure feature is specific to our HA appliances so no reason to continue
            # down this path.
            return HARDWARE, NODE

        # fall through: probe the attached SES enclosures to identify m/x-series
        for enc in get_ses_enclosures(False):
            if enc.is_mseries:
                HARDWARE = 'ECHOWARP'
                # m-series encodes node position in the SES product string
                if enc.product == '4024Sp':
                    return HARDWARE, 'A'
                elif enc.product == '4024Ss':
                    return HARDWARE, 'B'
            elif enc.is_xseries:
                HARDWARE = 'PUMA'
                esce = enc.elements.get('Enclosure Services Controller Electronics', {})
                for i in esce.values():
                    if i['descriptor'].find('ESCE A_') != -1:
                        node = 'A'
                    elif i['descriptor'].find('ESCE B_') != -1:
                        node = 'B'
                    else:
                        # unknown descriptor layout; give up on this enclosure
                        break
                    # We then cast the SES address (deduced from SES VPD pages)
                    # to an integer and subtract 1. Then cast it back to hexadecimal.
                    # We then compare if the SAS expander's SAS address is the same
                    # as the SAS expanders SES address.
                    ses_addr = hex(int(i['descriptor'][7:].strip(), 16) - 1)
                    sas_addr = pathlib.Path(
                        f'/sys/class/enclosure/{enc.pci}/device/sas_address'
                    ).read_text().strip()
                    if ses_addr == sas_addr:
                        return HARDWARE, node
        return HARDWARE, NODE
| 5,254 | Python | .py | 103 | 36.417476 | 106 | 0.548199 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,699 | event.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/event.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import asyncio
import os
import time
import contextlib
import threading
import logging
import errno
from collections import defaultdict
from middlewared.utils import filter_list
from middlewared.service import Service, job, accepts
from middlewared.service_exception import CallError
from middlewared.schema import Dict, Bool, Int
from middlewared.plugins.docker.state_utils import Status as DockerStatus
# from middlewared.plugins.failover_.zpool_cachefile import ZPOOL_CACHE_FILE
from middlewared.plugins.failover_.event_exceptions import AllZpoolsFailedToImport, IgnoreFailoverEvent, FencedError
from middlewared.plugins.failover_.scheduled_reboot_alert import WATCHDOG_ALERT_FILE
from middlewared.plugins.virt.utils import Status as VirtStatus
logger = logging.getLogger('failover')
FAILOVER_LOCK_NAME = 'vrrp_event'
# When we get to the point of transitioning to MASTER or BACKUP
# we wrap the associated methods (`vrrp_master` and `vrrp_backup`)
# in a job (lock) so that we can protect the failover event.
#
# This does a few things:
#
# 1. protects us if we have an interface that has a
# rapid succession of state changes
#
# 2. if we have a near simultaneous amount of
# events get triggered for all interfaces
# --this can happen on external network failure
# --this happens when one node reboots
# --this happens when keepalived service is restarted
#
# If any of the above scenarios occur, we want to ensure
# that only one thread is trying to run fenced or import the
# zpools.
class FailoverEventsService(Service):

    class Config:
        private = True
        namespace = 'failover.events'

    # represents if a failover event was successful or not
    # (set by the vrrp_master/vrrp_backup jobs; 'SUCCESS', 'ERROR' or 'INFO')
    FAILOVER_RESULT = None

    # list of critical services that get restarted first
    # before the other services during a failover event
    CRITICAL_SERVICES = ['iscsitarget', 'cifs', 'nfs']

    # list of services that use service.become_active instead of
    # service.restart during a failover on the MASTER node.
    BECOME_ACTIVE_SERVICES = ['iscsitarget']

    # option to be given when changing the state of a service
    # during a failover event, we do not want to replicate
    # the state of a service to the other controller since
    # that's being handled by us explicitly
    HA_PROPAGATE = {'ha_propagate': False}

    # this is the time limit we place on exporting the
    # zpool(s) when becoming the BACKUP node
    ZPOOL_EXPORT_TIMEOUT = 4  # seconds
async def restart_service(self, service, timeout):
logger.info('Restarting %s', service)
return await asyncio.wait_for(
self.middleware.create_task(self.middleware.call('service.restart', service, self.HA_PROPAGATE)),
timeout=timeout,
)
async def become_active_service(self, service, timeout):
logger.info('Become active %s', service)
return await asyncio.wait_for(
self.middleware.create_task(self.middleware.call('service.become_active', service)),
timeout=timeout,
)
@accepts(Dict(
'restart_services',
Bool('critical', default=False),
Int('timeout', default=15),
))
async def restart_services(self, data):
"""
Concurrently restart services during a failover
master event.
`critical` Boolean when True will only restart the
critical services.
`timeout` Integer representing the maximum amount
of time to wait for a given service to (re)start.
"""
to_restart = await self.middleware.call('datastore.query', 'services_services')
to_restart = [i['srv_service'] for i in to_restart if i['srv_enable']]
if data['critical']:
to_restart = [i for i in to_restart if i in self.CRITICAL_SERVICES]
else:
to_restart = [i for i in to_restart if i not in self.CRITICAL_SERVICES]
exceptions = await asyncio.gather(
*[
self.become_active_service(svc, data['timeout'])
if svc in self.BECOME_ACTIVE_SERVICES
else self.restart_service(svc, data['timeout'])
for svc in to_restart
],
return_exceptions=True
)
for svc, exc in zip(to_restart, exceptions):
if isinstance(exc, asyncio.TimeoutError):
logger.error(
'Failed to restart service "%s" after %d seconds',
svc, data['timeout']
)
async def refresh_failover_status(self, jobid, event):
# this is called in a background task so we need to make sure that
# we wait on the current failover job to complete before we try
# and update the failover status
try:
wait_id = await self.middleware.call('core.job_wait', jobid)
await wait_id.wait(raise_error=True)
except (CallError, KeyError):
# `CallError` means the failover job didn't complete successfully
# but we still want to refresh status in this scenario
# `KeyError` shouldn't be possible but there exists a hypothetical
# race condition...but we still want to refresh status
pass
except Exception:
self.logger.error('Unhandled failover status exception', exc_info=True)
return
# update HA status on this controller
await self.middleware.call('failover.status_refresh')
if event == 'BACKUP':
try:
# we need to refresh status on the active node since webui subscribes
# to failover.disabled.reasons which is responsible for showing the
# various components on the dashboard as well as the HA status icon
await self.middleware.call('failover.call_remote', 'failover.status_refresh')
except Exception:
self.logger.warning('Failed to refresh failover status on active node')
def run_call(self, method, *args):
try:
return self.middleware.call_sync(method, *args)
except IgnoreFailoverEvent:
# `self.validate()` calls this method
raise
except Exception:
raise
def event(self, ifname, event):
refresh, job = True, None
try:
job = self._event(ifname, event)
return job
except IgnoreFailoverEvent:
refresh = False
except Exception:
self.logger.error('Unhandled exception processing failover event', exc_info=True)
finally:
# refreshing the failover status can cause delays in failover
# there is no reason to refresh it if the event has been ignored
if refresh and job is not None:
self.middleware.create_task(self.refresh_failover_status(job.id, event))
    def _export_zpools(self, volumes):
        """Force-export every non-OFFLINE pool in `volumes`.

        Runs in its own thread; the caller waits ZPOOL_EXPORT_TIMEOUT
        seconds and reboots the node if this thread is still alive.
        """
        # export the zpool(s)
        try:
            for vol in volumes:
                if vol['status'] != 'OFFLINE':
                    self.middleware.call_sync('zfs.pool.export', vol['name'], {'force': True})
                    logger.info('Exported "%s"', vol['name'])
        except Exception as e:
            # catch any exception that could be raised
            # We sleep for 5 seconds here because this is
            # in its own thread. The calling thread waits
            # for self.ZPOOL_EXPORT_TIMEOUT and if this
            # thread is_alive(), then we violently reboot
            # the node
            # NOTE: a single export failure deliberately aborts the remaining
            # exports and triggers the caller's reboot path via the sleep below
            logger.error('Error exporting "%s" with error %s', vol['name'], e)
            time.sleep(self.ZPOOL_EXPORT_TIMEOUT + 1)
def generate_failover_data(self):
# only care about name, guid, and status
volumes = self.run_call(
'pool.query', [], {
'select': ['name', 'guid', 'status']
}
)
failovercfg = self.run_call('failover.config')
interfaces = self.run_call('interface.query')
internal_ints = self.run_call('failover.internal_interfaces')
data = {
'disabled': failovercfg['disabled'],
'master': failovercfg['master'],
'timeout': failovercfg['timeout'],
'groups': defaultdict(list),
'volumes': volumes,
'non_crit_interfaces': [
i['id'] for i in filter_list(interfaces, [
('failover_critical', '!=', True),
])
],
'internal_interfaces': internal_ints,
}
for i in filter_list(interfaces, [('failover_critical', '=', True)]):
data['groups'][i['failover_group']].append(i['id'])
return data
def validate(self, ifname, event):
"""
When a failover event is generated we need to account for a few
scenarios.
1. if we are currently processing a failover event and then
receive another event and the new event is a _different_
event than the current one, we will wait for the current
job to finish. Once that job is finished, we'll begin to
process the next job that came in behind it. This is
particularly important when an HA system is booted up for
the first time (both controllers) OR if one controller is
powered off and only one is powered on. In either of these
scenarios, keepalived will send a BACKUP event and then 2
seconds middlewared updates the config and reloads keepalived
which sends another BACKUP event and then finally another 2
seconds later, a MASTER event will be sent. In testing,
the BACKUP event had not finished when we received the MASTER
event so we ignored it therefore leaving the controller(s) in
a busted state (both are BACKUP or the single controller would
never promote itself)
2. if we are currently processing a failover event and then
receive another event and the new event is the _same_
event as the current one, we log an informational message
and raise an `IgnoreFailoverEvent` exception.
"""
current_events = self.run_call(
'core.get_jobs', [
('OR', [
('method', '=', 'failover.events.vrrp_master'),
('method', '=', 'failover.events.vrrp_backup')
]),
]
)
for i in current_events:
cur_iface = i['arguments'][1]
if i['state'] == 'RUNNING' and i['arguments'][2] == event:
msg = f'Received {event!r} event for {ifname!r} but '
msg += f'a duplicate event is currently running for {cur_iface!r}. Ignoring.'
logger.info(msg)
raise IgnoreFailoverEvent()
    def _event(self, ifname, event):
        """Vet a failover event and dispatch to vrrp_master/vrrp_backup.

        Raises `IgnoreFailoverEvent` when the event should be dropped
        (failover disabled, non-critical interface, pools already
        imported, or a duplicate event is in flight).
        """
        # generate data to be used during the failover event
        fobj = self.generate_failover_data()
        if event != 'forcetakeover':
            if fobj['disabled'] and not fobj['master']:
                # if forcetakeover is false, and failover is disabled
                # and we're not set as the master controller, then
                # there is nothing we need to do.
                logger.warning('Failover is disabled but this node is marked as the BACKUP node. Assuming BACKUP.')
                raise IgnoreFailoverEvent()
            elif fobj['disabled']:
                raise IgnoreFailoverEvent()
            # If there is a state change on a non-critical interface then
            # ignore the event and return
            ignore = [i for i in fobj['non_crit_interfaces'] if i in ifname]
            if ignore:
                logger.warning('Ignoring state change on non-critical interface "%s".', ifname)
                raise IgnoreFailoverEvent()
            # an OFFLINE pool means it still needs to be imported here
            needs_imported = False
            for pool in self.run_call('pool.query', [('name', 'in', [i['name'] for i in fobj['volumes']])]):
                if pool['status'] == 'OFFLINE':
                    needs_imported = True
                    break
            # means all zpools are already imported
            if fobj['volumes'] and event == 'MASTER' and not needs_imported:
                logger.warning(
                    'Received a MASTER event on %r but zpools are already imported, ignoring.',
                    ifname
                )
                raise IgnoreFailoverEvent()
        # if we get here then the last verification step that
        # we need to do is ensure there aren't any current ongoing failover events
        self.run_call('failover.events.validate', ifname, event)
        # start the MASTER failover event
        if event in ('MASTER', 'forcetakeover'):
            return self.run_call('failover.events.vrrp_master', fobj, ifname, event)
        # start the BACKUP failover event
        elif event == 'BACKUP':
            return self.run_call('failover.events.vrrp_backup', fobj, ifname, event)
def fenced_start_loop(self, max_retries=4):
# When active node is rebooted administratively from shell, the
# fenced process will continue running on the node until systemd
# finishes terminating services and actually reboots. Hence, we may
# need to retry a few times before fenced goes away on the remote
# node. NOTE: fenced waits for ~11 or so seconds to see if the
# reservation keys change.
total_time_waited = 0
for i in range(1, max_retries + 1):
start = time.time()
fenced_error = self.run_call('failover.fenced.start')
if fenced_error != 2:
break
else:
total_time_waited += int(time.time() - start)
retrying = ', retrying.' if i < max_retries else ''
logger.warning(
'Fenced is running on remote node after waiting %d seconds%s',
total_time_waited,
retrying
)
return fenced_error
    def iscsi_cleanup_alua_state(self):
        """
        Cleanup iSCSI ALUA state if we are now becoming ACTIVE node, and
        previously were STANDBY node.

        Returns a tuple `(suspended, cleaned)` of booleans indicating
        whether iSCSI was suspended and whether the ALUA cleanup calls
        completed without error.
        """
        # We will suspend iSCSI and then close any existing iSCSI sessions
        # to avoid inflight I/O interfering with the LUN replacement during
        # become_active. Suspending iSCSI means BUSY will be returned.
        suspended = cleaned = False
        try:
            try:
                logger.info('Suspending iSCSI')
                self.run_call('iscsi.scst.suspend', 30)
                suspended = True
                logger.info('Suspended iSCSI')
            except FileNotFoundError:
                # This can occur if we are booting into ACTIVE node
                # rather than becoming ACTIVE from STANDBY.
                logger.info('Did not suspend iSCSI')
            else:
                # only close sessions when the suspend actually took effect
                logger.info('Closing iSCSI sessions')
                self.run_call('iscsi.alua.force_close_sessions')
                logger.info('Closed iSCSI sessions')
            logger.info('calling iscsi ALUA active elected')
            self.run_call('iscsi.alua.active_elected')
            logger.info('done calling iscsi ALUA active elected')
            cleaned = True
        except Exception:
            logger.exception('Unexpected failure setting up iscsi')
        return (suspended, cleaned)
    @job(lock=FAILOVER_LOCK_NAME)
    def vrrp_master(self, job, fobj, ifname, event):
        """Promote this controller to the ACTIVE/MASTER node.

        Runs fenced to reserve the disks, imports the zpool(s), brings up
        system/services state and finishes post-failover housekeeping.
        `fobj` is the snapshot from `generate_failover_data()`; `event` is
        'MASTER' or 'forcetakeover'. Sets `self.FAILOVER_RESULT` and
        returns it; raises `IgnoreFailoverEvent`, `FencedError` or
        `AllZpoolsFailedToImport` on the respective failure paths.
        """
        # vrrp does the "election" for us. If we've gotten this far
        # then the specified timeout for NOT receiving an advertisement
        # has elapsed. Setting the progress to ELECTING is to prevent
        # extensive API breakage with the platform indepedent failover plugin
        # as well as the front-end (webUI) even though the term is misleading
        # in this use case
        job.set_progress(None, description='ELECTING')
        # Attach NVMe/RoCE - wait up to 10 seconds
        logger.info('Start bring up of NVMe/RoCE')
        try:
            # Request fenced_reload just in case the job does not complete in time
            jbof_job = self.run_call('jbof.configure_job', True)
            jbof_job.wait_sync(timeout=10)
            if jbof_job.error:
                logger.error(f'Error attaching JBOFs: {jbof_job.error}')
            elif jbof_job.result['failed']:
                logger.error(f'Failed to attach JBOFs:{jbof_job.result["message"]}')
            else:
                logger.info(jbof_job.result['message'])
        except TimeoutError:
            logger.error('Timed out attaching JBOFs - will continue in background')
        except Exception:
            logger.error('Unexpected error', exc_info=True)
        else:
            logger.info('Done bring up of NVMe/RoCE')
        fenced_error = None
        if event == 'forcetakeover':
            # reserve the disks forcefully ignoring if the other node has the disks
            logger.warning('Forcefully taking over as the MASTER node.')
            # need to stop fenced just in case it's running already
            logger.warning('Forcefully stopping fenced')
            self.run_call('failover.fenced.stop')
            logger.warning('Done forcefully stopping fenced')
            logger.warning('Forcefully starting fenced')
            fenced_error = self.run_call('failover.fenced.start', True)
            logger.warning('Done forcefully starting fenced')
        else:
            # if we're here then we need to check a couple things before we start fenced
            # and start the process of becoming master
            #
            #   1. if the interface that we've received a MASTER event for is
            #       in a failover group with other interfaces and ANY of the
            #       other members in the failover group are still BACKUP,
            #       then we need to ignore the event.
            #
            #   TODO: Not sure how keepalived and laggs operate so need to test this
            #       (maybe the event only gets triggered if the lagg goes down)
            #
            logger.info('Checking VIP failover groups')
            _, backups, offline = self.run_call(
                'failover.vip.check_failover_group', ifname, fobj['groups']
            )
            logger.info('Done checking VIP failover groups')
            if offline:
                # this isn't common but we're very verbose in this file so let's
                # log the offline interfaces while we're here
                logger.warning('Offline interfaces detected: %r', ', '.join(offline))
            # this means that we received a master event and the interface was
            # in a failover group. And in that failover group, there were other
            # interfaces that were still in the BACKUP state which means the
            # other node has them as MASTER so ignore the event.
            if backups:
                logger.warning(
                    'Received MASTER event for %r, but other '
                    'interfaces (%s) are still working on the '
                    'MASTER node. Ignoring event.', ifname, ', '.join(backups),
                )
                job.set_progress(None, description='IGNORED')
                raise IgnoreFailoverEvent()
            logger.warning('Entering MASTER on "%s".', ifname)
            # need to stop fenced just in case it's running already
            logger.warning('Stopping fenced')
            self.run_call('failover.fenced.stop')
            logger.warning('Done stopping fenced')
            logger.warning('Restarting fenced')
            fenced_error = self.fenced_start_loop()
            logger.warning('Done restarting fenced')
        # starting fenced daemon failed....which is bad
        # emit an error and exit
        if fenced_error != 0:
            if fenced_error == 1:
                logger.error('Failed to register keys on disks, exiting!')
            elif fenced_error == 2:
                logger.error('Fenced is running on the remote node, exiting!')
            elif fenced_error == 3:
                logger.error('10% or more of the disks failed to be reserved, exiting!')
            elif fenced_error == 5:
                logger.error('Fenced encountered an unexpected fatal error, exiting!')
            else:
                logger.error(f'Fenced exited with code "{fenced_error}" which should never happen, exiting!')
            job.set_progress(None, description='ERROR')
            raise FencedError()
        # fenced is now running, so we *are* the ACTIVE/MASTER node
        # if 2x interfaces are in the same failover group and 1 of them goes
        # down, the VIP will float to the other controller. However, a failover
        # won't happen because the other interface is still UP on the master.
        # If the down'ed interface comes back online, the VIP needs to float
        # back to the original master controller. Reloading keepalived service
        # re-generates the configuration file which ensures the config has the
        # right priority set.
        logger.info('Pausing failover event processing')
        self.run_call('vrrpthread.pause_events')
        logger.info('Taking ownership of all VIPs')
        self.run_call('service.reload', 'keepalived', self.HA_PROPAGATE)
        logger.info('Unpausing failover event processing')
        self.run_call('vrrpthread.unpause_events')
        logger.info('Done unpausing failover event processing')
        # Kick off a job to clean up any left-over ALUA state from when we were STANDBY/BACKUP.
        logger.info('Verifying iSCSI service')
        iscsi_suspended = iscsi_cleaned = False
        if self.run_call('service.started_or_enabled', 'iscsitarget'):
            logger.info('Checking if ALUA is enabled')
            handle_alua = self.run_call('iscsi.global.alua_enabled')
            logger.info('Done checking if ALUA is enabled')
            if handle_alua:
                iscsi_suspended, iscsi_cleaned = self.iscsi_cleanup_alua_state()
        else:
            handle_alua = False
        logger.info('Done verifying iSCSI service')
        if not fobj['volumes']:
            # means we received a master event but there are no zpools to import
            # (happens when the box is initially licensed for HA and being setup)
            # there is nothing else to do so just log a warning and return early
            logger.warning('No zpools to import, exiting failover event')
            self.FAILOVER_RESULT = 'INFO'
            return self.FAILOVER_RESULT
        # unlock SED disks
        logger.info('Unlocking all SED disks (if any)')
        maybe_unlocked = False
        try:
            maybe_unlocked = self.run_call('disk.sed_unlock_all', True)
        except Exception as e:
            # failing here doesn't mean the zpool won't import
            # we could have failed on only 1 disk so log an
            # error and move on
            logger.error('Failed to unlock SED disk(s) with error: %r', e)
        if maybe_unlocked:
            logger.info('Done unlocking all SED disks (if any)')
        try:
            logger.info('Retasting disks on standby node')
            self.run_call('failover.call_remote', 'disk.retaste', [], {'raise_connect_error': False})
            logger.info('Done retasting disks on standby node')
        except Exception:
            logger.exception('Unexpected failure retasting disks on standby node')
        # setup the zpool cachefile TODO: see comment below about cachefile usage
        # self.run_call('failover.zpool.cachefile.setup', 'MASTER')
        # set the progress to IMPORTING
        job.set_progress(None, description='IMPORTING')
        failed = []
        options = {'altroot': '/mnt'}
        import_options = {'missing_log': True}
        any_host = True
        # TODO: maintaing zpool cachefile is very fragile and can
        # ruin the ability to successfully import a zpool on failover
        # event.... Until we can truly dig into this problem, we'll
        # ignore the cache file for now
        # cachefile = ZPOOL_CACHE_FILE
        new_name = cachefile = None
        for vol in fobj['volumes']:
            logger.info('Importing %r', vol['name'])
            # import the zpool(s)
            try_again = False
            try:
                self.run_call(
                    'zfs.pool.import_pool', vol['guid'], options, any_host, cachefile, new_name, import_options
                )
            except Exception as e:
                # NOTE(review): assumes middleware surfaces an `errno`
                # attribute on this exception — confirm against CallError
                if e.errno == errno.ENOENT:
                    try_again = True
                    # logger.warning('Failed importing %r using cachefile so trying without it.', vol['name'])
                    logger.warning('Failed importing %r with ENOENT.', vol['name'])
                else:
                    vol['error'] = str(e)
                    failed.append(vol)
                    continue
            else:
                logger.info('Successfully imported %r', vol['name'])
            if try_again:
                # means the cachefile is "stale" or invalid which will prevent
                # an import so let's try to import without it
                logger.warning('Retrying import of %r', vol['name'])
                try:
                    self.run_call(
                        'zfs.pool.import_pool', vol['guid'], options, any_host, None, new_name, import_options
                    )
                except Exception as e:
                    vol['error'] = str(e)
                    failed.append(vol)
                    continue
                else:
                    logger.info('Successful retry import of %r', vol['name'])
            # TODO: come back and fix this once we figure out how to properly manage zpool cachefile
            # (i.e. we need a cachefile per zpool, and not a global one)
            """
            try:
                # make sure the zpool cachefile property is set appropriately
                self.run_call(
                    'zfs.pool.update', vol['name'], {'properties': {'cachefile': {'value': ZPOOL_CACHE_FILE}}}
                )
            except Exception:
                logger.warning('Failed to set cachefile property for %r', vol['name'], exc_info=True)
            """
            # If root dataset was encrypted, it would not be mounted at this point regardless of it being
            # key/passphrase encrypted - so we make sure that nothing at this point in time is mounted beneath it
            # if that pool has datasets which are unencrypted
            logger.info('Handling unencrypted datasets on import (if any) for %r', vol['name'])
            self.run_call('pool.handle_unencrypted_datasets_on_import', vol['name'])
            logger.info('Successfully handled unencrypted datasets on import (if any) for %r', vol['name'])
            # try to unlock the zfs datasets (if any)
            logger.info('Unlocking zfs datasets (if any) for %r', vol['name'])
            unlock_job = self.run_call('failover.unlock_zfs_datasets', vol['name'])
            unlock_job.wait_sync()
            if unlock_job.error:
                logger.error(f'Error unlocking ZFS encrypted datasets: {unlock_job.error}')
            elif unlock_job.result['failed']:
                logger.error('Failed to unlock %s ZFS encrypted dataset(s)', ','.join(unlock_job.result['failed']))
            else:
                logger.info('Successfully completed unlock for %r', vol['name'])
        # if we fail to import all zpools then alert the user because nothing
        # is going to work at this point
        if len(failed) == len(fobj['volumes']):
            for i in failed:
                logger.error(
                    'Failed to import volume with name %r with guid %r with error:\n %r',
                    i['name'], i['guid'], i['error'],
                )
            logger.error('All volumes failed to import!')
            job.set_progress(None, description='ERROR')
            raise AllZpoolsFailedToImport()
        elif len(failed):
            # if we fail to import any of the zpools then alert the user but continue the process
            for i in failed:
                logger.error(
                    'Failed to import volume with name %r with guid %r with error:\n %r',
                    i['name'], i['guid'], i['error'],
                )
                logger.error(
                    'However, other zpools imported so the failover process continued.'
                )
        else:
            logger.info('Volume imports complete')
        # Now that the volumes have been imported, get a head-start on activating extents.
        if handle_alua and iscsi_cleaned:
            logger.info('Activating ALUA extents')
            self.run_call('iscsi.alua.activate_extents')
            logger.info('Done activating ALUA extents')
        # need to make sure failover status is updated in the middleware cache
        logger.info('Refreshing failover status')
        self.run_call('failover.status_refresh')
        logger.info('Done refreshing failover status')
        # this enables all necessary services that have been enabled by the user
        logger.info('Enabling necessary services')
        self.run_call('etc.generate', 'rc')
        logger.info('Done enabling necessary services')
        logger.info('Configuring system dataset')
        self.run_call('systemdataset.setup')
        logger.info('Done configuring system dataset')
        # now we restart the services, prioritizing the "critical" services
        logger.info('Restarting critical services.')
        self.run_call('failover.events.restart_services', {'critical': True})
        logger.info('Done restarting critical services')
        # setup directory services. This is backgrounded job
        logger.info('Starting background job for directoryservices.setup')
        self.run_call('directoryservices.setup')
        logger.info('Done starting background job for directoryservices.setup')
        logger.info('Allowing network traffic.')
        fw_accept_job = self.run_call('failover.firewall.accept_all')
        fw_accept_job.wait_sync()
        if fw_accept_job.error:
            logger.error(f'Error allowing network traffic: {fw_accept_job.error}')
        else:
            logger.info('Done allowing network traffic.')
        logger.info('Critical portion of failover is now complete')
        # regenerate cron
        logger.info('Regenerating cron')
        self.run_call('etc.generate', 'cron')
        logger.info('Done regenerating cron')
        # sync disks is disabled on passive node
        logger.info('Syncing disks')
        self.run_call('disk.sync_all', {'zfs_guid': True})
        logger.info('Done syncing disks')
        if handle_alua:
            try:
                if iscsi_suspended:
                    logger.info('Clearing iSCSI suspend')
                    if self.run_call('iscsi.scst.clear_suspend'):
                        logger.info('Cleared iSCSI suspend')
                # Kick off a job to start clearing up HA targets from when we were STANDBY
                self.run_call('iscsi.alua.reset_active')
            except Exception:
                logger.exception('Failed to complete iSCSI bringup')
        # restart the remaining "non-critical" services
        logger.info('Restarting remaining services')
        self.run_call('failover.events.restart_services', {'critical': False, 'timeout': 60})
        logger.info('Done restarting remaining services')
        logger.info('Restarting reporting metrics')
        self.run_call('service.restart', 'netdata')
        logger.info('Done restarting reporting metrics')
        logger.info('Updating replication tasks')
        self.run_call('zettarepl.update_tasks')
        logger.info('Done updating replication tasks')
        logger.info('Temporarily blocking failover alerts')
        self.run_call('alert.block_failover_alerts')
        logger.info('Done temporarily blocking failover alerts')
        logger.info('Initializing alert system')
        self.run_call('alert.initialize', False)
        logger.info('Done initializing alert system')
        logger.info('Starting truecommand service (if necessary)')
        self.run_call('truecommand.start_truecommand_service')
        logger.info('Done starting truecommand service (if necessary)')
        kmip_config = self.run_call('kmip.config')
        if kmip_config and kmip_config['enabled']:
            logger.info('Syncing encryption keys with KMIP server')
            # Even though we keep keys in sync, it's best that we do this as well
            # to ensure that the system is up to date with the latest keys available
            # from KMIP. If it's unaccessible, the already synced memory keys are used
            # meanwhile.
            self.run_call('kmip.initialize_keys')
            logger.info('Done syncing encryption keys with KMIP server')
        self.start_apps()
        self.start_virt()
        logger.info('Migrating interface information (if required)')
        self.run_call('interface.persist_link_addresses')
        logger.info('Done migrating interface information (if required)')
        try:
            logger.info('Updating HA reboot info')
            self.run_call('failover.reboot.info')
        except Exception:
            logger.warning('Failed to update reboot info', exc_info=True)
        else:
            logger.info('Done updating HA reboot info')
        logger.info('Failover event complete.')
        # clear the description and set the result
        job.set_progress(None, description='SUCCESS')
        self.FAILOVER_RESULT = 'SUCCESS'
        return self.FAILOVER_RESULT
@job(lock=FAILOVER_LOCK_NAME)
def vrrp_backup(self, job, fobj, ifname, event):
    """Demote this controller to the BACKUP role after a VRRP BACKUP event.

    Steps (order matters: services must stop before pools are exported, and
    fenced must stop only after pools are exported so the other controller can
    claim the disks):
      1. ignore the event if other interfaces in the failover group are MASTER
      2. gracefully stop apps/virt/netdata, block traffic, stop keepalived
      3. export zpools (hard-rebooting via sysrq if export hangs)
      4. stop fenced, re-attach NVMe/RoCE, and bring standby services back up

    Returns 'SUCCESS' (also stored in self.FAILOVER_RESULT).  Raises
    IgnoreFailoverEvent when the event should not be acted upon.
    """
    # we need to check a couple things before we stop fenced
    # and start the process of becoming backup
    #
    # 1. if the interface that we've received a BACKUP event for is
    #    in a failover group with other interfaces and ANY of the
    #    other members in the failover group are still MASTER,
    #    then we need to ignore the event.
    #
    # TODO: Not sure how keepalived and laggs operate so need to test this
    # (maybe the event only gets triggered if the lagg goes down)
    #
    masters, _, offline = self.run_call(
        'failover.vip.check_failover_group', ifname, fobj['groups']
    )
    if offline:
        # this isn't common but we're very verbose in this file so let's
        # log the offline interfaces while we're here
        logger.warning('Offline interfaces detected: %r', ', '.join(offline))
    # this means that we received a BACKUP event and the interface was
    # in a failover group. And in that failover group, there were other
    # interfaces that were still in the MASTER state so ignore the event.
    if masters:
        logger.warning(
            'Received BACKUP event for %r, but other '
            'interfaces (%s) are still working. '
            'Ignoring event.', ifname, ', '.join(masters),
        )
        job.set_progress(None, description='IGNORED')
        raise IgnoreFailoverEvent()
    logger.warning('Entering BACKUP on "%s".', ifname)
    # We will try to give some time to docker to gracefully stop before zpools will be forcefully
    # exported. This is to avoid any potential data corruption.
    stop_docker_thread = threading.Thread(
        target=self.stop_apps,
        name='failover_stop_docker',
    )
    stop_docker_thread.start()
    # We will try to give some time to containers to gracefully stop before zpools will be forcefully
    # exported. This is to avoid any potential data corruption.
    stop_virt_thread = threading.Thread(
        target=self.stop_virt,
        name='failover_stop_virt',
    )
    stop_virt_thread.start()
    # We stop netdata before exporting pools because otherwise we might have erroneous stuff
    # getting logged and causing spam
    logger.info('Stopping reporting metrics')
    self.run_call('service.stop', 'netdata', self.HA_PROPAGATE)
    logger.info('Blocking network traffic.')
    fw_drop_job = self.run_call('failover.firewall.drop_all')
    fw_drop_job.wait_sync()
    if fw_drop_job.error:
        logger.error(f'Error blocking network traffic: {fw_drop_job.error}')
    # restarting keepalived sends a priority 0 advertisement
    # which means any VIP that is on this controller will be
    # migrated to the other controller
    logger.info('Pausing failover event processing')
    self.run_call('vrrpthread.pause_events')
    logger.info('Transitioning all VIPs off this node')
    self.run_call('service.stop', 'keepalived', self.HA_PROPAGATE)
    # ticket 23361 enabled a feature to send email alerts when an unclean reboot occurrs.
    # TrueNAS HA, by design, has a triggered unclean shutdown.
    # If a controller is demoted to standby, we set a 4 sec countdown using watchdog.
    # If the zpool(s) can't export within that timeframe, we use watchdog to violently reboot the controller.
    # When this occurrs, the customer gets an email about an "Unauthorized system reboot".
    # The idea for creating a new sentinel file for watchdog related panics,
    # is so that we can send an appropriate email alert.
    # So if we panic here, middleware will check for this file and send an appropriate email.
    # ticket 39114
    with contextlib.suppress(Exception):
        with open(WATCHDOG_ALERT_FILE, 'w') as f:
            f.write(f'{time.time()}')
            f.flush()  # be sure it goes straight to disk
            os.fsync(f.fileno())  # be EXTRA sure it goes straight to disk
    # setup the zpool cachefile
    # self.run_call('failover.zpool.cachefile.setup', 'BACKUP')
    # export zpools in a thread and set a timeout to
    # to `self.ZPOOL_EXPORT_TIMEOUT`.
    # if we can't export the zpool(s) in this timeframe,
    # we send the 'b' character to the /proc/sysrq-trigger
    # to trigger an immediate reboot of the system
    # https://www.kernel.org/doc/html/latest/admin-guide/sysrq.html
    export_thread = threading.Thread(
        target=self._export_zpools,
        name='failover_export_zpools',
        args=(fobj['volumes'], )
    )
    export_thread.start()
    export_thread.join(timeout=self.ZPOOL_EXPORT_TIMEOUT)
    if export_thread.is_alive():
        # have to enable the "magic" sysrq triggers
        with open('/proc/sys/kernel/sysrq', 'w') as f:
            f.write('1')
        # now violently reboot
        with open('/proc/sysrq-trigger', 'w') as f:
            f.write('b')
    # Pools are now exported and so we can make disks available to other controller
    logger.warning('Stopping fenced')
    self.run_call('failover.fenced.stop')
    # Now that fenced is stopped, attach NVMe/RoCE.
    logger.info('Start bring up of NVMe/RoCE')
    try:
        # Do not need to wait, nor request fenced_reload
        self.run_call('jbof.configure_job')
    except Exception:
        logger.error('Unexpected error', exc_info=True)
    # We also remove this file here, because on boot we become BACKUP if the other
    # controller is MASTER. So this means we have no volumes to export which means
    # the `self.ZPOOL_EXPORT_TIMEOUT` is honored.
    with contextlib.suppress(Exception):
        os.unlink(WATCHDOG_ALERT_FILE)
    logger.info('Refreshing failover status')
    self.run_call('failover.status_refresh')
    logger.info('Setting up system dataset')
    self.run_call('systemdataset.setup')
    logger.info('Regenerating cron')
    self.run_call('etc.generate', 'cron')
    self.run_call('truecommand.stop_truecommand_service')
    logger.info('Stopping NFS mountd service')
    self.run_call('service.stop', 'mountd')
    # we keep SSH running on both controllers (if it's enabled by user)
    filters = [['srv_service', '=', 'ssh']]
    options = {'get': True}
    if self.run_call('datastore.query', 'services.services', filters, options)['srv_enable']:
        logger.info('Restarting SSH')
        self.run_call('service.restart', 'ssh', self.HA_PROPAGATE)
    if self.run_call('iscsi.global.alua_enabled'):
        if self.run_call('service.started_or_enabled', 'iscsitarget'):
            logger.info('Starting iSCSI for ALUA')
            # Rewrite the scst.conf config to a clean slate state
            self.run_call('iscsi.alua.standby_write_empty_config', True)
            self.run_call('etc.generate', 'scst')
            # The most likely situation is that scst is not running
            if self.run_call('iscsi.scst.is_kernel_module_loaded'):
                self.run_call('service.restart', 'iscsitarget', self.HA_PROPAGATE)
            else:
                self.run_call('service.start', 'iscsitarget', self.HA_PROPAGATE)
    logger.info('Syncing encryption keys from MASTER node (if any)')
    try:
        self.run_call('failover.call_remote', 'failover.sync_keys_to_remote_node', [],
                      {'raise_connect_error': False})
    except Exception:
        logger.warning('Unhandled exception syncing keys from MASTER node', exc_info=True)
    try:
        self.run_call('failover.call_remote', 'interface.persist_link_addresses', [],
                      {'raise_connect_error': False})
    except Exception:
        logger.warning('Unhandled exception persisting network interface link addresses on MASTER node',
                       exc_info=True)
    logger.info('Starting VRRP daemon')
    self.run_call('service.start', 'keepalived', self.HA_PROPAGATE)
    logger.info('Unpausing failover event processing')
    self.run_call('vrrpthread.unpause_events')
    logger.info('Retasting disks (if required)')
    self.run_call('disk.retaste')
    logger.info('Done retasting disks (if required)')
    logger.info('Successfully became the BACKUP node.')
    self.FAILOVER_RESULT = 'SUCCESS'
    return self.FAILOVER_RESULT
def start_apps(self):
    """Start the docker/apps service and kick off the k8s->docker migration.

    The migration trigger is scheduled as a background task so this call
    does not block on it.
    """
    self.start_apps_impl()
    self.middleware.create_task(self.middleware.call('k8s_to_docker.trigger_migration'))
def start_apps_impl(self):
    """Start the docker service when a pool has been configured for apps.

    When no apps pool is configured, mark docker as UNCONFIGURED and bail
    out without attempting to start anything.
    """
    docker_pool = self.run_call('docker.config')['pool']
    if docker_pool:
        logger.info('Going to initialize apps plugin as %r pool is configured for apps', docker_pool)
        try:
            self.run_call('docker.state.start_service', True)
        except Exception:
            logger.error('Failed to start docker service', exc_info=True)
        else:
            logger.info('Docker service started successfully')
    else:
        self.middleware.call_sync('docker.state.set_status', DockerStatus.UNCONFIGURED.value)
        logger.info('Skipping starting apps as they are not configured')
def stop_apps(self):
    """Attempt a graceful shutdown of the docker service (best-effort).

    Failures are logged, not raised, since callers proceed with failover
    regardless.
    """
    # NOTE(review): this checks 'dataset' while start_apps_impl checks
    # 'pool' — presumably equivalent gates; confirm against docker.config.
    docker_dataset = self.middleware.call_sync('docker.config')['dataset']
    if docker_dataset:
        logger.info('Trying to gracefully stop docker service')
        try:
            self.run_call('service.stop', 'docker')
        except Exception:
            logger.error('Failed to stop docker service gracefully', exc_info=True)
        else:
            logger.info('Docker service stopped gracefully')
def start_virt(self):
    """Initialize the virtualization plugin and log the resulting state.

    Waits up to 10 seconds for `virt.global.setup` to complete.  A failed
    setup or an unexpected resulting state is logged; nothing is raised so
    the failover event can continue.
    """
    logger.info('Going to initialize virt plugin')
    job = self.run_call('virt.global.setup')
    job.wait_sync(timeout=10)
    if job.error:
        # A setup failure is a problem, not routine progress: log it at
        # warning level (consistent with stop_virt's failure handling).
        logger.warning('Failed to setup virtualization: %r', job.error)
    else:
        config = self.run_call('virt.global.config')
        if config['state'] == VirtStatus.INITIALIZED.value:
            # fixed typo in log message ("initalized")
            logger.info('Virtualization initialized.')
        elif config['state'] != VirtStatus.NO_POOL.value:
            # NO_POOL simply means virt isn't configured; anything else
            # unexpected gets surfaced here.
            logger.warning('Virtualization failed to initialize with state %r.', config['state'])
def stop_virt(self):
    """Reset the virtualization plugin state during demotion to BACKUP.

    Waits slightly longer than the instances' own 10-second stop timeout.
    Failures are logged (including the job error, which the previous
    version discarded) and never raised.
    """
    logger.info('Going to stop virt plugin')
    job = self.run_call('virt.global.reset')
    # virt instances have a timeout of 10 seconds to stop
    job.wait_sync(timeout=15)
    if job.error:
        # include the actual error so the failure is diagnosable from logs
        logger.warning('Failed to reset virtualization state: %r', job.error)
    else:
        # fixed grammar in log message ("resetted")
        logger.info('Virtualization has been successfully reset.')
async def vrrp_fifo_hook(middleware, data):
    """Relay a VRRP state change from the fifo to middleware.

    Publishes the 'failover.vrrp_event' event for subscribers and then
    runs the failover event handler for the affected interface.
    """
    iface, state = data['ifname'], data['event']
    middleware.send_event(
        'failover.vrrp_event',
        'CHANGED',
        fields={'ifname': iface, 'event': state},
    )
    await middleware.call('failover.events.event', iface, state)
def setup(middleware):
    """Plugin entry point: register the VRRP event and wire the fifo hook."""
    middleware.event_register('failover.vrrp_event', 'Sent when a VRRP state changes.')
    # vrrp.fifo fires for every VRRP state transition read from the fifo
    middleware.register_hook('vrrp.fifo', vrrp_fifo_hook)
| 45,927 | Python | .py | 890 | 39.996629 | 116 | 0.616613 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |