id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,700 | zpool_cachefile.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/zpool_cachefile.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from pathlib import Path
from middlewared.service import Service
from middlewared.schema import Str, accepts
from middlewared.plugins.pool_.utils import ZPOOL_CACHE_FILE
# Sidecar copies of the zpool cachefile used across failover transitions:
# `.saved` holds a snapshot taken before pool export (BACKUP event);
# `.overwrite` is a cachefile received from the other controller (SYNC event).
ZPOOL_CACHE_FILE_SAVED = f'{ZPOOL_CACHE_FILE}.saved'
ZPOOL_CACHE_FILE_OVERWRITE = f'{ZPOOL_CACHE_FILE}.overwrite'
class FailoverZpoolCacheFileService(Service):
    """Maintain zpool cachefile copies across HA failover events.

    zfs itself rewrites the default cachefile when a pool is exported
    (removing the pool's entry), so we snapshot it before export and
    restore it when this node becomes MASTER.
    """

    class Config:
        private = True
        namespace = 'failover.zpool.cachefile'

    @accepts(Str('event', enum=['MASTER', 'BACKUP', 'SYNC'], default='MASTER'))
    def setup(self, event):
        """Prepare the zpool cachefile for the given failover `event`.

        Best-effort: any failure is logged (with traceback) rather than
        raised so cachefile housekeeping never aborts a failover event.
        """
        saved = Path(ZPOOL_CACHE_FILE_SAVED)
        default = Path(ZPOOL_CACHE_FILE)
        overwrite = Path(ZPOOL_CACHE_FILE_OVERWRITE)
        se = saved.exists()
        de = default.exists()
        oe = overwrite.exists()
        try:
            if event == 'MASTER' and se:
                # we're becoming master which means on backup
                # event we modify the save cache file first and
                # if the pool is successfully exported then the
                # default cachefile is updated and the zpool entry
                # is removed from that file. This is done by zfs
                # itself and not us. That behavior is counter
                # intuitive to what we're trying to do so that's
                # why we save the cachefile before we export
                saved.rename(default)
            elif event == 'BACKUP' and de:
                # means we're becoming backup so we need to save
                # the zpool cachefile before we export the zpools
                saved.write_bytes(default.read_bytes())
            elif event == 'SYNC' and oe:
                # a zpool was created/updated on the active controller
                # and the newly created zpool cachefile was sent to this
                # controller so we need to overwrite
                overwrite.rename(default)
            default.touch(exist_ok=True)
            # idiomatic inequality (was `not event == 'BACKUP'`); the saved
            # copy must survive only while we are the backup node
            if event != 'BACKUP':
                saved.unlink(missing_ok=True)
            overwrite.unlink(missing_ok=True)
        except Exception:
            self.logger.warning('Failed setting up zpool cachefile', exc_info=True)
| 2,322 | Python | .py | 48 | 37.875 | 83 | 0.632833 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,701 | configure.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/configure.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.service import Service
HA_LICENSE_CACHE_KEY = 'LICENSED_FOR_HA'
class FailoverConfigureService(Service):

    class Config:
        namespace = 'failover.configure'
        private = True

    def license(self, dser_lic):
        """
        Propagate licensing state to both HA controllers.

        1. cache locally whether this is a HA license
        2. if this is a HA license:
            --ensure we populate IP of heartbeat iface for remote node
            --ensure we tell remote node to populate IP of heartbeat iface for local node
            --copy the license file to the remote node
            --invalidate the license cache on the remote node
            --enable/disable systemd services on the remote node
        """
        is_ha = bool(dser_lic.system_serial_ha)
        self.middleware.call_sync('cache.put', HA_LICENSE_CACHE_KEY, is_ha)
        if is_ha:
            try:
                self.middleware.call_sync('failover.ensure_remote_client')
            except Exception:
                # this is fatal because we can't determine what the remote ip address
                # is to so any failover.call_remote calls will fail
                self.logger.error('Failed to determine remote heartbeat IP address', exc_info=True)
                return
            try:
                self.middleware.call_sync('failover.call_remote', 'failover.ensure_remote_client')
            except Exception:
                # this is not fatal, so no reason to return early
                # it just means that any "failover.call_remote" calls initiated from the remote node
                # will fail but that shouldn't be happening anyways
                self.logger.warning('Remote node failed to determine this nodes heartbeat IP address', exc_info=True)
            try:
                self.middleware.call_sync('failover.send_small_file', self.middleware.call_sync('system.license_path'))
            except Exception:
                # FIX: message previously said "sync database" but this call
                # copies the license file (system.license_path) to the remote node
                self.logger.warning('Failed to send license file to remote node', exc_info=True)
                return
            try:
                self.middleware.call_sync('failover.call_remote', 'cache.pop', [HA_LICENSE_CACHE_KEY])
            except Exception:
                self.logger.warning('Failed to invalidate license cache on remote node', exc_info=True)
            try:
                self.middleware.call_sync('failover.call_remote', 'etc.generate', ['rc'])
            except Exception:
                self.logger.warning('etc.generate failed on standby', exc_info=True)
| 2,697 | Python | .py | 50 | 41.54 | 119 | 0.627228 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,702 | event_exceptions.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/event_exceptions.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
class AllZpoolsFailedToImport(Exception):
    """Raised when every zpool fails to import while becoming the MASTER node."""
class IgnoreFailoverEvent(Exception):
    """Raised to signal that a failover event should be ignored."""
class FencedError(Exception):
    """Raised when the fenced daemon fails to run."""
| 528 | Python | .py | 20 | 22.55 | 70 | 0.715706 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,703 | status.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/status.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.service import Service
class DetectFailoverStatusService(Service):

    class Config:
        private = True
        namespace = 'failover.status'

    async def get_local(self, app):
        """Determine this node's failover status from local information only.

        Returns 'SINGLE', 'MASTER', 'BACKUP', or a job progress description;
        falls through (implicit None) when no local determination can be made,
        in which case the caller is expected to query the remote node.
        """
        licensed = await self.middleware.call('failover.licensed')
        if not licensed:
            # no HA license so nothing else matters
            return 'SINGLE'
        master_ifaces = backup_ifaces = vips_configured = 0
        interfaces = await self.middleware.call('interface.query')
        # only consider interfaces that actually carry VRRP configuration
        for iface in filter(lambda x: x['state']['vrrp_config'], interfaces):
            vips_configured += len(iface.get('failover_virtual_aliases', []))
            # NOTE(review): this inner check is redundant — the filter above
            # already guarantees vrrp_config is truthy
            if iface['state']['vrrp_config']:
                for ip in iface['state']['vrrp_config']:
                    if ip['state'] == 'MASTER':
                        master_ifaces += 1
                    else:
                        # any non-MASTER vrrp state counts as backup here
                        backup_ifaces += 1
        if not vips_configured:
            # We have a license but we don't have a single interface that has been
            # configured with a VIP. It's safe to assume that this is a system that
            # has just been licensed for HA. To allow the user to login to the system
            # for initial HA configuration, we need to return SINGLE.
            # `failover.disabled.reasons` will reports lots of issues on why HA isn't
            # "healthy" in this scenario
            return 'SINGLE'
        elif master_ifaces and not backup_ifaces:
            # all interfaces that are configured for HA are master, safe to assume
            # this _should_ be the master node
            return 'MASTER'
        elif backup_ifaces and not master_ifaces:
            # all interfaces that are configured for HA are backup, safe to assume
            # this _should_ be the backup node
            return 'BACKUP'
        # mixed/ambiguous vrrp states: fall back to fenced + zpool heuristics
        fenced_running = (await self.middleware.call('failover.fenced.run_info'))['running']
        only_boot_pool = len((await self.middleware.call('zfs.pool.query_imported_fast'))) <= 1
        if not fenced_running:
            if only_boot_pool:
                # we only have boot pool, fenced is not running, and we're licensed
                # safe to assume we're the backup node
                return 'BACKUP'
            else:
                # we have at least 1 zpool imported, but fenced is not running and we're licensed
                # ...that's not good but it's safe to return MASTER. failover.disabled.reasons
                # will return NO_FENCED will cause alerts and warnings and emails to be sent to
                # end-user
                return 'MASTER'
        elif not only_boot_pool:
            # we have at least 1 zpool imported, fenced is running, and we're licensed
            # safe to assume we're the master node
            return 'MASTER'
        # last ditch effort to determine the status of this node. if there are
        # no failover events occurring locally and we make it this far, the caller
        # of this method will check the remote system which is slow...but have no
        # option at that point. Note: we shouldn't get here ideally because calling
        # core.get_jobs is not known for being "performant" especially as more jobs
        # accumulate as uptime increases
        filters = [('method', '=', 'failover.event.vrrp_master')]
        options = {'order_by': ['-id']}
        for i in await self.middleware.call('core.get_jobs', filters, options):
            if i['state'] == 'RUNNING':
                # we're currently becoming master node
                return i['progress']['description']
            elif i['progress']['description'] == 'ERROR':
                # last failover failed
                # NOTE(review): comparing the progress *description* to 'ERROR'
                # (rather than the job state) looks unusual — confirm intended
                return i['progress']['description']
| 3,940 | Python | .py | 72 | 42.819444 | 97 | 0.612594 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,704 | ha_hardware.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/ha_hardware.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import enum
import re
class HA_HARDWARE(enum.Enum):
    """
    The echostream E16 JBOD and the echostream Z-series chassis
    are the same piece of hardware. One of the only ways to differentiate
    them is to look at the enclosure elements in detail. The Z-series
    chassis identifies element 0x26 as `ZSERIES_ENCLOSURE` listed below.
    The E16 JBOD does not. The E16 identifies element 0x25 as NM_3115RL4WB66_8R5K5.
    We use this fact to ensure we are looking at the internal enclosure, and
    not a shelf. If we used a shelf to determine which node was A or B, you could
    cause the nodes to switch identities by switching the cables for the shelf.
    """
    # Z-series: internal enclosure marker and node (A/B) pattern
    ZSERIES_ENCLOSURE = re.compile(r'SD_9GV12P1J_12R6K4', re.M)
    ZSERIES_NODE = re.compile(r'3U20D-Encl-([AB])', re.M)
    # X-series: enclosure model and per-node identifier patterns
    XSERIES_ENCLOSURE = re.compile(r'\s*CELESTIC\s*(P3215-O|P3217-B)', re.M)
    XSERIES_NODEA = re.compile(r'ESCE A_(5[0-9A-F]{15})', re.M)
    XSERIES_NODEB = re.compile(r'ESCE B_(5[0-9A-F]{15})', re.M)
    # M-series: enclosure model; second group captures 'p' or 's' suffix
    MSERIES_ENCLOSURE = re.compile(r'\s*(ECStream|iX)\s*4024S([ps])', re.M)
| 1,247 | Python | .py | 23 | 50.173913 | 83 | 0.722496 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,705 | nftables.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/nftables.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from subprocess import run, PIPE, STDOUT
from middlewared.service import Service, accepts, job, CallError
# path where the generated nftables ruleset is written before atomic load via `nft -f`
FW_RULES_FILE = '/tmp/fw-rules.nft'
# job lock name so drop_all/accept_all can never run concurrently
JOB_LOCK = 'firewall_rules_update'
class NftablesService(Service):

    class Config:
        namespace = 'failover.firewall'
        private = True

    def generate_rules(self, data):
        """Generate a list of v4 and v6 firewall rules and apply them to nftables.

        `data` is a dict {'drop': bool, 'vips': [...]}. When 'drop' is true,
        inbound traffic to each VIP is dropped while SSH and webUI ports stay
        open. Returns True on success; raises CallError on write/load failure.
        """
        if data['drop']:
            # only needed when building drop rules
            sshport = self.middleware.call_sync('ssh.config')['tcpport']
            web = self.middleware.call_sync('system.general.config')
        for i in ('ip', 'ip6'):
            # base table/chains with default-accept policy for this family
            rules = [
                f'add table {i} filter',
                f'add chain {i} filter INPUT {{ type filter hook input priority 0; policy accept; }}',
                f'add chain {i} filter FORWARD {{ type filter hook forward priority 0; policy accept; }}',
                f'add chain {i} filter OUTPUT {{ type filter hook output priority 0; policy accept; }}',
            ]
            if data['drop']:
                # we always allow ssh and webUI access when limiting inbound connections
                rules.append(f'add rule {i} filter INPUT tcp dport {sshport} counter accept')
                rules.append(f'add rule {i} filter INPUT tcp dport {web["ui_port"]} counter accept')
                rules.append(f'add rule {i} filter INPUT tcp dport {web["ui_httpsport"]} counter accept')
            for j in data['vips']:
                # only block the VIPs because there is the possibility of
                # running MPIO for iSCSI which uses the non-VIP addresses of
                # each controller on an HA system. We, obviously, dont want
                # to block traffic there.
                if j['type'] == 'INET' and i == 'ip':
                    rules.append(f'add rule {i} filter INPUT {i} saddr {j["address"]}/32 counter drop')
                elif j['type'] == 'INET6' and i == 'ip6':
                    rules.append(f'add rule {i} filter INPUT {i} saddr {j["address"]}/128 counter drop')
            if i == 'ip':
                v4 = rules
            else:
                v6 = rules
        # now we write the rulesets to a file
        try:
            with open(FW_RULES_FILE, 'w+') as f:
                f.write('\n'.join(v4 + v6))  # combine the rules into a single ruleset
        except Exception as e:
            raise CallError(f'Failed writing {FW_RULES_FILE!r} with error {e}')
        # finally, we load the rulesets into nftables
        # note: this is an atomic operation (-f) so we don't need to worry about obscure race conditions
        rv = run(['nft', '-f', FW_RULES_FILE], stdout=PIPE, stderr=STDOUT)
        if rv.returncode:
            raise CallError(f'Failed restoring firewall rules: {rv.stdout}')
        return True

    def flush_chain_INPUT(self):
        """Flush the INPUT chain for both families; True if both succeeded.

        NOTE(review): the v4 call omits the address family (nft defaults to
        'ip') while the v6 call passes 'ip6' explicitly — confirm intended.
        """
        ip4_flush = not bool(run(['nft', 'flush', 'chain', 'filter', 'INPUT']).returncode)
        ip6_flush = not bool(run(['nft', 'flush', 'chain', 'ip6', 'filter', 'INPUT']).returncode)
        return ip4_flush and ip6_flush

    @accepts()
    @job(lock=JOB_LOCK)
    def drop_all(self, job):
        """
        Drops (silently) all v4/v6 inbound traffic destined for the
        VIP addresses on a TrueNAS SCALE HA system. SSH and webUI
        mgmt traffic is always allowed.
        NOTE:
            Do not call this unless you know what
            you're doing or you can cause a service
            disruption.
        """
        if not self.middleware.call_sync('failover.licensed'):
            return False
        vips = []
        for i in self.middleware.call_sync('interface.query'):
            for j in i.get('failover_virtual_aliases', []):
                vips.append(j)
        if not vips:
            raise CallError('No VIP addresses detected on system')
        # flush failure is logged but not fatal — new ruleset is still loaded
        if not self.flush_chain_INPUT():
            self.logger.error('Failed flushing INPUT chain')
        return self.generate_rules({'drop': True, 'vips': vips})

    @accepts()
    @job(lock=JOB_LOCK)
    def accept_all(self, job):
        """Accepts all v4/v6 inbound traffic"""
        if not self.middleware.call_sync('failover.licensed'):
            return False
        if not self.flush_chain_INPUT():
            self.logger.error('Failed flushing INPUT chain')
        return self.generate_rules({'drop': False, 'vips': []})
| 4,585 | Python | .py | 90 | 39.744444 | 108 | 0.59222 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,706 | scheduled_reboot_alert.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/scheduled_reboot_alert.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import datetime
import os
# sentinel files containing an epoch timestamp; their presence at boot means
# the previous reboot was triggered by the watchdog or by fenced, respectively
WATCHDOG_ALERT_FILE = "/data/sentinels/.watchdog-alert"
FENCED_ALERT_FILE = "/data/sentinels/.fenced-alert"
def get_fqdn(middleware):
    """Return this node's fully-qualified hostname (hostname plus domain if one is set)."""
    config = middleware.call_sync("datastore.config", "network.globalconfiguration")
    node = middleware.call_sync("failover.node")
    hostname_key = "gc_hostname_b" if node == "B" else "gc_hostname"
    hostname = config[hostname_key]
    domain = config["gc_domain"]
    if domain:
        return f"{hostname}.{domain}"
    return hostname
def get_sentinel_files_time_and_clean_them_up(middleware):
    """Read (then remove) the watchdog/fenced sentinel files.

    Returns a 2-tuple ``(watchdog_time, fenced_time)`` of epoch timestamps
    (floats), each ``None`` when the respective sentinel was absent or
    unparseable. Successfully read sentinels are deleted so an alert is
    raised at most once per reboot.
    """
    watchdog_time = fenced_time = None
    try:
        # ensure the sentinel directory exists for future writers
        os.makedirs(os.path.dirname(FENCED_ALERT_FILE), exist_ok=True)
    except Exception:
        # typo fixed: "exceptin" -> "exception"
        middleware.logger.error('Unhandled exception creating sentinels directory', exc_info=True)
    else:
        for idx, i in enumerate((WATCHDOG_ALERT_FILE, FENCED_ALERT_FILE)):
            try:
                with open(i) as f:
                    time = float(f.read().strip())
                if idx == 0:
                    watchdog_time = time
                else:
                    fenced_time = time
                # if file exists, we've gotten the time from it so remove it
                os.unlink(i)
            except (FileNotFoundError, ValueError):
                pass
    return watchdog_time, fenced_time
def setup_impl(middleware):
    """Raise (or clear) reboot-cause alerts based on sentinel files left by
    the watchdog/fenced before the previous reboot."""
    # only run once, during boot, and only on HA-licensed systems
    if not middleware.call_sync("core.is_starting_during_boot") or not middleware.call_sync("failover.licensed"):
        return
    now = datetime.datetime.now().strftime("%c")
    fqdn = get_fqdn(middleware)
    watchdog_time, fenced_time = get_sentinel_files_time_and_clean_them_up(middleware)
    if watchdog_time and (not fenced_time or watchdog_time > fenced_time):
        # watchdog sentinel is newest -> the reboot was watchdog-induced
        middleware.call_sync("alert.oneshot_create", "FailoverReboot", {'fqdn': fqdn, 'now': now})
    elif fenced_time:
        # fenced sentinel is newest -> the reboot was fenced-induced
        middleware.call_sync("alert.oneshot_create", "FencedReboot", {'fqdn': fqdn, 'now': now})
    else:
        # no sentinels: clear any stale reboot alerts from a previous boot
        middleware.call_sync("alert.oneshot_delete", "FencedReboot")
        middleware.call_sync("alert.oneshot_delete", "FailoverReboot")
async def setup(middleware):
    # plugin entry point: run the blocking sentinel/alert logic off the event loop
    await middleware.run_in_thread(setup_impl, middleware)
| 2,308 | Python | .py | 49 | 38.959184 | 113 | 0.657016 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,707 | disabled_reasons.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/disabled_reasons.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from enum import Enum
from middlewared.schema import accepts, returns, List, Str
from middlewared.service import Service, CallError, pass_app, no_auth_required, private
from middlewared.plugins.interface.netif import netif
from middlewared.utils.zfs import query_imported_fast_impl
class DisabledReasonsEnum(str, Enum):
    """Reasons failover may be disabled/unhealthy.

    Member *names* are the machine-readable tokens returned by
    `failover.disabled.reasons`; member *values* are the human-readable
    explanations.
    """
    NO_CRITICAL_INTERFACES = 'No network interfaces are marked critical for failover.'
    MISMATCH_DISKS = 'The quantity of disks do not match between the nodes.'
    MISMATCH_VERSIONS = 'TrueNAS software versions do not match between storage controllers.'
    MISMATCH_NICS = 'Network interfaces do not match between storage controllers.'
    DISAGREE_VIP = 'Nodes Virtual IP states do not agree.'
    NO_LICENSE = 'Other node has no license.'
    NO_FAILOVER = 'Administratively Disabled.'
    NO_PONG = 'Unable to contact remote node via the heartbeat interface.'
    NO_VOLUME = 'No zpools have been configured.'
    NO_VIP = 'No interfaces have been configured with a Virtual IP.'
    NO_SYSTEM_READY = 'Other node has not finished booting.'
    NO_FENCED = 'Fenced is not running.'
    REM_FAILOVER_ONGOING = 'Other node is currently processing a failover event.'
    LOC_FAILOVER_ONGOING = 'This node is currently processing a failover event.'
    NO_HEARTBEAT_IFACE = 'Local heartbeat interface does not exist.'
    NO_CARRIER_ON_HEARTBEAT = 'Local heartbeat interface is down.'
    LOC_FIPS_REBOOT_REQ = 'This node needs to be rebooted to apply FIPS configuration'
    REM_FIPS_REBOOT_REQ = 'Other node needs to be rebooted to apply FIPS configuration'
    LOC_SYSTEM_DATASET_MIGRATION_IN_PROGRESS = 'This node is currently configuring the system dataset'
    REM_SYSTEM_DATASET_MIGRATION_IN_PROGRESS = 'Other node is currently configuring the system dataset'
class FailoverDisabledReasonsService(Service):

    class Config:
        cli_namespace = 'system.failover.disabled'
        namespace = 'failover.disabled'

    # last reported reason set; used so the 'failover.disabled.reasons'
    # event is only emitted when the set actually changes
    LAST_DISABLED_REASONS = None
    # toggled by update_systemdataset_state() via the sysdataset.setup hook
    SYSTEM_DATASET_SETUP_IN_PROGRESS = False

    @no_auth_required
    @accepts()
    @returns(List('reasons', items=[Str('reason')]))
    @pass_app()
    def reasons(self, app):
        """Returns a list of reasons why failover is not enabled/functional.
        See `DisabledReasonsEnum` for the reasons and their explanation.
        """
        reasons = self.middleware.call_sync('failover.disabled.get_reasons', app)
        if reasons != FailoverDisabledReasonsService.LAST_DISABLED_REASONS:
            # reason set changed: remember it and notify event subscribers
            FailoverDisabledReasonsService.LAST_DISABLED_REASONS = reasons
            self.middleware.send_event(
                'failover.disabled.reasons', 'CHANGED',
                fields={'disabled_reasons': list(reasons)}
            )
        return list(reasons)

    @private
    def heartbeat_health(self, app, reasons):
        """Add reasons if the dedicated heartbeat interface is missing or has no link."""
        try:
            heartbeat_iface_name = self.middleware.call_sync('failover.internal_interface.detect')[0]
        except IndexError:
            # if something calls this directly from cli on a non-ha machine, don't
            # crash since it's easily avoided
            return
        try:
            iface = netif.list_interfaces()[heartbeat_iface_name]
            if iface.link_state != 'LINK_STATE_UP':
                reasons.add(DisabledReasonsEnum.NO_CARRIER_ON_HEARTBEAT.name)
        except KeyError:
            # saw this on an internal m50 because the systemd-modules-load.service
            # timed out and was subsequently killed so the ntb kernel module didn't
            # get loaded
            reasons.add(DisabledReasonsEnum.NO_HEARTBEAT_IFACE.name)

    @private
    def get_local_reasons(self, app, ifaces, reasons):
        """This method checks the local node to try and determine its failover status."""
        if self.middleware.call_sync('failover.config')['disabled']:
            reasons.add(DisabledReasonsEnum.NO_FAILOVER.name)
        if self.middleware.call_sync('failover.in_progress'):
            reasons.add(DisabledReasonsEnum.LOC_FAILOVER_ONGOING.name)
            # no reason to check anything else since failover
            # is happening on this system
            return
        reboot_info = self.middleware.call_sync('failover.reboot.info')
        if reboot_info['this_node']['reboot_required_reasons']:
            reasons.add(DisabledReasonsEnum.LOC_FIPS_REBOOT_REQ.name)
        if reboot_info['other_node'] is not None and reboot_info['other_node']['reboot_required_reasons']:
            reasons.add(DisabledReasonsEnum.REM_FIPS_REBOOT_REQ.name)
        if self.SYSTEM_DATASET_SETUP_IN_PROGRESS:
            reasons.add(DisabledReasonsEnum.LOC_SYSTEM_DATASET_MIGRATION_IN_PROGRESS.name)
        self.heartbeat_health(app, reasons)
        crit_iface = vip = master = False
        for iface in ifaces:
            if iface['failover_critical']:
                # only need 1 interface marked critical for failover
                crit_iface = True
            if iface['failover_virtual_aliases']:
                # only need 1 interface with a virtual IP
                vip = True
            if any((i['state'] == 'MASTER' for i in iface['state'].get('vrrp_config') or [])):
                # means this interface is MASTER
                master = True
        if not crit_iface:
            reasons.add(DisabledReasonsEnum.NO_CRITICAL_INTERFACES.name)
        elif not vip:
            reasons.add(DisabledReasonsEnum.NO_VIP.name)
        elif master:
            fenced_running = self.middleware.call_sync('failover.fenced.run_info')['running']
            num_of_zpools_imported = len(query_imported_fast_impl())
            if num_of_zpools_imported > 1:
                # boot pool is returned by default which is why we check > 1
                if not fenced_running:
                    # zpool(s) imported but fenced isn't running which is bad
                    reasons.add(DisabledReasonsEnum.NO_FENCED.name)
            else:
                # we've got interfaces marked as master but we have no zpool(s) imported
                reasons.add(DisabledReasonsEnum.NO_VOLUME.name)

    @private
    def get_remote_reasons(self, app, ifaces, reasons):
        """This method checks the remote node to try and determine its failover status."""
        try:
            assert self.middleware.call_sync('failover.remote_connected')
            if not self.middleware.call_sync('failover.call_remote', 'system.ready', [], {'timeout': 5}):
                # if the remote node panic's (this happens on failover event if we cant export the
                # zpool in 4 seconds (linux reboots silently by design) then the p2p interface stays
                # "UP" and the websocket remains open. At this point, we have to wait for the TCP
                # timeout (60 seconds default). This means the assert line up above will return `True`.
                # However, any `call_remote` method will hang because the websocket is still
                # open but hasn't closed due to the default TCP timeout window. This can be painful
                # on failover events because it delays the process of restarting services in a timely
                # manner. To work around this, we place a `timeout` of 5 seconds on the system.ready
                # call. This essentially bypasses the TCP timeout window.
                reasons.add(DisabledReasonsEnum.NO_SYSTEM_READY.name)
            if not self.middleware.call_sync('failover.call_remote', 'failover.licensed'):
                reasons.add(DisabledReasonsEnum.NO_LICENSE.name)
            # local vs remote software versions must match
            lsw = self.middleware.call_sync('system.version')
            rsw = self.middleware.call_sync('failover.call_remote', 'system.version')
            if lsw != rsw:
                reasons.add(DisabledReasonsEnum.MISMATCH_VERSIONS.name)
            if self.middleware.call_sync('failover.call_remote', 'failover.in_progress'):
                reasons.add(DisabledReasonsEnum.REM_FAILOVER_ONGOING.name)
            local = self.middleware.call_sync('failover.vip.get_states', ifaces)
            remote = self.middleware.call_sync('failover.call_remote', 'failover.vip.get_states')
            if self.middleware.call_sync('failover.vip.check_states', local, remote):
                reasons.add(DisabledReasonsEnum.DISAGREE_VIP.name)
            mismatch_disks = self.middleware.call_sync('failover.mismatch_disks')
            if mismatch_disks['missing_local'] or mismatch_disks['missing_remote']:
                reasons.add(DisabledReasonsEnum.MISMATCH_DISKS.name)
            mismatch_nics = self.middleware.call_sync('failover.mismatch_nics')
            if mismatch_nics['missing_local'] or mismatch_nics['missing_remote']:
                reasons.add(DisabledReasonsEnum.MISMATCH_NICS.name)
            # short timeouts here so a busy/unreachable standby can't stall us
            if self.middleware.call_sync(
                'failover.call_remote', 'failover.disabled.get_systemdataset_state', [],
                {'raise_connect_error': False, 'timeout': 2, 'connect_timeout': 2}
            ):
                reasons.add(DisabledReasonsEnum.REM_SYSTEM_DATASET_MIGRATION_IN_PROGRESS.name)
        except Exception:
            # any failure talking to the other node is reported as "no pong"
            reasons.add(DisabledReasonsEnum.NO_PONG.name)

    @private
    def get_reasons(self, app):
        """Collect local and remote disabled reasons (empty set when not HA licensed)."""
        reasons = set()
        if self.middleware.call_sync('failover.licensed'):
            ifaces = self.middleware.call_sync('interface.query')
            self.get_local_reasons(app, ifaces, reasons)
            self.get_remote_reasons(app, ifaces, reasons)
        return reasons

    @private
    async def update_systemdataset_state(self, in_progress):
        # called from the sysdataset.setup hook (see module-level setup below)
        self.SYSTEM_DATASET_SETUP_IN_PROGRESS = in_progress

    @private
    async def get_systemdataset_state(self):
        return self.SYSTEM_DATASET_SETUP_IN_PROGRESS
async def systemdataset_setup_hook(middleware, data):
    # relay system-dataset setup progress into the disabled-reasons service
    await middleware.call('failover.disabled.update_systemdataset_state', data['in_progress'])
async def setup(middleware):
    # register the event emitted by reasons() and hook system dataset setup
    middleware.event_register(
        'failover.disabled.reasons', 'Sent when failover status reasons change.', no_auth_required=True
    )
    middleware.register_hook('sysdataset.setup', systemdataset_setup_hook)
| 10,314 | Python | .py | 175 | 48.588571 | 106 | 0.674057 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,708 | internal_interface.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/internal_interface.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import ipaddress
from pathlib import Path
from pyroute2 import NDB
from middlewared.service import Service
from middlewared.utils.functools_ import cache
class InternalInterfaceService(Service):

    # TCP site bound to the heartbeat IP; created once in post_sync()
    http_site = None

    class Config:
        private = True
        namespace = 'failover.internal_interface'

    @cache
    def detect(self):
        """Return the name(s) of this platform's dedicated HA heartbeat interface.

        Cached for the process lifetime since the hardware cannot change at runtime.
        """
        hardware = self.middleware.call_sync('failover.hardware')
        if hardware == 'BHYVE':
            return ['enp0s6f1']
        elif hardware == 'IXKVM':
            return ['enp1s0']
        elif hardware == 'ECHOSTREAM':
            # z-series: find the NIC with the specific PCI (sub)device IDs
            for i in Path('/sys/class/net/').iterdir():
                try:
                    data = (i / 'device/uevent').read_text()
                    if 'PCI_ID=8086:10D3' in data and 'PCI_SUBSYS_ID=8086:A01F' in data:
                        return [i.name]
                except FileNotFoundError:
                    continue
            # NOTE(review): implicitly returns None when no NIC matches — confirm intended
        elif hardware in ('PUMA', 'ECHOWARP', 'LAJOLLA2', 'SUBLIGHT'):
            # {x/m/f/h}-series
            return ['ntb0']
        else:
            return []

    async def pre_sync(self):
        """Determine this node's fixed heartbeat IP and configure the interface."""
        if not await self.middleware.call('system.is_enterprise'):
            return
        # node A/B get well-known link-local addresses on the p2p link
        node = await self.middleware.call('failover.node')
        if node == 'A':
            internal_ip = '169.254.10.1'
        elif node == 'B':
            internal_ip = '169.254.10.2'
        else:
            self.logger.error('Node position could not be determined.')
            return
        iface = await self.middleware.call('failover.internal_interfaces')
        if not iface:
            self.logger.error('Internal interface not found.')
            return
        iface = iface[0]
        # netlink work is blocking; keep it off the event loop
        await self.middleware.run_in_thread(self.sync, iface, internal_ip)

    def sync(self, iface, internal_ip):
        """Assign `internal_ip`/24 to `iface`, bring it up, and install a
        blackhole route covering the surrounding /23."""
        # 254 = main routing table, 6 = RTN_BLACKHOLE (rtnetlink constants)
        default_table, rtn_blackhole = 254, 6
        with NDB(log='off') as ndb:
            try:
                with ndb.interfaces[iface] as dev:
                    dev.add_ip(f'{internal_ip}/24').set(state='up')
            except KeyError:
                # ip address already exists on this interface
                pass
            # add a blackhole route of 169.254.10.0/23 which is 1 bit larger than
            # ip address we put on the internal interface. We do this because the
            # f-series platform uses AMD ntb driver and the behavior for when the
            # B controller is active and the A controller reboots, is that the ntb0
            # interface is removed from the B controller. This means any src/dst
            # traffic on the 169.254.10/24 subnet will be forwarded out of the gateway
            # of last resort (default route). Since this is internal traffic, we
            # obviously don't want to forward this traffic to the default gateway.
            # This just routes the data into oblivion (drops it).
            dst_network = ipaddress.ip_interface(f'{internal_ip}/23').network.exploded
            try:
                ndb.routes.create(dst=dst_network, table=default_table, type=rtn_blackhole).commit()
            except KeyError:
                # blackhole route already exists
                pass
        self.middleware.call_sync('failover.internal_interface.post_sync', internal_ip)

    async def post_sync(self, internal_ip):
        # start the middleware TCP site on the heartbeat IP exactly once
        if self.http_site is None:
            self.http_site = await self.middleware.start_tcp_site(internal_ip)
async def __event_system_ready(middleware, event_type, args):
    # (re)configure the heartbeat interface once the system reports ready
    await middleware.call('failover.internal_interface.pre_sync')
async def setup(middleware):
    # on HA systems, we bind ourselves on 127.0.0.1:6000, however
    # often times developers/CI/CD do `systemctl restart middlewared`
    # which will tear down the local listening socket so we need to
    # be sure and set it up everytime middleware starts. This is a
    # NO-OP otherwise.
    middleware.event_subscribe('system.ready', __event_system_ready)
    if await middleware.call('system.ready'):
        # middleware restarted after boot: system.ready already fired, run now
        await middleware.call('failover.internal_interface.pre_sync')
| 4,262 | Python | .py | 91 | 36.637363 | 100 | 0.626506 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,709 | datastore.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/datastore.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import os
import time
from middlewared.service import Service
from middlewared.plugins.config import FREENAS_DATABASE
from middlewared.plugins.datastore.connection import thread_pool
from middlewared.utils.threading import start_daemon_thread, set_thread_name
from middlewared.utils import db as db_utils
# destination path for the replicated database on the standby node
FREENAS_DATABASE_REPLICATED = f'{FREENAS_DATABASE}.replicated'
RAISE_ALERT_SYNC_RETRY_TIME = 1200  # 20mins (some platforms take 15-20mins to reboot)
class FailoverDatastoreService(Service):
    """Replicates configuration-database writes from the active node to the standby."""

    class Config:
        namespace = 'failover.datastore'
        private = True
        thread_pool = thread_pool
async def sql(self, data, sql, params):
    """Apply a single replicated SQL write on the standby node.

    Skips execution when software versions differ or when this node is
    not in the BACKUP failover state.
    """
    if await self.middleware.call('system.version') != data['version']:
        return
    if await self.middleware.call('failover.status') != 'BACKUP':
        # We can't query failover.status on `MASTER` node (please see `hook_datastore_execute_write` for
        # explanations). Non-BACKUP nodes are responsible for checking their failover status.
        return
    await self.middleware.call('datastore.execute', sql, params)
# True while at least one replicated write failed and a resend is pending
failure = False

def is_failure(self):
    # report whether database replication is currently in a failed state
    return self.failure
def set_failure(self):
    """Mark replication as failed; try an immediate full database resend and,
    if that doesn't clear the failure, keep retrying in a background thread
    (raising a FailoverSyncFailed alert every RAISE_ALERT_SYNC_RETRY_TIME)."""
    self.failure = True
    try:
        # This is executed in `hook_datastore_execute_write` so we can't query local failover status here and we'll
        # have to rely on remote.
        if (fs := self.middleware.call_sync('failover.call_remote', 'failover.status')) == 'BACKUP':
            self.send()
        else:
            # Avoid sending database if we are not MASTER.
            self.logger.warning('Remote node failover status is %s while retrying database send', fs)
            self.failure = False
    except Exception as e:
        self.logger.warning('Error sending database to remote node on first replication failure: %r', e)

    def send_retry():
        # background retry loop; runs until replication succeeds or we
        # discover we're no longer MASTER
        set_thread_name('failover_datastore')
        raise_alert_time = RAISE_ALERT_SYNC_RETRY_TIME
        total_mins = raise_alert_time / 60
        sleep_time = 30
        while True:
            raise_alert_time -= sleep_time
            time.sleep(sleep_time)
            if not self.failure:
                # Someone sent the database for us
                return
            if (fs := self.middleware.call_sync('failover.status')) != 'MASTER':
                self.logger.warning('Failover status is %s while retrying database send', fs)
                self.failure = False
                break
            try:
                self.middleware.call_sync('failover.datastore.send')
            except Exception:
                # keep retrying silently; alert below covers prolonged failure
                pass
            if raise_alert_time <= 0 and self.failure:
                # still failing after the alert window: notify and reset timer
                self.middleware.call_sync('alert.oneshot_create', 'FailoverSyncFailed', {'mins': total_mins})
                raise_alert_time = RAISE_ALERT_SYNC_RETRY_TIME

    start_daemon_thread(target=send_retry)
def send(self):
token = self.middleware.call_sync('failover.call_remote', 'auth.generate_token')
self.middleware.call_sync('failover.send_file', token, FREENAS_DATABASE, FREENAS_DATABASE_REPLICATED, {'mode': db_utils.FREENAS_DATABASE_MODE})
self.middleware.call_sync('failover.call_remote', 'failover.datastore.receive')
self.failure = False
self.middleware.call_sync('alert.oneshot_delete', 'FailoverSyncFailed', None)
def receive(self):
# Take the following example:
# 1. upgrade both HA controllers
# 2. standby controller reboots (by design) into the newly OS version
# 3. active controller does NOT reboot (by design)
# 4. for some unpredictable reason, upgrade is not "finalized"
# (i.e. reboot the active to failover to the newly upgraded controller, etc)
# 5. User (or something inside middleware) makes a change to the database on the active
# (remember it's running the "old" version compared to the standby)
# 6. active controller makes changes to local db or the user decides to "sync to peer"
# 7. active controller replicates the entire database to the standby
# 8. because the standby is running a newer version, then the schema migrations that could
# have occurred on the standby are now lost because the database was replaced _entirely_
# with a copy from the active controller (running an old version)
#
# The worst part about this scenario is that the standby controller will continue to run
# without issue until:
# 1. a change is made on the standby that tries to reference the new schema
# 2. OR the standby controller reboots (or middlewared service restarts)
#
# If either of these occur, middlewared service will fail to start and crash early in startup
# because the newer middleware will try to query the database referencing the potential changes
# that occurred in the schema migration of the upgrade. There is no easy solution to this problem
# once you're in this state outside of rolling back to the previous BE and performing a much more
# disruptive upgrade. (i.e. booting the ISO and performing an upgrade that way so db replication
# doesn't occur since middlewared service isn't running) (i.e. take the entire system down)
#
# To prevent this, we check to make sure the local database alembic revision matches the replicated
# database that has been sent to us.
loc_vers = db_utils.query_config_table('alembic_version')['version_num']
rep_vers = db_utils.query_config_table('alembic_version', FREENAS_DATABASE_REPLICATED)['version_num']
if loc_vers != rep_vers:
self.logger.warning(
'Received database alembic revision (%s) does not match local database alembic revision (%s)',
rep_vers, loc_vers
)
return
os.rename(FREENAS_DATABASE_REPLICATED, FREENAS_DATABASE)
self.middleware.call_sync('datastore.setup')
async def force_send(self):
if await self.middleware.call('failover.status') == 'MASTER':
await self.middleware.call('failover.datastore.set_failure')
def hook_datastore_execute_write(middleware, sql, params, options):
    """Mirror a single datastore write to the other HA node.

    Runs inline in the SQLite thread and blocks it (to avoid replication
    query race conditions). No switching to the async context that would
    yield to database queries is allowed here — it would deadlock — so the
    local failover status cannot be queried; every write is forwarded and
    the receiving node validates its own failover status instead.
    """
    replicate = (
        options['ha_sync']
        and middleware.call_sync('failover.licensed')
        and not middleware.call_sync('failover.datastore.is_failure')
    )
    if not replicate:
        return

    try:
        middleware.call_sync(
            'failover.call_remote',
            'failover.datastore.sql',
            [
                # Version travels with the statement so the peer can refuse
                # it when the schemas differ across an upgrade.
                {'version': middleware.call_sync('system.version')},
                sql,
                params,
            ],
            {'timeout': 10},
        )
    except Exception as e:
        middleware.logger.warning('Error replicating SQL on the remote node: %r', e)
        # Flip into failure mode: a full database resend will be attempted.
        middleware.call_sync('failover.datastore.set_failure')
async def setup(middleware):
if not await middleware.call('system.is_enterprise'):
return
middleware.register_hook('datastore.post_execute_write', hook_datastore_execute_write, inline=True)
| 8,075 | Python | .py | 146 | 44.383562 | 151 | 0.649823 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,710 | jobs_copy.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/jobs_copy.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import base64
import gzip
import os
from middlewared.service import CompoundService, Service
from middlewared.utils.service.task_state import TaskStateMixin
class JobsCopyService(Service):
    """Copy the state (and tail of the logs) of selected finished jobs from
    the MASTER node to the standby node so both controllers show the same
    job history for registered task-state methods."""

    # Job method names whose completed state should be mirrored to the peer.
    methods = set()

    class Config:
        private = True
        namespace = "failover.jobs_copy"

    async def register_method(self, method):
        """Register a job method name for cross-node copying."""
        self.methods.add(method)

    async def on_job_complete(self, job):
        """Fire-and-forget a copy of a finished job to the standby node
        (only when we are MASTER and the method is registered)."""
        if job["method"] not in self.methods:
            return

        if await self.middleware.call("failover.status") != "MASTER":
            return

        self.middleware.create_task(self.send_job(job))

    async def send_job(self, job):
        """Ship one job record (plus compressed log tail, if any) to the
        other node; failures are logged, never raised."""
        try:
            logs = None
            if job["logs_path"] is not None:
                logs = await self.middleware.call("failover.jobs_copy.read_logs", job["logs_path"])

            await self.middleware.call("failover.call_remote", "failover.jobs_copy.receive_job", [job, logs])
        except Exception as e:
            self.logger.error("Error sending job %r %r: %r", job["method"], job["id"], e)

    async def receive_job(self, job, logs):
        """Accept a job record sent by the peer; `logs` is a base64-encoded
        gzip blob produced by read_logs(), or None."""
        if logs is not None:
            logs = await self.middleware.run_in_thread(lambda: gzip.decompress(base64.b64decode(logs.encode("ascii"))))

        await self.middleware.jobs.receive(job, logs)

    def read_logs(self, path):
        """Return (as base64-encoded gzip) at most the last megabyte of the
        log file at `path`, trimmed to whole lines."""
        with open(path, "rb") as f:
            # We only want to send the last megabyte of the logs
            try:
                f.seek(-1000000, os.SEEK_END)
            except OSError:
                # The file is less than one megabyte, that is not an issue
                text = f.read()
            else:
                text = f.read()
                # Remove the leftovers of the first incomplete line
                text = text[text.find(b'\n') + 1:]

        return base64.b64encode(gzip.compress(text)).decode("ascii")
async def on_job_change(middleware, event_type, args):
    """core.get_jobs event callback: forward jobs that reached a terminal
    state to failover.jobs_copy.on_job_complete."""
    if event_type != "CHANGED":
        return
    fields = args["fields"]
    if fields["state"] not in ("SUCCESS", "FAILED", "ABORTED"):
        return
    await middleware.call("failover.jobs_copy.on_job_complete", fields)
async def setup(middleware):
    """Subscribe to job events and register every task-state method exposed
    by loaded services for cross-node job copying."""
    middleware.event_subscribe("core.get_jobs", on_job_change)

    for service in middleware._services.values():
        # CompoundService aggregates several service parts; walk each part.
        if isinstance(service, CompoundService):
            services = service.parts
        else:
            services = [service]

        for svc in services:
            if isinstance(svc, TaskStateMixin):
                for method in svc.task_state_methods:
                    await middleware.call("failover.jobs_copy.register_method", method)
| 2,799 | Python | .py | 61 | 36.622951 | 119 | 0.635394 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,711 | fenced.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/fenced.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import subprocess
import psutil
import contextlib
import os
import signal
from middlewared.plugins.system.product import ProductType
from middlewared.service import Service, CallError
from middlewared.utils import MIDDLEWARE_RUN_DIR
from middlewared.utils.cgroups import move_to_root_cgroups
from fenced.fence import ExitCode as FencedExitCodes
# Pidfile read by run_info() to locate an already-running fenced instance.
PID_FILE = os.path.join(MIDDLEWARE_RUN_DIR, 'fenced.pid')
# Signal 0: existence/permission check only, no signal is actually delivered.
IS_ALIVE_SIGNAL = 0
class FencedService(Service):
    """Manage the `fenced` daemon, which places SCSI reservations on data
    disks to prevent both HA controllers from writing to a pool at once."""

    class Config:
        private = True
        namespace = 'failover.fenced'

    def start(self, force=False, use_zpools=False):
        """Launch fenced, excluding the boot disks from reservation.

        `force` passes `-f`; `use_zpools` passes `-uz` (flag semantics are
        defined by the fenced binary). Returns fenced's exit code (0 = ok).
        """
        # get the boot disks so fenced doesn't try to
        # place reservations on the boot drives
        try:
            boot_disks = ','.join(self.middleware.call_sync('boot.get_disks'))
        except Exception:
            self.logger.warning('Failed to get boot disks', exc_info=True)
            # just because we can't grab the boot disks from middleware
            # doesn't mean we should fail to start fenced since it
            # (ultimately) prevents data corruption on HA systems
            boot_disks = ''

        # build the shell command
        cmd = ['fenced']
        if boot_disks:
            cmd.extend(['-ed', boot_disks])
        if force:
            cmd.append('-f')
        if use_zpools:
            cmd.append('-uz')

        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, err = proc.communicate()
        if proc.returncode == 0:
            # move out from underneath middlewared (parent) cgroup so
            # that when middlewared is restarted via the cli, the signal
            # doesn't get sent to fenced causing fenced process to also
            # receive the same signal....so gross
            move_to_root_cgroups(self.run_info()['pid'])

        return proc.returncode

    def stop(self, banhammer=True):
        """Stop fenced: `banhammer` SIGKILLs anything matching "fenced";
        otherwise only the pid found by run_info() is killed."""
        if banhammer:
            # dont care if it's running or not just
            # SIGKILL anything that has "fenced" in
            # the process name
            subprocess.run(['pkill', '-9', '-f', 'fenced'])
        else:
            res = self.middleware.call_sync('failover.fenced.run_info')
            if res['running'] and res['pid']:
                try:
                    os.kill(res['pid'], signal.SIGKILL)
                except ProcessLookupError:
                    pass

    def run_info(self):
        """Return {'running': bool, 'pid': int|''} using the pidfile first
        and a process-table scan as fallback for a missing/stale pidfile."""
        res = {'running': False, 'pid': ''}
        with contextlib.suppress(Exception):
            with open(PID_FILE, 'rb') as f:
                res['pid'] = int(f.read().decode())

        check_running_procs = False
        if res['pid']:
            try:
                # signal 0 only checks that the pid exists
                os.kill(res['pid'], IS_ALIVE_SIGNAL)
            except OSError:
                check_running_procs = True
            else:
                res['running'] = True
        else:
            check_running_procs = True

        if check_running_procs:
            # either 1. no pid in file or 2. pid in file is wrong/stale
            for proc in filter(lambda x: x.info['name'] == 'fenced', psutil.process_iter(['pid', 'name'])):
                res['pid'] = proc.info['pid']
                res['running'] = True

        return res

    def signal(self, options):
        """Send SIGHUP (options['reload']) and/or SIGUSR1
        (options['log_info']) to a running fenced; raises CallError on
        delivery failure."""
        res = self.middleware.call_sync('failover.fenced.run_info')
        if res['running'] and res['pid']:
            try:
                if options.get('reload', False):
                    os.kill(res['pid'], signal.SIGHUP)
                if options.get('log_info', False):
                    os.kill(res['pid'], signal.SIGUSR1)
            except OSError as e:
                raise CallError(f'Failed to signal fenced: {e}')
async def hook_pool_event(middleware, *args, **kwargs):
    """Pool create/update/import hook: reload fenced (or start it) on
    single-controller enterprise hardware that relies on disk fencing."""
    # only run this on SCALE Enterprise
    if await middleware.call('system.product_type') != ProductType.SCALE_ENTERPRISE:
        return

    # HA licensed systems call fenced on their own
    if await middleware.call('failover.licensed'):
        return

    # only run this on the m/x series platform since the other
    # platforms are either non-supported or end of life
    if (await middleware.call('failover.ha_mode'))[0] not in ('ECHOWARP', 'PUMA'):
        return

    if (await middleware.call('failover.fenced.run_info'))['running']:
        try:
            await middleware.call('failover.fenced.signal', {'reload': True})
        except CallError as e:
            middleware.logger.error('Failed to reload fenced: %r', e)
    else:
        force = False
        use_zpools = True
        rc = await middleware.call('failover.fenced.start', force, use_zpools)
        if rc:
            # translate the numeric exit code to its symbolic name for the log
            for i in FencedExitCodes:
                if rc == i.value:
                    middleware.logger.error('Failed to start fenced: %s', i.name)
                    break
async def setup(middleware):
    """Register pool lifecycle hooks so pool changes refresh fenced."""
    for hook_name in ('pool.post_create_or_update', 'pool.post_import'):
        middleware.register_hook(hook_name, hook_pool_event)
24,712 | remote.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/remote.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import errno
import json
import logging
import requests
import socket
import threading
import time
from collections import defaultdict
from functools import partial
from websocket._exceptions import WebSocketBadStatusException
from truenas_api_client import Client, ClientException, CALL_TIMEOUT
from middlewared.schema import accepts, Any, Bool, Dict, Int, List, Str, Float, returns
from middlewared.service import CallError, Service, private
from middlewared.utils.threading import set_thread_name, start_daemon_thread
from middlewared.validators import Range
logger = logging.getLogger('failover.remote')

# errno values treated as "peer unreachable" by FailoverService.call_remote().
NETWORK_ERRORS = (errno.ETIMEDOUT, errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET, errno.EHOSTDOWN,
                  errno.EHOSTUNREACH)
class RemoteClient:
    """Persistent websocket client to the other HA controller over the
    point-to-point heartbeat link, with reconnect, event-subscription
    replay, and connect/disconnect callback support."""

    def __init__(self):
        self.client = None
        self.connected = threading.Event()
        self.middleware = None
        self.remote_ip = None
        self._subscribe_lock = threading.Lock()
        self._subscriptions = defaultdict(list)
        self._on_connect_callbacks = []
        self._on_disconnect_callbacks = []
        self._remote_os_version = None
        # True while connection attempts are being refused (rate-limits logs)
        self.refused = False

    def run(self):
        """Thread body: connect and reconnect forever, logging refusals
        only once per outage."""
        set_thread_name('ha_connection')
        retry = 5
        self.refused = False
        while True:
            try:
                self.connect_and_wait()
                self.refused = False
            except ConnectionRefusedError:
                if not self.refused:
                    logger.error(f'Persistent connection refused, retrying every {retry} seconds')
                self.refused = True
            except Exception:
                logger.error('Remote connection failed', exc_info=True)
                self.refused = False
            time.sleep(retry)

    def connect_and_wait(self, *, legacy=False):
        """Open the websocket, replay subscriptions, and block until the
        connection closes. Falls back to the legacy endpoint on 404; maps
        link-level OSErrors to ConnectionRefusedError for run()."""
        url = f'ws://{self.remote_ip}:6000/api/current'
        if legacy:
            url = f'ws://{self.remote_ip}:6000/websocket'
        try:
            with Client(url, reserved_ports=True) as c:
                self.client = c
                with self._subscribe_lock:
                    self.connected.set()
                    # Subscribe to all events on connection
                    for name in self._subscriptions:
                        self.client.subscribe(name, partial(self._sub_callback, name))
                self._on_connect()
                c._closed.wait()
        except WebSocketBadStatusException as e:
            if not legacy and e.status_code == 404:
                # 24.10 middleware and earlier gives 404 when trying to access `/api/current`.
                # We should try legacy API server in that case.
                return self.connect_and_wait(legacy=True)
            raise
        except OSError as e:
            if e.errno in (
                errno.EPIPE,  # Happens when failover is configured on cxl device that has no link
                errno.EINVAL,  # F-Series have `ntb0` device removed when other node is being rebooted
                errno.ENETDOWN, errno.EHOSTDOWN, errno.ENETUNREACH, errno.EHOSTUNREACH,
                errno.ECONNREFUSED,
            ) or isinstance(e, socket.timeout):
                raise ConnectionRefusedError()
            raise
        finally:
            if self.connected.is_set():
                # Only happens if we have successfully connected once
                self._on_disconnect()
            self.client = None
            self.connected.clear()

    def is_connected(self):
        """Return True while a live connection to the peer exists."""
        return self.connected.is_set()

    def register_connect(self, cb):
        """
        Register a callback to be called everytime we connect to the other node.
        """
        self._on_connect_callbacks.append(cb)

    def _on_connect(self):
        """
        Called everytime connection has been established.
        """
        # journal thread checks this attribute to ensure
        # we're not trying to alter the remote db if the
        # OS versions do not match since schema changes
        # can (and do) change between upgrades
        self._remote_os_version = self.get_remote_os_version()

        for cb in self._on_connect_callbacks:
            try:
                cb(self.middleware)
            except Exception:
                logger.error('Failed to run on_connect for remote client', exc_info=True)

        if self.refused:
            logger.info('Persistent connection reestablished')

    def register_disconnect(self, cb):
        """
        Register a callback to be called everytime we disconnect from the other node.
        """
        self._on_disconnect_callbacks.append(cb)

    def _on_disconnect(self):
        """
        Called everytime connection is closed for whatever reason.
        """
        self._remote_os_version = None

        for cb in self._on_disconnect_callbacks:
            try:
                cb(self.middleware)
            except Exception:
                logger.error('Failed to run on_disconnect for remote client', exc_info=True)

    def call(self, *args, **kwargs):
        """Call a method on the peer; waits up to kwargs['connect_timeout']
        for a connection, translating transport errors into CallError."""
        try:
            if not self.connected.wait(timeout=kwargs.pop('connect_timeout')):
                if self.remote_ip is None:
                    raise CallError('Unable to determine remote node IP', errno.EHOSTUNREACH)
                raise CallError('Remote connection unavailable', errno.ECONNREFUSED)
            return self.client.call(*args, **kwargs)
        except AttributeError as e:
            # ws4py traceback which can happen when connection is lost
            if "'NoneType' object has no attribute 'text_message'" in str(e):
                raise CallError('Remote connection closed.', errno.ECONNRESET)
            else:
                raise
        except ClientException as e:
            raise CallError(str(e), e.errno or errno.EFAULT)

    def subscribe(self, name, callback):
        """Register `callback` for remote event `name`; subscribes over the
        wire immediately when already connected."""
        with self._subscribe_lock:
            # Only subscribe if we are already connected, otherwise simply register it
            if name not in self._subscriptions and self.is_connected():
                self.client.subscribe(name, partial(self._sub_callback, name))
            self._subscriptions[name].append(callback)

    def _sub_callback(self, name, type_, **message):
        # Fan out one remote event to every registered local callback.
        for callback in self._subscriptions.get(name, []):
            try:
                callback(self.middleware, type_, **message)
            except Exception:
                logger.warning('Failed to run callback for %s', name, exc_info=True)

    def send_file(self, token, local_path, remote_path, options=None):
        """Upload `local_path` to `remote_path` on the peer via its HTTP
        upload endpoint and poll until the remote job finishes; raises
        CallError when the remote job fails or is aborted."""
        # No reason to honor proxy settings in this
        # method since we're sending across the
        # heartbeat interface which is point-to-point
        proxies = {'http': '', 'https': ''}
        options = options or {}
        r = requests.post(
            f'http://{self.remote_ip}:6000/_upload/',
            proxies=proxies,
            files=[
                ('data', json.dumps({
                    'method': 'filesystem.put',
                    'params': [remote_path, options],
                })),
                ('file', open(local_path, 'rb')),
            ],
            headers={
                'Authorization': f'Token {token}',
            },
        )
        job_id = r.json()['job_id']
        # TODO: use event subscription in the client instead of polling
        while True:
            rjob = self.client.call('core.get_jobs', [('id', '=', job_id)])
            if rjob:
                rjob = rjob[0]
                if rjob['state'] == 'FAILED':
                    raise CallError(
                        f'Failed to send {local_path} to Standby Controller: {rjob["error"]}.'
                    )
                elif rjob['state'] == 'ABORTED':
                    raise CallError(
                        f'Failed to send {local_path} to Standby Controller, job aborted by user.'
                    )
                elif rjob['state'] == 'SUCCESS':
                    break
            time.sleep(0.5)

    def get_remote_os_version(self):
        """Return (and lazily cache) the peer's OS version string, or None
        when not connected/undeterminable."""
        if self.client is not None and self._remote_os_version is None:
            try:
                self._remote_os_version = self.client.call('system.version')
            except CallError:
                # ignore CallErrors since they're being caught in self.client.call
                pass
            except Exception:
                logger.error('Failed to determine OS version', exc_info=True)

        return self._remote_os_version
class FailoverService(Service):
    """Public middleware surface around the shared RemoteClient: resolves
    peer addresses, proxies calls/files to the other node, and exposes
    connect/disconnect/subscription hooks."""

    class Config:
        cli_private = True

    # Single shared client for the whole process; remote_ip is filled in by
    # ensure_remote_client() before the connection thread starts.
    CLIENT = RemoteClient()

    @private
    async def remote_ip(self):
        """Return the peer's heartbeat IP based on which node we are."""
        node = await self.middleware.call('failover.node')
        if node == 'A':
            remote = '169.254.10.2'
        elif node == 'B':
            remote = '169.254.10.1'
        else:
            raise CallError(f'Node {node} invalid for call_remote', errno.EHOSTUNREACH)
        return remote

    @private
    async def local_ip(self):
        """Return this node's heartbeat IP based on which node we are."""
        node = await self.middleware.call('failover.node')
        if node == 'A':
            local = '169.254.10.1'
        elif node == 'B':
            local = '169.254.10.2'
        else:
            raise CallError(f'Node {node} invalid', errno.EHOSTUNREACH)
        return local

    @accepts(
        Str('method'),
        List('args'),
        Dict(
            'options',
            Int('timeout', default=CALL_TIMEOUT),
            Bool('job', default=False),
            Bool('job_return', default=None, null=True),
            Any('callback', default=None, null=True),
            Float('connect_timeout', default=2.0, validators=[Range(min_=2.0, max_=1800.0)]),
            Bool('raise_connect_error', default=True),
        ),
    )
    @returns(Any(null=True))
    def call_remote(self, method, args, options):
        """
        Call a method on the other node.

        `method` name of the method to be called
        `args` list of arguments to be passed to `method`
        `options` dictionary with following keys
            `timeout`: time to wait for `method` to return
                NOTE: This parameter _ONLY_ applies if the remote
                    client is connected to the other node.
            `job`: whether the `method` being called is a job
            `job_return`: if true, will return immediately and not wait
                for the job to complete, otherwise will wait for the
                job to complete
            `callback`: a function that will be called as a callback
                on completion/failure of `method`.
                NOTE: Only applies if `method` is a job
            `connect_timeout`: Maximum amount of time in seconds to wait
                for remote connection to become available.
            `raise_connect_error`: If false, will not raise an exception if a connection error to the other node
                happens, or connection/call timeout happens, or method does not exist on the remote node.
        """
        if options.pop('job_return'):
            options['job'] = 'RETURN'
        raise_connect_error = options.pop('raise_connect_error')
        try:
            return self.CLIENT.call(method, *args, **options)
        except (CallError, ClientException) as e:
            if e.errno in NETWORK_ERRORS + (CallError.ENOMETHOD,):
                # connectivity-class failure: optionally swallowed
                if raise_connect_error:
                    raise CallError(str(e), e.errno)
                else:
                    self.logger.trace('Failed to call %r on remote node', method, exc_info=True)
            else:
                raise CallError(str(e), errno.EFAULT)

    @private
    def get_remote_os_version(self):
        """Peer's OS version string, or None when unknown/not connected."""
        if self.CLIENT.remote_ip is not None:
            return self.CLIENT.get_remote_os_version()

    @private
    def send_file(self, token, src, dst, options=None):
        """Upload local file `src` to `dst` on the peer (see RemoteClient.send_file)."""
        self.CLIENT.send_file(token, src, dst, options)

    @private
    async def ensure_remote_client(self):
        """Idempotently start the background connection thread to the peer."""
        if self.CLIENT.remote_ip is not None:
            return
        try:
            self.CLIENT.remote_ip = await self.middleware.call('failover.remote_ip')
            self.CLIENT.middleware = self.middleware
            start_daemon_thread(target=self.CLIENT.run)
        except CallError:
            pass

    @private
    def remote_connected(self):
        """True while a live connection to the peer exists."""
        return self.CLIENT.is_connected()

    @private
    def remote_subscribe(self, name, callback):
        self.CLIENT.subscribe(name, callback)

    @private
    def remote_on_connect(self, callback):
        self.CLIENT.register_connect(callback)

    @private
    def remote_on_disconnect(self, callback):
        self.CLIENT.register_disconnect(callback)
async def setup(middleware):
if await middleware.call('failover.licensed'):
await middleware.call('failover.ensure_remote_client')
| 13,016 | Python | .py | 301 | 32.093023 | 112 | 0.595484 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,713 | reboot.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/reboot.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import asyncio
from dataclasses import asdict, dataclass
import errno
import time
from middlewared.api import api_method
from middlewared.api.current import (
FailoverRebootInfoArgs, FailoverRebootInfoResult,
FailoverRebootOtherNodeArgs, FailoverRebootOtherNodeResult,
)
from middlewared.service import CallError, job, private, Service
@dataclass
class RemoteRebootReason:
    """A pending reason why the OTHER node must be rebooted, pinned to a
    specific boot of that node."""
    # Boot ID for which the reboot is required. `None` means that the system must be rebooted when it comes online.
    boot_id: str | None
    # Human-readable explanation shown to the administrator.
    reason: str
class FailoverRebootService(Service):
    """Track reboot-required reasons for the standby node and provide a job
    that reboots it and waits for it to return."""

    class Config:
        cli_namespace = 'system.failover.reboot'
        namespace = 'failover.reboot'

    # keyvalue key (per local node) and in-memory cache of pending reasons,
    # populated by load_remote_reboot_reasons() during setup.
    remote_reboot_reasons_key: str
    remote_reboot_reasons: dict[str, RemoteRebootReason]

    @private
    async def add_remote_reason(self, code: str, reason: str):
        """
        Adds a reason for why the remote system needs a reboot.
        This will be appended to the list of the reasons that the remote node itself returns.
        :param code: unique identifier for the reason.
        :param reason: text explanation for the reason.
        """
        try:
            boot_id = await self.middleware.call('failover.call_remote', 'system.boot_id', [], {
                'raise_connect_error': False,
                'timeout': 2,
                'connect_timeout': 2,
            })
        except Exception as e:
            if not (isinstance(e, CallError) and e.errno == CallError.ENOMETHOD):
                self.logger.warning('Unexpected error querying remote reboot boot id', exc_info=True)
            # Remote system is inaccessible, so, when it comes back, another reboot will be required.
            boot_id = None

        self.remote_reboot_reasons[code] = RemoteRebootReason(boot_id, reason)
        await self.persist_remote_reboot_reasons()

        await self.send_event()

    @api_method(FailoverRebootInfoArgs, FailoverRebootInfoResult, roles=['FAILOVER_READ'])
    async def info(self):
        """Return reboot-required info for both nodes, merging our locally
        tracked reasons into the other node's report and expiring reasons
        whose boot has passed."""
        changed = False

        try:
            other_node = await self.middleware.call('failover.call_remote', 'system.reboot.info', [], {
                'raise_connect_error': False,
                'timeout': 2,
                'connect_timeout': 2,
            })
        except Exception as e:
            if not (isinstance(e, CallError) and e.errno == CallError.ENOMETHOD):
                self.logger.warning('Unexpected error querying remote reboot info', exc_info=True)
            other_node = None

        if other_node is not None:
            # iterate over a copy: reasons may be popped while iterating
            for remote_reboot_reason_code, remote_reboot_reason in list(self.remote_reboot_reasons.items()):
                if remote_reboot_reason.boot_id is None:
                    # This reboot reason was added while the remote node was not functional.
                    # In that case, when the remote system comes online, an additional reboot is required.
                    remote_reboot_reason.boot_id = other_node['boot_id']
                    changed = True

                if remote_reboot_reason.boot_id == other_node['boot_id']:
                    other_node['reboot_required_reasons'].append({
                        'code': remote_reboot_reason_code,
                        'reason': remote_reboot_reason.reason,
                    })
                else:
                    # The system was rebooted, this reason is not valid anymore
                    self.remote_reboot_reasons.pop(remote_reboot_reason_code)
                    changed = True

        info = {
            'this_node': await self.middleware.call('system.reboot.info'),
            'other_node': other_node,
        }

        if changed:
            await self.persist_remote_reboot_reasons()
            await self.send_event(info)

        return info

    @api_method(FailoverRebootOtherNodeArgs, FailoverRebootOtherNodeResult, roles=['FULL_ADMIN'])
    @job(lock='reboot_standby')
    async def other_node(self, job):
        """
        Reboot the other node and wait for it to come back online.

        NOTE: This makes very few checks on HA systems. You need to
        know what you're doing before calling this.
        """
        if not await self.middleware.call('failover.licensed'):
            return

        remote_boot_id = await self.middleware.call('failover.call_remote', 'system.boot_id')

        job.set_progress(5, 'Rebooting other controller')
        await self.middleware.call(
            'failover.call_remote', 'failover.become_passive', [], {'raise_connect_error': False, 'timeout': 20}
        )

        job.set_progress(30, 'Waiting on the other controller to go offline')
        try:
            # keep pinging until the peer drops off; a CallError means it
            # went offline (the expected outcome)
            retry_time = time.monotonic()
            timeout = 90  # seconds
            while time.monotonic() - retry_time < timeout:
                await self.middleware.call('failover.call_remote', 'core.ping', [], {'timeout': 5})
                await asyncio.sleep(5)
        except CallError:
            pass
        else:
            raise CallError(
                f'Timed out after {timeout}seconds waiting for the other controller to come back online',
                errno.ETIMEDOUT
            )

        job.set_progress(60, 'Waiting for the other controller to come back online')
        if not await self.middleware.call('failover.upgrade_waitstandby'):
            # FIXME: `upgrade_waitstandby` is a really poor name for a method that
            # just waits on the other controller to come back online and be ready
            raise CallError('Timed out waiting for the other controller to come online', errno.ETIMEDOUT)

        # We captured the boot_id of the standby controller before we rebooted it
        # This variable represents a 1-time unique boot id. It's supposed to be different
        # every time the system boots up. If this check is True, then it's safe to say
        # that the remote system never rebooted
        if remote_boot_id == await self.middleware.call('failover.call_remote', 'system.boot_id'):
            raise CallError('Other controller failed to reboot')

        job.set_progress(100, 'Other controller rebooted successfully')
        return True

    @private
    async def send_event(self, info=None):
        """Broadcast the current reboot info on the failover.reboot.info event."""
        if info is None:
            info = await self.info()

        self.middleware.send_event('failover.reboot.info', 'CHANGED', id=None, fields=info)

    @private
    async def load_remote_reboot_reasons(self):
        """Load persisted reasons from the keyvalue store into memory."""
        self.remote_reboot_reasons_key = f'remote_reboot_reasons_{await self.middleware.call("failover.node")}'
        self.remote_reboot_reasons = {
            k: RemoteRebootReason(**v)
            for k, v in (await self.middleware.call('keyvalue.get', self.remote_reboot_reasons_key, {})).items()
        }

    @private
    async def persist_remote_reboot_reasons(self):
        """Write the in-memory reasons back to the keyvalue store."""
        await self.middleware.call('keyvalue.set', self.remote_reboot_reasons_key, {
            k: asdict(v)
            for k, v in self.remote_reboot_reasons.items()
        })
async def reboot_info(middleware, *args, **kwargs):
    # Async event-callback shim: re-broadcast the current reboot info.
    await middleware.call('failover.reboot.send_event')
def remote_reboot_info(middleware, *args, **kwargs):
    # Sync callback (runs off the event loop thread, e.g. from RemoteClient),
    # so the coroutine is scheduled onto the loop instead of awaited.
    asyncio.run_coroutine_threadsafe(middleware.call('failover.reboot.send_event'), loop=middleware.loop)
async def setup(middleware):
    """Load persisted reboot reasons and wire both local and remote
    system.reboot.info events into failover.reboot.info broadcasts."""
    await middleware.call('failover.reboot.load_remote_reboot_reasons')

    middleware.event_register('failover.reboot.info', 'Sent when a system reboot is required.', roles=['FAILOVER_READ'])
    middleware.event_subscribe('system.reboot.info', remote_reboot_info)
    await middleware.call('failover.remote_on_connect', remote_reboot_info)
    await middleware.call('failover.remote_subscribe', 'system.reboot.info', remote_reboot_info)
| 7,944 | Python | .py | 154 | 41.467532 | 120 | 0.650323 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,714 | vrrp.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/vrrp.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.service import Service
# VRRP advertised priorities: the higher value wins the MASTER election.
MASTER_PRIO = 254
BACKUP_PRIO = 200
class FailoverVrrpService(Service):

    class Config:
        private = True
        cli_private = True
        namespace = 'failover.vrrp'

    def get_priority(self):
        """Return the VRRP priority value that should be set
        based on whether or not this controller is the MASTER
        or BACKUP system"""
        if self.middleware.call_sync('failover.status') == 'MASTER':
            return MASTER_PRIO

        # Not yet MASTER, but a master event may be in flight: treat the node
        # as MASTER once that event has started fenced.
        master_event = self.middleware.call_sync('core.get_jobs', [
            ('method', '=', 'failover.events.vrrp_master'),
            ('state', '=', 'RUNNING'),
        ])
        fenced = self.middleware.call_sync('failover.fenced.run_info')
        if master_event and fenced['running']:
            # a master event is taking place and it started fenced
            return MASTER_PRIO

        return BACKUP_PRIO
| 1,089 | Python | .py | 27 | 33 | 70 | 0.65782 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,715 | virtual_ips.py | truenas_middleware/src/middlewared/middlewared/plugins/failover_/virtual_ips.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.service import Service
class DetectVirtualIpStates(Service):
    """Inspect VRRP state of the failover (virtual IP) interfaces on this
    node and compare them against the peer's view."""

    class Config:
        private = True
        namespace = 'failover.vip'

    async def check_failover_group(self, ifname, groups):
        """
        Check the other members (if any) in failover group for `ifname`.

        `groups` maps group id -> list of interface names. Returns a tuple
        of (masters, backups, offline) interface id lists for the OTHER
        members of `ifname`'s group.
        """
        failover_grp_ifaces = list()
        for grp, names in groups.items():
            if ifname in names:
                # get the list of interfaces that are in the same
                # failover group as `ifname`.
                failover_grp_ifaces = names[:]
                # we remove `ifname` since we only care about the other
                # interfaces in this failover group
                failover_grp_ifaces.remove(ifname)
                # An interface can only ever be in a single failover
                # group so we can break the loop early here
                break

        masters, backups, offline = list(), list(), list()
        filters = [['id', 'in', failover_grp_ifaces]]
        for i in await self.middleware.call('interface.query', filters):
            if i['state'].get('link_state') != 'LINK_STATE_UP':
                # It's not common, but some users will configure interfaces
                # for failover but they won't be online. In this scenario
                # the interfaces will appear as "backup", but that's
                # misleading since they technically are backup they're not
                # actually participating in any VRRP negotiations. In this
                # instance, we'll mark them as offline
                offline.append(i['id'])
                continue

            # We're checking any other interface that is in the same
            # failover group as `ifname`. For example, customers often
            # configure multiple physical interfaces for iSCSI MPIO.
            # Since they are using MPIO, each interface serves as a
            # discreet path to their data. However, if 1 of the 4
            # interfaces go down then we don't need to failover since
            # 3 other paths are up (That's the point of MPIO). In this
            # scenario, the customer will have to put all 4 of the
            # physical interfaces in the _same_ failover group.
            for vrrp_info in (i['state'].get('vrrp_config') or []):
                # `vrrp_config` can be NoneType when a bond interface
                # has been configured that has no config on it. The
                # reason why a bond will have no config is when it's
                # used as a parent interface to host vlan interfaces.
                # In this scenario, vrrp_config is expected to be None.
                if vrrp_info['state'] == 'MASTER':
                    masters.append(i['id'])
                else:
                    backups.append(i['id'])

        return masters, backups, offline

    async def get_states(self, interfaces=None):
        """Return (masters, backups, inits) interface-name lists for all
        non-internal interfaces carrying VRRP config on this node.
        NOTE(review): `inits` is never populated here — it appears to exist
        only to keep the 3-tuple shape for callers."""
        masters, backups, inits = [], [], []
        if interfaces is None:
            interfaces = await self.middleware.call('interface.query')

        int_ifaces = await self.middleware.call('interface.internal_interfaces')
        for i in filter(lambda x: x['name'] not in int_ifaces and x['state']['vrrp_config'], interfaces):
            if i['state']['link_state'] == 'LINK_STATE_UP':
                vrrp_state = i['state']['vrrp_config'][0]['state']
                if vrrp_state == 'MASTER':
                    masters.append(i['name'])
                elif vrrp_state == 'BACKUP':
                    backups.append(i['name'])

        return masters, backups, inits

    async def check_states(self, local, remote):
        """Compare get_states() tuples from both nodes and return a list of
        human-readable inconsistencies (same interface MASTER or BACKUP on
        both nodes, or no failover interfaces at all)."""
        errors = []
        interfaces = set(local[0] + local[1] + remote[0] + remote[1])
        if not interfaces:
            errors.append('There are no failover interfaces')

        for name in interfaces:
            if name in local[1] and name in remote[1]:
                errors.append(f'Interface "{name}" is BACKUP on both nodes')

            if name in local[0] and name in remote[0]:
                errors.append(f'Interface "{name}" is MASTER on both nodes')

        return errors
| 4,335 | Python | .py | 81 | 40.802469 | 105 | 0.588388 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,716 | chflags.py | truenas_middleware/src/middlewared/middlewared/plugins/filesystem_/chflags.py | import fcntl
import os
import struct
from middlewared.service import CallError
# ioctl request numbers for reading/writing the inode flags word.
# These match FS_IOC_GETFLAGS / FS_IOC_SETFLAGS from <linux/fs.h> on the
# 64-bit ABI -- TODO confirm against kernel headers for the target arch.
F_IOC_GETFLAGS = 0x80086601
F_IOC_SETFLAGS = 0x40086602
# FS_IMMUTABLE_FL bit (0x10) within the inode flags word
IMMUTABLE_FL = 16
def is_immutable_set(path: str) -> bool:
    """Return True when the FS_IMMUTABLE_FL inode flag is set on `path`."""
    return bool(get_flags(path) & IMMUTABLE_FL)
def get_flags(path: str) -> int:
    """Return the inode attribute flags word for `path`.

    Opens the path read-only, reads the flags via ioctl, and always
    closes the descriptor, even if the ioctl fails.
    """
    descriptor = os.open(path, os.O_RDONLY)
    try:
        flags = get_flags_impl(path, descriptor)
    finally:
        os.close(descriptor)
    return flags
def get_flags_impl(path: str, fd: int) -> int:
    """Read the inode flags word from the already-open descriptor `fd` of `path`."""
    packed = fcntl.ioctl(fd, F_IOC_GETFLAGS, struct.pack('i', 0))
    unpacked = struct.unpack('i', packed)
    if not unpacked:
        raise CallError(f'Unable to retrieve attribute of {path!r} path')

    return unpacked[0]
def set_immutable(path: str, set_flag: bool) -> None:
    """Set (True) or clear (False) the immutable flag on `path`.

    The descriptor is closed unconditionally, even when the underlying
    ioctl raises.
    """
    descriptor = os.open(path, os.O_RDONLY)
    try:
        set_immutable_impl(descriptor, path, set_flag)
    finally:
        os.close(descriptor)
def set_immutable_impl(fd: int, path: str, set_flag: bool) -> None:
    """Toggle FS_IMMUTABLE_FL on the open descriptor `fd` and verify the result."""
    current = get_flags_impl(path, fd)
    if set_flag:
        desired = current | IMMUTABLE_FL
    else:
        desired = current & ~IMMUTABLE_FL

    fcntl.ioctl(fd, F_IOC_SETFLAGS, struct.pack('i', desired))
    # re-read to make sure the kernel actually applied the change
    if get_flags_impl(path, fd) != desired:
        raise CallError(f'Unable to {"set" if set_flag else "unset"} immutable flag at {path!r}')
| 1,257 | Python | .py | 33 | 33.424242 | 97 | 0.673823 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,717 | perm_check.py | truenas_middleware/src/middlewared/middlewared/plugins/filesystem_/perm_check.py | import errno
import os
import pathlib
from middlewared.schema import accepts, Bool, Dict, returns, Str
from middlewared.service import CallError, Service, private
from middlewared.utils.nss import pwd, grp
from middlewared.utils.user_context import run_with_user_context, set_user_context
# This should be a sufficiently high UID to never be used explicitly
# We need one for doing access checks based on groups
# (2**32 - 2 == 0xFFFFFFFE; 0xFFFFFFFF / -1 is the conventional "no id" sentinel)
SYNTHETIC_UID = 2 ** 32 -2
def check_access(path: str, check_perms: dict) -> bool:
    """Verify access to `path` against read/write/execute expectations.

    `check_perms` maps 'read'/'write'/'execute' to True (access must be
    granted), False (access must be denied), or None (skip that check).
    Returns True only when every non-None expectation matches the result
    of os.access() for the current process credentials.
    """
    checks = (
        (check_perms['read'], os.R_OK),
        (check_perms['write'], os.W_OK),
        (check_perms['execute'], os.X_OK),
    )
    for expected, mode in checks:
        if expected is None:
            continue
        if bool(os.access(path, mode)) != bool(expected):
            return False

    return True
def get_user_details(id_type: str, xid: int) -> dict:
    """Resolve `xid` into a passwd-style dict for user-context access checks.

    For id_type == 'USER' the real passwd entry plus supplementary groups is
    returned.  For 'GROUP' a synthetic user that belongs only to that group
    is fabricated so that group permissions can be evaluated.  Returns None
    when the account does not exist.
    """
    if id_type not in ['USER', 'GROUP']:
        raise CallError(f'{id_type}: invalid ID type. Must be "USER" or "GROUP"')

    if id_type == 'USER':
        try:
            entry = pwd.getpwuid(xid)
            details = {
                'pw_name': entry.pw_name,
                'pw_uid': entry.pw_uid,
                'pw_gid': entry.pw_gid,
                'pw_gecos': entry.pw_gecos,
                'pw_dir': entry.pw_dir,
                'pw_shell': entry.pw_shell,
            }
            # getgrouplist may also raise KeyError for an unknown account
            details['grouplist'] = os.getgrouplist(entry.pw_name, entry.pw_gid)
            details['id_name'] = details['pw_name']
            return details
        except KeyError:
            return None

    try:
        group = grp.getgrgid(xid)
    except KeyError:
        return None

    return {
        'pw_name': 'synthetic_user',
        'pw_uid': SYNTHETIC_UID,
        'pw_gid': group.gr_gid,
        'pw_gecos': 'synthetic user',
        'pw_dir': '/var/empty',
        'pw_shell': '/usr/bin/zsh',
        'grouplist': [group.gr_gid],
        'id_name': group.gr_name
    }
def check_acl_execute_impl(path: str, acl: list, uid: int, gid: int, path_must_exist: bool):
    """
    Validate that every principal granted access by `acl` can traverse
    (execute) all parent directories of `path`; raise CallError(EPERM)
    for the first principal/path-component that cannot.

    `uid` / `gid` are the prospective owner and group-owner of `path`,
    used to resolve the special owner@/USER_OBJ and group@/GROUP_OBJ
    principals.  `path_must_exist` makes a missing path component fatal
    (ENOENT) instead of silently skipped.

    WARNING: The only way this method should be called is within context of `run_with_user_context`
    """
    parts = pathlib.Path(path).parts
    for entry in acl:
        if entry['tag'] in ('everyone@', 'OTHER', 'MASK'):
            continue

        # DENY entries do not grant access; nothing to verify for them
        if entry.get('type', 'ALLOW') != 'ALLOW':
            continue

        id_info = {'id_type': None, 'xid': entry['id']}
        if entry['tag'] == 'GROUP':
            id_info['id_type'] = 'GROUP'
        elif entry['tag'] == 'USER':
            id_info['id_type'] = 'USER'
        elif entry['tag'] in ('owner@', 'USER_OBJ'):
            id_info['id_type'] = 'USER'
            id_info['xid'] = uid
        elif entry['tag'] in ('group@', 'GROUP_OBJ'):
            # BUG FIX: group@ refers to the group-owner of the file, so access
            # must be evaluated as a member of `gid`. This previously used
            # id_type 'USER', which looked up an unrelated passwd entry whose
            # uid happened to equal the gid (consistent with how group@ is
            # mapped to 'GROUP' elsewhere, e.g. acltemplate.resolve_names).
            id_info['id_type'] = 'GROUP'
            id_info['xid'] = gid

        if (user_details := get_user_details(**id_info)) is None:
            # Account does not exist on server. Skip validation
            continue

        id_type = id_info['id_type']
        for idx, part in enumerate(parts):
            # skip '/' and the first component (e.g. 'mnt')
            if idx < 2:
                continue

            path_to_check = f'/{"/".join(parts[1:idx])}'
            if not os.path.exists(path_to_check):
                if path_must_exist:
                    raise CallError(f'{path_to_check}: path component does not exist.', errno.ENOENT)

                continue

            # switch effective credentials to this principal before os.access()
            set_user_context(user_details)

            if not check_access(path_to_check, {'read': None, 'write': None, 'execute': True}):
                raise CallError(
                    f'Filesystem permissions on path {path_to_check} prevent access for '
                    f'{id_type.lower()} "{user_details["id_name"]}" to the path {path}. '
                    f'This may be fixed by granting the aforementioned {id_type.lower()} '
                    f'execute permissions on the path: {path_to_check}.', errno.EPERM
                )
class FilesystemService(Service):
    """Permission-check helpers: evaluate filesystem access as arbitrary users/groups."""

    @private
    def generate_user_details(self, id_type, xid):
        """
        Resolve `xid` ('USER' uid or 'GROUP' gid) into a passwd-style dict
        suitable for run_with_user_context(). For a group, a synthetic user
        that is a member of only that group is fabricated. Returns None when
        the account cannot be resolved.
        """
        if id_type not in ['USER', 'GROUP']:
            raise CallError(f'{id_type}: invalid ID type. Must be "USER" or "GROUP"')

        if id_type == 'USER':
            try:
                out = self.middleware.call_sync(
                    'user.get_user_obj',
                    {'uid': xid, 'get_groups': True}
                )
                out['id_name'] = out['pw_name']
                return out
            except KeyError:
                # uid unknown to TrueNAS configuration and directory services
                return None

        try:
            grp = self.middleware.call_sync('group.get_group_obj', {'gid': xid})
        except KeyError:
            return None

        # get a UID not currently in use
        tmp_uid = self.middleware.call_sync('user.get_next_uid')
        try:
            res = self.middleware.call_sync(
                'user.get_user_obj',
                {'uid': tmp_uid}
            )
            self.logger.warning(
                '%s: user exists on system but not in TrueNAS configuration. '
                'This may indicate that it was created manually from shell '
                'or there is an unexpected overlap between local and directory '
                'services user accounts', res['pw_name']
            )
            # daemon user probably should not have access to user data
            # so we'll use this for testing
            uid = 1
        except KeyError:
            # tmp_uid genuinely unused -- safe to impersonate
            uid = tmp_uid

        return {
            'pw_name': 'synthetic_user',
            'pw_uid': uid,
            'pw_gid': grp['gr_gid'],
            'pw_gecos': 'synthetic user',
            'pw_dir': '/var/empty',
            'pw_shell': '/usr/bin/zsh',
            'grouplist': [grp['gr_gid']],
            'id_name': grp['gr_name']
        }

    @private
    def check_as_user_impl(self, user_details, path, perms):
        # run check_access() in a forked context with credentials switched
        # to `user_details`
        return run_with_user_context(check_access, user_details, [path, perms])

    @accepts(
        Str('username', empty=False),
        Str('path', empty=False),
        Dict(
            'permissions',
            Bool('read', default=None, null=True),
            Bool('write', default=None, null=True),
            Bool('execute', default=None, null=True),
        )
    )
    @returns(Bool())
    def can_access_as_user(self, username, path, perms):
        """
        Check if `username` is able to access `path` with specific `permissions`. At least one of `read/write/execute`
        permission must be specified for checking with each of these defaulting to `null`. `null` for
        `read/write/execute` represents that the permission should not be checked.
        """
        path_obj = pathlib.Path(path)
        if not path_obj.is_absolute():
            raise CallError('A valid absolute path must be provided', errno.EINVAL)
        elif not path_obj.exists():
            raise CallError(f'{path!r} does not exist', errno.EINVAL)

        if all(v is None for v in perms.values()):
            raise CallError('At least one of read/write/execute flags must be set', errno.EINVAL)

        try:
            user_details = self.middleware.call_sync('user.get_user_obj', {'username': username, 'get_groups': True})
        except KeyError:
            raise CallError(f'{username!r} user does not exist', errno=errno.ENOENT)

        return self.check_as_user_impl(user_details, path, perms)

    @private
    def check_path_execute(self, path, id_type, xid, path_must_exist):
        """
        Verify that `xid` can traverse every parent directory of `path`;
        raises CallError(EPERM) on the first component it cannot execute.
        """
        user_details = self.generate_user_details(id_type, xid)
        if user_details is None:
            # User or group does not exist on server.
            # This can happen for a variety of reasons that are potentially
            # acceptable (or better than alternative of changing permissions).
            # Hence, skip validation.
            self.logger.trace('%s %d does not exist. Skipping validation',
                              id_type.lower(), xid)
            return

        parts = pathlib.Path(path).parts
        for idx, part in enumerate(parts):
            # skip '/' and the first component (e.g. 'mnt')
            if idx < 2:
                continue

            path_to_check = f'/{"/".join(parts[1:idx])}'
            if not os.path.exists(path_to_check):
                if path_must_exist:
                    raise CallError(f'{path_to_check}: path component does not exist.', errno.ENOENT)

                continue

            ok = self.check_as_user_impl(user_details, path_to_check, {'read': None, 'write': None, 'execute': True})
            if not ok:
                raise CallError(
                    f'Filesystem permissions on path {path_to_check} prevent access for '
                    f'{id_type.lower()} "{user_details["id_name"]}" to the path {path}. '
                    f'This may be fixed by granting the aforementioned {id_type.lower()} '
                    f'execute permissions on the path: {path_to_check}.', errno.EPERM
                )

    @private
    def check_acl_execute(self, path, acl, uid, gid, path_must_exist=False):
        # run the module-level implementation as root (groups 0 and 544)
        run_with_user_context(check_acl_execute_impl, {
            'pw_uid': 0, 'pw_gid': 0, 'pw_dir': '/var/empty', 'pw_name': 'root', 'grouplist': [0, 544]
        }, [path, acl, uid, gid, path_must_exist])
| 9,323 | Python | .py | 213 | 32.483568 | 118 | 0.550469 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,718 | acl_template.py | truenas_middleware/src/middlewared/middlewared/plugins/filesystem_/acl_template.py | from middlewared.service import CallError, CRUDService, ValidationErrors
from middlewared.service import accepts, private, returns
from middlewared.schema import Bool, Dict, Int, List, Str, Ref, Patch, OROperator
from middlewared.plugins.smb import SMBBuiltin
from middlewared.utils.directoryservices.constants import DSStatus, DSType
from .utils import ACLType
import middlewared.sqlalchemy as sa
import errno
import os
import copy
class ACLTempateModel(sa.Model):
    # DB table backing the filesystem.acltemplate service.
    # NOTE(review): class name misspells "Template"; left as-is since the
    # name may be referenced elsewhere -- confirm before renaming.
    __tablename__ = 'filesystem_acltemplate'

    id = sa.Column(sa.Integer(), primary_key=True)
    acltemplate_name = sa.Column(sa.String(120), unique=True)
    acltemplate_comment = sa.Column(sa.Text())
    # 'NFS4' or 'POSIX1E'
    acltemplate_acltype = sa.Column(sa.String(255))
    # list of ACL entry dicts
    acltemplate_acl = sa.Column(sa.JSON(list))
    # builtin templates may not be modified or deleted
    acltemplate_builtin = sa.Column(sa.Boolean())
class ACLTemplateService(CRUDService):
    """CRUD service for named filesystem ACL templates (NFS4 and POSIX1E)."""

    class Config:
        datastore = 'filesystem.acltemplate'
        datastore_prefix = 'acltemplate_'
        namespace = 'filesystem.acltemplate'
        cli_private = True

    ENTRY = Patch(
        'acltemplate_create', 'acltemplate_entry',
        ('add', Int('id')),
        ('add', Bool('builtin')),
    )

    @private
    async def validate_acl(self, data, schema, verrors):
        """Validate `data['acl']` against its acltype, accumulating into `verrors`."""
        acltype = ACLType[data['acltype']]
        aclcheck = acltype.validate({'dacl': data['acl']})
        if not aclcheck['is_valid']:
            for err in aclcheck['errors']:
                if err[2]:
                    v = f'{schema}.{err[0]}.{err[2]}'
                else:
                    v = f'{schema}.{err[0]}'

                verrors.add(v, err[1])

        if acltype is ACLType.POSIX1E:
            # deep copy: gen_aclstring_posix1e mutates its input
            await self.middleware.call(
                "filesystem.gen_aclstring_posix1e",
                copy.deepcopy(data["acl"]), False, verrors
            )

        for idx, ace in enumerate(data['acl']):
            if ace.get('id') is None:
                verrors.add(f'{schema}.{idx}.id', 'null id is not permitted.')

    @accepts(Dict(
        "acltemplate_create",
        Str("name", required=True),
        Str("acltype", required=True, enum=["NFS4", "POSIX1E"]),
        Str("comment"),
        OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl', required=True),
        register=True
    ), roles=['FILESYSTEM_ATTRS_WRITE'])
    async def do_create(self, data):
        """
        Create a new filesystem ACL template.
        """
        verrors = ValidationErrors()
        if len(data['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_create.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(data, "filesystem_acltemplate_create.acl", verrors)
        verrors.check()
        data['builtin'] = False

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        return await self.get_instance(data['id'])

    @accepts(
        Int('id'),
        Patch(
            'acltemplate_create',
            'acltemplate_update',
            ('attr', {'update': True})
        ),
        roles=['FILESYSTEM_ATTRS_WRITE']
    )
    async def do_update(self, id_, data):
        """
        update filesystem ACL template with `id`.
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        if old['builtin']:
            verrors.add("filesystem_acltemplate_update.builtin",
                        "built-in ACL templates may not be changed")

        if new['name'] != old['name']:
            name_exists = bool(await self.query([('name', '=', new['name'])]))
            if name_exists:
                verrors.add("filesystem_acltemplate_update.name",
                            f"{data['name']}: name is not unique")

        if len(new['acl']) == 0:
            verrors.add(
                "filesystem_acltemplate_update.acl",
                "At least one ACL entry must be specified."
            )
        await self.validate_acl(new, "filesystem_acltemplate_update.acl", verrors)
        verrors.check()

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id_,
            new,
            {'prefix': self._config.datastore_prefix}
        )
        return await self.get_instance(id_)

    @accepts(Int('id'))
    async def do_delete(self, id_):
        """Delete ACL template `id`; builtin templates are protected."""
        entry = await self.get_instance(id_)
        if entry['builtin']:
            raise CallError("Deletion of builtin templates is not permitted",
                            errno.EPERM)

        return await self.middleware.call(
            'datastore.delete', self._config.datastore, id_
        )

    @private
    async def append_builtins_internal(self, ids, data):
        """
        This method ensures that ACL grants some minimum level of permissions
        to our builtin users or builtin admins accounts.
        """
        bu_id, ba_id = ids
        has_bu = bool([x['id'] for x in data['acl'] if x['id'] == bu_id])
        has_ba = bool([x['id'] for x in data['acl'] if x['id'] == ba_id])

        if (bu_id != -1 and has_bu) or (ba_id != -1 and has_ba):
            return

        if data['acltype'] == ACLType.NFS4.name:
            if bu_id != -1:
                data['acl'].append(
                    {"tag": "GROUP", "id": bu_id, "perms": {"BASIC": "MODIFY"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
                )

            if ba_id != -1:
                data['acl'].append(
                    {"tag": "GROUP", "id": ba_id, "perms": {"BASIC": "FULL_CONTROL"}, "flags": {"BASIC": "INHERIT"}, "type": "ALLOW"},
                )
            return

        # POSIX1E: extended entries require both an "access" MASK
        # (default=False) and a "default" MASK (default=True).
        has_default_mask = any(filter(lambda x: x["tag"] == "MASK" and x["default"], data['acl']))
        # BUG FIX: this previously duplicated the default-mask condition
        # (filtered on x["default"] instead of its negation), so an existing
        # access mask was never detected.
        has_access_mask = any(filter(lambda x: x["tag"] == "MASK" and not x["default"], data['acl']))
        all_perms = {"READ": True, "WRITE": True, "EXECUTE": True}
        if bu_id != -1:
            data['acl'].extend([
                {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": False},
                {"tag": "GROUP", "id": bu_id, "perms": all_perms, "default": True},
            ])

        if ba_id != -1:
            data['acl'].extend([
                {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": False},
                {"tag": "GROUP", "id": ba_id, "perms": all_perms, "default": True},
            ])

        # BUG FIX: the `default` values of the two appended MASK entries were
        # previously swapped (a missing default mask appended default=False
        # and vice versa).
        if not has_default_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": True})

        if not has_access_mask:
            data['acl'].append({"tag": "MASK", "id": -1, "perms": all_perms, "default": False})

        return

    @private
    async def append_builtins(self, data):
        """Add local builtin users/admins entries and, when AD is healthy,
        the equivalent Domain Users / Domain Admins entries."""
        bu_id = int(SMBBuiltin.USERS.value[1][9:])
        ba_id = int(SMBBuiltin.ADMINISTRATORS.value[1][9:])
        await self.append_builtins_internal((bu_id, ba_id), data)
        ds = await self.middleware.call('directoryservices.status')
        if ds['type'] != DSType.AD.value or ds['status'] != DSStatus.HEALTHY.name:
            return

        domain_info = await self.middleware.call('idmap.domain_info', 'DS_TYPE_ACTIVEDIRECTORY')
        if 'ACTIVE_DIRECTORY' not in domain_info['domain_flags']['parsed']:
            self.logger.warning(
                '%s: domain is not identified properly as an Active Directory domain.',
                domain_info['dns_name']
            )
            return

        # If user has explicitly chosen to not include local builtin_users, don't add domain variant
        domain_users_sid = domain_info['sid'] + '-513'
        domain_admins_sid = domain_info['sid'] + '-512'
        idmaps = await self.middleware.call('idmap.convert_sids', [
            domain_users_sid, domain_admins_sid
        ])

        has_bu = bool([x['id'] for x in data['acl'] if x['id'] == bu_id])
        if has_bu:
            du = idmaps['mapped'].get(domain_users_sid)
        else:
            du = {'id': -1}

        da = idmaps['mapped'].get(domain_admins_sid)

        if du is None:
            self.logger.warning(
                "Failed to resolve the Domain Users group to a Unix ID. This most likely "
                "indicates a misconfiguration of idmap for the active directory domain. If "
                "The idmap backend is AD, further configuration may be required to manually "
                "assign a GID to the domain users group."
            )
            du = {'id': -1}

        if da is None:
            # BUG FIX: this message previously referred to "Domain Users"
            self.logger.warning(
                "Failed to resolve the Domain Admins group to a Unix ID. This most likely "
                "indicates a misconfiguration of idmap for the active directory domain. If "
                "The idmap backend is AD, further configuration may be required to manually "
                "assign a GID to the domain admins group."
            )
            da = {'id': -1}

        await self.append_builtins_internal((du['id'], da['id']), data)

    @private
    async def resolve_names(self, uid, gid, data):
        """Populate a 'who' name on each ACL entry from its id/tag."""
        for ace in data['acl']:
            if ace['id'] not in (-1, None):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', ace['id'], ace['tag']
                )
            elif ace['tag'] in ('group@', 'GROUP_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', gid, 'GROUP'
                )
            elif ace['tag'] in ('owner@', 'USER_OBJ'):
                ace['who'] = await self.middleware.call(
                    'idmap.id_to_name', uid, 'USER'
                )
            else:
                ace['who'] = None

        return

    @accepts(Dict(
        "acltemplate_by_path",
        Str("path", default=""),
        Ref('query-filters'),
        Ref('query-options'),
        Dict(
            "format-options",
            Bool("canonicalize", default=False),
            Bool("ensure_builtins", default=False),
            Bool("resolve_names", default=False),
        ),
    ), roles=['FILESYSTEM_ATTRS_READ'])
    @returns(List(
        'templates',
        items=[Ref('acltemplate_entry')]
    ))
    async def by_path(self, data):
        """
        Retrieve list of available ACL templates for a given `path`.

        Supports `query-filters` and `query-options`.
        `format-options` gives additional options to alter the results of
        the template query:

        `canonicalize` - place ACL entries for NFSv4 ACLs in Microsoft canonical order.

        `ensure_builtins` - ensure all results contain entries for `builtin_users` and `builtin_administrators`
        groups.

        `resolve_names` - convert ids in ACL entries into names.
        """
        verrors = ValidationErrors()
        filters = data.get('query-filters')
        if data['path']:
            acltype = await self.middleware.call(
                'filesystem.path_get_acltype', data['path']
            )
            if acltype == ACLType.DISABLED.name:
                return []

            if acltype == ACLType.POSIX1E.name and data['format-options']['canonicalize']:
                verrors.add(
                    "filesystem.acltemplate_by_path.format-options.canonicalize",
                    "POSIX1E ACLs may not be sorted into Windows canonical order."
                )
            filters.append(("acltype", "=", acltype))

        if not data['path'] and data['format-options']['resolve_names']:
            verrors.add(
                "filesystem.acltemplate_by_path.format-options.resolve_names",
                "ACL entry ids may not be resolved into names unless path is provided."
            )

        verrors.check()

        templates = await self.query(filters, data['query-options'])
        for t in templates:
            if data['format-options']['ensure_builtins']:
                await self.append_builtins(t)

            if data['format-options']['resolve_names']:
                st = await self.middleware.run_in_thread(os.stat, data['path'])
                await self.resolve_names(st.st_uid, st.st_gid, t)

            if data['format-options']['canonicalize'] and t['acltype'] == ACLType.NFS4.name:
                canonicalized = ACLType[t['acltype']].canonicalize(t['acl'])
                t['acl'] = canonicalized

        return templates
| 12,524 | Python | .py | 284 | 32.873239 | 134 | 0.555938 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,719 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/filesystem_/utils.py | import enum
class ACLType(enum.Enum):
NFS4 = (['tag', 'id', 'perms', 'flags', 'type'], ["owner@", "group@", "everyone@"])
POSIX1E = (['default', 'tag', 'id', 'perms'], ["USER_OBJ", "GROUP_OBJ", "OTHER", "MASK"])
DISABLED = ([], [])
def _validate_id(self, id_, special):
if id_ is None or id_ < 0:
return True if special else False
return False if special else True
def _validate_entry(self, idx, entry, errors):
is_special = entry['tag'] in self.value[1]
if is_special and entry.get('type') == 'DENY':
errors.append((
idx,
f'{entry["tag"]}: DENY entries for this principal are not permitted.',
'tag'
))
if not self._validate_id(entry['id'], is_special):
errors.append(
(idx, 'ACL entry has invalid id for tag type.', 'id')
)
def validate(self, theacl):
errors = []
ace_keys = self.value[0]
if self != ACLType.NFS4 and theacl.get('nfs41flags'):
errors.append(f"NFS41 ACL flags are not valid for ACLType [{self.name}]")
for idx, entry in enumerate(theacl['dacl']):
extra = set(entry.keys()) - set(ace_keys)
missing = set(ace_keys) - set(entry.keys())
if extra:
errors.append(
(idx, f"ACL entry contains invalid extra key(s): {extra}", None)
)
if missing:
errors.append(
(idx, f"ACL entry is missing required keys(s): {missing}", None)
)
if extra or missing:
continue
self._validate_entry(idx, entry, errors)
return {"is_valid": len(errors) == 0, "errors": errors}
def _is_inherited(self, ace):
if ace['flags'].get("BASIC"):
return False
return ace['flags'].get('INHERITED', False)
def canonicalize(self, theacl):
"""
Order NFS4 ACEs according to MS guidelines:
1) Deny ACEs that apply to the object itself (NOINHERIT)
2) Allow ACEs that apply to the object itself (NOINHERIT)
3) Deny ACEs that apply to a subobject of the object (INHERIT)
4) Allow ACEs that apply to a subobject of the object (INHERIT)
See http://docs.microsoft.com/en-us/windows/desktop/secauthz/order-of-aces-in-a-dacl
Logic is simplified here because we do not determine depth from which ACLs are inherited.
"""
if self == ACLType.POSIX1E:
return
out = []
acl_groups = {
"deny_noinherit": [],
"deny_inherit": [],
"allow_noinherit": [],
"allow_inherit": [],
}
for ace in theacl:
key = f'{ace.get("type", "ALLOW").lower()}_{"inherit" if self._is_inherited(ace) else "noinherit"}'
acl_groups[key].append(ace)
for g in acl_groups.values():
out.extend(g)
return out
def xattr_names():
return set([
"system.posix_acl_access",
"system.posix_acl_default",
"system.nfs4_acl_xdr"
])
def __calculate_inherited_posix1e(self, theacl, isdir):
inherited = []
for entry in theacl['acl']:
if entry['default'] is False:
continue
# add access entry
inherited.append(entry.copy() | {'default': False})
if isdir:
# add default entry
inherited.append(entry)
return inherited
def __calculate_inherited_nfs4(self, theacl, isdir):
inherited = []
for entry in theacl['acl']:
if not (flags := entry.get('flags', {}).copy()):
continue
if (basic := flags.get('BASIC')) == 'NOINHERIT':
continue
elif basic == 'INHERIT':
flags['INHERITED'] = True
inherited.append(entry)
continue
elif not flags.get('FILE_INHERIT', False) and not flags.get('DIRECTORY_INHERIT', False):
# Entry has no inherit flags
continue
elif not isdir and not flags.get('FILE_INHERIT'):
# File and this entry doesn't inherit on files
continue
if isdir:
if not flags.get('DIRECTORY_INHERIT', False):
if flags['NO_PROPAGATE_INHERIT']:
# doesn't apply to this dir and shouldn't apply to contents.
continue
# This is a directory ACL and we have entry that only applies to files.
flags['INHERIT_ONLY'] = True
elif flags.get('INHERIT_ONLY', False):
flags['INHERIT_ONLY'] = False
elif flags.get('NO_PROPAGATE_INHERIT'):
flags['DIRECTORY_INHERIT'] = False
flags['FILE_INHERIT'] = False
flags['NO_PROPAGATE_INHERIT'] = False
else:
flags['DIRECTORY_INHERIT'] = False
flags['FILE_INHERIT'] = False
flags['NO_PROPAGATE_INHERIT'] = False
flags['INHERIT_ONLY'] = False
inherited.append({
'tag': entry['tag'],
'id': entry['id'],
'type': entry['type'],
'perms': entry['perms'],
'flags': flags | {'INHERITED': True}
})
return inherited
def calculate_inherited(self, theacl, isdir=True):
if self.name != theacl['acltype']:
raise ValueError('ACLType does not match')
if self == ACLType.POSIX1E:
return self.__calculate_inherited_posix1e(theacl, isdir)
elif self == ACLType.NFS4:
return self.__calculate_inherited_nfs4(theacl, isdir)
raise ValueError('ACLType does not support inheritance')
| 6,018 | Python | .py | 138 | 30.724638 | 111 | 0.529945 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,720 | acl.py | truenas_middleware/src/middlewared/middlewared/plugins/filesystem_/acl.py | import errno
import json
import os
import subprocess
import stat as pystat
from pathlib import Path
from middlewared.schema import Bool, Dict, Int, List, Str, Ref, UnixPerm, OROperator
from middlewared.service import accepts, private, returns, job, CallError, ValidationErrors, Service
from middlewared.utils.filesystem.directory import directory_is_empty
from middlewared.utils.path import FSLocation, path_location
from middlewared.validators import Range
from .utils import ACLType
class FilesystemService(Service):
    """ACL and permission manipulation endpoints (continued past this chunk)."""

    class Config:
        # not exposed through the CLI
        cli_private = True
def __acltool(self, path, action, uid, gid, options):
flags = "-r"
flags += "x" if options.get('traverse') else ""
flags += "C" if options.get('do_chmod') else ""
flags += "P" if options.get('posixacl') else ""
acltool = subprocess.run([
'/usr/bin/nfs4xdr_winacl',
'-a', action,
'-O', str(uid), '-G', str(gid),
flags,
'-c', path,
'-p', path], check=False, capture_output=True
)
if acltool.returncode != 0:
raise CallError(f"acltool [{action}] on path {path} failed with error: [{acltool.stderr.decode().strip()}]")
def _common_perm_path_validate(self, schema, data, verrors, pool_mp_ok=False):
loc = path_location(data['path'])
if loc is FSLocation.EXTERNAL:
verrors.add(f'{schema}.path', 'ACL operations on remote server paths are not possible')
return loc
path = data['path']
try:
st = self.middleware.call_sync('filesystem.stat', path)
except CallError as e:
if e.errno == errno.EINVAL:
verrors.add('f{schema}.path', 'Must be an absolute path')
return loc
raise e
if st['type'] == 'FILE' and data['options']['recursive']:
verrors.add(f'{schema}.path', 'Recursive operations on a file are invalid.')
return loc
if st['is_ctldir']:
verrors.add(f'{schema}.path',
'Permissions changes in ZFS control directory (.zfs) are not permitted')
return loc
if any(st['realpath'].startswith(prefix)
for prefix in ('/home/admin/.ssh', '/home/truenas_admin/.ssh', '/root/.ssh')):
return loc
if not st['realpath'].startswith('/mnt/'):
verrors.add(
f'{schema}.path',
"Changes to permissions on paths that are not beneath "
f"the directory /mnt are not permitted: {path}"
)
elif len(Path(st['realpath']).resolve().parents) == 2:
if not pool_mp_ok:
verrors.add(
f'{schema}.path',
f'The specified path is a ZFS pool mountpoint "({path})" '
)
elif self.middleware.call_sync('pool.dataset.path_in_locked_datasets', st['realpath']):
verrors.add(
f'{schema}.path',
'Path component for is currently encrypted and locked'
)
return loc
@private
def path_get_acltype(self, path):
"""
Failure with ENODATA in case acltype is supported, but
acl absent. EOPNOTSUPP means that acltype is not supported.
raises NotImplementedError for EXTERNAL paths
"""
if path_location(path) is FSLocation.EXTERNAL:
raise NotImplementedError
try:
os.getxattr(path, "system.posix_acl_access")
return ACLType.POSIX1E.name
except OSError as e:
if e.errno == errno.ENODATA:
return ACLType.POSIX1E.name
if e.errno != errno.EOPNOTSUPP:
raise
try:
os.getxattr(path, "system.nfs4_acl_xdr")
return ACLType.NFS4.name
except OSError as e:
if e.errno == errno.EOPNOTSUPP:
return ACLType.DISABLED.name
raise
    @accepts(
        Dict(
            'filesystem_ownership',
            Str('path', required=True),
            Int('uid', null=True, default=None, validators=[Range(min_=-1, max_=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min_=-1, max_=2147483647)]),
            Dict(
                'options',
                Bool('recursive', default=False),
                Bool('traverse', default=False)
            )
        ),
        roles=['FILESYSTEM_ATTRS_WRITE'],
        audit='Filesystem change owner', audit_extended=lambda data: data['path']
    )
    @returns()
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')
        verrors = ValidationErrors()

        # -1 is the chown(2) convention for "leave this id unchanged"
        uid = -1 if data['uid'] is None else data.get('uid', -1)
        gid = -1 if data['gid'] is None else data.get('gid', -1)
        options = data['options']

        if uid == -1 and gid == -1:
            verrors.add("filesystem.chown.uid",
                        "Please specify either user or group to change.")

        self._common_perm_path_validate("filesystem.chown", data, verrors)
        verrors.check()

        if not options['recursive']:
            job.set_progress(100, 'Finished changing owner.')
            os.chown(data['path'], uid, gid)
            return

        job.set_progress(10, f'Recursively changing owner of {data["path"]}.')
        # adds the 'P' flag on the nfs4xdr_winacl invocation (see __acltool)
        options['posixacl'] = True
        self.__acltool(data['path'], 'chown', uid, gid, options)
        job.set_progress(100, 'Finished changing owner.')
@private
def _strip_acl_nfs4(self, path):
stripacl = subprocess.run(
['nfs4xdr_setfacl', '-b', path],
capture_output=True,
check=False
)
if stripacl.returncode != 0:
raise CallError(f"{path}: Failed to strip ACL on path: {stripacl.stderr.decode()}")
return
@private
def _strip_acl_posix1e(self, path):
posix_xattrs = ['system.posix_acl_access', 'system.posix_acl_default']
for xat in os.listxattr(path):
if xat not in posix_xattrs:
continue
os.removexattr(path, xat)
    @accepts(
        Dict(
            'filesystem_permission',
            Str('path', required=True),
            UnixPerm('mode', null=True),
            Int('uid', null=True, default=None, validators=[Range(min_=-1, max_=2147483647)]),
            Int('gid', null=True, default=None, validators=[Range(min_=-1, max_=2147483647)]),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        ),
        roles=['FILESYSTEM_ATTRS_WRITE'],
        audit='Filesystem set permission', audit_extended=lambda data: data['path']
    )
    @returns()
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Set unix permissions on given `path`.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.
        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)
        verrors = ValidationErrors()

        # -1 is the chown(2) convention for "leave this id unchanged"
        uid = -1 if data['uid'] is None else data.get('uid', -1)
        gid = -1 if data['gid'] is None else data.get('gid', -1)

        self._common_perm_path_validate("filesystem.setperm", data, verrors)

        current_acl = self.middleware.call_sync('filesystem.getacl', data['path'])
        acl_is_trivial = current_acl['trivial']
        if not acl_is_trivial and not options['stripacl']:
            verrors.add(
                'filesystem.setperm.mode',
                f'Non-trivial ACL present on [{data["path"]}]. '
                'Option "stripacl" required to change permission.',
            )

        if mode is not None and int(mode, 8) == 0:
            verrors.add(
                'filesystem.setperm.mode',
                'Empty permissions are not permitted.'
            )

        verrors.check()
        is_nfs4acl = current_acl['acltype'] == 'NFS4'

        # mode arrives as an octal string (UnixPerm); convert to int
        if mode is not None:
            mode = int(mode, 8)

        # always strip any existing extended ACL before applying mode/ownership
        if is_nfs4acl:
            self._strip_acl_nfs4(data['path'])
        else:
            self._strip_acl_posix1e(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        # with a mode, clone it onto children; without, just strip their ACLs
        action = 'clone' if mode else 'strip'
        job.set_progress(10, f'Recursively setting permissions on {data["path"]}.')
        options['posixacl'] = not is_nfs4acl
        options['do_chmod'] = True
        self.__acltool(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')
@private
def getacl_nfs4(self, path, simplified, resolve_ids):
    """
    Retrieve the NFSv4 ACL for `path` via nfs4xdr_getfacl and return it
    as a dict with `acl`, `uid`, `gid`, `nfs41_flags`, and `acltype` keys.

    `simplified` requests the short (BASIC) permission form from the tool.
    `resolve_ids` adds a `who` key to each ACE with the resolved name.
    """
    # "-j" = JSON output, "-n" = numeric ids; "-v" adds verbose permsets.
    tool_flags = "-jn" if simplified else "-jnv"
    proc = subprocess.run(
        ['nfs4xdr_getfacl', tool_flags, path],
        capture_output=True,
        check=False
    )
    if proc.returncode != 0:
        raise CallError(f"Failed to get ACL for path [{path}]: {proc.stderr.decode()}")

    acl_data = json.loads(proc.stdout.decode())
    for ace in acl_data['acl']:
        if resolve_ids:
            if ace['id'] != -1:
                # Explicit numeric id present on the ACE.
                ace['who'] = self.middleware.call_sync(
                    'idmap.id_to_name', ace['id'], ace['tag']
                )
            elif ace['tag'] == 'group@':
                ace['who'] = self.middleware.call_sync(
                    'idmap.id_to_name', acl_data['gid'], 'GROUP'
                )
            elif ace['tag'] == 'owner@':
                ace['who'] = self.middleware.call_sync(
                    'idmap.id_to_name', acl_data['uid'], 'USER'
                )
            else:
                # e.g. everyone@ -- nothing sensible to resolve.
                ace['who'] = None
        # Audit-related flags are not exposed through this API.
        ace['flags'].pop('SUCCESSFUL_ACCESS', None)
        ace['flags'].pop('FAILED_ACCESS', None)

    raw_nfs41 = acl_data.pop('nfs41_flags')
    acl_data['nfs41_flags'] = {
        "protected": raw_nfs41['PROTECTED'],
        "defaulted": raw_nfs41['DEFAULTED'],
        "autoinherit": raw_nfs41['AUTOINHERIT']
    }
    acl_data['acltype'] = 'NFS4'
    return acl_data
@private
def getacl_posix1e(self, path, simplified, resolve_ids):
    """
    Retrieve the POSIX1e ACL for `path` by parsing getfacl(1) output.

    Returns a dict with `uid`, `gid`, `acl` (list of ACEs), `flags`
    (setuid/setgid/sticky bits), `acltype`, `trivial`, and `path` keys.
    `simplified` is accepted for interface parity with getacl_nfs4() but
    has no effect on POSIX1e output. `resolve_ids` adds a `who` key to
    each ACE containing the resolved user / group name (or None).
    """
    st = os.stat(path)
    ret = {
        'uid': st.st_uid,
        'gid': st.st_gid,
        'acl': [],
        'flags': {
            'setuid': bool(st.st_mode & pystat.S_ISUID),
            'setgid': bool(st.st_mode & pystat.S_ISGID),
            'sticky': bool(st.st_mode & pystat.S_ISVTX),
        },
        'acltype': ACLType.POSIX1E.name
    }
    # NOTE(review): uid/gid are already set in the literal above; these
    # two assignments are redundant but harmless.
    ret['uid'] = st.st_uid
    ret['gid'] = st.st_gid
    # -c omits the comment header, -n keeps numeric ids.
    gfacl = subprocess.run(['getfacl', '-c', '-n', path],
                           check=False, capture_output=True)
    if gfacl.returncode != 0:
        raise CallError(f"Failed to get POSIX1e ACL on path [{path}]: {gfacl.stderr.decode()}")
    # linux output adds extra line to output if it's an absolute path and extra newline at end.
    entries = gfacl.stdout.decode().splitlines()
    entries = entries[:-1]
    for entry in entries:
        if entry.startswith("#"):
            continue
        # Drop the "effective" permissions annotation after the tab, if any.
        entry = entry.split("\t")[0]
        ace = {
            "default": False,
            "tag": None,
            "id": -1,
            "perms": {
                "READ": False,
                "WRITE": False,
                "EXECUTE": False,
            }
        }
        # Entry format: [default:]tag:qualifier:perms -- rsplit keeps a
        # possible "default:" prefix attached to the tag.
        tag, id_, perms = entry.rsplit(":", 2)
        ace['perms'].update({
            "READ": perms[0].casefold() == "r",
            "WRITE": perms[1].casefold() == "w",
            "EXECUTE": perms[2].casefold() == "x",
        })
        if tag.startswith('default'):
            ace['default'] = True
            tag = tag[8:]  # strip "default:" prefix
        ace['tag'] = tag.upper()
        if id_.isdigit():
            # Named user/group entry with an explicit numeric qualifier.
            ace['id'] = int(id_)
            if resolve_ids:
                ace['who'] = self.middleware.call_sync(
                    'idmap.id_to_name', ace['id'], ace['tag']
                )
        elif ace['tag'] not in ['OTHER', 'MASK']:
            # Unqualified USER / GROUP entry refers to the file owner /
            # owning group; convert to the canonical *_OBJ tag.
            if resolve_ids:
                to_check = st.st_gid if ace['tag'] == "GROUP" else st.st_uid
                ace['who'] = self.middleware.call_sync(
                    'idmap.id_to_name', to_check, ace['tag']
                )
            ace['tag'] += '_OBJ'
        elif resolve_ids:
            # OTHER / MASK entries have no principal to resolve.
            ace['who'] = None
        ret['acl'].append(ace)
    # A trivial POSIX1e ACL has exactly the three canonical entries
    # (USER_OBJ, GROUP_OBJ, OTHER) and is expressible as a plain mode.
    ret['trivial'] = (len(ret['acl']) == 3)
    ret['path'] = path
    return ret
@private
def getacl_disabled(self, path):
    """Return a minimal, trivial ACL response for paths where ACL support is disabled."""
    info = os.stat(path)
    out = {'acl': [], 'acltype': ACLType.DISABLED.name, 'trivial': True}
    out['uid'] = info.st_uid
    out['gid'] = info.st_gid
    return out
@accepts(
Str('path'),
Bool('simplified', default=True),
Bool('resolve_ids', default=False),
roles=['FILESYSTEM_ATTRS_READ']
)
@returns(Dict(
'truenas_acl',
Str('path'),
Bool('trivial'),
Str('acltype', enum=[x.name for x in ACLType], null=True),
OROperator(
Ref('nfs4_acl'),
Ref('posix1e_acl'),
name='acl'
)
))
def getacl(self, path: str, simplified: bool, resolve_ids: bool) -> dict:
    """
    Return ACL of a given path. This may return a POSIX1e ACL or a NFSv4 ACL. The acl type is indicated
    by the `acltype` key.

    `simplified` - effect of this depends on ACL type on underlying filesystem. In the case of
    NFSv4 ACLs simplified permissions and flags are returned for ACL entries where applicable.
    NFSv4 errata below. In the case of POSIX1E ACLs, this setting has no impact on returned ACL.

    `resolve_ids` - adds additional `who` key to each ACL entry, that converts the numeric id to
    a user name or group name. In the case of owner@ and group@ (NFSv4) or USER_OBJ and GROUP_OBJ
    (POSIX1E), st_uid or st_gid will be converted from stat() return for file. In the case of
    MASK (POSIX1E), OTHER (POSIX1E), everyone@ (NFSv4), key `who` will be included, but set to null.
    In case of failure to resolve the id to a name, `who` will be set to null. This option should
    only be used if resolving ids to names is required.

    Errata about ACLType NFSv4:

    `simplified` returns a shortened form of the ACL permset and flags where applicable. If permissions
    have been simplified, then the `perms` object will contain only a single `BASIC` key with a string
    describing the underlying permissions set.

    `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

    `READ` sufficient rights to traverse a directory, and read file contents.

    `MODIFY` sufficient rights to traverse, read, write, and modify a file.

    `FULL_CONTROL` all permissions.

    If the permissions do not fit within one of the pre-defined simplified permissions types, then
    the full ACL entry will be returned.
    """
    # ACLs can only be read on local paths.
    if path_location(path) is FSLocation.EXTERNAL:
        raise CallError(f'{path} is external to TrueNAS', errno.EXDEV)
    if not os.path.exists(path):
        raise CallError('Path not found.', errno.ENOENT)
    # Dispatch to the handler matching the on-disk ACL type.
    path_acltype = self.path_get_acltype(path)
    acltype = ACLType[path_acltype]
    if acltype == ACLType.NFS4:
        ret = self.getacl_nfs4(path, simplified, resolve_ids)
    elif acltype == ACLType.POSIX1E:
        ret = self.getacl_posix1e(path, simplified, resolve_ids)
    else:
        ret = self.getacl_disabled(path)
    return ret
@private
def setacl_nfs4_internal(self, path, acl, do_canon, verrors):
    """
    Apply an NFSv4 ACL to `path` via nfs4xdr_setfacl.

    When `do_canon` is set, the ACL is first rewritten into Windows
    canonical order. Tool-reported validation problems are converted into
    entries on `verrors` and raised via `verrors.check()`.
    """
    if do_canon:
        acl_to_set = ACLType.NFS4.canonicalize(acl)
    else:
        acl_to_set = acl

    proc = subprocess.run(
        ['nfs4xdr_setfacl', '-j', json.dumps({'acl': acl_to_set}), path],
        capture_output=True,
        check=False
    )
    # nfs4xdr_setfacl with JSON input signals validation errors by exiting
    # with EX_DATAERR (65) and printing JSON details after the first token.
    if proc.returncode == 65:
        stderr_text = proc.stderr.decode()
        for validation_entry in json.loads(stderr_text.split(None, 1)[1]):
            for schema, msg in validation_entry.items():
                verrors.add(f'filesystem_acl.{schema.replace("acl", "dacl")}', msg)
        verrors.check()
    elif proc.returncode != 0:
        raise CallError(proc.stderr.decode())
@private
def setacl_nfs4(self, job, data):
    """
    Set an NFSv4 ACL (or strip the existing one) on data['path'].

    Validates the supplied `dacl` against the NFSv4 schema, verifies the
    on-disk ACL type matches, optionally checks that the effective ACL
    still grants traverse rights on parent paths, then applies the ACL.
    With options['recursive'] the ACL is cloned down the tree via the
    internal acltool helper; otherwise only ownership of `path` itself
    is updated.
    """
    job.set_progress(0, 'Preparing to set acl.')
    verrors = ValidationErrors()
    options = data.get('options', {})
    recursive = options.get('recursive', False)
    do_strip = options.get('stripacl', False)
    do_canon = options.get('canonicalize', False)
    path = data.get('path', '')
    # None means "leave unchanged"; chown treats -1 as no-op.
    uid = -1 if data['uid'] is None else data.get('uid', -1)
    gid = -1 if data['gid'] is None else data.get('gid', -1)
    aclcheck = ACLType.NFS4.validate(data)
    if not aclcheck['is_valid']:
        # err tuples are (index, message, field); field may be empty.
        for err in aclcheck['errors']:
            if err[2]:
                v = f'filesystem_acl.dacl.{err[0]}.{err[2]}'
            else:
                v = f'filesystem_acl.dacl.{err[0]}'
            verrors.add(v, err[1])
    current_acl = self.getacl(path)
    if current_acl['acltype'] != ACLType.NFS4.name:
        verrors.add(
            'filesystem_acl.acltype',
            f'ACL type mismatch. On-disk format is [{current_acl["acltype"]}], '
            f'but received [{data.get("acltype")}].'
        )
    verrors.check()
    if do_strip:
        self._strip_acl_nfs4(path)
    else:
        if options['validate_effective_acl']:
            # Check execute permission on parent paths using the ownership
            # that will be in effect after this call.
            uid_to_check = current_acl['uid'] if uid == -1 else uid
            gid_to_check = current_acl['gid'] if gid == -1 else gid
            self.middleware.call_sync(
                'filesystem.check_acl_execute',
                path, data['dacl'], uid_to_check, gid_to_check, True
            )
        self.setacl_nfs4_internal(path, data['dacl'], do_canon, verrors)
    if not recursive:
        os.chown(path, uid, gid)
        job.set_progress(100, 'Finished setting NFSv4 ACL.')
        return
    # Recursive apply: clone the just-written ACL (or strip) down the tree.
    self.__acltool(path, 'clone' if not do_strip else 'strip',
                   uid, gid, options)
    job.set_progress(100, 'Finished setting NFSv4 ACL.')
@private
def gen_aclstring_posix1e(self, dacl, recursive, verrors):
    """
    Iterate through the provided POSIX1e ACL, perform additional
    validation, and return the ACL string formatted for the setfacl
    command.

    Validation failures are accumulated in `verrors`; callers are
    expected to call `verrors.check()` before using the returned string.
    """
    # Track which singleton entries have been seen so that duplicates and
    # missing required entries can be reported.
    has_tag = {
        "USER_OBJ": False,
        "GROUP_OBJ": False,
        "OTHER": False,
        "MASK": False,
        "DEF_USER_OBJ": False,
        "DEF_GROUP_OBJ": False,
        "DEF_OTHER": False,
        "DEF_MASK": False,
    }
    required_entries = ["USER_OBJ", "GROUP_OBJ", "OTHER"]
    has_named = False
    has_def_named = False
    has_default = False
    aclstring = ""

    for idx, ace in enumerate(dacl):
        if idx != 0:
            aclstring += ","

        # setfacl expects an empty qualifier field for -1.
        if ace['id'] == -1:
            ace['id'] = ''

        who = "DEF_" if ace['default'] else ""
        who += ace['tag']
        # None for USER/GROUP tags (which may legitimately repeat).
        duplicate_who = has_tag.get(who)

        if duplicate_who is True:
            # BUG FIX: this schema key was previously a plain string literal
            # ('filesystem_acl.dacl.{idx}') so the index was never interpolated.
            verrors.add(
                f'filesystem_acl.dacl.{idx}',
                f'More than one {"default " if ace["default"] else ""}'
                f'{ace["tag"]} entry is not permitted'
            )
        elif duplicate_who is False:
            has_tag[who] = True

        if ace['tag'] in ["USER", "GROUP"]:
            if ace['default']:
                has_def_named = True
            else:
                has_named = True

        # BUG FIX: was rstrip('_OBJ'), which strips a *character set* rather
        # than a suffix and only produced correct results by accident.
        ace['tag'] = ace['tag'].removesuffix('_OBJ').lower()

        if ace['default']:
            has_default = True
            aclstring += "default:"

        aclstring += f"{ace['tag']}:{ace['id']}:"
        aclstring += 'r' if ace['perms']['READ'] else '-'
        aclstring += 'w' if ace['perms']['WRITE'] else '-'
        aclstring += 'x' if ace['perms']['EXECUTE'] else '-'

    # Named (non-owner) entries require a corresponding mask entry.
    if has_named and not has_tag['MASK']:
        verrors.add(
            'filesystem_acl.dacl',
            'Named (user or group) POSIX ACL entries '
            'require a mask entry to be present in the ACL.'
        )
    elif has_def_named and not has_tag['DEF_MASK']:
        verrors.add(
            'filesystem_acl.dacl',
            'Named default (user or group) POSIX ACL entries '
            'require a default mask entry to be present in the ACL.'
        )

    # Recursive application relies on default entries for inheritance.
    if recursive and not has_default:
        verrors.add(
            'filesystem_acl.dacl',
            'Default ACL entries are required in order to apply '
            'ACL recursively.'
        )

    for entry in required_entries:
        if not has_tag[entry]:
            verrors.add(
                'filesystem_acl.dacl',
                f'Presence of [{entry}] entry is required.'
            )
        if has_default and not has_tag[f"DEF_{entry}"]:
            verrors.add(
                'filesystem_acl.dacl',
                f'Presence of default [{entry}] entry is required.'
            )
    return aclstring
@private
def setacl_posix1e(self, job, data):
    """
    Set a POSIX1e ACL (or strip the existing one) on data['path'].

    Validates the supplied `dacl` against the POSIX1E schema, verifies
    the on-disk ACL type matches, optionally checks effective traverse
    rights on parent paths, strips the current ACL, then applies the new
    one via setfacl(1). With options['recursive'] the result is cloned
    down the tree via the internal acltool helper.
    """
    job.set_progress(0, 'Preparing to set acl.')
    verrors = ValidationErrors()
    options = data['options']
    recursive = options.get('recursive', False)
    do_strip = options.get('stripacl', False)
    dacl = data.get('dacl', [])
    path = data['path']
    # None means "leave unchanged"; chown treats -1 as no-op.
    uid = -1 if data['uid'] is None else data.get('uid', -1)
    gid = -1 if data['gid'] is None else data.get('gid', -1)
    aclcheck = ACLType.POSIX1E.validate(data)
    if not aclcheck['is_valid']:
        # err tuples are (index, message, field); field may be empty.
        for err in aclcheck['errors']:
            if err[2]:
                v = f'filesystem_acl.dacl.{err[0]}.{err[2]}'
            else:
                v = f'filesystem_acl.dacl.{err[0]}'
            verrors.add(v, err[1])
    current_acl = self.getacl(path)
    if current_acl['acltype'] != ACLType.POSIX1E.name:
        verrors.add(
            'filesystem_acl.acltype',
            f'ACL type mismatch. On-disk format is [{current_acl["acltype"]}], '
            f'but received [{data.get("acltype")}].'
        )
    if do_strip and dacl:
        verrors.add(
            'filesystem_acl.dacl',
            'Simulatenously setting and removing ACL from path is invalid.'
        )
    if not do_strip:
        if options['validate_effective_acl']:
            try:
                # check execute on parent paths
                uid_to_check = current_acl['uid'] if uid == -1 else uid
                gid_to_check = current_acl['gid'] if gid == -1 else gid
                self.middleware.call_sync(
                    'filesystem.check_acl_execute',
                    path, dacl, uid_to_check, gid_to_check, True
                )
            except CallError as e:
                if e.errno != errno.EPERM:
                    raise
                # EPERM here is a user-facing validation failure, not an
                # internal error.
                verrors.add(
                    'filesystem_acl.path',
                    e.errmsg
                )
        # Convert the ACL to setfacl text form; additional validation
        # errors are accumulated in verrors.
        aclstring = self.gen_aclstring_posix1e(dacl, recursive, verrors)
    verrors.check()
    # Always start from a clean slate before applying the new ACL.
    self._strip_acl_posix1e(path)
    job.set_progress(50, 'Setting POSIX1e ACL.')
    if not do_strip:
        setacl = subprocess.run(['setfacl', '-m', aclstring, path],
                                check=False, capture_output=True)
        if setacl.returncode != 0:
            raise CallError(f'Failed to set ACL [{aclstring}] on path [{path}]: '
                            f'{setacl.stderr.decode()}')
    if not recursive:
        os.chown(path, uid, gid)
        job.set_progress(100, 'Finished setting POSIX1e ACL.')
        return
    # Recursive apply: clone the just-written ACL (or strip) down the tree.
    options['posixacl'] = True
    self.__acltool(data['path'],
                   'clone' if not do_strip else 'strip',
                   uid, gid, options)
    job.set_progress(100, 'Finished setting POSIX1e ACL.')
@accepts(
Dict(
'filesystem_acl',
Str('path', required=True),
Int('uid', null=True, default=None, validators=[Range(min_=-1, max_=2147483647)]),
Int('gid', null=True, default=None, validators=[Range(min_=-1, max_=2147483647)]),
OROperator(
List(
'nfs4_acl',
items=[Dict(
'nfs4_ace',
Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
Int('id', null=True, validators=[Range(min_=-1, max_=2147483647)]),
Str('type', enum=['ALLOW', 'DENY']),
Dict(
'perms',
Bool('READ_DATA'),
Bool('WRITE_DATA'),
Bool('APPEND_DATA'),
Bool('READ_NAMED_ATTRS'),
Bool('WRITE_NAMED_ATTRS'),
Bool('EXECUTE'),
Bool('DELETE_CHILD'),
Bool('READ_ATTRIBUTES'),
Bool('WRITE_ATTRIBUTES'),
Bool('DELETE'),
Bool('READ_ACL'),
Bool('WRITE_ACL'),
Bool('WRITE_OWNER'),
Bool('SYNCHRONIZE'),
Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),
),
Dict(
'flags',
Bool('FILE_INHERIT'),
Bool('DIRECTORY_INHERIT'),
Bool('NO_PROPAGATE_INHERIT'),
Bool('INHERIT_ONLY'),
Bool('INHERITED'),
Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
),
register=True
)],
register=True
),
List(
'posix1e_acl',
items=[Dict(
'posix1e_ace',
Bool('default', default=False),
Str('tag', enum=['USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP', 'OTHER', 'MASK']),
Int('id', default=-1, validators=[Range(min_=-1, max_=2147483647)]),
Dict(
'perms',
Bool('READ', default=False),
Bool('WRITE', default=False),
Bool('EXECUTE', default=False),
),
register=True
)],
register=True
),
name='dacl',
),
Dict(
'nfs41_flags',
Bool('autoinherit', default=False),
Bool('protected', default=False),
Bool('defaulted', default=False),
),
Str('acltype', enum=[x.name for x in ACLType], null=True),
Dict(
'options',
Bool('stripacl', default=False),
Bool('recursive', default=False),
Bool('traverse', default=False),
Bool('canonicalize', default=True),
Bool('validate_effective_acl', default=True)
)
), roles=['FILESYSTEM_ATTRS_WRITE'], audit='Filesystem set ACL', audit_extended=lambda data: data['path']
)
@returns()
@job(lock="perm_change")
def setacl(self, job, data):
    """
    Set an ACL on data['path'], dispatching to the NFSv4 or POSIX1e
    handler based on the requested (or on-disk) ACL type.
    """
    verrors = ValidationErrors()
    data['loc'] = self._common_perm_path_validate("filesystem.setacl", data, verrors)
    verrors.check()

    # Prefer an explicitly requested acltype; otherwise detect from disk.
    if 'acltype' in data:
        acltype_name = data['acltype']
    else:
        acltype_name = self.path_get_acltype(data['path'])

    acltype = ACLType[acltype_name]
    if acltype == ACLType.NFS4:
        return self.setacl_nfs4(job, data)
    if acltype == ACLType.POSIX1E:
        return self.setacl_posix1e(job, data)
    raise CallError(f"{data['path']}: ACLs disabled on path.", errno.EOPNOTSUPP)
@private
def add_to_acl_posix(self, acl, entries):
    """
    Merge simplified access entries into a POSIX1e ACL in place.

    For each requested entry, both an access and a default (inheriting)
    ACE are ensured, along with access and default MASK entries. Missing
    default copies of the canonical USER_OBJ / GROUP_OBJ / OTHER entries
    are also added so the resulting ACL stays valid.

    Returns True if the ACL was modified.
    """
    def perms_for(access):
        # READ intentionally includes EXECUTE so directories stay traversable.
        if access in ('MODIFY', 'FULL_CONTROL'):
            return {'READ': True, 'WRITE': True, 'EXECUTE': True}
        if access == 'READ':
            return {'READ': True, 'WRITE': False, 'EXECUTE': True}
        raise CallError(f'{access}: unsupported permissions type for POSIX1E acltype')

    def append_ace(entry, is_default):
        # Append an ACE for the requested principal.
        acl.append({
            'tag': entry['id_type'],
            'id': entry['id'],
            'perms': perms_for(entry['access']),
            'default': is_default
        })

    def append_mask(is_default):
        # Append a full-permission MASK entry (required for named entries).
        acl.append({
            'tag': 'MASK',
            'id': -1,
            'perms': {'READ': True, 'WRITE': True, 'EXECUTE': True},
            'default': is_default
        })

    def scan(entry):
        # Inspect the current ACL for the requested entry and for mask /
        # canonical entries; returns presence flags. Also appends default
        # copies of canonical entries that lack them (side effect).
        canonical = {
            'USER_OBJ': {'has_default': False, 'entry': None},
            'GROUP_OBJ': {'has_default': False, 'entry': None},
            'OTHER': {'has_default': False, 'entry': None},
        }
        found = {
            'default': False,
            'access': False,
            'access_mask': False,
            'default_mask': False,
        }
        for ace in acl:
            slot = canonical.get(ace['tag'])
            if slot is not None:
                if ace['default']:
                    slot['has_default'] = True
                else:
                    slot['entry'] = ace
            elif ace['tag'] == 'MASK':
                found['default_mask' if ace['default'] else 'access_mask'] = True
            elif (
                ace['tag'] == entry['id_type']
                and ace['id'] == entry['id']
                and ace['perms'] == perms_for(entry['access'])
            ):
                found['default' if ace['default'] else 'access'] = True

        # NOTE(review): assumes each canonical tag has an access entry
        # (guaranteed by getacl_posix1e output) -- slot['entry'] would be
        # None otherwise.
        for tag, slot in canonical.items():
            if slot['has_default']:
                continue
            acl.append({
                'tag': tag,
                'id': slot['entry']['id'],
                'perms': slot['entry']['perms'],
                'default': True
            })
        return found

    changed = False
    for entry in entries:
        found = scan(entry)
        if not found['default']:
            changed = True
            append_ace(entry, True)
        if not found['access']:
            changed = True
            append_ace(entry, False)
        if not found['access_mask']:
            changed = True
            append_mask(False)
        if not found['default_mask']:
            changed = True
            append_mask(True)
    return changed
@private
def add_to_acl_nfs4(self, acl, entries):
    """
    Merge simplified access entries into an NFSv4 ACL in place.

    Each missing entry is appended as an inheriting ALLOW ACE using the
    BASIC permission set. Returns True if the ACL was modified.
    """
    basic_perm_map = {
        'MODIFY': {'BASIC': 'MODIFY'},
        'READ': {'BASIC': 'READ'},
        'FULL_CONTROL': {'BASIC': 'FULL_CONTROL'},
    }

    def to_basic(access):
        if access not in basic_perm_map:
            raise CallError(f'{access}: unsupported permissions type for NFSv4 acltype')
        # Copy so appended ACEs never share a perms dict.
        return dict(basic_perm_map[access])

    def already_present(entry):
        # Only an ALLOW ACE with the exact BASIC permission counts.
        return any(
            ace['tag'] == entry['id_type']
            and ace['id'] == entry['id']
            and ace['type'] == 'ALLOW'
            and ace['perms'].get('BASIC', {}) == entry['access']
            for ace in acl
        )

    changed = False
    for entry in entries:
        if already_present(entry):
            continue
        acl.append({
            'tag': entry['id_type'],
            'id': entry['id'],
            'perms': to_basic(entry['access']),
            'flags': {'BASIC': 'INHERIT'},
            'type': 'ALLOW'
        })
        changed = True
    return changed
@private
@accepts(Dict(
'add_to_acl',
Str('path', required=True),
List('entries', required=True, items=[Dict(
'simplified_acl_entry',
Str('id_type', enum=['USER', 'GROUP'], required=True),
Int('id', required=True),
Str('access', enum=['READ', 'MODIFY', 'FULL_CONTROL'], required=True)
)]),
Dict(
'options',
Bool('force', default=False),
)
), roles=['FILESYSTEM_ATTRS_WRITE'], audit='Filesystem add to ACL', audit_extended=lambda data: data['path'])
@job()
def add_to_acl(self, job, data):
    """
    Simplified ACL maintenance API for charts users to grant either read or
    modify access to particular IDs on a given path. The requested entries
    are merged into the existing ACL and the result is re-applied to the
    path recursively.

    `id_type` specifies whether the extra entry will be a user or group
    `id` specifies the numeric id of the user / group for which access is
    being granted.
    `access` specifies the simplified access mask to be granted to the user.
    For NFSv4 ACLs `READ` means the READ set, and `MODIFY` means the MODIFY
    set. For POSIX1E `READ` means read and execute, `MODIFY` means read, write,
    execute.
    """
    init_path = data['path']
    verrors = ValidationErrors()
    self._common_perm_path_validate('filesystem.add_to_acl', data, verrors)
    verrors.check()
    # NOTE(review): restoring the path implies _common_perm_path_validate
    # may rewrite data['path'] -- confirm against its implementation.
    data['path'] = init_path
    current_acl = self.getacl(data['path'])
    acltype = ACLType[current_acl['acltype']]
    if acltype == ACLType.NFS4:
        changed = self.add_to_acl_nfs4(current_acl['acl'], data['entries'])
    elif acltype == ACLType.POSIX1E:
        changed = self.add_to_acl_posix(current_acl['acl'], data['entries'])
    else:
        raise CallError(f"{data['path']}: ACLs disabled on path.", errno.EOPNOTSUPP)
    if not changed:
        job.set_progress(100, 'ACL already contains all requested entries.')
        return changed
    # Re-applying recursively can affect existing data; require explicit
    # opt-in via `force` when the directory is not empty.
    if not directory_is_empty(data['path']) and not data['options']['force']:
        raise CallError(
            f'{data["path"]}: path contains existing data '
            'and `force` was not specified', errno.EPERM
        )
    setacl_job = self.middleware.call_sync('filesystem.setacl', {
        'path': data['path'],
        'dacl': current_acl['acl'],
        'acltype': current_acl['acltype'],
        'options': {'recursive': True}
    })
    # Surface the child job's progress/result as our own.
    job.wrap_sync(setacl_job)
    return changed
@private
@accepts(Dict(
'calculate_inherited_acl',
Str('path', required=True),
Dict(
'options',
Bool('directory', default=True)
)
))
def get_inherited_acl(self, data):
    """
    Generate an inherited ACL based on given `path`.

    The `directory` option selects whether the generated ACL is for a
    directory (True, the default) or a file (False).
    """
    verrors = ValidationErrors()
    self._common_perm_path_validate('filesystem.get_inherited_acl', data, verrors, True)
    verrors.check()

    # Read the parent ACL verbosely (simplified=False) so all inheritance
    # flags are available for the calculation.
    parent_acl = self.getacl(data['path'], False)
    parent_acltype = ACLType[parent_acl['acltype']]
    return parent_acltype.calculate_inherited(parent_acl, data['options']['directory'])
| 39,103 | Python | .py | 913 | 29.514786 | 120 | 0.517948 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,721 | passdb.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/passdb.py | import os
from middlewared.api.current import UserEntry
from middlewared.service import filterable, Service, job, private
from middlewared.utils.sid import get_domain_rid
from .util_passdb import (
delete_passdb_entry,
insert_passdb_entries,
query_passdb_entries,
update_passdb_entry,
user_entry_to_passdb_entry,
PassdbMustReinit,
PASSDB_PATH
)
class SMBService(Service):
    """Maintains Samba's passdb.tdb user database in sync with the TrueNAS user table."""

    class Config:
        service = 'cifs'
        service_verb = 'restart'

    @private
    @filterable
    def passdb_list(self, filters, options):
        """Query existing passdb users.

        If the on-disk passdb file must be reinitialized (PassdbMustReinit),
        it is rebuilt from the user table and the query is retried once.
        """
        try:
            return query_passdb_entries(filters or [], options or {})
        except PassdbMustReinit as err:
            self.logger.warning(err.errmsg)
            # Force a full resync (removes existing passdb.tdb), wait for
            # the job, then retry the query.
            self.synchronize_passdb(True).wait_sync(raise_error=True)
            return query_passdb_entries(filters or [], options or {})

    @private
    def update_passdb_user(self, user: UserEntry):
        """Insert or update the passdb entry for a single SMB user."""
        server_name = self.middleware.call_sync('smb.config')['netbiosname_local']
        # Preserve fields from any existing entry for this username.
        existing_entry = self.passdb_list([['username', '=', user['username']]])
        passdb_entry = user_entry_to_passdb_entry(
            server_name,
            user,
            existing_entry[0] if existing_entry else None
        )
        update_passdb_entry(passdb_entry)

    @private
    def remove_passdb_user(self, username, sid):
        """Remove the passdb entry identified by `username` and the RID of `sid`."""
        delete_passdb_entry(username, get_domain_rid(sid))

    @private
    @job(lock="passdb_sync", lock_queue_size=1)
    def synchronize_passdb(self, job, force=False):
        """ Sync user configuration from our user table with Samba's passdb.tdb file

        Params:
            force - force resync by deleting the existing passdb.tdb file

        Raises:
            PassdbMustReinit - the synchronize job must be rerun with force command
            RuntimeError - TDB library error
        """
        server_name = self.middleware.call_sync('smb.config')['netbiosname_local']
        if force:
            try:
                os.unlink(PASSDB_PATH)
            except FileNotFoundError:
                pass
        # Index current passdb entries by domain RID for matching below.
        pdb_entries = {entry['user_rid']: entry for entry in query_passdb_entries([], {})}
        to_update = []
        for entry in self.middleware.call_sync('user.query', [("smb", "=", True), ('local', '=', True)]):
            existing_entry = pdb_entries.pop(get_domain_rid(entry['sid']), None)
            to_update.append(user_entry_to_passdb_entry(server_name, entry, existing_entry))
            if existing_entry and existing_entry['username'] != entry['username']:
                # username changed. Since it's part of key for one of tdb entries we have to nuke it.
                delete_passdb_entry(existing_entry['username'], existing_entry['user_rid'])
        # inserting over existing entries replaces them
        # this is performed with a transaction lock in place and so
        # we don't have to worry about rollback in case of failure
        insert_passdb_entries(to_update)
        for entry in pdb_entries.values():
            # we popped off keys as we matched them to existing DB users.
            # any remaining shouldn't be in the passdb file
            self.logger.debug('%s: removing user from SMB user database', entry['username'])
            delete_passdb_entry(entry['username'], entry['user_rid'])
| 3,376 | Python | .py | 73 | 37.273973 | 105 | 0.645053 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,722 | util_smbconf.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/util_smbconf.py | import os
from logging import getLogger
from middlewared.utils import filter_list
from middlewared.utils.directoryservices.constants import DSType
from middlewared.plugins.account import DEFAULT_HOME_PATH
from middlewared.plugins.smb_.constants import LOGLEVEL_MAP, SMBEncryption, SMBPath
LOGGER = getLogger(__name__)
def generate_smb_conf_dict(
    ds_type: DSType,
    ds_config: dict | None,
    smb_service_config: dict,
    smb_shares: list,
    smb_bind_choices: dict,
    idmap_settings: list,
    is_enterprise: bool = False
):
    """
    Build the dictionary of global smb.conf parameters from the SMB service
    configuration, the directory-services state, share settings, and idmap
    configuration.

    Params:
        ds_type - active directory-services type (AD / IPA / None)
        ds_config - directory-services configuration; for IPA expected to
            contain `ipa_domain` and `ipa_config` keys
        smb_service_config - output of smb.config
        smb_shares - list of SMB share configurations
        smb_bind_choices - mapping of allowed bind IP choices
        idmap_settings - list of idmap domain configurations
        is_enterprise - enables ZFS integrity-stream / block-cloning options

    Returns:
        dict mapping smb.conf parameter names to values. Ordering matters:
        auxiliary parameters may override earlier defaults, and the final
        registry/include parameters are applied last so they cannot be
        overridden.
    """
    guest_enabled = any(filter_list(smb_shares, [['guestok', '=', True]]))
    fsrvp_enabled = any(filter_list(smb_shares, [['fsrvp', '=', True]]))

    ad_idmap = None
    ipa_domain = None

    match ds_type:
        case DSType.AD:
            # Pull the primary AD idmap domain for backend-specific handling
            # in the idmap loop below.
            ad_idmap = filter_list(
                idmap_settings,
                [('name', '=', 'DS_TYPE_ACTIVEDIRECTORY')],
                {'get': True}
            )
        case DSType.IPA:
            ipa_domain = ds_config['ipa_domain']
            ipa_config = ds_config['ipa_config']
        case _:
            pass

    home_share = filter_list(smb_shares, [['home', '=', True]])
    if home_share:
        # Template for per-user home directories (%D = domain, %U = user).
        if ds_type is DSType.AD:
            home_path_suffix = '%D/%U'
        elif not home_share[0]['path_suffix']:
            home_path_suffix = '%U'
        else:
            home_path_suffix = home_share[0]['path_suffix']

        home_path = os.path.join(home_share[0]['path'], home_path_suffix)
    else:
        home_path = DEFAULT_HOME_PATH

    loglevelint = int(LOGLEVEL_MAP.inv.get(smb_service_config['loglevel'], 1))

    """
    First set up our legacy / default SMB parameters. Several are related to
    making sure that we don't have printing support enabled.

    fruit:nfs_aces
    fruit:zero_file_id
    ------------------
    are set to ensure that vfs_fruit will always have appropriate configuration.
    nfs_aces allows clients to chmod via special ACL entries. This reacts
    poorly with rich ACL models.

    vfs_fruit has option to set the file ID to zero, which causes client to
    fallback to algorithically generated file ids by hashing file name rather
    than using server-provided ones. This is not handled properly by all
    MacOS client versions and a hash collision can lead to data corruption.

    restrict anonymous
    ------------------
    We default to disabling anonymous IPC$ access. This is mostly in response
    to being flagged by security scanners. We have to re-enable if server guest
    access is enabled.

    winbind request timeout
    ------------------
    The nsswitch is only loaded once for the life of a running process on Linux
    and so winbind will always be present. In case of standalone server we want
    to reduce the risk that unhealthy winbind state would cause hangs in NSS
    for middlewared.

    passdb backend
    ------------------
    The passdb backend is stored in non-default path in order to prevent open
    handles from affecting system dataset operations. This is safe because we
    regenerate the passdb.tdb file on reboot.

    obey pam restrictions
    ------------------
    This is currently only required for case where user homes share is in use
    because we rely on pam_mkhomedir to auto-generate the path.

    It introduces a potential failure mode where pam_session() failure will
    lead to inability access SMB shares, and so at some point we should remove
    the pam_mkhomedir dependency.
    """
    smbconf = {
        'disable spoolss': True,
        'dns proxy': False,
        'load printers': False,
        'max log size': 5120,
        'printcap': '/dev/null',
        'bind interfaces only': True,
        'fruit:nfs_aces': False,
        'fruit:zero_file_id': False,
        'restrict anonymous': 0 if guest_enabled else 2,
        'winbind request timeout': 60 if ds_type is DSType.AD else 2,
        'passdb backend': f'tdbsam:{SMBPath.PASSDB_DIR.value[0]}/passdb.tdb',
        'workgroup': smb_service_config['workgroup'],
        'netbios name': smb_service_config['netbiosname_local'],
        'netbios aliases': ' '.join(smb_service_config['netbiosalias']),
        'guest account': smb_service_config['guest'] if smb_service_config['guest'] else 'nobody',
        'obey pam restrictions': any(home_share),
        'create mask': smb_service_config['filemask'] or '0664',
        'directory mask': smb_service_config['dirmask'] or '0775',
        'ntlm auth': smb_service_config['ntlmv1_auth'],
        'server multichannel support': smb_service_config['multichannel'],
        'unix charset': smb_service_config['unixcharset'],
        'local master': smb_service_config['localmaster'],
        'server string': smb_service_config['description'],
        'log level': loglevelint,
        'logging': 'file',
        'server smb encrypt': SMBEncryption[smb_service_config['encryption']].value,
    }

    """
    When guest access is enabled on _any_ SMB share we have to change the
    behavior of when the server maps to the guest account. `Bad User` here means
    that attempts to authenticate as a user that does not exist on the server
    will be automatically mapped to the guest account. This can lead to unexpected
    access denied errors, but many legacy users depend on this functionality and
    so we canot remove it.
    """
    if guest_enabled:
        smbconf['map to guest'] = 'Bad User'

    """
    If fsrvp is enabled on any share, then we need to have samba fork off an
    fssd daemon to handle snapshot management requests.
    """
    if fsrvp_enabled:
        smbconf.update({
            'rpc_daemon:fssd': 'fork',
            'fss:prune stale': True,
        })

    if smb_service_config['enable_smb1']:
        smbconf['server min protocol'] = 'NT1'

    if smb_service_config['syslog']:
        # Cap syslog verbosity at 3 while keeping full verbosity in the file log.
        smbconf['logging'] = f'syslog@{min(3, loglevelint)} file'

    if smb_bindips := smb_service_config['bindip']:
        allowed_ips = set(smb_bind_choices.values())
        if (rejected := set(smb_bindips) - allowed_ips):
            LOGGER.warning(
                '%s: IP address(es) are no longer in use and should be removed '
                'from SMB configuration.', rejected
            )

        if (final_ips := allowed_ips & set(smb_bindips)):
            # Loopback must always be present for local middleware access.
            smbconf['interfaces'] = ' '.join(final_ips | {'127.0.0.1'})
        else:
            # We need to generate SMB configuration to prevent breaking
            # winbindd
            LOGGER.error('No specified SMB bind IP addresses are available')
            smbconf['interfaces'] = '127.0.0.1'

    """
    The following are our default Active Directory related parameters

    winbindd max domain connections
    ------------------
    Winbindd defaults to a single connection per domain controller. Real
    life testing in enterprise environments indicated that this was
    often a bottleneck on busy servers. Ten has been default since FreeNAS
    11.2 and we have yet to see cases where it needs to scale higher.

    allow trusted domains
    ------------------
    We disable support for trusted domains by default due to need to configure
    idmap backends for them. There is separate validation when the field is
    enabled in the AD plugin to check that user has properly configured idmap
    settings. If idmap settings are not configured, then SID mappings are
    written to the default idmap backend (which is a TDB file on the system
    dataset). This is not desirable because the insertion for a domain is
    first-come-first-serve (not consistent between servers).

    winbind enum users
    winbind enum groups
    ------------------
    These are defaulted to being on to preserve legacy behavior and meet user
    expectations based on long histories of howto guides online. They affect
    whether AD users / groups will appear when full lists of users / groups
    via getpwent / getgrent. It does not impact getpwnam and getgrnam.
    """
    if ds_type is DSType.AD:
        ac = ds_config
        smbconf.update({
            'server role': 'member server',
            'kerberos method': 'secrets and keytab',
            'security': 'ADS',
            'local master': False,
            'domain master': False,
            'preferred master': False,
            'winbind cache time': 7200,
            'winbind max domain connections': 10,
            'winbind use default domain': ac['use_default_domain'],
            'client ldap sasl wrapping': 'seal',
            'template shell': '/bin/sh',
            'allow trusted domains': ac['allow_trusted_doms'],
            'realm': ac['domainname'],
            'ads dns update': False,
            'winbind nss info': ac['nss_info'].lower(),
            'template homedir': home_path,
            'winbind enum users': not ac['disable_freenas_cache'],
            'winbind enum groups': not ac['disable_freenas_cache'],
        })

    """
    The following parameters are based on what is performed when admin runs
    command ipa-client-samba-install.

    NOTE1: This requires us to have joined IPA domain through middleware
    because we need to store the password associated with the SMB keytab in
    samba's secrets.tdb file.

    NOTE2: There is some chance that the IPA domain will not have SMB information
    and in this situation we will omit from our smb.conf.
    """
    if ds_type is DSType.IPA and ipa_domain is not None:
        # IPA SMB config is stored in remote IPA server and so we don't let
        # users override the config. If this is a problem it should be fixed on
        # the other end.
        domain_short = ipa_domain['netbios_name']
        range_low = ipa_domain['range_id_min']
        range_high = ipa_domain['range_id_max']

        smbconf.update({
            'server role': 'member server',
            'kerberos method': 'dedicated keytab',
            'dedicated keytab file': 'FILE:/etc/ipa/smb.keytab',
            'workgroup': ipa_domain['netbios_name'],
            'realm': ipa_config['realm'],
            f'idmap config {domain_short} : backend': 'sss',
            f'idmap config {domain_short} : range': f'{range_low} - {range_high}',
        })

    """
    The following part generates the smb.conf parameters from our idmap plugin
    settings. This is primarily relevant for case where TrueNAS is joined to
    an Active Directory domain.
    """
    for i in idmap_settings:
        match i['name']:
            case 'DS_TYPE_DEFAULT_DOMAIN':
                # AUTORID claims the '*' domain itself; skip the default
                # domain entry in that case to avoid a conflict.
                if ad_idmap and ad_idmap['idmap_backend'] == 'AUTORID':
                    continue

                domain = '*'
            case 'DS_TYPE_ACTIVEDIRECTORY':
                if ds_type is not DSType.AD:
                    continue

                if i['idmap_backend'] == 'AUTORID':
                    domain = '*'
                else:
                    domain = smb_service_config['workgroup']
            case 'DS_TYPE_LDAP':
                # TODO: in future we will have migration remove this
                # from the idmap table
                continue
            case _:
                domain = i['name']

        idmap_prefix = f'idmap config {domain} :'
        smbconf.update({
            f'{idmap_prefix} backend': i['idmap_backend'].lower(),
            f'{idmap_prefix} range': f'{i["range_low"]} - {i["range_high"]}',
        })
        for k, v in i['options'].items():
            backend_parameter = 'realm' if k == 'cn_realm' else k
            match k:
                case 'ldap_server':
                    value = 'ad' if v == 'AD' else 'stand-alone'
                case 'ldap_url':
                    # 'ssl' option below selects the URL scheme.
                    value = f'{"ldaps://" if i["options"]["ssl"] == "ON" else "ldap://"}{v}'
                case 'ssl':
                    continue
                case _:
                    value = v

            smbconf.update({f'{idmap_prefix} {backend_parameter}': value})

    for e in smb_service_config['smb_options'].splitlines():
        # Add relevant auxiliary parameters
        entry = e.strip()
        if entry.startswith(('#', ';')) or '=' not in entry:
            continue

        param, value = entry.split('=', 1)
        smbconf[param.strip()] = value.strip()

    # The following parameters must come after processing includes in order to
    # prevent auxiliary parameters from overriding them
    smbconf.update({
        'zfs_core:zfs_integrity_streams': is_enterprise,
        'zfs_core:zfs_block_cloning': is_enterprise,
        'registry shares': True,
        'include': 'registry',
    })

    return smbconf
| 12,577 | Python | .py | 275 | 36.967273 | 98 | 0.62551 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,723 | util_net_conf.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/util_net_conf.py | import errno
import json
import subprocess
from middlewared.service_exception import CallError, MatchNotFound
from .constants import SMBCmd
CONF_JSON_VERSION = {"major": 0, "minor": 1}
NETCONF_ACTIONS = (
'list',
'showshare',
'addshare',
'delshare',
'getparm',
'setparm',
'delparm',
)
def json_check_version(version):
    """Validate JSON version emitted by samba utilities.

    Raises CallError when `version` does not match the supported
    CONF_JSON_VERSION.
    """
    if version != CONF_JSON_VERSION:
        raise CallError(
            "Unexpected JSON version returned from Samba utils: "
            f"[{version}]. Expected version was: [{CONF_JSON_VERSION}]. "
            "Please file a bug report at jira.ixsystems.com with this traceback."
        )
def netconf(**kwargs):
    """
    wrapper for net(8) conf. This manages the share configuration, which is stored in
    samba's registry.tdb file.
    """
    action = kwargs.get('action')
    if action not in NETCONF_ACTIONS:
        raise CallError(f'Action [{action}] is not permitted.', errno.EPERM)

    share = kwargs.get('share')
    args = kwargs.get('args', [])
    jsoncmd = kwargs.get('jsoncmd', False)

    # Assemble the net(8) command line; --json must precede the subcommand.
    cmd = [SMBCmd.NET.value]
    if jsoncmd:
        cmd.append('--json')
    cmd.extend(['conf', action])
    if share:
        cmd.append(share)
    cmd.extend(args)

    proc = subprocess.run(cmd, capture_output=True, check=False)
    if proc.returncode != 0:
        errmsg = proc.stderr.decode().strip()
        if 'SBC_ERR_NO_SUCH_SERVICE' in errmsg or 'does not exist' in errmsg:
            # share name may come either from `share` or the JSON payload
            svc = share if share else json.loads(args[0])['service']
            raise MatchNotFound(svc)
        if 'SBC_ERR_INVALID_PARAM' in errmsg:
            raise CallError(errmsg, errno.EINVAL)

        raise CallError(
            f'net conf {action} [{cmd}] failed with error: {errmsg}'
        )

    out = proc.stdout.decode()
    if jsoncmd and out:
        out = json.loads(out)

    return out
def reg_listshares():
    """
    Generate list of names of SMB shares in current running configuration
    """
    res = netconf(action='list', jsoncmd=True)
    json_check_version(res.pop('version'))

    # non-share sections (e.g. global) are excluded
    return [section['service'] for section in res['sections'] if section['is_share']]
def reg_addshare(name, parameters):
    """
    add share with specified payload to running configuration
    """
    payload = json.dumps({"service": name, "parameters": parameters})
    netconf(action='addshare', jsoncmd=True, args=[payload])
def reg_delshare(share):
    """
    Delete the named share from samba's running registry configuration.
    """
    return netconf(share=share, action='delshare')
def reg_showshare(share):
    """
    Dump share running configuration
    """
    net = netconf(action='showshare', share=share, jsoncmd=True)
    json_check_version(net.pop('version'))

    parameters = net.get('parameters', {})
    # list-valued parameters get an additional whitespace-split `parsed` form
    for key in ('vfs objects', 'hosts allow', 'hosts deny'):
        if parameters.get(key):
            parameters[key]['parsed'] = parameters[key]['raw'].split()

    return net
def reg_setparm(data):
    """
    set specified parameters for the SMB share specified in the data.
    data is dict consisting of two keys `service` (share name) and
    `parameters` (dict containing parameters) as follows:

    {
        'service': share_name,
        'parameters': {'available': {'parsed': True}}
    }

    each parameter may specify `raw` or `parsed` value. In case of raw
    value it should be a string.
    """
    return netconf(action='setparm', jsoncmd=True, args=[json.dumps(data)])
def reg_delparm(data):
    """
    delete the specified parameters from the SMB share configuration.
    JSON object for input is identical format as reg_setparm.
    """
    return netconf(action='delparm', jsoncmd=True, args=[json.dumps(data)])
def reg_getparm(share, parm):
    """
    Retrieve the value of the specified parameter for the specified share
    NOTE: this only queries the registry and will not present SMB server
    default values.
    """
    try:
        ret = netconf(action='getparm', share=share, args=[parm]).strip()
    except CallError as e:
        if f"Error: given parameter '{parm}' is not set." in e.errmsg:
            # Copy behavior of samba python binding
            return None

        raise e from None

    # list-valued parameters are returned as a whitespace-split list
    if parm in ('vfs objects', 'hosts allow', 'hosts deny'):
        return ret.split()

    return ret
| 4,550 | Python | .py | 133 | 28.097744 | 85 | 0.646333 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,724 | sharesec.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/sharesec.py | import os
from base64 import b64encode, b64decode
from middlewared.plugins.sysdataset import SYSDATASET_PATH
from middlewared.service import filterable, periodic, private, CRUDService
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.utils import filter_list
from middlewared.utils.security_descriptor import (
share_acl_to_sd_bytes,
sd_bytes_to_share_acl,
)
from middlewared.utils.tdb import (
get_tdb_handle,
TDBDataType,
TDBOptions,
TDBPathType,
)
from struct import pack
LOCAL_SHARE_INFO_FILE = os.path.join(SYSDATASET_PATH, 'samba4', 'share_info.tdb')
SHARE_INFO_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
SHARE_INFO_VERSION_KEY = 'INFO/version'
SHARE_INFO_VERSION_DATA = b64encode(pack('<I', 3))
def fetch_share_acl(share_name: str) -> str:
    """ fetch base64-encoded NT ACL for SMB share """
    tdb_key = f'SECDESC/{share_name.lower()}'
    with get_tdb_handle(LOCAL_SHARE_INFO_FILE, SHARE_INFO_TDB_OPTIONS) as handle:
        return handle.get(tdb_key)
def set_version_share_info():
    """ write samba's on-disk version marker to share_info.tdb """
    with get_tdb_handle(LOCAL_SHARE_INFO_FILE, SHARE_INFO_TDB_OPTIONS) as handle:
        handle.store(SHARE_INFO_VERSION_KEY, SHARE_INFO_VERSION_DATA)
def store_share_acl(share_name: str, val: str) -> None:
    """ write base64-encoded NT ACL for SMB share to server running configuration """
    # a freshly-created file must also receive samba's version marker
    needs_version_key = not os.path.exists(LOCAL_SHARE_INFO_FILE)
    with get_tdb_handle(LOCAL_SHARE_INFO_FILE, SHARE_INFO_TDB_OPTIONS) as handle:
        if needs_version_key:
            handle.store(SHARE_INFO_VERSION_KEY, SHARE_INFO_VERSION_DATA)

        return handle.store(f'SECDESC/{share_name.lower()}', val)
def remove_share_acl(share_name: str) -> None:
    """ remove ACL from share causing default entry of S-1-1-0 FULL_CONTROL """
    tdb_key = f'SECDESC/{share_name.lower()}'
    with get_tdb_handle(LOCAL_SHARE_INFO_FILE, SHARE_INFO_TDB_OPTIONS) as handle:
        handle.delete(tdb_key)
def dup_share_acl(src: str, dst: str) -> None:
    """ copy the stored share ACL from share `src` onto share `dst` """
    store_share_acl(dst, fetch_share_acl(src))
class ShareSec(CRUDService):
    """
    CRUD service managing SMB share-level ACLs.

    Share ACLs live in two places: samba's running configuration
    (share_info.tdb, accessed via the module-level helpers above) and the
    middleware config database (`sharing.cifs_share.cifs_share_acl`). The
    methods below keep the two in sync.
    """

    class Config:
        namespace = 'smb.sharesec'
        cli_namespace = 'sharing.smb.sharesec'
        private = True

    @filterable
    def entries(self, filters, options):
        """Return raw key/value entries from share_info.tdb, filtered by
        `filters`/`options`. Only SECDESC/* keys are returned."""
        # TDB file contains INFO/version key that we don't want to return
        try:
            with get_tdb_handle(LOCAL_SHARE_INFO_FILE, SHARE_INFO_TDB_OPTIONS) as hdl:
                return filter_list(
                    hdl.entries(),
                    filters + [['key', '^', 'SECDESC/']],
                    options
                )
        except FileNotFoundError:
            # File may not have been created yet or overzealous admin may have deleted
            return []

    def getacl(self, share_name):
        """
        View the ACL information for `share_name`. The share ACL is distinct from filesystem
        ACLs which can be viewed by calling `filesystem.getacl`.

        Raises CallError when no matching share is configured.
        """
        # the special HOMES share is matched by its `home` flag rather than name
        if share_name.upper() == 'HOMES':
            share_filter = [['home', '=', True]]
        else:
            share_filter = [['name', 'C=', share_name]]

        # validate that the share exists before consulting the TDB file
        try:
            self.middleware.call_sync(
                'sharing.smb.query', share_filter, {'get': True, 'select': ['home', 'name']}
            )
        except MatchNotFound:
            raise CallError(f'{share_name}: share does not exist')

        # ensure the TDB file exists with samba's version marker
        if not os.path.exists(LOCAL_SHARE_INFO_FILE):
            set_version_share_info()

        try:
            share_sd_bytes = b64decode(fetch_share_acl(share_name))
            share_acl = sd_bytes_to_share_acl(share_sd_bytes)
        except MatchNotFound:
            # Non-exist share ACL is treated as granting world FULL permissions
            share_acl = [{'ae_who_sid': 'S-1-1-0', 'ae_perm': 'FULL', 'ae_type': 'ALLOWED'}]

        return {'share_name': share_name, 'share_acl': share_acl}

    def setacl(self, data, db_commit=True):
        """
        Set an ACL on `share_name`. Changes are written to samba's share_info.tdb file.
        This only impacts SMB sessions.

        `share_name` the name of the share

        `share_acl` a list of ACL entries (dictionaries) with the following keys:

        `ae_who_sid` who the ACL entry applies to expressed as a Windows SID

        `ae_perm` string representation of the permissions granted to the user or group.
        `FULL` grants read, write, execute, delete, write acl, and change owner.
        `CHANGE` grants read, write, execute, and delete.
        `READ` grants read and execute.

        `ae_type` can be ALLOWED or DENIED.
        """
        # NOTE(review): the `db_commit` parameter is accepted but never
        # referenced in this method — the datastore update below always runs.
        # Confirm whether callers rely on db_commit=False.
        if data['share_name'].upper() == 'HOMES':
            share_filter = [['home', '=', True]]
        else:
            share_filter = [['name', 'C=', data['share_name']]]

        try:
            config_share = self.middleware.call_sync('sharing.smb.query', share_filter, {'get': True})
        except MatchNotFound:
            raise CallError(f'{data["share_name"]}: share does not exist')

        # serialize to a security descriptor, push to samba's running config,
        # then persist the same base64 blob in the config database
        share_sd_bytes = b64encode(share_acl_to_sd_bytes(data['share_acl'])).decode()
        store_share_acl(data['share_name'], share_sd_bytes)
        self.middleware.call_sync(
            'datastore.update', 'sharing.cifs_share', config_share['id'],
            {'cifs_share_acl': share_sd_bytes}
        )

    @private
    def flush_share_info(self):
        """
        Write stored share acls to share_info.tdb. This should only be called
        if share_info.tdb contains default entries.
        """
        # query uses prefix 'cifs_', so DB column cifs_share_acl appears as 'share_acl'
        shares = self.middleware.call_sync('datastore.query', 'sharing.cifs_share', [], {'prefix': 'cifs_'})
        for share in shares:
            share_name = 'HOMES' if share['home'] else share['name']
            # NOTE(review): values beginning with 'S-1-' are routed through
            # setacl(), which expects a list of ACL entry dicts — presumably a
            # legacy text format handled upstream; confirm before changing.
            if share['share_acl'] and share['share_acl'].startswith('S-1-'):
                self.setacl({'share_name': share_name, 'share_acl': share['share_acl']})
            elif share['share_acl']:
                # already a base64-encoded SD; write it to the TDB verbatim
                store_share_acl(share_name, share['share_acl'])

    @periodic(3600, run_on_start=False)
    def check_share_info_tdb(self):
        """Hourly job: recreate share_info.tdb from the database if missing,
        otherwise sync the database from samba's running configuration."""
        if not os.path.exists(LOCAL_SHARE_INFO_FILE):
            # only rebuild while the SMB service is actually running
            if not self.middleware.call_sync('service.started', 'cifs'):
                return

            else:
                return self.flush_share_info()

        self.middleware.call_sync('smb.sharesec.synchronize_acls')

    async def synchronize_acls(self):
        """
        Synchronize the share ACL stored in the config database with Samba's running
        configuration as reflected in the share_info.tdb file.

        The only situation in which the configuration stored in the database will
        overwrite samba's running configuration is if share_info.tdb is empty. Samba
        fakes a single S-1-1-0:ALLOW/0x0/FULL entry in the absence of an entry for a
        share in share_info.tdb.
        """
        if not (entries := (await self.middleware.call('smb.sharesec.entries'))):
            return

        shares = await self.middleware.call('datastore.query', 'sharing.cifs_share', [], {'prefix': 'cifs_'})
        for s in shares:
            share_name = s['name'] if not s['home'] else 'homes'
            # skip shares that have no entry in the running configuration
            if not (share_acl := filter_list(entries, [['key', '=', f'SECDESC/{share_name.lower()}']])):
                continue

            if share_acl[0]['value'] != s['share_acl']:
                self.logger.debug('Updating stored copy of SMB share ACL on %s', share_name)
                await self.middleware.call(
                    'datastore.update',
                    'sharing.cifs_share',
                    s['id'],
                    {'cifs_share_acl': share_acl[0]['value']}
                )
| 7,641 | Python | .py | 156 | 39.628205 | 109 | 0.624362 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,725 | util_sd.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/util_sd.py | import errno
import enum
import json
import subprocess
from middlewared.schema import Bool, Dict, Password, Str, accepts
from middlewared.service import private, CallError, Service
from middlewared.plugins.smb import SMBCmd
class ACLType(enum.Enum):
    # ACL dialects handled by this module:
    # SMB   - Windows security descriptor representation
    # NFSV4 - local NFSv4 ACL representation
    SMB = "SMB"
    NFSV4 = "NFSV4"
class ACLPrincipal(enum.Enum):
    """
    Special ACL principals with their representations in each dialect.
    Tuple layout: (NFSv4 special tag, SMB account name, well-known SID).
    """
    OWNER = ("owner@", "CREATOR-OWNER", "S-1-3-0")
    GROUP = ("group@", "CREATOR-GROUP", "S-1-3-1")
    EVERYONE = ("everyone@", "EVERYONE", "S-1-1-0")

    def list_txt(acl_type):
        # principal names in the requested ACL dialect
        dialect = ACLType[acl_type]
        if dialect is ACLType.SMB:
            return [principal.value[1] for principal in ACLPrincipal]
        elif dialect is ACLType.NFSV4:
            return [principal.value[0] for principal in ACLPrincipal]

    def sids():
        # well-known SIDs of all special principals
        return [principal.value[2] for principal in ACLPrincipal]

    def from_sid(sid):
        # resolve a well-known SID to its principal, or None if not special
        return next((p for p in ACLPrincipal if p.value[2] == sid), None)

    def from_nfsv4(principal):
        # resolve an NFSv4 special tag to its principal, or None if not special
        return next((p for p in ACLPrincipal if p.value[0] == principal), None)

    def to_sid(self):
        """Well-known SID string for this principal."""
        return self.value[2]

    def to_smb(self):
        """SMB account name for this principal."""
        return self.value[1]

    def to_nfsv4(self):
        """NFSv4 special tag for this principal."""
        return self.value[0]
class ACLFlags(enum.Enum):
    """
    Inheritance flag mapping between dialects.
    Tuple layout: (NFSv4 flag name, SMB flag name).
    """
    FI = ("FILE_INHERIT", "OBJECT_INHERIT")
    DI = ("DIRECTORY_INHERIT", "CONTAINER_INHERIT")
    NI = ("NO_PROPAGATE_INHERIT", "NO_PROPAGATE_INHERIT")
    IO = ("INHERIT_ONLY", "INHERIT_ONLY")
    id_ = ("INHERITED", "INHERITED")

    def convert(aclt, in_flags):
        # Translate a flags dict from dialect `aclt` into the other dialect.
        # Flags missing from the input are emitted as False.
        if ACLType[aclt] is ACLType.SMB:
            src, dst = 1, 0
        else:
            src, dst = 0, 1

        return {
            flag.value[dst]: bool(in_flags.get(flag.value[src]))
            for flag in ACLFlags
        }
class ACLPerms(enum.Enum):
    """
    Permission bit mapping between dialects.
    Tuple layout: ((nfsv4 name, nfsv4 bit), (smb name, smb bit)).
    """
    RD = (("READ_DATA", 0x00000008), ("READ", 0x00000001))
    WD = (("WRITE_DATA", 0x00000010), ("WRITE", 0x00000002))
    EX = (("EXECUTE", 0x0001), ("EXECUTE", 0x00000020))
    DE = (("DELETE", 0x00000100), ("DELETE", 0x00010000))
    WC = (("WRITE_ACL", 0x00002000), ("WRITE_DAC", 0x00040000))
    WO = (("WRITE_OWNER", 0x00004000), ("WRITE_OWNER", 0x00080000))
    AD = (("APPEND_DATA", 0x00000020), ("APPEND_DATA", 0x00000004))
    RA = (("READ_ATTRIBUTES", 0x00000200), ("READ_ATTRIBUTES", 0x00000080))
    WA = (("WRITE_ATTRIBUTES", 0x00000400), ("WRITE_ATTRIBUTES", 0x00000100))
    RE = (("READ_NAMED_ATTRS", 0x00000040), ("READ_EA", 0x00000008))
    WE = (("WRITE_NAMED_ATTRS", 0x00000080), ("WRITE_EA", 0x00000010))
    DC = (("DELETE_CHILD", 0x00000800), ("DELETE_CHILD", 0x00000040))
    RC = (("READ_ACL", 0x00001000), ("READ_CONTROL", 0x00020000))
    SY = (("SYNCHRONIZE", 0x00008000), ("SYNCHRONIZE", 0x00100000))

    def convert(aclt, in_perms):
        # Translate a perms dict from dialect `aclt` into the other dialect.
        # Permissions missing from the input are emitted as False.
        if ACLType[aclt] is ACLType.SMB:
            return {p.value[0][0]: bool(in_perms.get(p.value[1][0])) for p in ACLPerms}

        return {p.value[1][0]: bool(in_perms.get(p.value[0][0])) for p in ACLPerms}

    def to_standard(in_perms):
        # Map a full SMB special-perms dict onto a Windows "standard"
        # permission label. Returns "" when no standard set matches exactly.
        base = {p.value[1][0]: False for p in ACLPerms}
        read_set = base | {"READ": True, "EXECUTE": True}
        change_set = read_set | {"WRITE": True, "DELETE": True}
        full_set = {p.value[1][0]: True for p in ACLPerms}

        for label, perm_set in (
            ("READ", read_set),
            ("CHANGE", change_set),
            ("FULL", full_set),
        ):
            if in_perms == perm_set:
                return label

        return ""

    def to_hex(aclt, in_perms):
        # Pack an SMB perms dict into the zero-padded hex access mask string.
        # Non-SMB input yields "0x00000000".
        mask = 0
        if ACLType[aclt] is ACLType.SMB:
            for p in ACLPerms:
                if in_perms.get(p.value[1][0]):
                    mask |= p.value[1][1]

        return f"0x{hex(mask)[2:].zfill(8)}"
class SMBService(Service):
    """Helpers for fetching remote SMB ACLs and converting ACLs between the
    SMB (Windows security descriptor) and local NFSv4 representations."""

    class Config:
        service = 'cifs'
        service_verb = 'restart'

    @private
    @accepts(
        Dict(
            'get_remote_acl',
            Str('server', required=True),
            Str('share', required=True),
            Str('path', default='\\'),
            Str('username', required=True),
            Password('password', required=True),
            Dict(
                'options',
                Bool('use_kerberos', default=False),
                Str('output_format', enum=['SMB', 'LOCAL'], default='SMB'),
            )
        )
    )
    def get_remote_acl(self, data):
        """
        Retrieves an ACL from a remote SMB server.

        `server` IP Address or hostname of the remote server

        `share` Share name

        `path` path on the remote SMB server. Use "\" to separate path components

        `username` username to use for authentication

        `password` password to use for authentication

        `use_kerberos` use credentials to get a kerberos ticket for authentication.
        AD only.

        `output_format` format for resulting ACL data. Choices are either 'SMB',
        which will present the information as a Windows SD or 'LOCAL', which formats
        the ACL information according local filesystem of the TrueNAS server.
        """
        if data['options']['use_kerberos']:
            # BUGFIX: errno has no attribute EOPNOTSUP; the prior reference
            # raised AttributeError instead of the intended CallError.
            # EOPNOTSUPP is the correct constant name.
            raise CallError("kerberos authentication for this function is not "
                            "currently supported.", errno.EOPNOTSUPP)

        sc = subprocess.run([
            SMBCmd.SMBCACLS.value,
            f'//{data["server"]}/{data["share"]}',
            data['path'], '-j', '-U', data['username']],
            capture_output=True,
            input=data['password'].encode()
        )
        if sc.returncode != 0:
            raise CallError("Failed to retrieve remote SMB server ACL: "
                            f"{sc.stderr.decode().strip()}")

        # parse the second line of output — presumably the first line is a
        # non-JSON header emitted by smbcacls; confirm against smbcacls -j output
        smb_sd = json.loads(sc.stdout.decode().splitlines()[1])

        if data['options']['output_format'] == 'SMB':
            return {"acl_type": "SMB", "acl_data": smb_sd}
        else:
            return self.middleware.call_sync('smb.convert_acl', ACLType.SMB.value, smb_sd)

    @private
    async def smb_to_nfsv4(self, sd, ignore_errors=False):
        """
        Convert an SMB security descriptor `sd` into the NFSv4 ACL
        representation. When `ignore_errors` is set, DACL entries whose SID
        cannot be mapped to a unix ID are dropped instead of raising CallError.
        """
        acl_out = {"uid": None, "gid": None, "acl": []}
        # resolve all trustee SIDs in a single idmap call
        idmaps = await self.middleware.call('idmap.convert_sids', [
            entry['trustee']['sid'] for entry in sd['dacl']
        ])

        for x in sd['dacl']:
            entry = {'tag': None, 'id': None, 'type': None, 'perms': {}, 'flags': {}}
            entry['perms'] = ACLPerms.convert('SMB', x['access_mask']['special'])
            entry['flags'] = ACLFlags.convert('SMB', x['flags'])
            entry['type'] = "ALLOW" if x['type'] == "ALLOWED" else "DENY"

            if x['trustee']['sid'] in ACLPrincipal.sids():
                # well-known special principal — no idmap lookup required
                aclp = ACLPrincipal.from_sid(x['trustee']['sid'])
                entry['tag'] = aclp.to_nfsv4()
            else:
                trustee = idmaps['mapped'].get(x['trustee']['sid'])
                if trustee is None:
                    if not ignore_errors:
                        raise CallError(f"Failed to convert SID [{x['trustee']['sid']}] "
                                        "to ID")
                    else:
                        self.logger.debug(f"Failed to convert SID [{x['trustee']['sid']}] "
                                          f"to ID. Dropping entry from ACL: {x}.")
                        continue

                entry['tag'] = "USER" if trustee['id_type'] == "USER" else "GROUP"
                entry['id'] = trustee['id']

            acl_out['acl'].append(entry)

        return {"acl_type": "NFSV4", "acl_data": acl_out}

    @private
    async def get_trustee(self, unixid, id_type):
        """
        Resolve a unix id (or an NFSv4 special principal when `unixid` is
        None) into a trustee dict with `sid` and `name` keys. Unmappable ids
        yield None values for both keys.
        """
        out = {"sid": None, "name": None}

        if unixid is None:
            aclp = ACLPrincipal.from_nfsv4(id_type)
            out['sid'] = aclp.to_sid()
            out['name'] = aclp.to_smb()
        else:
            idmaps = await self.middleware.call('idmap.convert_unixids', [{
                'id_type': id_type, 'id': unixid
            }])
            # BUGFIX: the lookup key was previously interpolated with the
            # builtin `id` (rendering "<built-in function id>") instead of the
            # `unixid` parameter, so the mapped lookup could never succeed.
            key = f'{"UID" if id_type == "USER" else "GID"}:{unixid}'
            if (entry := idmaps['mapped'].get(key)):
                out['sid'] = entry['sid']
                out['name'] = entry['name']

        return out

    @private
    async def nfsv4_to_smb(self, nfs4acl, ignore_errors=False):
        """
        Convert an NFSv4 ACL into an SMB security descriptor dict. Owner and
        group are resolved to trustees; CREATOR-OWNER / CREATOR-GROUP aces are
        duplicated into an inherit-only special entry plus an effective entry
        for the resolved owner/group.
        """
        inherited_present = False
        sd_out = {
            "revision": 1,
            "owner": {"sid": None, "name": None},
            "group": {"sid": None, "name": None},
            "dacl": [],
            "control": {
                "Self Relative": True,
                "RM Control Valid": False,
                "SACL Protected": False,
                "DACL Protected": False,
                "SACL Auto Inherited": False,
                "DACL Auto Inherited": False,
                "SACL Inheritance Required": False,
                "DACL Inheritance Required": False,
                "Server Security": False,
                "DACL Trusted": False,
                "SACL Defaulted": False,
                "SACL Present": False,
                "DACL Defaulted": False,
                "DACL Present": True,
                "Group Defaulted": False,
                "Owner Defaulted": False,
            }
        }

        for x in [("owner", "uid", "user"), ("group", "gid", "group")]:
            sd_out[x[0]] = await self.get_trustee(nfs4acl[x[1]], x[2].upper())

        for ace in nfs4acl["acl"]:
            entry = {
                "trustee": {"sid": None, "name": None},
                "type": None,
                "access_mask": {"hex": "", "standard": "", "special": {}, "unknown": "0x00000000"},
                "flags": {},
            }
            entry["trustee"] = await self.get_trustee(ace["id"], ace["tag"])
            must_special_convert = entry["trustee"]["sid"] in ["S-1-3-0", "S-1-3-1"]
            entry["type"] = "ALLOWED" if ace["type"] == "ALLOW" else "DENIED"
            entry["access_mask"]["special"] = ACLPerms.convert("NFSV4", ace["perms"])
            if entry["type"] == "ALLOWED":
                # allowed entries always carry SYNCHRONIZE
                entry["access_mask"]["special"]["SYNCHRONIZE"] = True

            entry["access_mask"]["standard"] = ACLPerms.to_standard(entry["access_mask"]["special"])
            entry["access_mask"]["hex"] = ACLPerms.to_hex("SMB", entry["access_mask"]["special"])
            entry["flags"] = ACLFlags.convert("NFSV4", ace["flags"])
            if entry["flags"]["INHERITED"]:
                inherited_present = True

            if must_special_convert:
                entry["flags"]["INHERIT_ONLY"] = True
                dup_entry = entry.copy()
                # BUGFIX: dict.copy() is shallow, so the duplicate previously
                # shared its flags dict with `entry`; the update() below then
                # also cleared INHERIT_ONLY on the original inherit-only entry.
                # Give the duplicate its own flags dict before mutating it.
                dup_entry["flags"] = entry["flags"].copy()
                dup_entry["trustee"] = sd_out["owner"].copy() if entry["trustee"]["sid"] == "S-1-3-0" else sd_out["group"].copy()
                dup_entry["flags"].update({
                    "OBJECT_INHERIT": False,
                    "CONTAINER_INHERIT": False,
                    "INHERIT_ONLY": False,
                    "NO_PROPAGATE_INHERIT": False
                })
                sd_out['dacl'].append(entry)
                sd_out['dacl'].append(dup_entry)
            else:
                sd_out['dacl'].append(entry)

        if not inherited_present:
            # no inherited aces means the DACL is protected from inheritance
            sd_out['control']['DACL Protected'] = True

        return {"acl_type": "SMB", "acl_data": sd_out}

    @private
    async def convert_acl(self, acl_type, data):
        """Convert `data` from `acl_type` ('SMB' or 'NFSV4') into the other
        ACL representation."""
        aclt = ACLType[acl_type]
        if aclt == ACLType.SMB:
            return await self.smb_to_nfsv4(data)
        else:
            return await self.nfsv4_to_smb(data)
| 11,952 | Python | .py | 279 | 31.21147 | 129 | 0.521949 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,726 | constants.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/constants.py | import enum
from bidict import bidict
from middlewared.utils import MIDDLEWARE_RUN_DIR
NETIF_COMPLETE_SENTINEL = f"{MIDDLEWARE_RUN_DIR}/ix-netif-complete"
CONFIGURED_SENTINEL = '/var/run/samba/.configured'
SMB_AUDIT_DEFAULTS = {'enable': False, 'watch_list': [], 'ignore_list': []}
INVALID_SHARE_NAME_CHARACTERS = {'%', '<', '>', '*', '?', '|', '/', '\\', '+', '=', ';', ':', '"', ',', '[', ']'}
RESERVED_SHARE_NAMES = ('global', 'printers', 'homes')
LOGLEVEL_MAP = bidict({
'0': 'NONE',
'1': 'MINIMUM',
'2': 'NORMAL',
'3': 'FULL',
'10': 'DEBUG',
})
class SMBHAMODE(enum.IntEnum):
    """
    'standalone' - Not an HA system.
    'legacy' - Two samba instances simultaneously running on active and standby controllers with no shared state.
    'unified' - Single set of state files migrating between controllers. Single netbios name.
    """
    STANDALONE = 0
    # NOTE(review): value 1 is intentionally absent — presumably the retired
    # 'legacy' mode described above; verify before reusing the value.
    UNIFIED = 2
    CLUSTERED = 3
class SMBCmd(enum.Enum):
    # Names of samba command-line utilities invoked by the middleware.
    NET = 'net'
    PDBEDIT = 'pdbedit'
    SHARESEC = 'sharesec'
    SMBCACLS = 'smbcacls'
    SMBCONTROL = 'smbcontrol'
    SMBPASSWD = 'smbpasswd'
    STATUS = 'smbstatus'
    WBINFO = 'wbinfo'
class SMBEncryption(enum.Enum):
    # SMB transport encryption policy choices; values are the strings used in
    # the generated SMB configuration.
    DEFAULT = 'default'
    NEGOTIATE = 'if_required'
    DESIRED = 'desired'
    REQUIRED = 'required'
class SMBBuiltin(enum.Enum):
    """
    Well-known BUILTIN accounts.
    Tuple layout: (TrueNAS unix group name, SID string).
    """
    ADMINISTRATORS = ('builtin_administrators', 'S-1-5-32-544')
    GUESTS = ('builtin_guests', 'S-1-5-32-546')
    USERS = ('builtin_users', 'S-1-5-32-545')

    @property
    def nt_name(self):
        """Windows account name, e.g. 'Administrators'."""
        # strip the 'builtin_' prefix (8 characters) from the unix group name
        unix_name = self.value[0]
        return unix_name[8:].capitalize()

    @property
    def sid(self):
        """Full SID string for the builtin account."""
        return self.value[1]

    @property
    def rid(self):
        """Relative identifier (final SID component) as an int."""
        return int(self.value[1].rsplit('-', 1)[-1])

    def unix_groups():
        """Unix group names of all builtin accounts."""
        return [entry.value[0] for entry in SMBBuiltin]

    def sids():
        """SID strings of all builtin accounts."""
        return [entry.value[1] for entry in SMBBuiltin]

    def by_rid(rid):
        """Return the builtin whose SID ends with `rid`, or None."""
        return next(
            (entry for entry in SMBBuiltin if entry.value[1].endswith(str(rid))),
            None
        )
class SMBPath(enum.Enum):
    """
    Filesystem paths used by the SMB service.
    Tuple layout: (path, permission mode, is_directory).
    """
    GLOBALCONF = ('/etc/smb4.conf', 0o644, False)
    STUBCONF = ('/usr/local/etc/smb4.conf', 0o644, False)
    SHARECONF = ('/etc/smb4_share.conf', 0o755, False)
    STATEDIR = ('/var/db/system/samba4', 0o755, True)
    PRIVATEDIR = ('/var/db/system/samba4/private', 0o700, True)
    LEGACYSTATE = ('/root/samba', 0o755, True)
    LEGACYPRIVATE = ('/root/samba/private', 0o700, True)
    CACHE_DIR = ('/var/run/samba-cache', 0o755, True)
    PASSDB_DIR = ('/var/run/samba-cache/private', 0o700, True)
    MSG_SOCK = ('/var/db/system/samba4/private/msg.sock', 0o700, False)
    RUNDIR = ('/var/run/samba', 0o755, True)
    LOCKDIR = ('/var/run/samba-lock', 0o755, True)
    LOGDIR = ('/var/log/samba4', 0o755, True)
    IPCSHARE = ('/tmp', 0o1777, True)
    WINBINDD_PRIVILEGED = ('/var/db/system/samba4/winbindd_privileged', 0o750, True)

    def platform(self):
        """Path string for this entry."""
        path_str, _mode, _is_dir = self.value
        return path_str

    def mode(self):
        """Expected permission bits for this path."""
        _path, mode_bits, _is_dir = self.value
        return mode_bits

    def is_dir(self):
        """True when the entry refers to a directory, False for a file."""
        _path, _mode, directory = self.value
        return directory

    @property
    def path(self):
        """Path string (same value returned by platform())."""
        return self.value[0]
class SMBSharePreset(enum.Enum):
    """
    SMB share presets. Each value carries a human-readable `verbose_name`,
    the share `params` the preset applies, and a `cluster` flag.

    NOTE: enum values are mutable dicts — callers should copy `params`
    before modifying.
    """
    NO_PRESET = {"verbose_name": "No presets", "params": {
        'auxsmbconf': '',
    }, "cluster": False}
    DEFAULT_SHARE = {"verbose_name": "Default share parameters", "params": {
        'path_suffix': '',
        'home': False,
        'ro': False,
        'browsable': True,
        'timemachine': False,
        'recyclebin': False,
        'abe': False,
        'hostsallow': [],
        'hostsdeny': [],
        'aapl_name_mangling': False,
        'acl': True,
        'durablehandle': True,
        'shadowcopy': True,
        'streams': True,
        'fsrvp': False,
        'auxsmbconf': '',
    }, "cluster": False}
    TIMEMACHINE = {"verbose_name": "Basic time machine share", "params": {
        'path_suffix': '',
        'timemachine': True,
        'auxsmbconf': '',
    }, "cluster": False}
    # per-user (%U) share path with automatic dataset creation
    ENHANCED_TIMEMACHINE = {"verbose_name": "Multi-user time machine", "params": {
        'path_suffix': '%U',
        'timemachine': True,
        'auxsmbconf': '\n'.join([
            'zfs_core:zfs_auto_create=true'
        ])
    }, "cluster": False}
    MULTI_PROTOCOL_NFS = {"verbose_name": "Multi-protocol (NFSv4/SMB) shares", "params": {
        'streams': True,
        'durablehandle': False,
        'auxsmbconf': '',
    }, "cluster": False}
    PRIVATE_DATASETS = {"verbose_name": "Private SMB Datasets and Shares", "params": {
        'path_suffix': '%U',
        'auxsmbconf': '\n'.join([
            'zfs_core:zfs_auto_create=true'
        ])
    }, "cluster": False}
    # worm:grace_period is in seconds (300 = 5 minutes)
    WORM_DROPBOX = {"verbose_name": "SMB WORM. Files become readonly via SMB after 5 minutes", "params": {
        'path_suffix': '',
        'auxsmbconf': '\n'.join([
            'worm:grace_period = 300',
        ])
    }, "cluster": False}
| 4,846 | Python | .py | 136 | 29.294118 | 113 | 0.592442 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,727 | util_passdb.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/util_passdb.py | # Utilities that wrap around samba's passdb.tdb file
#
# test coverage provided by src/middlewared/middlewared/pytest/unit/utils/test_passdb.py
# sample tdb contents (via tdbdump)
#
# {
# key(19) = "INFO/minor_version\00"
# data(4) = "\00\00\00\00"
# }
# {
# key(13) = "RID_00004e66\00"
# data(8) = "smbuser\00"
# }
# {
# key(9) = "NEXT_RID\00"
# data(4) = "\E9\03\00\00"
# }
# {
# key(13) = "USER_smbuser\00"
# data(202) = "\00\00\00\00\7F\A9T|\7F\A9T|\00\00\00\00z\91\C4f\00\00\00\00\7F\A9T|\08\00\00\00smbuser\00\0F\00\00\00TESTMJPYWOO8AG\00\01\00\00\00\00\08\00\00\00smbuser\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\01\00\00\00\00\01\00\00\00\00\01\00\00\00\00fN\00\00\01\02\00\00\00\00\00\00\10\00\00\00\B3\F3O\F0\FB\B7r\A1\A7\08\10\CB\B32\07@\00\00\00\00\10\00\00\00\A8\00\15\00\00\00 \00\00\00\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\FF\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\EC\04\00\00" # noqa
# }
# {
# key(13) = "INFO/version\00"
# data(4) = "\04\00\00\00"
# }
import enum
import os
from base64 import b64decode, b64encode
from collections.abc import Iterable
from dataclasses import asdict, dataclass
from middlewared.plugins.idmap_.idmap_constants import IDType
from middlewared.service_exception import MatchNotFound
from middlewared.utils import filter_list
from middlewared.utils.sid import db_id_to_rid
from middlewared.utils.tdb import (
get_tdb_handle,
TDBBatchAction,
TDBBatchOperation,
TDBDataType,
TDBHandle,
TDBOptions,
TDBPathType,
)
from struct import pack, unpack
from time import time
from .constants import SMBPath
# Major and minor versions must be written to the passdb.tdb file
# Major version identifies version of struct samu.
MINOR_VERSION_KEY = 'INFO/minor_version'
MINOR_VERSION_VAL = b64encode(pack('<I', 0))
MAJOR_VERSION_KEY = 'INFO/version'
MAJOR_VERSION_VAL = b64encode(pack('<I', 4))
# The following constants are taken from default values
# generated in samu_new() in source3/passdb/passdb.c
DEFAULT_HOURS_LEN = 21
PACKED_HOURS = pack(f'<{"B" * DEFAULT_HOURS_LEN}', *[0xff] * DEFAULT_HOURS_LEN)
UNKNOWN_6 = 0x000004ec # unknown value in samba struct samu
USER_PREFIX = 'USER_'
RID_PREFIX = 'RID_'
PASSDB_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
PASSDB_PATH = f'{SMBPath.PASSDB_DIR.path}/passdb.tdb'
PASSDB_TIME_T_MAX = 2085923199 # observed in recent samba versions. Should be output of get_time_t_max()
class PassdbMustReinit(Exception):
    """
    Raised when passdb.tdb contents are malformed or inconsistent and the
    file must be regenerated from scratch.
    """
    def __init__(self, reason):
        # Initialize the Exception base class so that str(exc) and exc.args
        # carry the reason (the previous implementation left them empty).
        super().__init__(reason)
        self.errmsg = reason
class UserAccountControl(enum.IntFlag):
    """
    from librpc/idl/samr.idl and MS-SAMR 2.2.1.12
    account control (acct_ctrl / acct_flags) bits
    entries in enum are only ones that may possibly be relevant for local accounts
    We may expand as-needed to include more documented flags.

    Values are single bit positions and may be combined with `|` as a bitmask.
    """
    DISABLED = 0x00000001  # User account disabled
    NORMAL_ACCOUNT = 0x00000010  # Normal user account
    DONT_EXPIRE_PASSWORD = 0x00000200  # User password does not expire
    AUTO_LOCKED = 0x00000400  # Account auto locked
    PASSWORD_EXPIRED = 0x00020000  # Password expired
@dataclass(frozen=True)
class PDBTimes:
    """
    Timestamp fields from a passdb struct samu entry, unpacked as signed
    32-bit ints (field order matches the SAMU buffer layout consumed by
    _unpack_pdb_bytes).
    """
    logon: int
    logoff: int
    kickoff: int
    bad_password: int
    pass_last_set: int
    pass_can_change: int
    pass_must_change: int
@dataclass(frozen=True)
class PDBEntry:
    """
    Derived from SAMU_BUFFER_FORMAT_V3
    NOTE: buffer format v3 and v4 are identical
    These are extracted from on-disk passdb.tdb entry

    Some fields from the passdb are omitted because we will never allow
    changing values (for example unknown_6, lanman password)
    """
    username: str  # Unix username
    nt_username: str  # Windows username
    domain: str  # Windows domain name (netbios name of TrueNAS)
    full_name: str  # user's full name
    comment: str  # free-form comment string
    home_dir: str  # home directory
    dir_drive: str  # home directroy drive string
    logon_script: str
    profile_path: str
    user_rid: int  # RID of the user
    group_rid: int  # Samba's pdbedit defaults to 513 (domain users)
    acct_desc: str  # User description string
    acct_ctrl: int  # account control bitmask (see UserAccountControl)
    nt_pw: str  # NT password hash
    logon_count: int
    bad_pw_count: int
    times: PDBTimes  # timestamp fields from the SAMU buffer
def _add_version_info():
    """ add version info to new file """
    with get_tdb_handle(PASSDB_PATH, PASSDB_TDB_OPTIONS) as handle:
        # minor version is written first, matching samba's on-disk layout
        for version_key, version_val in (
            (MINOR_VERSION_KEY, MINOR_VERSION_VAL),
            (MAJOR_VERSION_KEY, MAJOR_VERSION_VAL),
        ):
            handle.store(version_key, version_val)
def _unpack_samba_pascal_string(entry_bytes: bytes, raw: bool = False) -> tuple[str, bytes]:
"""
samba pascal strings have length of string as uint precedening string data
This method unpacks from entry_bytes and returns tuple of string value and
remainaing bytes of data.
"""
entry_len = unpack('<I', entry_bytes[0:4])[0]
entry_bytes = entry_bytes[4:]
if raw:
entry = unpack(f'<{entry_len}s', entry_bytes[0: entry_len])[0]
else:
# the length encoded in pascal string includes null-termination
# strip off extra NULL and decode prior to return
entry = unpack(f'<{entry_len}s', entry_bytes[0: entry_len])[0][:-1].decode()
return (entry, entry_bytes[entry_len:])
def _pack_samba_pascal_string(entry: str | bytes) -> bytes:
""" pack given string / bytes into format expected in TDB file """
if isinstance(entry, str):
entry = entry.encode() + b'\x00'
entry_len = pack('<I', len(entry))
return entry_len + entry
def _unpack_pdb_bytes(entry_bytes: bytes) -> PDBEntry:
    """ This method unpacks a SAMU_BUFFER_FORMAT_V3 into PDBEntry object """
    # first seven entries are various timestamps encoded as signed 32 bit int
    times = PDBTimes(*unpack('<iiiiiii', entry_bytes[0:28]))
    entry_bytes = entry_bytes[28:]

    # next are a series of pascal strings; each call consumes its field and
    # returns the remaining buffer, so the order below must exactly match the
    # on-disk layout (and the pack order in _pack_pdb_entry)
    username, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    domain, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    nt_username, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    full_name, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    homedir, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    dir_drive, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    logon_script, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    profile_path, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    acct_desc, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    # workstations and munged_dial are consumed to advance the cursor but are
    # not carried into the resulting PDBEntry
    workstations, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    comment, entry_bytes = _unpack_samba_pascal_string(entry_bytes)
    munged_dial, entry_bytes = _unpack_samba_pascal_string(entry_bytes)

    # next are rid values
    user_rid, group_rid = unpack('<II', entry_bytes[0:8])
    entry_bytes = entry_bytes[8:]

    # lanman password and NT password history are parsed (raw) but discarded;
    # only the NT hash is retained
    lm_pw, entry_bytes = _unpack_samba_pascal_string(entry_bytes, True)
    nt_pw, entry_bytes = _unpack_samba_pascal_string(entry_bytes, True)
    nt_pw_history, entry_bytes = _unpack_samba_pascal_string(entry_bytes, True)

    # logon_divs / hours_len / hours are consumed but dropped (defaults are
    # re-created by _pack_pdb_entry)
    acct_ctrl, logon_divs, hours_len = unpack('<iHi', entry_bytes[0:10])
    entry_bytes = entry_bytes[10:]
    hours, entry_bytes = _unpack_samba_pascal_string(entry_bytes, True)
    bad_pw_cnt, logon_cnt = unpack('<HH', entry_bytes[0:4])

    return PDBEntry(
        username=username,
        nt_username=nt_username,
        domain=domain,
        full_name=full_name,
        comment=comment,
        home_dir=homedir,
        dir_drive=dir_drive,
        logon_script=logon_script,
        profile_path=profile_path,
        acct_desc=acct_desc,
        acct_ctrl=acct_ctrl,
        nt_pw=nt_pw.hex().upper(),
        user_rid=user_rid,
        group_rid=group_rid,
        logon_count=logon_cnt,
        bad_pw_count=bad_pw_cnt,
        times=times
    )
def _pack_pdb_entry(entry: PDBEntry) -> bytes:
    """
    Pack information in PDBEntry into bytes for TDB insertion.

    Some values are defaulted to empty strings because we do not
    provide a mechanism for setting / maintaining them from middleware
    or explicitly do not support the associated feature (such as lanman password).

    The layout must mirror _unpack_pdb_bytes() exactly.
    """
    # seven signed 32-bit timestamps
    data = pack(
        '<iiiiiii',
        entry.times.logon,
        entry.times.logoff,
        entry.times.kickoff,
        entry.times.bad_password,
        entry.times.pass_last_set,
        entry.times.pass_can_change,
        entry.times.pass_must_change,
    )
    # pascal strings in the same order the unpacker reads them
    data += _pack_samba_pascal_string(entry.username)
    data += _pack_samba_pascal_string(entry.domain)
    data += _pack_samba_pascal_string(entry.nt_username)
    data += _pack_samba_pascal_string(entry.full_name)
    data += _pack_samba_pascal_string(entry.home_dir)
    data += _pack_samba_pascal_string(entry.dir_drive)
    data += _pack_samba_pascal_string(entry.logon_script)
    data += _pack_samba_pascal_string(entry.profile_path)
    data += _pack_samba_pascal_string(entry.acct_desc)
    data += _pack_samba_pascal_string('')  # workstations
    data += _pack_samba_pascal_string(entry.comment)
    data += _pack_samba_pascal_string('')  # munged dial
    data += pack('<II', entry.user_rid, entry.group_rid)
    data += _pack_samba_pascal_string('')  # lanman password
    data += _pack_samba_pascal_string(bytes.fromhex(entry.nt_pw))
    data += _pack_samba_pascal_string('')  # NT password history
    # 168 is logon_divs (hours per week).
    # NOTE(review): acct_ctrl is packed unsigned ('<I') here but read back
    # signed ('<i') in _unpack_pdb_bytes -- same width so values round-trip,
    # but confirm the sign mismatch is intentional.
    data += pack('<IHi', entry.acct_ctrl, 168, DEFAULT_HOURS_LEN)
    data += _pack_samba_pascal_string(PACKED_HOURS)
    data += pack('<HHi', entry.bad_pw_count, entry.logon_count, UNKNOWN_6)
    return data
def _parse_passdb_entry(hdl: TDBHandle, tdb_key: str, tdb_val: str) -> PDBEntry:
    """
    Retrieve SAMU data based on passdb RID entry and parse bytes into a PDBEntry
    object.

    Params:
        hdl - open handle on passdb.tdb
        tdb_key - RID-prefixed key (unused; kept for interface parity)
        tdb_val - b64-encoded, NUL-terminated username the RID entry points at

    Raises:
        PassdbMustReinit - the USER entry referenced by the RID entry is missing,
            which indicates passdb.tdb is internally inconsistent and must be
            rewritten by the caller.
    """
    # the RID entry value is the username with a trailing NUL; build the USER
    # key once and reuse it (the original recomputed this expression twice)
    key = f'{USER_PREFIX}{b64decode(tdb_val)[:-1].decode()}'
    try:
        if (pdb_bytes := hdl.get(key)) is None:
            # malformed passdb entry. Shouldn't happen. Force a rewrite.
            raise PassdbMustReinit(f'{key}: passdb.tdb lacks expected key')
    except MatchNotFound:
        raise PassdbMustReinit(f'{key}: passdb.tdb lacks expected key') from None

    return _unpack_pdb_bytes(b64decode(pdb_bytes))
def passdb_entries(as_dict: bool = False) -> Iterable[PDBEntry | dict]:
    """ Iterate the passdb.tdb file

    Each SMB user contains two TDB entries. One that maps a RID value to the username
    and the other maps the username to a samu buffer. These are both written simultaneously
    under a transaction lock and so we should never be in a situation where they are
    inconsistent; however if we are unable to look up a USER entry based on the username
    in the RID entry, a PassdbMustReinit exception will be raised so that caller knows
    that the file should be rewritten.

    Params:
        as_dict - yield entries as dictionaries instead of PDBEntry objects

    Returns:
        PDBEntry (or its asdict() form when `as_dict` is set)

    Raises:
        PassdbMustReinit - internal inconsistencies in passdb file
    """
    if not os.path.exists(PASSDB_PATH):
        # create the file with its version header before iterating
        _add_version_info()
    with get_tdb_handle(PASSDB_PATH, PASSDB_TDB_OPTIONS) as hdl:
        for entry in hdl.entries():
            # iterate only the RID entries; the USER entry is resolved from them
            if not entry['key'].startswith(RID_PREFIX):
                continue
            parsed = _parse_passdb_entry(hdl, entry['key'], entry['value'])
            yield asdict(parsed) if as_dict else parsed
def query_passdb_entries(filters: list, options: dict) -> list[dict]:
    """ Query passdb entries with standard query-filters and query-options.

    Thin filterable wrapper around passdb_entries(); the same failure modes
    apply. A missing passdb.tdb file yields an empty result set.

    Params:
        filters - standard query-filters
        options - standard query-options

    Returns:
        list of asdict() representations of PDBEntry objects

    Raises:
        PassdbMustReinit - internal inconsistencies in passdb file
    """
    try:
        results = list(passdb_entries(as_dict=True))
    except FileNotFoundError:
        # TDB file disappeared between existence check and open
        return []

    return filter_list(results, filters, options)
def insert_passdb_entries(entries: list[PDBEntry]) -> None:
    """ Insert multiple passdb entries under a transaction lock.

    Each PDBEntry requires two TDB insertions (USER and RID keys), so the
    whole list is submitted under one TDB transaction: on failure the file
    is rolled back to its prior state. Existing entries are overwritten.

    Params:
        entries - list of PDBEntry objects to be inserted

    Raises:
        TypeError - list item isn't a PDBEntry object
        RuntimeError - TDB library error
    """
    if not os.path.exists(PASSDB_PATH):
        # initialize the file with its version header first
        _add_version_info()

    operations = []
    for item in entries:
        if not isinstance(item, PDBEntry):
            raise TypeError(f'{type(item)}: not a PDBEntry')

        packed = _pack_pdb_entry(item)
        # USER key -> samu buffer
        operations.append(TDBBatchOperation(
            action=TDBBatchAction.SET,
            key=f'{USER_PREFIX}{item.username}',
            value=b64encode(packed)
        ))
        # RID key -> NUL-terminated username
        operations.append(TDBBatchOperation(
            action=TDBBatchAction.SET,
            key=f'{RID_PREFIX}{item.user_rid:08x}',
            value=b64encode(item.username.encode() + b'\x00')
        ))

    if not operations:
        # nothing to do, avoid taking lock
        return

    with get_tdb_handle(PASSDB_PATH, PASSDB_TDB_OPTIONS) as hdl:
        hdl.batch_op(operations)
def delete_passdb_entry(username: str, rid: int) -> None:
    """ Remove both TDB records for a user atomically.

    Deletes the USER and RID keys under a single transaction so that
    passdb.tdb can never end up with only one of the pair. Missing keys
    (or a missing file) are treated as success.
    """
    if not os.path.exists(PASSDB_PATH):
        # passdb.tdb doesn't exist so nothing to do
        return

    operations = [
        TDBBatchOperation(
            action=TDBBatchAction.DEL,
            key=f'{USER_PREFIX}{username}'
        ),
        TDBBatchOperation(
            action=TDBBatchAction.DEL,
            key=f'{RID_PREFIX}{rid:08x}'
        ),
    ]

    with get_tdb_handle(PASSDB_PATH, PASSDB_TDB_OPTIONS) as hdl:
        # transaction lock forces atomicity of the paired deletions
        try:
            hdl.batch_op(operations)
        except RuntimeError:
            # entries do not exist
            pass
def update_passdb_entry(entry: PDBEntry) -> None:
    """ Update an existing passdb entry or insert a new one.

    This method attempts to update an existing passdb entry with info in PDBEntry
    object. If any operations related to update fail then rollback to original
    status of TDB file is performed.

    If entry does not exist, then new one is inserted.

    Params:
        entry - PDBEntry object with samu data for user

    Raises:
        TypeError - not a PDBEntry
        RuntimeError - TDB library error
    """
    if not isinstance(entry, PDBEntry):
        raise TypeError(f'{type(entry)}: expected PDBEntry type.')
    if not os.path.exists(PASSDB_PATH):
        # initialize the file with its version header first
        _add_version_info()
    with get_tdb_handle(PASSDB_PATH, PASSDB_TDB_OPTIONS) as hdl:
        batch_ops = []
        try:
            # look up whatever username the RID currently points at so a rename
            # can be detected
            current_username = b64decode(hdl.get(f'{RID_PREFIX}{entry.user_rid:08x}'))[:-1].decode()
        except MatchNotFound:
            # no existing entry for this RID; plain insert below
            pass
        else:
            if current_username != entry.username:
                # name has changed. Make sure we clean up old entry under transaction
                # lock
                batch_ops.append(
                    TDBBatchOperation(
                        action=TDBBatchAction.DEL,
                        key=f'{USER_PREFIX}{current_username}'
                    ),
                )
        # SET operations overwrite any existing USER / RID keys
        samu_data = _pack_pdb_entry(entry)
        batch_ops.extend([
            TDBBatchOperation(
                action=TDBBatchAction.SET,
                key=f'{USER_PREFIX}{entry.username}',
                value=b64encode(samu_data)
            ),
            TDBBatchOperation(
                action=TDBBatchAction.SET,
                key=f'{RID_PREFIX}{entry.user_rid:08x}',
                value=b64encode(entry.username.encode() + b'\x00')
            )
        ])
        hdl.batch_op(batch_ops)
def user_entry_to_uac_flags(user_entry) -> UserAccountControl:
    """ Map middleware user entry fields onto MS-SAMU UserAccountControl flags. """
    uac = UserAccountControl.NORMAL_ACCOUNT
    for field, flag in (
        ('locked', UserAccountControl.AUTO_LOCKED),
        ('password_disabled', UserAccountControl.DISABLED),
    ):
        if user_entry[field]:
            uac |= flag

    return uac
def user_smbhash_to_nt_pw(username, smbhash) -> str:
    """ Extract the NT hash from `smbhash` data.

    `smbhash` is either a bare NT hash or a legacy smbpasswd-format line
    (colon-delimited), in which case the fourth field holds the NT hash.

    Raises:
        ValueError - user has no SMB hash stored
    """
    if not smbhash:
        raise ValueError(f'{username}: no SMB hash available for user')

    if ':' not in smbhash:
        return smbhash

    # legacy smbpasswd format: user:uid:<LM hash>:<NT hash>:flags:LCT
    return smbhash.split(':')[3]
def user_entry_to_passdb_entry(
    netbiosname: str,
    user_entry: dict,
    existing_entry: dict = None
) -> PDBEntry:
    """ Create an updated PDBEntry based on user-provided specifications.

    This helper function creates a PDBEntry for later use in passdb insertion call. The
    intended use is for cases where struct SAMU encodes information that we may wish to
    preserve but are unable to due to not having corresponding fields in our middleware
    user entries.

    Params:
        netbiosname - server netbios name, uppercased into the SAMU domain field
        user_entry - middleware user.query-style entry (must have smb enabled)
        existing_entry - optional asdict() form of the current PDBEntry whose
            timestamps and counters should be preserved

    Raises:
        ValueError - user is not an SMB user or has no SMB hash
    """
    if not user_entry['smb']:
        raise ValueError(f'{user_entry["username"]}: not an SMB user')
    if not user_entry['smbhash']:
        raise ValueError(f'{user_entry["username"]}: SMB hash not available')
    # defaults for a fresh entry: "never" sentinels for expiry-type fields
    pdb_times = PDBTimes(
        logon=0,
        logoff=PASSDB_TIME_T_MAX,
        kickoff=PASSDB_TIME_T_MAX,
        bad_password=0,
        pass_last_set=int(time()),
        pass_can_change=0,
        pass_must_change=PASSDB_TIME_T_MAX
    )
    pdb_dict = {
        'username': user_entry['username'],
        'nt_username': '',
        'domain': netbiosname.upper(),
        'full_name': user_entry['full_name'],
        'comment': '',
        'home_dir': '',
        'dir_drive': '',
        'logon_script': '',
        'profile_path': '',
        'user_rid': db_id_to_rid(IDType.USER, user_entry['id']),
        'group_rid': 513,  # samba default -- domain users rid
        'acct_desc': '',
        'acct_ctrl': user_entry_to_uac_flags(user_entry),
        'nt_pw': user_smbhash_to_nt_pw(user_entry['username'], user_entry['smbhash']),
        'logon_count': 0,
        'bad_pw_count': 0,
        'times': pdb_times
    }
    if existing_entry:
        # preserve existing times:
        pdb_dict['times'] = PDBTimes(**existing_entry['times'])
        # preserve counters
        pdb_dict['logon_count'] = existing_entry['logon_count']
        pdb_dict['bad_pw_count'] = existing_entry['bad_pw_count']
    return PDBEntry(**pdb_dict)
| 18,939 | Python | .py | 444 | 35.851351 | 544 | 0.664184 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,728 | registry_share.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/registry_share.py | from middlewared.service import private, Service
from middlewared.service_exception import CallError
from middlewared.plugins.smb import SMBCmd, SMBHAMODE
from middlewared.plugins.smb_.smbconf.reg_service import ShareSchema
from .utils import smb_strip_comments
from .util_net_conf import (
reg_setparm,
reg_delparm,
reg_addshare,
reg_listshares,
reg_showshare,
)
import errno
import json
import os
CONF_JSON_VERSION = {"major": 0, "minor": 1}
class SharingSMBService(Service):
    """ Private helpers for keeping SMB share configuration in the Samba
    registry (net_conf) in sync with the middleware database. """

    class Config:
        namespace = 'sharing.smb'

    @private
    def reg_addshare(self, data):
        """
        Wrapper around the net_conf reg_addshare helper: converts the
        middleware share entry to smb.conf parameters and registers it.
        Home shares are always registered under the special 'homes' name.
        """
        conf = self.share_to_smbconf(data)
        name = 'homes' if data['home'] else data['name']
        reg_addshare(name, conf)

    @private
    def reg_listshares(self):
        """
        Wrapper primarily used by CI to validate list of shares
        """
        return reg_listshares()

    @private
    def get_global_params(self, globalconf):
        """ Return global SMB state needed for share conversion.

        Any key absent from `globalconf` is looked up live via middleware
        calls; callers may pre-populate keys to avoid the extra queries.
        """
        if globalconf is None:
            globalconf = {}
        gl = {
            'fruit_enabled': globalconf.get('fruit_enabled', None),
            'ad_enabled': globalconf.get('ad_enabled', None),
            'nfs_exports': globalconf.get('nfs_exports', None),
            'smb_shares': globalconf.get('smb_shares', None)
        }
        if gl['nfs_exports'] is None:
            gl['nfs_exports'] = self.middleware.call_sync('sharing.nfs.query', [['enabled', '=', True]])
        if gl['smb_shares'] is None:
            gl['smb_shares'] = self.middleware.call_sync('sharing.smb.query', [['enabled', '=', True]])
            # strip comments so aux params compare cleanly against registry
            for share in gl['smb_shares']:
                share['auxsmbconf'] = smb_strip_comments(share['auxsmbconf'])
        if gl['ad_enabled'] is None:
            gl['ad_enabled'] = self.middleware.call_sync('activedirectory.config')['enable']
        if gl['fruit_enabled'] is None:
            smbconf = self.middleware.call_sync('smb.config')
            gl['fruit_enabled'] = smbconf['aapl_extensions']
        return gl

    @private
    def diff_middleware_and_registry(self, share, data):
        """ Diff a share's db config against its registry config.

        Returns a dict of 'added' / 'removed' / 'modified' parameter maps,
        or None if the share could not be read from the registry.
        """
        if share is None:
            raise CallError('Share name must be specified.')
        if data is None:
            data = self.middleware.call_sync('sharing.smb.query', [('name', '=', share)], {'get': True})
        data['auxsmbconf'] = smb_strip_comments(data['auxsmbconf'])
        share_conf = self.share_to_smbconf(data)
        try:
            # home shares are stored in the registry as 'homes'
            reg_conf = reg_showshare(share if not data['home'] else 'homes')['parameters']
        except Exception:
            # share missing from registry (or net_conf failure); caller
            # treats None as "no diff available"
            return None
        s_keys = set(share_conf.keys())
        r_keys = set(reg_conf.keys())
        intersect = s_keys.intersection(r_keys)
        return {
            'added': {x: share_conf[x] for x in s_keys - r_keys},
            'removed': {x: reg_conf[x] for x in r_keys - s_keys},
            'modified': {x: share_conf[x] for x in intersect if share_conf[x] != reg_conf[x]},
        }

    @private
    def apply_conf_diff(self, share, diff):
        """ Apply a diff_middleware_and_registry() result to the registry. """
        set_payload = {"service": share, "parameters": diff["added"] | diff["modified"]}
        del_payload = {"service": share, "parameters": diff["removed"]}
        if set_payload["parameters"]:
            reg_setparm(set_payload)
        if del_payload["parameters"]:
            reg_delparm(del_payload)
        return

    @private
    def create_domain_paths(self, path):
        """ Pre-create per-domain subdirectories under an AD homes share path
        so that the %D macro expansion has somewhere to land. """
        if not path:
            return
        for dom in self.middleware.call_sync('smb.domain_choices'):
            if dom == 'BUILTIN':
                continue
            try:
                os.mkdir(os.path.join(path, dom))
            except FileExistsError:
                # already provisioned on a previous run
                pass

    @private
    def share_to_smbconf(self, conf_in, globalconf=None):
        """ Convert a middleware share entry into smb.conf registry parameters. """
        data = conf_in.copy()
        gl = self.get_global_params(globalconf)
        data['auxsmbconf'] = smb_strip_comments(data['auxsmbconf'])
        conf = {}
        if not data['path_suffix'] and data['home']:
            """
            Homes shares must have some macro expansion (to avoid giving users same
            homedir) unless path is omitted for share.

            Omitting path is special configuration that shares out every user's
            home directory (regardless of path).
            """
            if gl['ad_enabled']:
                data['path_suffix'] = '%D/%U'
                self.create_domain_paths(conf_in['path'])
            elif data['path']:
                data['path_suffix'] = '%U'
        ss = ShareSchema(self.middleware)
        ss.convert_schema_to_registry(data, conf)
        return conf
| 4,695 | Python | .py | 116 | 30.974138 | 104 | 0.58958 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,729 | disable_acl_if_trivial.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/disable_acl_if_trivial.py | import contextlib
import os
import textwrap
from middlewared.service import private, Service
class SMBService(Service):
class Config:
service = 'cifs'
service_verb = 'restart'
@private
async def disable_acl_if_trivial(self):
share_ids = await self.middleware.call("keyvalue.get", "smb_disable_acl_if_trivial", [])
if not share_ids:
return
share_ids = set(share_ids)
for share in await self.middleware.call("sharing.smb.query", [("locked", "=", False), ("enabled", "=", True)]):
if share["id"] not in share_ids:
continue
try:
acl_is_trivial = await self.middleware.call("filesystem.acl_is_trivial", share["path"])
except Exception:
self.middleware.logger.warning("Error running filesystem.acl_is_trivial for share %r", share["id"],
exc_info=True)
continue
if acl_is_trivial:
self.middleware.logger.info("ACL is trivial for migrated AFP share %r, disabling ACL", share["id"])
await self.middleware.call(
"datastore.update", "sharing.cifs_share", share["id"], {"cifs_acl": False},
)
else:
self.middleware.logger.info("ACL is not trivial for migrated AFP share %r, not disabling ACL",
share["id"])
share_ids.discard(share["id"])
if share_ids:
await self.middleware.call("keyvalue.set", "smb_disable_acl_if_trivial", list(share_ids))
else:
await self.middleware.call("keyvalue.delete", "smb_disable_acl_if_trivial")
| 1,743 | Python | .py | 36 | 35.416667 | 119 | 0.572438 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,730 | util_groupmap.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/util_groupmap.py | # Utilities that wrap around samba's group_mapping.tdb file
#
# test coverage provided by src/middlewared/middlewared/pytest/unit/utils/test_groupmap.py
# sample tdb contents (via tdbdump)
#
# {
# key(23) = "UNIXGROUP/S-1-5-32-546\00"
# data(16) = "\83J]\05\04\00\00\00Guests\00\00"
# }
# {
# key(58) = "UNIXGROUP/S-1-5-21-1137207236-3870220311-645177593-200042\00"
# data(40) = "\B8\03\00\00\04\00\00\00truenas_sharing_administrators\00\00"
# }
# {
# key(55) = "UNIXGROUP/S-1-5-21-1137207236-3870220311-645177593-512\00"
# data(32) = " \02\00\00\04\00\00\00builtin_administrators\00\00"
# }
# {
# key(58) = "UNIXGROUP/S-1-5-21-1137207236-3870220311-645177593-200090\00"
# data(23) = "!\02\00\00\04\00\00\00builtin_users\00\00"
# }
# {
# key(55) = "UNIXGROUP/S-1-5-21-1137207236-3870220311-645177593-514\00"
# data(24) = "\22\02\00\00\04\00\00\00builtin_guests\00\00"
# }
# {
# key(58) = "UNIXGROUP/S-1-5-21-1137207236-3870220311-645177593-200041\00"
# data(41) = "\B7\03\00\00\04\00\00\00truenas_readonly_administrators\00\00"
# }
# {
# key(54) = "MEMBEROF/S-1-5-21-1137207236-3870220311-645177593-512\00"
# data(13) = "S-1-5-32-544\00"
# }
# {
# key(23) = "UNIXGROUP/S-1-5-32-544\00"
# data(24) = "\81J]\05\04\00\00\00Administrators\00\00"
# }
# {
# key(57) = "MEMBEROF/S-1-5-21-1137207236-3870220311-645177593-200090\00"
# data(13) = "S-1-5-32-545\00"
# }
# {
# key(54) = "MEMBEROF/S-1-5-21-1137207236-3870220311-645177593-514\00"
# data(13) = "S-1-5-32-546\00"
# }
# {
# key(23) = "UNIXGROUP/S-1-5-32-545\00"
# data(15) = "\82J]\05\04\00\00\00Users\00\00"
# }
import enum
from base64 import b64decode, b64encode
from collections.abc import Iterable
from dataclasses import asdict, dataclass
from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
from middlewared.utils import filter_list
from middlewared.utils.sid import (
lsa_sidtype
)
from middlewared.utils.tdb import (
get_tdb_handle,
TDBBatchAction,
TDBBatchOperation,
TDBDataType,
TDBOptions,
TDBPathType,
)
from socket import htonl, ntohl
UNIX_GROUP_KEY_PREFIX = 'UNIXGROUP/'
MEMBEROF_PREFIX = 'MEMBEROF/'
GROUP_MAPPING_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
class GroupmapEntryType(enum.Enum):
    """ Discriminator for the two record types stored in group_mapping.tdb """
    GROUP_MAPPING = enum.auto()  # conventional group mapping entry
    MEMBERSHIP = enum.auto()  # foreign alias member
class GroupmapFile(enum.Enum):
    """ On-disk locations of the group mapping TDB files on the system dataset """
    DEFAULT = f'{SYSDATASET_PATH}/samba4/group_mapping.tdb'
    REJECT = f'{SYSDATASET_PATH}/samba4/group_mapping_rejects.tdb'
@dataclass(frozen=True)
class SMBGroupMap:
    # parsed form of a UNIXGROUP/<sid> record in group_mapping.tdb
    sid: str  # SID of the mapped group (taken from the TDB key)
    gid: int  # Unix gid the SID maps to
    sid_type: lsa_sidtype  # LSA SID type of the mapping
    name: str  # group name
    comment: str  # free-form comment, may be empty
@dataclass(frozen=True)
class SMBGroupMembership:
    # parsed form of a MEMBEROF/<sid> record in group_mapping.tdb
    sid: str  # member SID (taken from the TDB key)
    groups: tuple[str, ...]  # SIDs of the aliases of which `sid` is a member
def _parse_unixgroup(tdb_key: str, tdb_val: str) -> SMBGroupMap:
    """
    parsing function to convert TDB key/value pair into SMBGroupMap

    Sample TDB key:
    "UNIXGROUP/S-1-5-21-1137207236-3870220311-645177593-200042\00"

    Sample TDB value:
    "\\B8\03\00\00\04\00\00\00truenas_sharing_administrators\00\00"

    first four bytes are gid, second four are sid type,
    remainder are two null-terminated strings.

    Returns a SMBGroupMap object in which `sid` attribute is populated with
    value from key and remaining attributes are populated from
    the TDB value.
    """
    sid = tdb_key[len(UNIX_GROUP_KEY_PREFIX):]
    data = b64decode(tdb_val)

    # unix groups are written into the tdb file via tdb_pack as little-endian
    # 32-bit values. Decode them explicitly as little-endian rather than
    # combining a big-endian read with htonl(), which only produced the right
    # answer on little-endian hosts.
    gid = int.from_bytes(data[0:4], 'little')
    sid_type = lsa_sidtype(int.from_bytes(data[4:8], 'little'))

    # remaining bytes are two null-terminated strings
    bname, bcomment = data[8:-1].split(b'\x00')
    return SMBGroupMap(sid, gid, sid_type, bname.decode(), bcomment.decode())
def _parse_memberof(tdb_key: str, tdb_val: str) -> SMBGroupMembership:
    """
    Convert a MEMBEROF TDB key/value pair into an SMBGroupMembership.

    Sample TDB key:
    "MEMBEROF/S-1-5-21-1137207236-3870220311-645177593-512\00"

    Sample TDB value:
    "S-1-5-32-544 S-1-5-32-545\00"

    The value is a space-delimited, NUL-terminated list of alias SIDs of
    which the SID embedded in the key is a member. The returned object
    carries that SID plus a tuple of the alias SIDs.
    """
    member_sid = tdb_key[len(MEMBEROF_PREFIX):]
    # strip trailing NUL before decoding / splitting
    alias_list = b64decode(tdb_val)[:-1].decode()
    return SMBGroupMembership(member_sid, tuple(alias_list.split()))
def _groupmap_to_tdb_key_val(group_map: SMBGroupMap) -> tuple[str, str]:
    """ convert a SMBGroupMap object to TDB key-value pair for insertion into TDB file """
    tdb_key = f'{UNIX_GROUP_KEY_PREFIX}{group_map.sid}'
    # gid and sid_type are stored on disk as little-endian 32-bit values
    # (tdb_pack wire format). Encode them explicitly as little-endian rather
    # than combining ntohl() with a big-endian to_bytes(), which only
    # round-tripped correctly on little-endian hosts.
    gid = int(group_map.gid).to_bytes(4, 'little')
    sid_type = int(group_map.sid_type).to_bytes(4, 'little')
    name = group_map.name.encode()
    comment = group_map.comment.encode()
    # two NUL-terminated strings follow the packed integers
    data = gid + sid_type + name + b'\x00' + comment + b'\x00'
    return (tdb_key, b64encode(data))
def _groupmem_to_tdb_key_val(group_mem: SMBGroupMembership) -> tuple[str, str]:
    """ convert a SMBGroupMembership object to TDB key-value pair for insertion into TDB file """
    tdb_key = f'{MEMBEROF_PREFIX}{group_mem.sid}'
    # deduplicate while preserving insertion order. Using a plain set() here
    # made the serialized member list nondeterministic across runs, which
    # causes spurious differences in the TDB file contents.
    unique_groups = dict.fromkeys(group_mem.groups)
    data = ' '.join(unique_groups).encode() + b'\x00'
    return (tdb_key, b64encode(data))
def groupmap_entries(
    groupmap_file: GroupmapFile,
    as_dict: bool = False
) -> Iterable[SMBGroupMap | SMBGroupMembership | dict]:
    """ iterate the specified group_mapping.tdb file

    Params:
        groupmap_file - which group mapping TDB file to iterate
        as_dict - yield dictionaries (with an added 'entry_type' key)
            instead of dataclass instances

    Returns:
        SMBGroupMap or SMBGroupMembership (or their dict forms)

    Raises:
        RuntimeError
        FileNotFoundError
    """
    if not isinstance(groupmap_file, GroupmapFile):
        raise TypeError(f'{type(groupmap_file)}: expected GroupmapFile type.')

    with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl:
        for entry in hdl.entries():
            # key prefix determines record type; anything else (e.g. version
            # info) is skipped
            if entry['key'].startswith(UNIX_GROUP_KEY_PREFIX):
                parser_fn = _parse_unixgroup
                entry_type = GroupmapEntryType.GROUP_MAPPING.name
            elif entry['key'].startswith(MEMBEROF_PREFIX):
                parser_fn = _parse_memberof
                entry_type = GroupmapEntryType.MEMBERSHIP.name
            else:
                continue

            if as_dict:
                yield {'entry_type': entry_type} | asdict(parser_fn(entry['key'], entry['value']))
            else:
                yield parser_fn(entry['key'], entry['value'])
def query_groupmap_entries(groupmap_file: GroupmapFile, filters: list, options: dict) -> list[dict]:
    """ Query group mapping entries with standard query-filters / query-options.
    A missing TDB file yields an empty result set. """
    try:
        results = list(groupmap_entries(groupmap_file, as_dict=True))
    except FileNotFoundError:
        return []

    return filter_list(results, filters, options)
def insert_groupmap_entries(
    groupmap_file: GroupmapFile,
    entries: list[SMBGroupMap | SMBGroupMembership]
) -> None:
    """ Insert multiple groupmap entries under a single transaction lock.

    Raises:
        TypeError - list contains something other than SMBGroupMap /
            SMBGroupMembership
        RuntimeError - TDB library error
    """
    ops = []
    for item in entries:
        if isinstance(item, SMBGroupMap):
            key, val = _groupmap_to_tdb_key_val(item)
        elif isinstance(item, SMBGroupMembership):
            key, val = _groupmem_to_tdb_key_val(item)
        else:
            raise TypeError(f'{type(item)}: unexpected group_mapping.tdb entry type')

        ops.append(TDBBatchOperation(action=TDBBatchAction.SET, key=key, value=val))

    if not ops:
        # nothing to do, avoid taking lock
        return

    with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl:
        hdl.batch_op(ops)
def delete_groupmap_entry(
groupmap_file: GroupmapFile,
entry_type: GroupmapEntryType,
entry_sid: str
):
if not isinstance(groupmap_file, GroupmapFile):
raise TypeError(f'{type(groupmap_file)}: expected GroupmapFile type.')
if not isinstance(entry_type, GroupmapEntryType):
raise TypeError(f'{type(entry_type)}: expected GroumapEntryType.')
match entry_type:
case GroupmapEntryType.GROUP_MAPPING:
tdb_key = f'{UNIX_GROUP_KEY_PREFIX}{entry_sid}'
case GroupmapEntryType.MEMBERSHIP:
tdb_key = f'{MEMBEROF_PREFIX}{entry_sid}'
case _:
raise TypeError(f'{entry_type}: unexpected GroumapEntryType.')
with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl:
hdl.delete(tdb_key)
def list_foreign_group_memberships(
    groupmap_file: GroupmapFile,
    alias_sid: str
) -> list[str]:
    """
    Equivalent of `net groupmap listmem <sid>`. MEMBEROF records associate a
    SID with the list of aliases it belongs to; this performs the reverse
    lookup, returning the SIDs whose membership lists contain `alias_sid`.
    """
    if not isinstance(groupmap_file, GroupmapFile):
        raise TypeError(f'{type(groupmap_file)}: expected GroupmapFile type.')

    membership_filters = [
        ['entry_type', '=', GroupmapEntryType.MEMBERSHIP.name],
        ['groups', 'rin', alias_sid],
    ]
    matches = query_groupmap_entries(groupmap_file, membership_filters, {})
    return [match['sid'] for match in matches]
| 9,231 | Python | .py | 228 | 35.451754 | 100 | 0.68984 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,731 | sid.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/sid.py | import subprocess
from middlewared.service import Service, private
from middlewared.service_exception import CallError
from middlewared.utils.functools_ import cache
from middlewared.utils.sid import random_sid
from .constants import SMBCmd
class SMBService(Service):

    class Config:
        service = 'cifs'
        service_verb = 'restart'

    @cache
    @private
    def local_server_sid(self):
        """ Return the machine SID from the db, generating and persisting a
        new random one on first use.

        NOTE(review): @cache on an instance method keys on `self` and keeps
        the service instance alive for the cache's lifetime; acceptable here
        since middleware services are effectively singletons.
        """
        if (db_sid := self.middleware.call_sync('smb.config')['cifs_SID']):
            return db_sid

        new_sid = random_sid()
        self.middleware.call_sync('datastore.update', 'services.cifs', 1, {'cifs_SID': new_sid})
        return new_sid

    @private
    def set_system_sid(self):
        """ Push the machine SID into Samba's secrets via `net setlocalsid`. """
        server_sid = self.local_server_sid()
        setsid = subprocess.run([
            SMBCmd.NET.value, '-d', '0',
            'setlocalsid', server_sid,
        ], capture_output=True, check=False)

        if setsid.returncode != 0:
            raise CallError(f'setlocalsid failed: {setsid.stderr.decode()}')
| 1,021 | Python | .py | 27 | 30.851852 | 96 | 0.661929 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,732 | status.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/status.py | from middlewared.schema import Bool, Dict, Ref, Str
from middlewared.service import Service, accepts
from middlewared.plugins.smb import SMBCmd
from middlewared.service_exception import CallError
from middlewared.utils import filter_list
import enum
import json
import subprocess
import time
class InfoLevel(enum.Enum):
    """ smbstatus information levels; values are the single-letter flags
    appended to smbstatus's `-j` option (ALL adds no extra flag). """
    AUTH_LOG = 'l'  # legacy: served from the audit log, not smbstatus
    ALL = ''
    SESSIONS = 'p'
    SHARES = 'S'
    LOCKS = 'L'
    BYTERANGE = 'B'
    NOTIFICATIONS = 'N'
class SMBService(Service):

    class Config:
        service = 'cifs'
        service_verb = 'restart'

    @accepts(
        Str('info_level', enum=[x.name for x in InfoLevel], default=InfoLevel.ALL.name),
        Ref('query-filters'),
        Ref('query-options'),
        Dict(
            'status_options',
            Bool('verbose', default=True),
            Bool('fast', default=False),
            Str('restrict_user', default=''),
            Str('restrict_session', default=''),
            Bool('resolve_uids', default=True),
        ), roles=['SHARING_SMB_WRITE', 'READONLY_ADMIN']
    )
    def status(self, info_level, filters, options, status_options):
        """
        Returns SMB server status (sessions, open files, locks, notifications).

        `info_level` type of information requests. Defaults to ALL.

        `status_options` additional options to filter query results. Supported
        values are as follows: `verbose` gives more verbose status output
        `fast` causes smbstatus to not check if the status data is valid by
        checking if the processes that the status data refer to all still
        exist. This speeds up execution on busy systems and clusters but
        might display stale data of processes that died without cleaning up
        properly. `restrict_user` specifies the limits results to the specified
        user.

        This API endpoint also supports a legacy `info_level` AUTH_LOG that
        provides AUTHENTICATION events from the SMB audit log. Support for
        this information level will be removed in a future version.
        """
        lvl = InfoLevel[info_level]
        if lvl == InfoLevel.AUTH_LOG:
            # legacy path: served from the audit subsystem, not smbstatus
            return self.middleware.call_sync('audit.query', {
                'services': ['SMB'],
                'query-filters': filters + [['event', '=', 'AUTHENTICATION']],
                'query-options': options
            })

        """
        Apply some optimizations for case where filter is only asking
        for a specific uid or session id.
        """
        if len(filters) == 1:
            to_check = filters[0][:2]
            if to_check == ["uid", "="]:
                status_options['restrict_user'] = str(filters[0][2])
                filters = []
            elif to_check == ["session_id", "="]:
                status_options['restrict_session'] = str(filters[0][2])
                filters = []

        # build the combined smbstatus flag string, e.g. '-jpvf'. Note the
        # conditional expressions parse as `(flags + 'v') if cond else flags`.
        flags = '-j'
        flags = flags + lvl.value
        flags = flags + 'v' if status_options['verbose'] else flags
        flags = flags + 'f' if status_options['fast'] else flags

        # '-d' '0' is adjacent-literal concatenation and yields '-d0'
        statuscmd = [SMBCmd.STATUS.value, '-d' '0', flags]

        if status_options['restrict_user']:
            statuscmd.extend(['-U', status_options['restrict_user']])

        if status_options['restrict_session']:
            statuscmd.extend(['-s', status_options['restrict_session']])

        if status_options['resolve_uids']:
            statuscmd.append('--resolve-uids')

        smbstatus = subprocess.run(statuscmd, capture_output=True)
        if smbstatus.returncode != 0:
            raise CallError(f'Failed to retrieve SMB status: {smbstatus.stderr.decode().strip()}')

        # smbstatus may emit nothing when there are no sessions
        json_status = json.loads(smbstatus.stdout.decode() or '{"sessions": {}}')
        if lvl == InfoLevel.SESSIONS:
            to_filter = list(json_status.get("sessions", {}).values())
        elif lvl == InfoLevel.LOCKS:
            to_filter = list(json_status.get("open_files", {}).values())
        elif lvl == InfoLevel.BYTERANGE:
            to_filter = list(json_status.get("byte_range_locks", {}).values())
        elif lvl == InfoLevel.NOTIFICATIONS:
            to_filter = list(json_status.get("notifies", {}).values())
        elif lvl == InfoLevel.SHARES:
            to_filter = list(json_status.get("tcons", {}).values())
        else:
            # ALL: fold each tree connection into its owning session under a
            # synthesized 'share_connections' key
            for tcon in json_status.get("tcons", {}).values():
                if not (session := json_status['sessions'].get(tcon['session_id'])):
                    continue

                if not session.get('share_connections'):
                    session['share_connections'] = [tcon]
                else:
                    session['share_connections'].append(tcon)

            to_filter = list(json_status['sessions'].values())

        return filter_list(to_filter, filters, options)

    @accepts(roles=['SHARING_SMB_READ'])
    def client_count(self):
        """
        Return currently connected clients count.
        """
        return self.status("SESSIONS", [], {"count": True}, {"fast": True})
| 5,031 | Python | .py | 110 | 35.881818 | 98 | 0.602944 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,733 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/utils.py | from .constants import SMBSharePreset
from secrets import randbits
def random_sid():
    """ Generate a random machine SID (see MS-DTYP 2.4.2 SID).

    Local machine SIDs have the form S-1-5-21-<a>-<b>-<c> where each
    subauthority is an independent random 32-bit value.
    """
    subauths = '-'.join(str(randbits(32)) for _ in range(3))
    return f'S-1-5-21-{subauths}'
def smb_strip_comments(auxparam_in):
    """
    Strip comment lines and trailing comments from auxiliary parameters,
    returning the surviving entries as a newline-separated string.
    """
    kept = []
    for line in auxparam_in.splitlines():
        line = line.strip()
        if not line or line.startswith(('#', ';')):
            continue

        # user may have added a trailing comment after the value, e.g.
        # "socket options = IPTOS_LOWDELAY # I read about this on the internet"
        kept.append(line.split('#')[0].strip())

    return '\n'.join(kept)
def auxsmbconf_dict(aux, direction="TO"):
    """
    Convert auxiliary parameters between the string and dict representations.
    `TO` parses a string into a dict; `FROM` serializes a dict back into a
    newline-separated string (a None value serializes as a bare key).
    """
    if direction == 'TO':
        if not isinstance(aux, str):
            raise ValueError(f'{type(aux)}: wrong input type. Expected str.')

        parsed = {}
        for line in smb_strip_comments(aux).splitlines():
            kv = line.split('=', 1)
            parsed[kv[0].strip()] = kv[1].strip()

        return parsed

    if direction == 'FROM':
        if not isinstance(aux, dict):
            raise ValueError(f'{type(aux)}: wrong input type. Expected dict.')

        return '\n'.join(f'{k}={v}' if v is not None else k for k, v in aux.items())

    raise ValueError(f'{direction}: unexpected conversion direction')
def apply_presets(data_in):
    """
    Apply settings from the share's purpose preset. Preset auxiliary
    parameters are only merged in when user-defined aux parameters already
    exist, and the user-defined values take precedence.
    """
    data = data_in.copy()
    preset_params = (SMBSharePreset[data["purpose"]].value)["params"].copy()
    if data.get('home'):
        # home shares manage their own path suffix
        preset_params.pop('path_suffix', None)

    preset_aux_str = preset_params.pop("auxsmbconf")
    data.update(preset_params)

    if data["auxsmbconf"]:
        # merge: preset values first, user-defined values override
        merged = auxsmbconf_dict(preset_aux_str, direction="TO")
        merged.update(auxsmbconf_dict(data["auxsmbconf"], direction="TO"))
        data["auxsmbconf"] = auxsmbconf_dict(merged, direction="FROM")

    return data
def is_time_machine_share(share):
    """ Return whether the SMB share is configured for time machine backups,
    either via the explicit flag or via a time-machine purpose preset. """
    explicitly_flagged = share.get('timemachine', False)
    return explicitly_flagged or share.get('purpose') in [SMBSharePreset.TIMEMACHINE.name, SMBSharePreset.ENHANCED_TIMEMACHINE.name]
| 2,794 | Python | .py | 65 | 35.415385 | 145 | 0.6369 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,734 | groupmap.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/groupmap.py | import os
import tdb
import struct
from middlewared.service import Service, job, private
from middlewared.service_exception import CallError
from middlewared.utils.directoryservices.constants import DSStatus, DSType
from middlewared.utils.sid import (
db_id_to_rid,
get_domain_rid,
lsa_sidtype,
sid_is_valid,
BASE_RID_USER,
DomainRid
)
from middlewared.utils.tdb import (
get_tdb_handle,
TDBDataType,
TDBPathType,
TDBOptions,
)
from middlewared.plugins.idmap_.idmap_constants import IDType
from middlewared.plugins.smb_.constants import SMBBuiltin, SMBPath
from middlewared.plugins.smb_.util_groupmap import (
delete_groupmap_entry,
insert_groupmap_entries,
list_foreign_group_memberships,
query_groupmap_entries,
GroupmapFile,
GroupmapEntryType,
SMBGroupMap,
SMBGroupMembership,
)
# SIDs for BUILTIN\Administrators, BUILTIN\Users and BUILTIN\Guests; used below
# to size the deterministic gid allocations in validate_groupmap_hwm.
WINBINDD_AUTO_ALLOCATED = ('S-1-5-32-544', 'S-1-5-32-545', 'S-1-5-32-546')
# Extra headroom added above the deterministic allocations when bumping the
# idmap high-water mark.
WINBINDD_WELL_KNOWN_PADDING = 100
WINBIND_IDMAP_CACHE = f'{SMBPath.CACHE_DIR.platform()}/winbindd_cache.tdb'
WINBIND_IDMAP_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
def clear_winbind_idmap_cache():
    """Remove all entries from winbindd's idmap cache tdb file."""
    with get_tdb_handle(WINBIND_IDMAP_CACHE, WINBIND_IDMAP_TDB_OPTIONS) as cache_hdl:
        return cache_hdl.clear()
class SMBService(Service):
class Config:
service = 'cifs'
service_verb = 'restart'
@private
def add_groupmap(self, group):
server_sid = self.middleware.call_sync('smb.local_server_sid')
rid = db_id_to_rid(IDType.GROUP, group['id'])
entry = SMBGroupMap(
sid=f'{server_sid}-{rid}',
gid=group['gid'],
sid_type=lsa_sidtype.ALIAS,
name=group['group'],
comment=''
)
insert_groupmap_entries(GroupmapFile.DEFAULT, [entry])
@private
def del_groupmap(self, db_id):
server_sid = self.middleware.call_sync('smb.local_server_sid')
rid = db_id_to_rid(IDType.GROUP, db_id)
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.GROUP_MAPPING,
entry_sid=f'{server_sid}-{rid}',
)
@private
def sync_foreign_groups(self):
"""
Domain Users, and Domain Admins must have S-1-5-32-545 and S-1-5-32-544
added to their respective Unix tokens for correct behavior in AD domain.
These are added by making them foreign members in the group_mapping for
the repsective alias. This membership is generated during samba startup
when newly creating these groups (if they don't exist), but can get
lost, resulting in unexpected / erratic permissions behavior.
"""
# fresh groupmap listing is to ensure we have accurate / current info.
groupmap = self.groupmap_list()
localsid = groupmap['localsid']
entries = [
SMBGroupMembership(
sid=f'{localsid}-{DomainRid.ADMINS}',
groups=(SMBBuiltin.ADMINISTRATORS.sid,)
),
SMBGroupMembership(
sid=f'{localsid}-{DomainRid.GUESTS}',
groups=(SMBBuiltin.GUESTS.sid,)
),
SMBGroupMembership(
sid=groupmap['local'][SMBBuiltin.USERS.rid]['sid'],
groups=(SMBBuiltin.USERS.sid,)
),
]
# We keep separate list of what members we expect for these groups
admins = [f'{localsid}-{DomainRid.ADMINS}']
guests = [f'{localsid}-{DomainRid.GUESTS}']
# Samba has special behavior if DomainRid.USERS is set for local domain
# and so we map the builtin_users account to a normal sid then make it
# a member of S-1-5-32-545
users = [groupmap['local'][SMBBuiltin.USERS.rid]['sid']]
if (admin_group := self.middleware.call_sync('smb.config')['admin_group']):
if (found := self.middleware.call_sync('group.query', [('group', '=', admin_group)])):
entries.append(SMBGroupMembership(
sid=found[0]['sid'],
groups=(SMBBuiltin.ADMINISTRATORS.sid,)
))
admins.append(found[0]['sid'])
else:
self.logger.warning('%s: SMB admin group does not exist', admin_group)
ds = self.middleware.call_sync('directoryservices.status')
match ds['type']:
case DSType.AD.value:
ad_state = ds['status']
case _:
ad_state = DSStatus.DISABLED.name
if ad_state == DSStatus.HEALTHY.name:
try:
domain_info = self.middleware.call_sync('idmap.domain_info',
'DS_TYPE_ACTIVEDIRECTORY')
domain_sid = domain_info['sid']
# add domain account SIDS
entries.append((SMBGroupMembership(
sid=f'{domain_sid}-{DomainRid.ADMINS}',
groups=(SMBBuiltin.ADMINISTRATORS.sid,)
)))
admins.append(f'{domain_sid}-{DomainRid.ADMINS}')
entries.append((SMBGroupMembership(
sid=f'{domain_sid}-{DomainRid.USERS}',
groups=(SMBBuiltin.USERS.sid,)
)))
users.append(f'{domain_sid}-{DomainRid.USERS}')
entries.append((SMBGroupMembership(
sid=f'{domain_sid}-{DomainRid.GUESTS}',
groups=(SMBBuiltin.GUESTS.sid,)
)))
guests.append(f'{domain_sid}-{DomainRid.GUESTS}')
except Exception:
self.logger.warning('Failed to retrieve idmap domain info', exc_info=True)
insert_groupmap_entries(GroupmapFile.DEFAULT, entries)
# double-check that we have expected memberships now and no extras
unexpected_memberof_entries = query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.MEMBERSHIP.name],
['sid', 'nin', admins + guests + users]
], {})
for entry in unexpected_memberof_entries:
self.logger.error(
'%s: unexpected account present in group mapping configuration for groups '
'with the following sids %s. This grants the account privileges beyond what '
'would normally be granted by the backend in TrueNAS potentially indicating '
'an underlying security issue. This mapping entry will be automatically '
'removed to restore TrueNAS to its expected configuration.',
entry['sid'], entry['groups']
)
try:
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.MEMBERSHIP,
entry_sid=entry['sid'],
)
except Exception:
self.logger.error('Failed to remove unexpected groupmap entry', exc_info=True)
@private
def initialize_idmap_tdb(self, low_range):
tdb_path = f'{SMBPath.STATEDIR.platform()}/winbindd_idmap.tdb'
tdb_flags = tdb.DEFAULT
open_flags = os.O_CREAT | os.O_RDWR
try:
tdb_handle = tdb.Tdb(tdb_path, 0, tdb_flags, open_flags, 0o644)
except Exception:
self.logger.warning("Failed to create winbindd_idmap.tdb", exc_info=True)
return None
try:
for key, val in [
(b'IDMAP_VERSION\x00', 2),
(b'USER HWM\x00', low_range),
(b'GROUP HWM\x00', low_range)
]:
tdb_handle.store(key, struct.pack("<L", val))
except Exception:
self.logger.warning('Failed to initialize winbindd_idmap.tdb', exc_info=True)
tdb_handle.close()
return None
return tdb_handle
@private
def validate_groupmap_hwm(self, low_range):
"""
Middleware forces allocation of GIDs for Users, Groups, and Administrators
to be deterministic with the default idmap backend. Bump up the idmap_tdb
high-water mark to avoid conflicts with these and remove any mappings that
conflict. Winbindd will regenerate the removed ones as-needed.
"""
def add_key(tdb_handle, gid, sid):
gid_val = f'GID {gid}\x00'.encode()
sid_val = f'{sid}\x00'.encode()
tdb_handle.store(gid_val, sid_val)
tdb_handle.store(sid_val, gid_val)
def remove_key(tdb_handle, key, reverse):
tdb_handle.delete(key)
if reverse:
tdb_handle.delete(reverse)
must_reload = False
len_wb_groups = len(WINBINDD_AUTO_ALLOCATED)
builtins = self.middleware.call_sync('idmap.builtins')
try:
tdb_handle = tdb.open(f"{SMBPath.STATEDIR.platform()}/winbindd_idmap.tdb")
except FileNotFoundError:
tdb_handle = self.initialize_idmap_tdb(low_range)
if not tdb_handle:
return False
try:
tdb_handle.transaction_start()
group_hwm_bytes = tdb_handle.get(b'GROUP HWM\00')
hwm = struct.unpack("<L", group_hwm_bytes)[0]
if hwm < low_range + len_wb_groups + len(builtins):
hwm = low_range + len_wb_groups + len(builtins) + WINBINDD_WELL_KNOWN_PADDING
new_hwm_bytes = struct.pack("<L", hwm)
tdb_handle.store(b'GROUP HWM\00', new_hwm_bytes)
must_reload = True
for key in tdb_handle.keys():
# sample key: b'GID 9000020\x00'
if key[:3] == b'GID' and int(key.decode()[4:-1]) < (low_range + len_wb_groups):
reverse = tdb_handle.get(key)
remove_key(tdb_handle, key, reverse)
must_reload = True
for entry in builtins:
if not entry['set']:
continue
sid_key = f'{entry["sid"]}\x00'.encode()
val = tdb_handle.get(f'{entry["sid"]}\x00'.encode())
if val is None or val.decode() != f'GID {entry["gid"]}\x00':
if sid_key in tdb_handle.keys():
self.logger.debug(
"incorrect sid mapping detected %s -> %s"
"replacing with %s -> %s",
entry['sid'], val.decode()[4:-1] if val else "None",
entry['sid'], entry['gid']
)
remove_key(tdb_handle, f'{entry["sid"]}\x00'.encode(), val)
add_key(tdb_handle, entry['gid'], entry['sid'])
must_reload = True
tdb_handle.transaction_commit()
except Exception as e:
tdb_handle.transaction_cancel()
self.logger.warning("TDB maintenace failed: %s", e)
finally:
tdb_handle.close()
if must_reload:
self.middleware.call_sync('idmap.snapshot_samba4_dataset')
return must_reload
@private
def groupmap_list(self):
"""
Separate out the groupmap output into builtins, locals, and invalid entries.
Invalid entries are ones that aren't from our domain, or are mapped to gid -1.
Latter occurs when group mapping is lost.
"""
rv = {"builtins": {}, "local": {}, "local_builtins": {}}
localsid = self.middleware.call_sync('smb.local_server_sid')
entries_to_fix = []
for g in query_groupmap_entries(GroupmapFile.DEFAULT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name]
], {}):
gid = g['gid']
if gid == -1:
# This entry must be omitted because it does not contain a mapping
# to a valid group id.
entries_to_fix.append(g)
continue
if g['sid'].startswith("S-1-5-32"):
key = 'builtins'
elif g['sid'].startswith(localsid) and g['gid'] in (544, 546):
key = 'local_builtins'
elif g['sid'].startswith(localsid):
if int(get_domain_rid(g['sid'])) < BASE_RID_USER:
# This is an entry that is for an existing group account
# but it currently maps to the wrong SID (generated by
# algorithmic base for pdb backend)
entries_to_fix.append(g)
continue
key = 'local'
else:
# There is a groupmap that is not for local machine sid to
# a local group account. This can potentially happen if somehow our
# machine account sid was manually cleared from DB by the end-user.
# Add to our legacy entries so that we can try to map it to the
# proper local account if it is in use in share_info.tdb.
entries_to_fix.append(g)
continue
rv[key][gid] = g
rv['localsid'] = localsid
for entry in entries_to_fix:
# Write the various incorrect or invalid groupmap entries to our rejects
# tdb file so that we can use them for checks against current share ACL.
if entry['gid'] != -1:
gm = SMBGroupMap(
sid=entry['sid'],
gid=entry['gid'],
sid_type=lsa_sidtype.ALIAS,
name=entry['name'],
comment=entry['comment']
)
insert_groupmap_entries(GroupmapFile.REJECT, [gm])
try:
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.GROUP_MAPPING,
entry_sid=entry['sid'],
)
except Exception:
self.logger.debug('Failed to delete invalid entry', exc_info=True)
if entries_to_fix:
self.migrate_share_groupmap()
return rv
@private
def groupmap_listmem(self, sid):
"""
This method returns a list of SIDS that are members of the specified SID.
Samba's group mapping database can contain foreign group mappings for particular SID entries
This provides nesting for groups, and SID membership is evaluated when samba overrides
POSIX permissions for example when a user is a member of the S-1-5-32-544 (BUILTIN\\admininstrators)
Per MS-DTYP certain well-known SIDs / rids must be members of certain builtin groups. For
example, the administrators RID for a domain (remote and local) must be a member of S-1-5-32-544
otherwise domain admins won't have DACL override privileges.
"""
if not sid_is_valid(sid):
raise ValueError(f'{sid}: not a valid SID')
return list_foreign_group_memberships(GroupmapFile.DEFAULT, sid)
@private
def sync_builtins(self, to_add):
"""
builtin groups are automatically allocated by winbindd / idmap_tdb. We want these
mappings to be written deterministically so that if for some horrible reason an
end-users decides to write these GIDs to an ACL entry it is consistent between
TrueNAS servers and persistent across updates.
"""
# Because the beginning range is determined by the range of IDs allocated for BUILTIN
# users we have to request from the samba running configuration
idmap_backend = self.middleware.call_sync("smb.getparm", "idmap config * : backend", "GLOBAL")
idmap_range = self.middleware.call_sync("smb.getparm", "idmap config * : range", "GLOBAL")
if idmap_backend != "tdb":
"""
idmap_autorid and potentially other allocating idmap backends may be used for
the default domain. We do not want to touch how these are allocated.
"""
return False
low_range = int(idmap_range.split("-")[0].strip())
for b in (SMBBuiltin.ADMINISTRATORS, SMBBuiltin.USERS, SMBBuiltin.GUESTS):
offset = b.rid - SMBBuiltin.ADMINISTRATORS.rid
gid = low_range + offset
to_add.append(SMBGroupMap(
sid=b.sid,
gid=gid,
sid_type=lsa_sidtype.ALIAS,
name=b.nt_name,
comment=''
))
return self.validate_groupmap_hwm(low_range)
@private
@job(lock="groupmap_sync", lock_queue_size=1)
def synchronize_group_mappings(self, job, bypass_sentinel_check=False):
"""
This method does the following:
1) ensures that group_mapping.tdb has all required groupmap entries
2) ensures that builtin SIDs S-1-5-32-544, S-1-5-32-545, and S-1-5-32-546
exist and are deterministically mapped to expected GIDs
3) ensures that all expected foreign aliases for builtin SIDs above exist.
4) flush various caches if required.
"""
entries = []
if (status := self.middleware.call_sync('failover.status')) not in ('SINGLE', 'MASTER'):
self.middleware.logger.debug('%s: skipping groupmap sync due to failover status', status)
return
if not bypass_sentinel_check and not self.middleware.call_sync('smb.is_configured'):
raise CallError(
"SMB server configuration is not complete. "
"This may indicate system dataset setup failure."
)
groupmap = self.groupmap_list()
groups = self.middleware.call_sync('group.query', [('local', '=', True), ('smb', '=', True)])
groups.append(self.middleware.call_sync('group.query', [
('gid', '=', SMBBuiltin.ADMINISTRATORS.rid), ('local', '=', True)
], {'get': True}))
gid_set = {x["gid"] for x in groups}
for group in groups:
entries.append(SMBGroupMap(
sid=group['sid'],
gid=group['gid'],
sid_type=lsa_sidtype.ALIAS,
name=group['group'],
comment=''
))
for entry in groupmap['local'].values():
# delete entries that don't map to a local account
if entry['gid'] in gid_set:
continue
try:
delete_groupmap_entry(
GroupmapFile.DEFAULT,
GroupmapEntryType.GROUP_MAPPING,
sid=entry['sid'],
)
except Exception:
self.logger.warning('%s: failed to remove group mapping', entry['sid'], exc_info=True)
must_remove_cache = self.sync_builtins(entries)
insert_groupmap_entries(GroupmapFile.DEFAULT, entries)
self.sync_foreign_groups()
if must_remove_cache:
clear_winbind_idmap_cache()
try:
self.middleware.call_sync('idmap.gencache.flush')
except Exception:
self.logger.warning('Failed to flush caches after groupmap changes.', exc_info=True)
@private
def migrate_share_groupmap(self):
"""
Samba's share_info.tdb file contains key-value pairs of share, and windows security
descriptor. The share ACL defines access rights for the SMB share itself and is usually
empty / grants full control. Share ACL entries use SIDs exclusively to identify the
user / group.
As part of synchronization of the contents of our groups config with samba's
group_mapping database the contents of the latter are checked for incorrect mappings and
any entries found are written to a groupmap rejects file before removing them from
the group_mapping.tdb.
This method reads the contents of the rejects file and checks whether its entries
correspond with existing local groups. If they do then we iterate the ACL entries for
all existing SMB share ACLs and remap the rejected SID to the correct current value
assigned to the GID.
"""
rejects = {g['sid']: g for g in query_groupmap_entries(GroupmapFile.REJECT, [
['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name]
], {})}
if not rejects:
# The tdb file is empty, hence nothing to do.
return
set_reject_sids = set(list(rejects.keys()))
# generate dictionary for current valid groups that have SIDs in the rejects list
current = {g['gid']: g['sid'] for g in self.middleware.call_sync('group.query', [
['gid', 'in', [g['gid'] for g in rejects.values()]], ['local', '=', True]
])}
for sid, gm in rejects.copy().items():
rejects[sid]['new_sid'] = current.get(gm['gid'])
for share in self.middleware.call_sync('sharing.smb.query'):
acl = self.middleware.call_sync('smb.sharesec.getacl', share['name'])['share_acl']
acl_sids = set([ace['ae_who_sid'] for ace in acl])
if not set_reject_sids & acl_sids:
# the share ACL does not use any of the affected SIDs.
continue
for idx, ace in enumerate(acl):
if (entry := rejects.get(ace['ae_who_sid'])) is None:
continue
if entry['new_sid'] is None:
self.logger.warning(
'Share ACL entry [%d] references SID [%s] which at one point mapped to '
'a local group with gid [%d] that no longer exists.',
share['name'], ace['ae_who_sid'], entry['gid']
)
# preserve just in case user restores group
continue
ace['ae_who_sid'] = entry['new_sid']
self.logger.debug(
'%s: correcting ACL entry in share for group [%d] from SID %s to %s',
share['name'], entry['gid'], ace['ae_who_sid'], entry['new_sid']
)
self.middleware.call_sync('smb.sharesec.setacl', {'share_name': share['name'], 'share_acl': acl})
| 22,273 | Python | .py | 462 | 35.623377 | 109 | 0.58122 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
class RegObj():
    """One mapping between a middleware schema field and an smb.conf parameter.

    Optional keyword arguments supply custom conversion hooks:
    smbconf_parser (registry -> schema), schema_parser (schema -> registry),
    and a middleware handle for parsers that need to make calls.
    """
    def __init__(self, name, smbconf, default, **kwargs):
        self.name = name
        self.smbconf = smbconf
        self.default = default
        for hook in ("smbconf_parser", "schema_parser", "middleware"):
            setattr(self, hook, kwargs.get(hook))
class RegistrySchema():
    """Collection of RegObj entries describing how middleware schema fields map
    to smb.conf registry parameters."""

    def __init__(self, schema):
        self.schema = schema

    def _normalize_config(self, conf):
        """Ensure every entry carries a 'raw' string form alongside 'parsed'."""
        for v in conf.values():
            if isinstance(v.get('parsed'), list):
                v['raw'] = ' '.join(v['parsed'])
            elif not v.get('raw'):
                v['raw'] = str(v['parsed'])
        return

    def convert_schema_to_registry(self, data_in, data_out):
        """
        This function converts the our schema into smb.conf
        parameters. Where there is trivial / noncomplex / 1-1
        mapping, the parameter gets directly mapped. In
        Cases where mapping is complex, a parser function is
        supplied for the schema member.
        """
        map_ = self.schema_map()
        for entry, val in data_in.items():
            regobj = map_.get(entry)
            if regobj is None:
                continue

            if regobj.schema_parser is not None:
                regobj.schema_parser(regobj, val, data_in, data_out)
                continue

            if val is None:
                # Render None as an empty string; without the `continue` this
                # branch was dead code and None leaked through as the literal
                # string "None" after normalization.
                data_out[regobj.smbconf] = {"parsed": ""}
                continue

            data_out[regobj.smbconf] = {"parsed": val}

        self._normalize_config(data_out)
        return

    def schema_map(self):
        """Return schema entries keyed by schema field name."""
        return {x.name: x for x in self.schema}

    def schema_items(self):
        """Return the list of schema field names."""
        return [x.name for x in self.schema]
| 1,721 | Python | .py | 43 | 29.953488 | 68 | 0.573141 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,736 | util_param.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/util_param.py | import os
import threading
from middlewared.service_exception import CallError, MatchNotFound
try:
from samba.samba3 import param as s3param
except ImportError:
s3param = None
try:
from samba import param
except ImportError:
param = None
from .constants import SMBPath
from .util_net_conf import reg_getparm
# Shared samba3 loadparm context; reloaded in place on every global-parameter
# query, so all access is serialized through LP_CTX_LOCK.
LP_CTX = s3param.get_context()
LP_CTX_LOCK = threading.Lock()
def smbconf_getparm_lpctx(parm):
    """Fetch a global smb.conf parameter through the shared loadparm context."""
    conf_path = SMBPath.GLOBALCONF.platform()
    with LP_CTX_LOCK:
        # reload so any changes written since the last query are visible
        LP_CTX.load(conf_path)
        return LP_CTX.get(parm)
def smbconf_getparm(parm, section='GLOBAL'):
    """
    The global SMB server settings can be retrieved using a samba3 loadparm
    context. This is required (as opposed to importing `param` from samba
    due to presence of registry shares.

    Share parameter must be queried directly from libsmbconf using `net conf`.
    """
    if section.upper() != 'GLOBAL':
        try:
            return reg_getparm(section, parm)
        except Exception as e:
            raise CallError(f'Attempt to query smb4.conf parameter [{parm}] failed with error: {e}')

    return smbconf_getparm_lpctx(parm)
def lpctx_validate_global_parm(parm, value):
    """
    Validate a given parameter using a temporary loadparm context from
    a stub smb.conf file

    WARNING: lib/param doesn't validate params containing a colon
    """
    stub_path = SMBPath.STUBCONF.platform()
    with LP_CTX_LOCK:
        # a throwaway context: setting an invalid parm/value raises here
        scratch_lp = param.LoadParm(stub_path)
        scratch_lp.set(parm, value)
| 1,465 | Python | .py | 41 | 31.04878 | 96 | 0.728886 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,737 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/attachments.py | from middlewared.common.ports import ServicePortDelegate
class SMBServicePortDelegate(ServicePortDelegate):
    """Expose the well-known NetBIOS / SMB ports for port-conflict checks."""

    name = 'smb'
    namespace = 'smb'
    title = 'SMB Service'

    async def get_ports_bound_on_wildcards(self):
        # netbios-ns, netbios-dgm, netbios-ssn, microsoft-ds
        return [137, 138, 139, 445]
async def setup(middleware):
    """Register the SMB delegate so port-conflict checks cover the SMB service."""
    delegate = SMBServicePortDelegate(middleware)
    await middleware.call('port.register_attachment_delegate', delegate)
| 393 | Python | .py | 9 | 38.888889 | 98 | 0.76455 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,738 | reg_service.py | truenas_middleware/src/middlewared/middlewared/plugins/smb_/smbconf/reg_service.py | from middlewared.plugins.smb_.registry_base import RegObj, RegistrySchema
from middlewared.plugins.smb_.utils import apply_presets
from middlewared.utils.path import FSLocation, path_location, strip_location_prefix
from middlewared.utils.directoryservices.constants import DSType
# Character translation table handed to vfs_catia (joined with ',' as the
# "catia:mappings" parameter in mangling_set below): maps ASCII control
# characters 0x01-0x1f and characters illegal in Windows filenames
# (" * : < > ? \ |) into the Unicode private-use range 0xF0xx.
FRUIT_CATIA_MAPS = [
    "0x01:0xf001,0x02:0xf002,0x03:0xf003,0x04:0xf004",
    "0x05:0xf005,0x06:0xf006,0x07:0xf007,0x08:0xf008",
    "0x09:0xf009,0x0a:0xf00a,0x0b:0xf00b,0x0c:0xf00c",
    "0x0d:0xf00d,0x0e:0xf00e,0x0f:0xf00f,0x10:0xf010",
    "0x11:0xf011,0x12:0xf012,0x13:0xf013,0x14:0xf014",
    "0x15:0xf015,0x16:0xf016,0x17:0xf017,0x18:0xf018",
    "0x19:0xf019,0x1a:0xf01a,0x1b:0xf01b,0x1c:0xf01c",
    "0x1d:0xf01d,0x1e:0xf01e,0x1f:0xf01f",
    "0x22:0xf020,0x2a:0xf021,0x3a:0xf022,0x3c:0xf023",
    "0x3e:0xf024,0x3f:0xf025,0x5c:0xf026,0x7c:0xf027"
]
class ShareSchema(RegistrySchema):
def convert_schema_to_registry(self, data_in, data_out):
"""
Convert middleware schema SMB shares to an SMB service definition
"""
def order_vfs_objects(vfs_objects, fruit_enabled, purpose):
vfs_objects_special = ('truenas_audit', 'catia', 'fruit', 'streams_xattr', 'shadow_copy_zfs',
'acl_xattr', 'ixnas', 'winmsa', 'recycle', 'crossrename',
'zfs_core', 'aio_fbsd', 'io_uring')
invalid_vfs_objects = ['noacl']
vfs_objects_ordered = []
if fruit_enabled and 'fruit' not in vfs_objects:
vfs_objects.append('fruit')
if 'fruit' in vfs_objects:
if 'streams_xattr' not in vfs_objects:
vfs_objects.append('streams_xattr')
if purpose == 'ENHANCED_TIMEMACHINE':
vfs_objects.append('tmprotect')
elif purpose == 'WORM_DROPBOX':
vfs_objects.append('worm')
for obj in vfs_objects:
if obj in invalid_vfs_objects:
raise ValueError(f'[{obj}] is an invalid VFS object')
if obj not in vfs_objects_special:
vfs_objects_ordered.append(obj)
for obj in vfs_objects_special:
if obj in vfs_objects:
vfs_objects_ordered.append(obj)
return vfs_objects_ordered
data_out['vfs objects'] = {'parsed': ['zfs_core', 'io_uring']}
data_out['ea support'] = {'parsed': False}
data_in['fruit_enabled'] = self.middleware.call_sync('smb.config')['aapl_extensions']
data_in = apply_presets(data_in)
super().convert_schema_to_registry(data_in, data_out)
ordered_vfs_objects = order_vfs_objects(
data_out['vfs objects']['parsed'],
data_in['fruit_enabled'],
data_in['purpose'],
)
data_out['vfs objects']['parsed'] = ordered_vfs_objects
"""
Some presets contain values that users can override via aux
parameters. Set them prior to aux parameter processing.
"""
if data_in['purpose'] not in ['NO_SHARE', 'DEFAULT_SHARE']:
preset = self.middleware.call_sync('sharing.smb.presets')
purpose = preset[data_in['purpose']]
for param in purpose['params']['auxsmbconf'].splitlines():
auxparam, val = param.split('=', 1)
data_out[auxparam.strip()] = {"raw": val.strip()}
for param in data_in['auxsmbconf'].splitlines():
if not param.strip():
continue
try:
auxparam, val = param.split('=', 1)
"""
vfs_fruit must be added to all shares if fruit is enabled.
Support for SMB2 AAPL extensions is determined on first tcon
to server, and so if they aren't appended to any vfs objects
overrides via auxiliary parameters, then users may experience
unexpected behavior.
"""
if auxparam.strip() == "vfs objects":
vfsobjects = val.strip().split()
if data_in['shadowcopy']:
vfsobjects.append('shadow_copy_zfs')
data_out['vfs objects'] = {"parsed": order_vfs_objects(vfsobjects, data_in['fruit_enabled'], None)}
else:
data_out[auxparam.strip()] = {"raw": val.strip()}
except ValueError:
raise
except Exception:
self.middleware.logger.debug(
"[%s] contains invalid auxiliary parameter: [%s]",
data_in['auxsmbconf'], param
)
# There are two situations in which a share may be unavailable:
# 1) it's encypted and locked
# 2) it's specifically flagged as disabled
if data_in.get('locked'):
data_out['available'] = {'parsed': False}
self._normalize_config(data_out)
return
def path_local_get(entry, conf):
path = conf.get('path', {'raw': ""})
return str(path['raw'])
def path_local_set(entry, val, data_in, data_out):
return
def path_get(entry, conf):
val = conf.pop(entry.smbconf, entry.default)
if type(val) != dict:
return val
path = val['parsed']
if path == "":
"""
Empty path is valid for homes shares.
"""
return path
path_suffix = conf.get("tn:path_suffix", {"raw": ""})
"""
remove any path suffix from path before returning.
"""
if path_suffix['raw']:
suffix_len = len(path_suffix['raw'].split('/'))
path = path.rsplit('/', suffix_len)[0]
"""
If this is a DFS proxy, covert back to our special designator
"""
if 'msdfs proxy' in conf:
conf.pop('msdfs root', None)
proxy_addr = conf.pop('msdfs proxy')
path = f'EXTERNAL:{proxy_addr["raw"]}'
return path
def path_set(entry, val, data_in, data_out):
if not val:
data_out["path"] = {"parsed": ""}
return
loc = path_location(val)
path = strip_location_prefix(val)
if loc is FSLocation.EXTERNAL:
data_out['msdfs root'] = {'parsed': True}
data_out['msdfs proxy'] = {'parsed': path}
path = '/var/empty'
path_suffix = data_in["path_suffix"]
if path_suffix and loc is not FSLocation.EXTERNAL:
path = '/'.join([path, path_suffix])
data_out['path'] = {"parsed": path}
def durable_get(entry, conf):
"""
Durable handles are inverse of "posix locking" parmaeter.
"""
val = conf.pop(entry.smbconf, entry.default)
if type(val) != dict:
return val
kernel_oplocks = conf.get('kernel oplocks', {'parsed': False})
if not kernel_oplocks['parsed']:
conf.pop('kernel oplocks', None)
return not val['parsed']
def durable_set(entry, val, data_in, data_out):
data_out['posix locking'] = {"parsed": not val}
data_out['kernel oplocks'] = {"parsed": not val}
return
def recycle_get(entry, conf):
"""
Recycle bin has multiple associated parameters, remove them
so that they don't appear as auxiliary parameters (unless
they deviate from our defaults).
"""
vfs_objects = conf.get("vfs objects", [])
if "recycle" not in vfs_objects['parsed']:
return False
conf.pop("recycle:repository", "")
for parm in ["keeptree", "versions", "touch"]:
to_check = f"recycle:{parm}"
if conf[to_check]["parsed"]:
conf.pop(to_check)
if conf["recycle:directory_mode"]['raw'] == "0777":
conf.pop("recycle:directory_mode")
if conf["recycle:subdir_mode"]['raw'] == "0700":
conf.pop("recycle:subdir_mode")
return True
def recycle_set(entry, val, data_in, data_out):
if not val:
return
ds = entry.middleware.call_sync("directoryservices.status")
data_out.update({
"recycle:repository": {"parsed": ".recycle/%D/%U" if ds["type"] == DSType.AD.value else ".recycle/%U"},
"recycle:keeptree": {"parsed": True},
"recycle:versions": {"parsed": True},
"recycle:touch": {"parsed": True},
"recycle:directory_mode": {"parsed": "0777"},
"recycle:subdir_mode": {"parsed": "0700"},
})
data_out['vfs objects']['parsed'].append("recycle")
return
def shadowcopy_get(entry, conf):
vfs_objects = conf.get("vfs objects", [])
return "shadow_copy_zfs" in vfs_objects
def shadowcopy_set(entry, val, data_in, data_out):
if not val:
return
data_out['vfs objects']['parsed'].append("shadow_copy_zfs")
return
def tmquot_get(entry, conf):
val = conf.pop(entry.smbconf, entry.default)
if type(val) != dict:
return 0
return int(val['raw'])
def acl_get(entry, conf):
conf.pop("nfs4:chown", None)
val = conf.pop(entry.smbconf, entry.default)
if type(val) != dict:
return val
return val['parsed']
def acl_set(entry, val, data_in, data_out):
if not val:
data_out['nt acl support'] = {"parsed": False}
loc = path_location(data_in['path'])
if loc == FSLocation.EXTERNAL:
return
try:
acltype = entry.middleware.call_sync('filesystem.path_get_acltype', data_in['path'])
except FileNotFoundError:
entry.middleware.logger.warning(
"%s: path does not exist. This is unexpected situation and "
"may indicate a failure of pool import.", data_in["path"]
)
raise ValueError(f"{data_in['path']}: path does not exist")
except NotImplementedError:
acltype = "DISABLED"
except OSError:
entry.middleware.logger.warning(
"%s: failed to determine acltype for path.",
data_in['path'], exc_info=True
)
acltype = "DISABLED"
if acltype == "NFS4":
data_out['vfs objects']['parsed'].append("ixnas")
data_out.update({"nfs4:chown": {"parsed": True}})
elif acltype == 'POSIX1E':
data_out['vfs objects']['parsed'].append("acl_xattr")
else:
entry.middleware.logger.debug(
"ACLs are disabled on path %s. Disabling NT ACL support.",
data_out['path']
)
data_out['nt acl support'] = {"parsed": False}
return
def fsrvp_get(entry, conf):
vfs_objects = conf.get("vfs objects", [])
return "zfs_fsrvp" in vfs_objects
def fsrvp_set(entry, val, data_in, data_out):
if not val:
return
data_out['vfs objects']['parsed'].append("zfs_fsrvp")
return
def streams_get(entry, conf):
vfs_objects = conf.get("vfs objects", [])
return "streams_xattr" in vfs_objects
def streams_set(entry, val, data_in, data_out):
"""
vfs_fruit requires streams_xattr to be enabled
"""
if not val and not data_in['fruit_enabled']:
return
data_out['vfs objects']['parsed'].append("streams_xattr")
data_out['smbd max xattr size'] = {"parsed": 2097152}
if data_in['fruit_enabled']:
data_out["fruit:metadata"] = {"parsed": "stream"}
data_out["fruit:resource"] = {"parsed": "stream"}
return
def mangling_get(entry, conf):
encoding = conf.get("fruit: encoding", None)
if encoding and encoding['raw'] == "native":
return True
mapping = conf.get("catia: mappings", None)
return bool(mapping)
def mangling_set(entry, val, data_in, data_out):
if not val:
return
data_out['vfs objects']['parsed'].append("catia")
fruit_enabled = data_in.get("fruit_enabled")
if fruit_enabled:
data_out.update({
'fruit:encoding': {"parsed": 'native'},
'mangled names': {"parsed": False},
})
else:
data_out.update({
'catia:mappings': {"parsed": ','.join(FRUIT_CATIA_MAPS)},
'mangled names': {"parsed": False},
})
return
def afp_get(entry, conf):
val = conf.pop(entry.smbconf, entry.default)
if type(val) != dict:
return val
if not val['parsed']:
return False
conf.pop('fruit:encoding', None)
conf.pop('fruit:metadata', None)
conf.pop('fruit:resource', None)
conf.pop('streams_xattr:store_prefix', None)
conf.pop('streams_xattr:store_stream_type', None)
conf.pop('streams_xattr:xattr_compat', None)
return True
def afp_set(entry, val, data_in, data_out):
if not val:
return
if 'fruit' not in data_out['vfs objects']['parsed']:
data_out['vfs objects']['parsed'].append("fruit")
if 'catia' not in data_out['vfs objects']['parsed']:
data_out['vfs objects']['parsed'].append("catia")
data_out['fruit:encoding'] = {"parsed": 'native'}
data_out['fruit:metadata'] = {"parsed": 'netatalk'}
data_out['fruit:resource'] = {"parsed": 'file'}
data_out['streams_xattr:prefix'] = {"parsed": 'user.'}
data_out['streams_xattr:store_stream_type'] = {"parsed": False}
data_out['streams_xattr:xattr_compat'] = {"parsed": True}
return
def audit_get(entry, conf):
vfs_objects = conf.get('vfs objects', [])
enabled = 'truenas_audit' in vfs_objects
watch_list = conf.pop('truenas_audit:watch_list', [])
ignore_list = conf.pop('trueans_audit:ignore_list', [])
return {'enable': enabled, 'watch_list': watch_list, 'ignore_list': ignore_list}
def audit_set(entry, val, data_in, data_out):
    """
    Serialize SMB auditing configuration: load the truenas_audit VFS module
    when enabled and emit any non-empty watch/ignore lists as comma-joined
    auxiliary parameters.
    """
    if not val:
        return
    if val['enable']:
        data_out['vfs objects']['parsed'].append("truenas_audit")
    data_out.update({
        f'truenas_audit:{key}': {'parsed': ', '.join(val[key])}
        for key in ('watch_list', 'ignore_list') if val[key]
    })
    return
# Registry <-> middleware schema map for SMB share configuration.  Each RegObj
# binds a middleware field name to its smb.conf parameter (None when the value
# is synthesized purely by hooks) and a default; optional smbconf_parser /
# schema_parser callables handle anything beyond a direct 1:1 mapping.
schema = [
    RegObj("purpose", "tn:purpose", ""),
    RegObj("path_local", None, "",
           smbconf_parser=path_local_get, schema_parser=path_local_set),
    RegObj("path", "path", "",
           smbconf_parser=path_get, schema_parser=path_set),
    RegObj("path_suffix", "tn:path_suffix", ""),
    RegObj("home", "tn:home", False),
    RegObj("vuid", "tn:vuid", ''),
    RegObj("comment", "comment", ""),
    RegObj("guestok", "guest ok", False),
    RegObj("enabled", "available", True),
    RegObj("hostsallow", "hosts allow", []),
    RegObj("hostsdeny", "hosts deny", []),
    RegObj("abe", "access based share enum", False),
    RegObj("ro", "read only", True),
    RegObj("browsable", "browseable", True),
    RegObj("timemachine", "fruit:time machine", True),
    RegObj("timemachine_quota", "fruit:time machine max size", "",
           smbconf_parser=tmquot_get),
    RegObj("durablehandle", "posix locking", True,
           smbconf_parser=durable_get, schema_parser=durable_set),
    RegObj("recyclebin", None, False,
           smbconf_parser=recycle_get, schema_parser=recycle_set),
    RegObj("shadowcopy", None, True,
           smbconf_parser=shadowcopy_get, schema_parser=shadowcopy_set),
    RegObj("acl", "nt acl support", True,
           smbconf_parser=acl_get, schema_parser=acl_set),
    RegObj("aapl_name_mangling", None, False,
           smbconf_parser=mangling_get, schema_parser=mangling_set),
    RegObj("fsrvp", None, False,
           smbconf_parser=fsrvp_get, schema_parser=fsrvp_set),
    RegObj("streams", None, True,
           smbconf_parser=streams_get, schema_parser=streams_set),
    RegObj("afp", "tn:afp", False,
           smbconf_parser=afp_get, schema_parser=afp_set),
    RegObj("audit", None, None,
           smbconf_parser=audit_get, schema_parser=audit_set),
]
def __init__(self, middleware):
    """Attach the middleware handle to every schema entry so parser hooks can make middleware calls."""
    self.middleware = middleware
    for entry in self.schema:
        entry.middleware = middleware
    super().__init__(self.schema)
| 16,768 | Python | .py | 376 | 33.470745 | 119 | 0.565969 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,739 | network.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/network.py | from middlewared.plugins.apps.ix_apps.docker.networks import list_networks
from middlewared.schema import Dict, Str
from middlewared.service import CRUDService, filterable, private
from middlewared.utils import filter_list
class DockerNetworkService(CRUDService):
    """Read-only view of docker networks present on the system."""
    class Config:
        namespace = 'docker.network'
        datastore_primary_key_type = 'string'
        cli_namespace = 'docker.network'
        role_prefix = 'DOCKER'
    ENTRY = Dict(
        'docker_network_entry',
        Dict('ipam', additional_attrs=True, null=True),
        Dict('labels', additional_attrs=True, null=True),
        Str('created', required=True, null=True),
        Str('driver', required=True, null=True),
        Str('id', required=True, null=True),
        Str('name', required=True, null=True),
        Str('scope', required=True, null=True),
        Str('short_id', required=True, null=True),
        additional_attrs=True,
    )
    @filterable
    def query(self, filters, options):
        """
        Query all docker networks
        """
        # If docker is not configured/running, report an empty result instead
        # of erroring out.
        if not self.middleware.call_sync('docker.state.validate', False):
            return filter_list([], filters, options)
        networks = []
        for network in list_networks():
            # Only keep the keys declared in ENTRY; anything else from the
            # docker API is dropped.
            networks.append({
                k: network.get(k) for k in (
                    'ipam', 'labels', 'created', 'driver', 'id', 'name', 'scope', 'short_id',
                )
            })
        return filter_list(networks, filters, options)
    @private
    def interfaces_mapping(self):
        """Return bridge interface names (br-<short_id>) for all docker networks; best-effort."""
        try:
            return [f'br-{network["short_id"]}' for network in self.query()]
        except Exception as e:
            # We don't want this to fail ever because this is used in interface.sync
            self.logger.error('Failed to get docker interfaces mapping: %s', e)
            return []
| 1,850 | Python | .py | 45 | 32.2 | 93 | 0.616027 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,740 | validation_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/validation_utils.py | import ipaddress
from middlewared.schema import ValidationErrors
def validate_address_pools(system_ips: list[dict], user_specified_networks: list[dict]):
    """
    Validate user supplied docker address pools: at least one pool, base
    network large enough for the requested subnet size, no overlap with
    configured system networks, and no duplicates.  Raises ValidationErrors.
    """
    verrors = ValidationErrors()
    if not user_specified_networks:
        verrors.add('docker_update.address_pools', 'At least one address pool must be specified')
    verrors.check()
    system_networks = {
        ipaddress.ip_network(f'{entry["address"]}/{entry["netmask"]}', False)
        for entry in system_ips
    }
    already_seen = set()
    for idx, pool in enumerate(user_specified_networks):
        candidate = ipaddress.ip_network(pool['base'], False)
        field = f'docker_update.address_pools.{idx}.base'
        # Subnet size must fit inside the base network.
        if candidate.prefixlen > pool['size']:
            verrors.add(
                field,
                f'Base network {pool["base"]} cannot be smaller than '
                f'the specified subnet size {pool["size"]}'
            )
        # No overlap with any address currently configured on the system.
        if any(candidate.overlaps(existing) for existing in system_networks):
            verrors.add(
                field,
                f'Base network {pool["base"]} overlaps with an existing system network'
            )
        # No duplicate pools in the submitted list.
        if candidate in already_seen:
            verrors.add(
                field,
                f'Base network {pool["base"]} is a duplicate of another specified network'
            )
        already_seen.add(candidate)
    verrors.check()
| 1,684 | Python | .py | 35 | 38.171429 | 98 | 0.640854 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,741 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/utils.py | import os
def applications_ds_name(pool: str) -> str:
    """Return the name of the ZFS dataset ('<pool>/ix-apps') holding application data on *pool*."""
    return os.path.join(pool, 'ix-apps')
| 97 | Python | .py | 3 | 29.333333 | 43 | 0.706522 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,742 | events.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/events.py | from middlewared.plugins.apps.ix_apps.docker.utils import get_docker_client, PROJECT_KEY
from middlewared.service import Service
class DockerEventService(Service):
    """Consumes docker container lifecycle events and republishes them per app project."""

    class Config:
        namespace = 'docker.events'
        private = True

    def setup(self):
        """Begin streaming docker container events if docker is in a usable state."""
        if not self.middleware.call_sync('docker.state.validate', False):
            return
        try:
            self.process()
        except Exception:
            # Fix: this is a synchronous (threaded) method, so middleware calls
            # must use call_sync().  The original used self.middleware.call(),
            # which returns an unawaited coroutine -- always truthy -- so the
            # "docker was stopped" early-return below could never trigger and
            # every stream error re-raised.
            if not self.middleware.call_sync('service.started', 'docker'):
                # This is okay and can happen when docker is stopped
                return
            raise

    def process(self):
        """Open a docker client and stream events until the connection drops."""
        with get_docker_client() as docker_client:
            self.process_internal(docker_client)

    def process_internal(self, client):
        """Forward container events belonging to an app project as 'docker.events'."""
        for container_event in client.events(
            decode=True, filters={
                'type': ['container'],
                'event': [
                    'create', 'destroy', 'detach', 'die', 'health_status', 'kill', 'unpause',
                    'oom', 'pause', 'rename', 'resize', 'restart', 'start', 'stop', 'update',
                ]
            }
        ):
            if not isinstance(container_event, dict):
                continue
            # Only events from containers labelled with an app project are relayed.
            if project := container_event.get('Actor', {}).get('Attributes', {}).get(PROJECT_KEY):
                self.middleware.send_event('docker.events', 'ADDED', id=project, fields=container_event)
async def setup(middleware):
    """Plugin entry point: register the docker.events event type and kick off the event stream."""
    middleware.event_register('docker.events', 'Docker container events', roles=['DOCKER_READ'])
    # We are going to check in setup docker events if setting up events is relevant or not
    middleware.create_task(middleware.call('docker.events.setup'))
| 1,719 | Python | .py | 37 | 35.675676 | 104 | 0.606459 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,743 | state_management.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/state_management.py | from middlewared.service import CallError, periodic, Service, private
from .state_utils import APPS_STATUS, Status, STATUS_DESCRIPTIONS
class DockerStateService(Service):
    """Tracks and transitions the overall state of the docker/apps subsystem."""
    class Config:
        namespace = 'docker.state'
        private = True
    # Current (status, description) pair; only mutate via set_status().
    STATUS = APPS_STATUS(Status.PENDING, STATUS_DESCRIPTIONS[Status.PENDING])
    async def before_start_check(self):
        """Validate the apps filesystem layout before docker starts; on failure set FAILED and re-raise."""
        try:
            await self.middleware.call('docker.setup.validate_fs')
        except CallError as e:
            # A locked dataset is an expected condition and gets no alert.
            if e.errno != CallError.EDATASETISLOCKED:
                await self.middleware.call(
                    'alert.oneshot_create',
                    'ApplicationsConfigurationFailed',
                    {'error': e.errmsg},
                )
            await self.set_status(Status.FAILED.value, f'Could not validate applications setup ({e.errmsg})')
            raise
        await self.middleware.call('alert.oneshot_delete', 'ApplicationsConfigurationFailed', None)
    async def after_start_check(self):
        """Reconcile status and alerts once the docker service start attempt has completed."""
        if await self.middleware.call('service.started', 'docker'):
            await self.set_status(Status.RUNNING.value)
            await self.middleware.call('alert.oneshot_delete', 'ApplicationsStartFailed', None)
        else:
            await self.set_status(Status.FAILED.value, 'Failed to start docker service')
            await self.middleware.call('alert.oneshot_create', 'ApplicationsStartFailed', {
                'error': 'Docker service could not be started'
            })
    async def start_service(self, mount_datasets: bool = False):
        """Start docker (optionally mounting the apps datasets first); sets FAILED status on any error."""
        await self.set_status(Status.INITIALIZING.value)
        catalog_sync_job = None
        try:
            if mount_datasets:
                catalog_sync_job = await self.middleware.call('docker.fs_manage.mount')
            # TODO: Check license active
            await self.before_start_check()
            await self.middleware.call('service.start', 'docker')
        except Exception as e:
            await self.set_status(Status.FAILED.value, str(e))
            raise
        else:
            await self.middleware.call('app.certificate.redeploy_apps_consuming_outdated_certs')
        finally:
            # Await the catalog sync triggered by mounting, regardless of outcome.
            if catalog_sync_job:
                await catalog_sync_job.wait()
    async def set_status(self, new_status, extra=None):
        """Set the current status (a Status member name) and emit a docker.state CHANGED event."""
        assert new_status in Status.__members__
        new_status = Status(new_status)
        self.STATUS = APPS_STATUS(
            new_status,
            f'{STATUS_DESCRIPTIONS[new_status]}:\n{extra}' if extra else STATUS_DESCRIPTIONS[new_status],
        )
        self.middleware.send_event('docker.state', 'CHANGED', fields=await self.get_status_dict())
    async def get_status_dict(self):
        """Return a {'status', 'description'} snapshot of the current state."""
        return {'status': self.STATUS.status.value, 'description': self.STATUS.description}
    async def initialize(self):
        """Derive the initial status after middleware start (no-op until system.ready)."""
        if not await self.middleware.call('system.ready'):
            # Status will be automatically updated when system is ready
            return
        if not (await self.middleware.call('docker.config'))['pool']:
            await self.set_status(Status.UNCONFIGURED.value)
        else:
            if await self.middleware.call('service.started', 'docker'):
                await self.set_status(Status.RUNNING.value)
            else:
                await self.set_status(Status.FAILED.value)
    async def validate(self, raise_error=True):
        """Check docker is configured and running; raise CallError or return a bool per raise_error."""
        # When `raise_error` is unset, we return boolean true if there was no issue with the state
        error_str = ''
        if not (await self.middleware.call('docker.config'))['pool']:
            error_str = 'No pool configured for Docker'
        if not error_str and not await self.middleware.call('service.started', 'docker'):
            error_str = 'Docker service is not running'
        if error_str and raise_error:
            raise CallError(error_str)
        return bool(error_str) is False
    @periodic(interval=86400)
    async def periodic_check(self):
        """Daily maintenance: sync the catalog and optionally schedule app image update checks."""
        if await self.validate(False) is False:
            return
        await (await self.middleware.call('catalog.sync')).wait()
        docker_config = await self.middleware.call('docker.config')
        if docker_config['enable_image_updates']:
            self.middleware.create_task(self.middleware.call('app.image.op.check_update'))
async def _event_system_ready(middleware, event_type, args):
    """On system.ready: install NVIDIA drivers if needed, then start docker (non-HA only)."""
    # we ignore the 'ready' event on an HA system since the failover event plugin
    # is responsible for starting this service
    if await middleware.call('failover.licensed'):
        return
    if (
        (await middleware.call('docker.config'))['nvidia'] and
        await middleware.call('nvidia.present') and
        not await middleware.call('nvidia.installed')
    ):
        await middleware.call('nvidia.install', False)
    if (await middleware.call('docker.config'))['pool']:
        middleware.create_task(middleware.call('docker.state.start_service', True))
    else:
        await middleware.call('docker.state.set_status', Status.UNCONFIGURED.value)
async def _event_system_shutdown(middleware, event_type, args):
    """On system.shutdown: stop the docker service if it is running (fire-and-forget)."""
    if await middleware.call('service.started', 'docker'):
        middleware.create_task(middleware.call('service.stop', 'docker'))
async def setup(middleware):
    """Plugin entry point: register docker.state events, hook system lifecycle, set initial status."""
    middleware.event_register('docker.state', 'Docker state events')
    middleware.event_subscribe('system.ready', _event_system_ready)
    middleware.event_subscribe('system.shutdown', _event_system_shutdown)
    await middleware.call('docker.state.initialize')
| 5,547 | Python | .py | 108 | 41.472222 | 109 | 0.658843 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,744 | state_setup.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/state_setup.py | import contextlib
import os
import shutil
import uuid
from datetime import datetime
from middlewared.service import CallError, private, Service
from middlewared.utils.interface import wait_for_default_interface_link_state_up
from .state_utils import (
DatasetDefaults, DOCKER_DATASET_NAME, docker_datasets, IX_APPS_MOUNT_PATH, missing_required_datasets,
)
class DockerSetupService(Service):
    """Validates and (re)creates the ZFS layout backing the apps/docker subsystem."""
    class Config:
        namespace = 'docker.setup'
        private = True
    @private
    async def validate_fs(self):
        """Verify the configured pool/datasets exist, are unlocked, properly mounted, and network is up."""
        config = await self.middleware.call('docker.config')
        if not config['pool']:
            raise CallError(f'{config["pool"]!r} pool not found.')
        # Every required apps dataset must already exist; only then do we
        # reconcile their properties below.
        if missing_datasets := missing_required_datasets({
            d['id'] for d in await self.middleware.call(
                'zfs.dataset.query', [['id', 'in', docker_datasets(config['dataset'])]], {
                    'extra': {'retrieve_properties': False, 'retrieve_children': False}
                }
            )
        }, config['dataset']):
            raise CallError(f'Missing "{", ".join(missing_datasets)}" dataset(s) required for starting docker.')
        await self.create_update_docker_datasets(config['dataset'])
        # Any locked dataset under (or containing) the apps dataset blocks startup.
        locked_datasets = [
            d['id'] for d in filter(
                lambda d: d['mountpoint'], await self.middleware.call('zfs.dataset.locked_datasets')
            )
            if d['mountpoint'].startswith(f'{config["dataset"]}/') or d['mountpoint'] in (
                f'/mnt/{k}' for k in (config['dataset'], config['pool'])
            )
        ]
        if locked_datasets:
            raise CallError(
                f'Please unlock following dataset(s) before starting docker: {", ".join(locked_datasets)}',
                errno=CallError.EDATASETISLOCKED,
            )
        # What we want to validate now is that the interface on default route is up and running
        # This is problematic for bridge interfaces which can or cannot come up in time
        await self.validate_interfaces()
        # Make sure correct ix-apps dataset is mounted
        if not await self.middleware.call('docker.fs_manage.ix_apps_is_mounted', config['dataset']):
            raise CallError(f'{config["dataset"]!r} dataset is not mounted on {IX_APPS_MOUNT_PATH!r}')
    @private
    async def validate_interfaces(self):
        """Raise CallError unless the default-route interface exists and its link is up."""
        default_iface, success = await self.middleware.run_in_thread(wait_for_default_interface_link_state_up)
        if default_iface is None:
            raise CallError('Unable to determine default interface')
        elif not success:
            raise CallError(f'Default interface {default_iface!r} is not in active state')
    @private
    async def status_change(self):
        """Handle apps pool (re)configuration: recreate/mount datasets and start docker."""
        config = await self.middleware.call('docker.config')
        if not config['pool']:
            await (await self.middleware.call('catalog.sync')).wait()
            return
        await self.create_update_docker_datasets(config['dataset'])
        # Docker dataset would not be mounted at this point, so we will explicitly mount them now
        catalog_sync_job = await self.middleware.call('docker.fs_manage.mount')
        if catalog_sync_job:
            await catalog_sync_job.wait()
        await self.middleware.call('docker.state.start_service')
        self.middleware.create_task(self.middleware.call('docker.state.periodic_check'))
    @private
    def move_conflicting_dir(self, ds_name):
        """Move aside any plain directory occupying the mountpoint intended for dataset *ds_name*."""
        base_ds_name = os.path.basename(ds_name)
        from_path = os.path.join(IX_APPS_MOUNT_PATH, base_ds_name)
        if ds_name == DOCKER_DATASET_NAME:
            from_path = IX_APPS_MOUNT_PATH
        with contextlib.suppress(FileNotFoundError):
            # can't stop someone from manually creating same name
            # directories on disk so we'll just move them
            shutil.move(from_path, f'{from_path}-{str(uuid.uuid4())[:4]}-{datetime.now().isoformat()}')
    @private
    def create_update_docker_datasets_impl(self, docker_ds):
        """Create missing apps datasets and re-assert ZFS properties on existing ones."""
        expected_docker_datasets = docker_datasets(docker_ds)
        actual_docker_datasets = {
            k['id']: k['properties'] for k in self.middleware.call_sync(
                'zfs.dataset.query', [['id', 'in', expected_docker_datasets]], {
                    'extra': {
                        'properties': list(DatasetDefaults.update_only(skip_ds_name_check=True).keys()),
                        'retrieve_children': False,
                        'user_properties': False,
                    }
                }
            )
        }
        for dataset_name in expected_docker_datasets:
            if existing_dataset := actual_docker_datasets.get(dataset_name):
                update_props = DatasetDefaults.update_only(os.path.basename(dataset_name))
                if any(val['value'] != update_props[name] for name, val in existing_dataset.items()):
                    # if any of the zfs properties don't match what we expect we'll update all properties
                    self.middleware.call_sync(
                        'zfs.dataset.update', dataset_name, {
                            'properties': {k: {'value': v} for k, v in update_props.items()}
                        }
                    )
            else:
                self.move_conflicting_dir(dataset_name)
                self.middleware.call_sync('zfs.dataset.create', {
                    'name': dataset_name, 'type': 'FILESYSTEM', 'properties': DatasetDefaults.create_time_props(
                        os.path.basename(dataset_name)
                    ),
                })
    @private
    async def create_update_docker_datasets(self, docker_ds):
        """The following logic applies:
        1. create the docker datasets fresh (if they dont exist)
        2. OR update the docker datasets zfs properties if they
        don't match reality.
        NOTE: this method needs to be optimized as much as possible
        since this is called on docker state change for each docker
        dataset
        """
        await self.middleware.run_in_thread(self.create_update_docker_datasets_impl, docker_ds)
| 6,161 | Python | .py | 119 | 40.042017 | 112 | 0.613519 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,745 | fs_manage.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/fs_manage.py | import errno
from middlewared.service import CallError, Service
from .state_utils import IX_APPS_MOUNT_PATH, Status
class DockerFilesystemManageService(Service):
    """Mounts/unmounts the apps dataset tree and answers questions about the ix-apps mountpoint."""
    class Config:
        namespace = 'docker.fs_manage'
        private = True
    async def common_func(self, mount):
        """(Un)mount the apps dataset tree; returns the follow-up catalog sync job or None.

        On failure the docker state is marked FAILED before the exception propagates.
        """
        if docker_ds := (await self.middleware.call('docker.config'))['dataset']:
            try:
                if mount:
                    await self.middleware.call('zfs.dataset.mount', docker_ds, {'recursive': True, 'force_mount': True})
                else:
                    await self.middleware.call('zfs.dataset.umount', docker_ds, {'force': True})
                return await self.middleware.call('catalog.sync')
            except Exception as e:
                await self.middleware.call(
                    'docker.state.set_status', Status.FAILED.value,
                    f'Failed to {"mount" if mount else "umount"} {docker_ds!r}: {e}',
                )
                raise
    async def mount(self):
        """Mount the apps dataset tree; returns the catalog sync job (or None if unconfigured)."""
        return await self.common_func(True)
    async def umount(self):
        """Unmount the apps dataset tree; returns the catalog sync job (or None if unconfigured)."""
        return await self.common_func(False)
    async def ix_apps_is_mounted(self, dataset_to_check=None):
        """
        This will tell us if some dataset is mounted on /mnt/.ix-apps or not.
        """
        try:
            fs_details = await self.middleware.call('filesystem.statfs', IX_APPS_MOUNT_PATH)
        except CallError as e:
            # Mountpoint does not exist at all -> nothing mounted.
            if e.errno == errno.ENOENT:
                return False
            raise
        # boot-pool backing means it's just the directory on the root fs, not a real apps dataset.
        if fs_details['source'].startswith('boot-pool/'):
            return False
        if dataset_to_check:
            return fs_details['source'] == dataset_to_check
        return True
| 1,723 | Python | .py | 40 | 31.775 | 120 | 0.589467 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,746 | state_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/state_utils.py | import dataclasses
import collections
import enum
import os
import typing
# (status, description) tuple used to report the overall apps/docker state.
APPS_STATUS: collections.namedtuple = collections.namedtuple('Status', ['status', 'description'])
# Child dataset (under the apps root) holding the synced app catalog.
CATALOG_DATASET_NAME: str = 'truenas_catalog'
# Name of the root apps dataset created on the selected pool.
DOCKER_DATASET_NAME: str = 'ix-apps'
# On-disk directory name and mountpoint of the apps root dataset.
IX_APPS_DIR_NAME = '.ix-apps'
IX_APPS_MOUNT_PATH: str = os.path.join('/mnt', IX_APPS_DIR_NAME)
@dataclasses.dataclass(slots=True, frozen=True)
class DatasetProp:
    """One ZFS property default for the apps datasets."""
    # Property value to apply.
    value: str
    # True if the property may only be set at dataset creation time.
    create_time_only: bool
    # When set, the property only applies to the dataset with this basename.
    ds_name: str | None = None
@dataclasses.dataclass(slots=True, frozen=True)
class DatasetDefaults:
    """Canonical ZFS property set for apps datasets, with create-time vs updatable split."""
    aclmode: DatasetProp = DatasetProp('discard', False)
    acltype: DatasetProp = DatasetProp('posix', False)
    atime: DatasetProp = DatasetProp('off', False)
    casesensitivity: DatasetProp = DatasetProp('sensitive', True)
    canmount: DatasetProp = DatasetProp('noauto', False)
    dedup: DatasetProp = DatasetProp('off', False)
    encryption: DatasetProp = DatasetProp('off', True, DOCKER_DATASET_NAME)
    exec: DatasetProp = DatasetProp('on', False)
    mountpoint: DatasetProp = DatasetProp(f'/{IX_APPS_DIR_NAME}', True, DOCKER_DATASET_NAME)
    normalization: DatasetProp = DatasetProp('none', True)
    overlay: DatasetProp = DatasetProp('on', False)
    setuid: DatasetProp = DatasetProp('on', False)
    snapdir: DatasetProp = DatasetProp('hidden', False)
    xattr: DatasetProp = DatasetProp('sa', False)
    @classmethod
    def create_time_props(cls, ds_name: str | None = None):
        """Return all property defaults applicable at creation time for dataset basename *ds_name*."""
        return {
            k: v['value'] for k, v in dataclasses.asdict(cls()).items()
            if v['ds_name'] in (ds_name, None)
        }
    @classmethod
    def update_only(cls, ds_name: str | None = None, skip_ds_name_check: bool = False):
        """Return only the properties that can be changed after creation (optionally for any dataset)."""
        return {
            k: v['value'] for k, v in dataclasses.asdict(cls()).items()
            if v['create_time_only'] is False and (skip_ds_name_check or v['ds_name'] in (ds_name, None))
        }
class Status(enum.Enum):
    """Lifecycle states of the apps/docker subsystem (values mirror member names)."""
    PENDING = 'PENDING'
    RUNNING = 'RUNNING'
    INITIALIZING = 'INITIALIZING'
    STOPPING = 'STOPPING'
    STOPPED = 'STOPPED'
    UNCONFIGURED = 'UNCONFIGURED'
    FAILED = 'FAILED'
# Human readable description shown alongside each Status value.
STATUS_DESCRIPTIONS = {
    Status.PENDING: 'Application(s) state is to be determined yet',
    Status.RUNNING: 'Application(s) are currently running',
    Status.INITIALIZING: 'Application(s) are being initialized',
    Status.STOPPING: 'Application(s) are being stopped',
    Status.STOPPED: 'Application(s) have been stopped',
    Status.UNCONFIGURED: 'Application(s) are not configured',
    Status.FAILED: 'Application(s) have failed to start',
}
def catalog_ds_path() -> str:
    """Return the absolute path where the app catalog dataset is mounted."""
    return os.path.join(IX_APPS_MOUNT_PATH, CATALOG_DATASET_NAME)
def docker_datasets(docker_ds: str) -> typing.List[str]:
    """Return the apps root dataset plus every child dataset docker requires."""
    children = (
        CATALOG_DATASET_NAME,
        'app_configs',
        'app_mounts',
        'docker',
    )
    datasets = [docker_ds]
    datasets.extend(os.path.join(docker_ds, child) for child in children)
    return datasets
def docker_dataset_custom_props(ds: str) -> typing.Dict:
    """Return dataset-specific ZFS property overrides for basename *ds* (empty dict when none)."""
    if ds == 'ix-apps':
        # The apps root dataset is unencrypted and pinned to its hidden mountpoint.
        return {
            'encryption': 'off',
            'mountpoint': f'/{IX_APPS_DIR_NAME}',
        }
    return dict()
def docker_dataset_update_props(props: dict) -> typing.Dict[str, str]:
    """Return *props* minus the ZFS properties that can only be set at dataset creation time."""
    create_time_only = ('casesensitivity', 'mountpoint', 'encryption')
    return {
        name: value
        for name, value in props.items()
        if name not in create_time_only
    }
def missing_required_datasets(existing_datasets: set, docker_ds: str) -> set:
    """
    Return the required apps datasets that are absent from *existing_datasets*,
    or an empty set when everything needed to start docker is present.
    """
    # Symmetric difference also surfaces unexpected extra datasets; intersecting
    # with the required set below keeps only the ones whose absence is fatal.
    diff = existing_datasets ^ set(docker_datasets(docker_ds))
    # Fix: the original used set(docker_ds), which builds a set of the
    # *characters* of the dataset name, so a missing root dataset was never
    # reported as fatal.  The root dataset itself is required.
    required = {docker_ds} | {
        os.path.join(docker_ds, k) for k in (
            'app_configs', 'app_mounts', 'docker', CATALOG_DATASET_NAME,
        )
    }
    if fatal_diff := diff.intersection(required):
        return fatal_diff
    return set()
| 3,812 | Python | .py | 96 | 33.677083 | 105 | 0.658814 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,747 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/attachments.py | import os
from middlewared.common.attachment import FSAttachmentDelegate
class DockerFSAttachmentDelegate(FSAttachmentDelegate):
    """Ties the docker/apps subsystem to its backing pool for pool export/lock handling."""
    name = 'docker'
    title = 'Docker'
    service = 'docker'
    async def query(self, path, enabled, options=None):
        """Return [{'id': <pool>}] when *path* is on (or contains) the configured apps dataset."""
        results = []
        # NOTE(review): variable name is a leftover from the k8s-based implementation;
        # this is the docker configuration.
        k8s_config = await self.middleware.call('docker.config')
        if not k8s_config['pool']:
            return results
        query_dataset = os.path.relpath(path, '/mnt')
        if query_dataset in (k8s_config['dataset'], k8s_config['pool']) or query_dataset.startswith(
            f'{k8s_config["dataset"]}/'
        ):
            results.append({'id': k8s_config['pool']})
        return results
    async def get_attachment_name(self, attachment):
        """Human readable name of an attachment (the pool name)."""
        return attachment['id']
    async def delete(self, attachments):
        """Unset the docker pool when its backing pool is being removed."""
        if attachments:
            await (await self.middleware.call('docker.update', {'pool': None})).wait(raise_error=True)
    async def toggle(self, attachments, enabled):
        """Start or stop docker depending on *enabled*."""
        await getattr(self, 'start' if enabled else 'stop')(attachments)
    async def stop(self, attachments):
        """Best-effort stop of the docker service."""
        if not attachments:
            return
        try:
            await self.middleware.call('service.stop', self.service)
        except Exception as e:
            self.middleware.logger.error('Failed to stop docker: %s', e)
    async def start(self, attachments):
        """Best-effort start of docker (mounting datasets first)."""
        if not attachments:
            return
        try:
            await self.middleware.call('docker.state.start_service', True)
        except Exception:
            self.middleware.logger.error('Failed to start docker')
async def setup(middleware):
    """Plugin entry point: register the docker attachment delegate with the pool plugin."""
    await middleware.call('pool.dataset.register_attachment_delegate', DockerFSAttachmentDelegate(middleware))
| 1,751 | Python | .py | 40 | 35.225 | 110 | 0.65822 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,748 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/docker/update.py | import errno
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Dict, Int, IPAddr, List, Patch, Str, ValidationErrors
from middlewared.service import CallError, ConfigService, job, private, returns
from middlewared.utils.zfs import query_imported_fast_impl
from middlewared.validators import Range
from .state_utils import Status
from .utils import applications_ds_name
from .validation_utils import validate_address_pools
class DockerModel(sa.Model):
    """Datastore model backing the docker service configuration."""
    __tablename__ = 'services_docker'
    id = sa.Column(sa.Integer(), primary_key=True)
    # Pool chosen to host the apps (ix-apps) dataset; None when unconfigured.
    pool = sa.Column(sa.String(255), default=None, nullable=True)
    enable_image_updates = sa.Column(sa.Boolean(), default=True)
    nvidia = sa.Column(sa.Boolean(), default=False)
    # Docker default-address-pools; each entry is {'base': cidr, 'size': prefixlen}.
    address_pools = sa.Column(sa.JSON(list), default=[{'base': '172.17.0.0/12', 'size': 24}])
class DockerService(ConfigService):
    """Configuration service for the docker/apps subsystem."""
    class Config:
        datastore = 'services.docker'
        datastore_extend = 'docker.config_extend'
        cli_namespace = 'app.docker'
        role_prefix = 'DOCKER'
    ENTRY = Dict(
        'docker_entry',
        Bool('enable_image_updates', required=True),
        Int('id', required=True),
        Str('dataset', required=True),
        Str('pool', required=True, null=True),
        Bool('nvidia', required=True),
        List('address_pools', items=[
            Dict(
                'address_pool',
                IPAddr('base', cidr=True),
                Int('size', validators=[Range(min_=1, max_=32)])
            )
        ]),
        update=True,
    )
    @private
    async def config_extend(self, data):
        """Datastore extend hook: derive the apps dataset name from the configured pool."""
        data['dataset'] = applications_ds_name(data['pool']) if data.get('pool') else None
        return data
    @accepts(
        Patch(
            'docker_entry', 'docker_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'dataset'}),
            ('attr', {'update': True}),
        )
    )
    @job(lock='docker_update')
    async def do_update(self, job, data):
        """
        Update Docker service configuration.
        """
        old_config = await self.config()
        # 'dataset' is derived (config_extend) and not stored in the datastore.
        old_config.pop('dataset')
        config = old_config.copy()
        config.update(data)
        verrors = ValidationErrors()
        if config['pool'] and not await self.middleware.run_in_thread(query_imported_fast_impl, [config['pool']]):
            verrors.add('docker_update.pool', 'Pool not found.')
        verrors.check()
        if config['address_pools'] != old_config['address_pools']:
            validate_address_pools(
                await self.middleware.call('interface.ip_in_use', {'static': True}), config['address_pools']
            )
        if old_config != config:
            if config['pool'] != old_config['pool']:
                # We want to clear upgrade alerts for apps at this point
                await self.middleware.call('app.clear_upgrade_alerts_for_all')
            if any(config[k] != old_config[k] for k in ('pool', 'address_pools')):
                # Either setting requires docker to be stopped and the apps
                # dataset unmounted before applying the change.
                job.set_progress(20, 'Stopping Docker service')
                try:
                    await self.middleware.call('service.stop', 'docker')
                except Exception as e:
                    raise CallError(f'Failed to stop docker service: {e}')
                catalog_sync_job = None
                try:
                    catalog_sync_job = await self.middleware.call('docker.fs_manage.umount')
                except CallError as e:
                    # We handle this specially, if for whatever reason ix-apps dataset is not there,
                    # we don't make it fatal to change pools etc - however if some dataset other then
                    # boot pool is mounted at ix-apps dir, then we will error out as it's a problem
                    # and needs to be fixed before we can proceed
                    if e.errno != errno.ENOENT or await self.middleware.call('docker.fs_manage.ix_apps_is_mounted'):
                        raise
                finally:
                    if catalog_sync_job:
                        await catalog_sync_job.wait()
                await self.middleware.call('docker.state.set_status', Status.UNCONFIGURED.value)
            await self.middleware.call('datastore.update', self._config.datastore, old_config['id'], config)
            if config['pool'] != old_config['pool']:
                job.set_progress(60, 'Applying requested configuration')
                await self.middleware.call('docker.setup.status_change')
            elif config['pool'] and config['address_pools'] != old_config['address_pools']:
                job.set_progress(60, 'Starting docker')
                catalog_sync_job = await self.middleware.call('docker.fs_manage.mount')
                if catalog_sync_job:
                    await catalog_sync_job.wait()
                await self.middleware.call('service.start', 'docker')
            if not old_config['nvidia'] and config['nvidia']:
                # Driver installation progress maps to the 70-90% range of this job.
                await (
                    await self.middleware.call(
                        'nvidia.install',
                        job_on_progress_cb=lambda encoded: job.set_progress(
                            70 + int(encoded['progress']['percent'] * 0.2),
                            encoded['progress']['description'],
                        )
                    )
                ).wait(raise_error=True)
            if config['pool'] and config['address_pools'] != old_config['address_pools']:
                job.set_progress(95, 'Initiating redeployment of applications to apply new address pools changes')
                await self.middleware.call(
                    'core.bulk', 'app.redeploy', [
                        [app['name']] for app in await self.middleware.call('app.query', [['state', '!=', 'STOPPED']])
                    ]
                )
        job.set_progress(100, 'Requested configuration applied')
        return await self.config()
    @accepts(roles=['DOCKER_READ'])
    @returns(Dict(
        Str('status', enum=[e.value for e in Status]),
        Str('description'),
    ))
    async def status(self):
        """
        Returns the status of the docker service.
        """
        return await self.middleware.call('docker.state.get_status_dict')
    @accepts(roles=['DOCKER_READ'])
    @returns(Bool())
    async def lacks_nvidia_drivers(self):
        """
        Returns true if an NVIDIA GPU is present, but NVIDIA drivers are not installed.
        """
        return await self.middleware.call('nvidia.present') and not await self.middleware.call('nvidia.installed')
| 6,621 | Python | .py | 137 | 36.233577 | 118 | 0.5803 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,749 | firstboot.py | truenas_middleware/src/middlewared/middlewared/plugins/usage_/firstboot.py | from middlewared.service import Service
class UsageService(Service):
    """Reports one-time usage statistics after the first boot of the system."""
    # Number of submission attempts before giving up and logging the failure.
    FAILED_RETRIES = 3

    class Config:
        private = True

    async def firstboot(self):
        """Submit a 'firstboot' usage statistic, retrying up to FAILED_RETRIES times."""
        _hash = await self.middleware.call('system.host_id')
        version = await self.middleware.call('system.version')
        for attempts_left in range(self.FAILED_RETRIES, 0, -1):
            try:
                await self.middleware.call('usage.submit_stats', {
                    'platform': 'TrueNAS-SCALE',
                    'system_hash': _hash,
                    'firstboot': [{'version': version}]
                })
            except Exception as e:
                # Only report the error once the final attempt has failed.
                if attempts_left == 1:
                    self.logger.error('Failed to send firstboot statistics: %s', e)
            else:
                break
| 824 | Python | .py | 22 | 24.954545 | 83 | 0.537014 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,750 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/security/update.py | import middlewared.sqlalchemy as sa
from middlewared.plugins.failover_.disabled_reasons import DisabledReasonsEnum
from middlewared.plugins.system.reboot import RebootReason
from middlewared.schema import accepts, Bool, Dict, Int, Patch
from middlewared.service import ConfigService, ValidationError, job, private
class SystemSecurityModel(sa.Model):
    """Datastore model backing the system security (FIPS) configuration."""
    __tablename__ = 'system_security'
    id = sa.Column(sa.Integer(), primary_key=True)
    enable_fips = sa.Column(sa.Boolean(), default=False)
class SystemSecurityService(ConfigService):
    """Configuration service for system-wide security settings (FIPS mode)."""
    class Config:
        cli_namespace = 'system.security'
        datastore = 'system.security'
        namespace = 'system.security'
    ENTRY = Dict(
        'system_security_entry',
        Bool('enable_fips', required=True),
        Int('id', required=True),
    )
    @private
    async def configure_fips_on_ha(self, is_ha, job):
        """Propagate a FIPS toggle to the standby controller and reboot it (HA only)."""
        if not is_ha:
            return
        await self.middleware.call('failover.call_remote', 'etc.generate', ['fips'])
        remote_reboot_reasons = await self.middleware.call('failover.call_remote', 'system.reboot.list_reasons')
        if RebootReason.FIPS.name in remote_reboot_reasons:
            # means FIPS is being toggled but other node is already pending a reboot,
            # so it means the user toggled FIPS twice and somehow the other node
            # didn't reboot (even though we do this automatically). This is an edge
            # case and means someone or something is doing things behind our backs
            await self.middleware.call('failover.call_remote', 'system.reboot.remove_reason', [RebootReason.FIPS.name])
        else:
            try:
                # we automatically reboot (and wait for) the other controller
                reboot_job = await self.middleware.call('failover.reboot.other_node')
                await job.wrap(reboot_job)
            except Exception:
                # something extravagant happened, so we'll just play it safe and say that
                # another reboot is required
                await self.middleware.call('failover.reboot.add_remote_reason', RebootReason.FIPS.name,
                                           RebootReason.FIPS.value)
    @private
    async def validate(self, is_ha, ha_disabled_reasons):
        """Reject FIPS changes on unlicensed systems or while HA is unhealthy (FIPS-reboot reasons excepted)."""
        schema = 'system_security_update.enable_fips'
        if not await self.middleware.call('system.security.info.fips_available'):
            raise ValidationError(
                schema,
                'This feature can only be enabled on licensed iX enterprise systems. '
                'Please contact iX sales for more information.'
            )
        if is_ha and ha_disabled_reasons:
            # Pending FIPS reboots on either node are an expected in-between state.
            bad_reasons = set(ha_disabled_reasons) - {
                DisabledReasonsEnum.LOC_FIPS_REBOOT_REQ.name,
                DisabledReasonsEnum.REM_FIPS_REBOOT_REQ.name,
            }
            if bad_reasons:
                formatted = '\n'.join([DisabledReasonsEnum[i].value for i in bad_reasons])
                raise ValidationError(
                    schema,
                    f'Security settings cannot be updated while HA is in an unhealthy state: ({formatted})'
                )
    @accepts(
        Patch(
            'system_security_entry', 'system_security_update',
            ('rm', {'name': 'id'}),
            ('attr', {'update': True}),
        )
    )
    @job(lock='security_update')
    async def do_update(self, job, data):
        """
        Update System Security Service Configuration.
        `enable_fips` when set, enables FIPS mode.
        """
        is_ha = await self.middleware.call('failover.licensed')
        reasons = await self.middleware.call('failover.disabled.reasons')
        await self.validate(is_ha, reasons)
        old = await self.config()
        new = old.copy()
        new.update(data)
        if new == old:
            return new
        await self.middleware.call('datastore.update', self._config.datastore, old['id'], new)
        if new['enable_fips'] != old['enable_fips']:
            # TODO: We likely need to do some SSH magic as well
            # let's investigate the exact configuration there
            await self.middleware.call('etc.generate', 'fips')
            await self.middleware.call('system.reboot.toggle_reason', RebootReason.FIPS.name, RebootReason.FIPS.value)
            await self.configure_fips_on_ha(is_ha, job)
        return await self.config()
| 4,452 | Python | .py | 90 | 38.622222 | 119 | 0.631361 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,751 | info.py | truenas_middleware/src/middlewared/middlewared/plugins/security/info.py | from subprocess import run
from middlewared.schema import accepts, returns, Bool
from middlewared.service import CallError, Service
class SystemSecurityInfoService(Service):
    """Read-only queries about the system's FIPS capability and state."""
    class Config:
        namespace = 'system.security.info'
        cli_namespace = 'system.security.info'
    @accepts(roles=['READONLY_ADMIN'])
    @returns(Bool('fips_available'))
    def fips_available(self):
        """Returns a boolean identifying whether or not FIPS
        mode may be toggled on this system"""
        # being able to toggle fips mode is hinged on whether
        # or not this is an iX licensed piece of hardware
        return bool(self.middleware.call_sync('system.license'))
    @accepts(roles=['READONLY_ADMIN'])
    # fix: the returns schema was named 'fips_available' (copy-paste from the
    # sibling method above); it must reflect this method's result
    @returns(Bool('fips_enabled'))
    def fips_enabled(self):
        """Returns a boolean identifying whether or not FIPS
        mode has been enabled on this system"""
        # ask openssl which providers are active; the FIPS provider is only
        # listed when FIPS mode is in effect
        cp = run(['openssl', 'list', '-providers'], capture_output=True)
        if cp.returncode:
            raise CallError(f'Failed to determine if fips is enabled: {cp.stderr.decode()}')
        return b'OpenSSL FIPS Provider' in cp.stdout
| 1,145 | Python | .py | 24 | 40.75 | 92 | 0.693896 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,752 | pool_wait.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/pool_wait.py | import libzfs
from middlewared.schema import accepts, Dict, Str
from middlewared.service import CallError, Service
# names of all zpool wait activities exposed by libzfs (e.g. scrub, resilver);
# used as the enum for the `wait` endpoint's `activity_type` argument
POOL_ACTIVITY_TYPES = [a.name for a in libzfs.ZpoolWaitActivity]
class ZFSPoolService(Service):
    """Thin wrapper around `zpool wait` via libzfs."""
    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True
    @accepts(
        Str('pool_name'),
        Dict(
            'options',
            Str('activity_type', enum=POOL_ACTIVITY_TYPES, required=True),
        )
    )
    def wait(self, pool_name, options):
        """Block until the given activity on `pool_name` finishes."""
        activity = options['activity_type']
        try:
            with libzfs.ZFS() as zfs:
                zfs.get(pool_name).wait(activity)
        except libzfs.ZFSException as err:
            raise CallError(str(err))
| 748 | Python | .py | 23 | 24.26087 | 74 | 0.614206 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,753 | pool_status.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/pool_status.py | from pathlib import Path
from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import Service
from .status_util import get_normalized_disk_info, get_zfs_vdev_disks, get_zpool_status
class ZPoolService(Service):
    """Normalized `zpool status` information, keyed by disk and by vdev class."""
    class Config:
        namespace = 'zpool'
        private = True
        cli_private = True
        process_pool = True
    def resolve_block_path(self, path, should_resolve):
        """Resolve a device path (e.g. /dev/disk/by-partuuid/...) to its parent
        block device name (e.g. sda). Returns `path` unchanged when
        `should_resolve` is false or resolution fails."""
        if not should_resolve:
            return path
        try:
            dev = Path(path).resolve().name
            # /sys/class/block/<partition> resolves to .../<disk>/<partition>,
            # so the parent directory name is the whole-disk device
            resolved = Path(f'/sys/class/block/{dev}').resolve().parent.name
            if resolved == 'block':
                # parent is the /sys/class/block directory itself, meaning
                # `dev` is already a whole disk (not a partition), e.g.:
                # example zpool status
                # NAME                                        STATE     READ WRITE CKSUM
                # tank                                        DEGRADED     0     0     0
                #  mirror-0                                   DEGRADED     0     0     0
                #    sdrh1                                    ONLINE       0     0     0
                #    7008beaf-4fa3-4c43-ba15-f3d5bea3fe0c     REMOVED      0     0     0
                #    sda1                                     ONLINE       0     0     0
                return dev
            return resolved
        except Exception:
            # best-effort: fall back to the original path on any failure
            return path
    def resolve_block_paths(self, paths, should_resolve):
        """Apply `resolve_block_path` to each path in `paths`."""
        if not should_resolve:
            return paths
        return [self.resolve_block_path(i, should_resolve) for i in paths]
    def status_impl(self, pool_name, vdev_type, members, **kwargs):
        """Flatten the vdev tree of one vdev class (`members`) into a dict of
        {disk: normalized-info}. Handles disk/dspare leaves, spare groups
        (picking the active spare), and in-progress `replacing` vdevs."""
        real_paths = kwargs.setdefault('real_paths', False)
        final = dict()
        for member in filter(lambda x: x.get('vdev_type') != 'file', members.values()):
            vdev_disks = self.resolve_block_paths(get_zfs_vdev_disks(member), real_paths)
            if member.get('vdev_type') in ('disk', 'dspare'):
                # top-level single-disk member: reported as a "stripe"
                disk = self.resolve_block_path(member['path'], real_paths)
                final[disk] = get_normalized_disk_info(pool_name, member, 'stripe', vdev_type, vdev_disks)
            else:
                for i in member['vdevs'].values():
                    if i['vdev_type'] == 'spare':
                        i_vdevs = list(i['vdevs'].values())
                        if not i_vdevs:
                            # An edge case but just covering to be safe
                            continue
                        # prefer the child marked class == 'spare', else first child
                        i = next((e for e in i_vdevs if e['class'] == 'spare'), i_vdevs[0])
                    elif i['vdev_type'] == 'replacing':
                        # a replace operation in flight: report both old and new disks
                        for j in filter(lambda entry: entry.get('path'), list(i['vdevs'].values())):
                            disk = self.resolve_block_path(j['path'], real_paths)
                            final[disk] = get_normalized_disk_info(pool_name, j, member['name'], vdev_type, vdev_disks)
                        continue
                    disk = self.resolve_block_path(i['path'], real_paths)
                    final[disk] = get_normalized_disk_info(pool_name, i, member['name'], vdev_type, vdev_disks)
        return final
    @accepts(Dict(
        Str('name', required=False, default=None),
        Bool('real_paths', required=False, default=False),
    ))
    def status(self, data):
        """The equivalent of running 'zpool status' from the cli.
        `name`: str the name of the zpool for which to return the status info
        `real_paths`: bool if True, resolve the underlying devices to their
            real device (i.e. /dev/disk/by-id/blah -> /dev/sda1)
        An example of what this returns looks like the following:
            {
              "disks": {
                "/dev/disk/by-partuuid/d9cfa346-8623-402f-9bfe-a8256de902ec": {
                  "pool_name": "evo",
                  "disk_status": "ONLINE",
                  "disk_read_errors": 0,
                  "disk_write_errors": 0,
                  "disk_checksum_errors": 0,
                  "vdev_name": "stripe",
                  "vdev_type": "data",
                  "vdev_disks": [
                    "/dev/disk/by-partuuid/d9cfa346-8623-402f-9bfe-a8256de902ec"
                  ]
                }
              },
              "evo": {
                "spares": {},
                "logs": {},
                "dedup": {},
                "special": {},
                "l2cache": {},
                "data": {
                  "/dev/disk/by-partuuid/d9cfa346-8623-402f-9bfe-a8256de902ec": {
                    "pool_name": "evo",
                    "disk_status": "ONLINE",
                    "disk_read_errors": 0,
                    "disk_write_errors": 0,
                    "disk_checksum_errors": 0,
                    "vdev_name": "stripe",
                    "vdev_type": "data",
                    "vdev_disks": [
                      "/dev/disk/by-partuuid/d9cfa346-8623-402f-9bfe-a8256de902ec"
                    ]
                  }
                }
              }
            }
        """
        pools = get_zpool_status(data.get('name'))
        final = {'disks': dict()}
        for pool_name, pool_info in pools.items():
            final[pool_name] = dict()
            # We need some normalization for data vdev here
            pool_info['data'] = pool_info.get('vdevs', {}).get(pool_name, {}).get('vdevs', {})
            for vdev_type in ('spares', 'logs', 'dedup', 'special', 'l2cache', 'data'):
                vdev_members = pool_info.get(vdev_type, {})
                if not vdev_members:
                    final[pool_name][vdev_type] = dict()
                    continue
                info = self.status_impl(pool_name, vdev_type, vdev_members, **data)
                # we key on pool name and disk id because
                # this was designed, primarily, for the
                # `webui.enclosure.dashboard` endpoint
                final[pool_name][vdev_type] = info
                final['disks'].update(info)
        return final
| 5,975 | Python | .py | 122 | 33.983607 | 119 | 0.482859 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,754 | status_util.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/status_util.py | import json
import subprocess
from middlewared.service import CallError, ValidationError
def get_normalized_disk_info(pool_name: str, disk: dict, vdev_name: str, vdev_type: str, vdev_disks: list) -> dict:
    """Build the flat per-disk status record used by the zpool status endpoints.

    `disk` is one vdev-leaf dict from the parsed `zpool status` JSON; error
    counters default to 0 when absent, but `state` must be present.
    """
    return dict(
        pool_name=pool_name,
        disk_status=disk['state'],
        disk_read_errors=disk.get('read_errors', 0),
        disk_write_errors=disk.get('write_errors', 0),
        disk_checksum_errors=disk.get('checksum_errors', 0),
        vdev_name=vdev_name,
        vdev_type=vdev_type,
        vdev_disks=vdev_disks,
    )
def get_zfs_vdev_disks(vdev) -> list:
    """Recursively collect the device paths of every disk under `vdev`.

    UNAVAIL/OFFLINE vdevs and file-backed vdevs contribute nothing.
    Keys are read with .get() because draid-based vdevs may omit them.
    """
    # We get this safely because of draid based vdevs
    if vdev.get('state') in ('UNAVAIL', 'OFFLINE'):
        return []
    kind = vdev.get('vdev_type')
    if kind == 'disk':
        return [vdev['path']]
    if kind == 'file':
        return []
    collected = []
    for child in vdev.get('vdevs', {}).values():
        collected += get_zfs_vdev_disks(child)
    return collected
def get_zpool_status(pool_name: str | None = None) -> dict:
    """Run `zpool status -jP --json-int` (optionally for one pool) and return
    the parsed 'pools' object.

    Raises ValidationError when the named pool does not exist, CallError for
    any other zpool failure.
    """
    cmd = ['zpool', 'status', '-jP', '--json-int']
    if pool_name:
        cmd.append(pool_name)
    proc = subprocess.run(cmd, capture_output=True, check=False)
    if proc.returncode:
        if b'no such pool' in proc.stderr:
            raise ValidationError('zpool.status', f'{pool_name!r} not found')
        raise CallError(f'Failed to get zpool status: {proc.stderr.decode()}')
    return json.loads(proc.stdout)['pools']
| 1,507 | Python | .py | 36 | 35.027778 | 115 | 0.617385 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,755 | dataset_quota.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/dataset_quota.py | import libzfs
from middlewared.plugins.zfs_.utils import TNUserProp
from middlewared.service import CallError, Service
class ZFSDatasetService(Service):
    """ZFS dataset quota queries and updates (user/group/project/dataset)."""
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True
    def query_for_quota_alert(self):
        """Return, for every dataset, only the properties the quota alert
        sources need (quota/refquota values, usage, and the TrueNAS
        warn/crit threshold user-properties)."""
        options = {
            'extra': {
                'properties': [
                    'name',
                    'quota',
                    'available',
                    'refquota',
                    'used',
                    'usedbydataset',
                    'mounted',
                    'mountpoint',
                    TNUserProp.QUOTA_WARN.value,
                    TNUserProp.QUOTA_CRIT.value,
                    TNUserProp.REFQUOTA_WARN.value,
                    TNUserProp.REFQUOTA_CRIT.value,
                ]
            }
        }
        return [
            {k: v for k, v in i['properties'].items() if k in options['extra']['properties']}
            for i in self.middleware.call_sync('zfs.dataset.query', [], options)
        ]
    # quota_type in ('USER', 'GROUP', 'DATASET', 'PROJECT')
    def get_quota(self, ds, quota_type):
        """Return quota entries for dataset `ds`.

        For 'DATASET' this is a single entry built from the dataset's own
        quota/refquota/used properties. For USER/GROUP/PROJECT it is one
        entry per POSIX id, merging used/quota/obj_used/obj_quota, with
        names resolved for users and groups where possible.
        """
        quota_type = quota_type.upper()
        if quota_type == 'DATASET':
            dataset = self.middleware.call_sync('zfs.dataset.query', [('id', '=', ds)], {'get': True})
            return [{
                'quota_type': quota_type,
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]
        elif quota_type == 'USER':
            quota_props = [
                libzfs.UserquotaProp.USERUSED,
                libzfs.UserquotaProp.USERQUOTA,
                libzfs.UserquotaProp.USEROBJUSED,
                libzfs.UserquotaProp.USEROBJQUOTA
            ]
        elif quota_type == 'GROUP':
            quota_props = [
                libzfs.UserquotaProp.GROUPUSED,
                libzfs.UserquotaProp.GROUPQUOTA,
                libzfs.UserquotaProp.GROUPOBJUSED,
                libzfs.UserquotaProp.GROUPOBJQUOTA
            ]
        elif quota_type == 'PROJECT':
            quota_props = [
                libzfs.UserquotaProp.PROJECTUSED,
                libzfs.UserquotaProp.PROJECTQUOTA,
                libzfs.UserquotaProp.PROJECTOBJUSED,
                libzfs.UserquotaProp.PROJECTOBJQUOTA
            ]
        else:
            raise CallError(f'Unknown quota type {quota_type}')
        try:
            with libzfs.ZFS() as zfs:
                resource = zfs.get_object(ds)
                quotas = resource.userspace(quota_props)
        except libzfs.ZFSException:
            raise CallError(f'Failed retreiving {quota_type} quotas for {ds}')
        # We get the quotas in separate lists for each prop. Collect these into
        # a single list of objects containing all the requested props. Each
        # object is unique by (domain, rid), and we only work with POSIX ids,
        # so we use rid as a dict key and update the values as we iterate
        # through all the quotas.
        keymap = {
            libzfs.UserquotaProp.USERUSED: 'used_bytes',
            libzfs.UserquotaProp.GROUPUSED: 'used_bytes',
            libzfs.UserquotaProp.PROJECTUSED: 'used_bytes',
            libzfs.UserquotaProp.USERQUOTA: 'quota',
            libzfs.UserquotaProp.GROUPQUOTA: 'quota',
            libzfs.UserquotaProp.PROJECTQUOTA: 'quota',
            libzfs.UserquotaProp.USEROBJUSED: 'obj_used',
            libzfs.UserquotaProp.GROUPOBJUSED: 'obj_used',
            libzfs.UserquotaProp.PROJECTOBJUSED: 'obj_used',
            libzfs.UserquotaProp.USEROBJQUOTA: 'obj_quota',
            libzfs.UserquotaProp.GROUPOBJQUOTA: 'obj_quota',
            libzfs.UserquotaProp.PROJECTOBJQUOTA: 'obj_quota',
        }
        collected = {}
        for quota_prop, quota_list in quotas.items():
            for quota in quota_list:
                # We only use POSIX ids, skip anything with a domain.
                if quota['domain'] != '':
                    continue
                rid = quota['rid']
                entry = collected.get(rid, {
                    'quota_type': quota_type,
                    'id': rid
                })
                key = keymap[quota_prop]
                entry[key] = quota['space']
                collected[rid] = entry
        # Do name lookups last so we aren't repeating for all the quota props
        # for each entry.
        def add_name(entry):
            try:
                if quota_type == 'USER':
                    entry['name'] = self.middleware.call_sync('user.get_user_obj', {'uid': entry['id']})['pw_name']
                elif quota_type == 'GROUP':
                    entry['name'] = self.middleware.call_sync('group.get_group_obj', {'gid': entry['id']})['gr_name']
            except Exception:
                # unresolvable ids are not fatal; the entry simply has no name
                self.logger.debug('Unable to resolve %s id %d to name', quota_type.lower(), entry['id'])
            return entry
        return [add_name(entry) for entry in collected.values()]
    def set_quota(self, ds, quotas):
        """Apply a batch of quota updates to dataset `ds`.

        Each item of `quotas` maps an xid (or property name for the
        'dataset' type) to {'quota_type': ..., 'quota_value': ...}; non-dataset
        types become `<type>quota@<xid>` ZFS properties.
        """
        properties = {}
        for quota in quotas:
            for xid, quota_info in quota.items():
                quota_type = quota_info['quota_type'].lower()
                quota_value = {'value': quota_info['quota_value']}
                if quota_type == 'dataset':
                    # here xid is the dataset property name itself (quota/refquota)
                    properties[xid] = quota_value
                else:
                    properties[f'{quota_type}quota@{xid}'] = quota_value
        if properties:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(ds)
                dataset.update_properties(properties)
| 5,846 | Python | .py | 132 | 30.469697 | 117 | 0.536128 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,756 | pool.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/pool.py | import errno
import libzfs
from middlewared.schema import accepts, Bool, Dict, Int, List, Str
from middlewared.service import CallError, CRUDService, filterable, job, ValidationErrors
from middlewared.utils import filter_list
from middlewared.utils.zfs import guid_fast_impl, state_fast_impl, query_imported_fast_impl
from .pool_utils import convert_topology, find_vdev
class ZFSPoolService(CRUDService):
    """Low-level zpool CRUD and maintenance operations backed by libzfs."""
    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True
    @filterable
    def query(self, filters, options):
        """Query imported zpools; child datasets are excluded (use
        zfs.dataset.query for those)."""
        # We should not get datasets, there is zfs.dataset.query for that
        state_kwargs = {'datasets_recursive': False}
        with libzfs.ZFS() as zfs:
            # Handle `id` or `name` filter specially to avoid getting every property for all zpools
            if filters and len(filters) == 1 and list(filters[0][:2]) in (['id', '='], ['name', '=']):
                try:
                    pools = [zfs.get(filters[0][2]).asdict(**state_kwargs)]
                except libzfs.ZFSException:
                    # requested pool is not imported / does not exist
                    pools = []
            else:
                pools = [i.asdict(**state_kwargs) for i in zfs.pools]
        return filter_list(pools, filters, options)
    @accepts(
        Dict(
            'zfspool_create',
            Str('name', required=True),
            List('vdevs', items=[
                Dict(
                    'vdev',
                    Str('root', enum=['DATA', 'CACHE', 'LOG', 'SPARE', 'SPECIAL', 'DEDUP'], required=True),
                    Str(
                        'type', enum=[
                            'DRAID1', 'DRAID2', 'DRAID3', 'RAIDZ1', 'RAIDZ2', 'RAIDZ3', 'MIRROR', 'STRIPE'
                        ], required=True,
                    ),
                    List('devices', items=[Str('disk')], required=True),
                    Int('draid_data_disks'),
                    Int('draid_spare_disks'),
                ),
            ], required=True),
            Dict('options', additional_attrs=True),
            Dict('fsoptions', additional_attrs=True),
        ),
    )
    def do_create(self, data):
        """Create a zpool from the given vdev topology with pool `options`
        and root-dataset `fsoptions`, returning the new pool's state."""
        with libzfs.ZFS() as zfs:
            topology = convert_topology(zfs, data['vdevs'])
            zfs.create(data['name'], topology, data['options'], data['fsoptions'])
        return self.middleware.call_sync('zfs.pool.get_instance', data['name'])
    @accepts(Str('pool'), Dict(
        'options',
        Dict('properties', additional_attrs=True),
    ))
    def do_update(self, name, options):
        """Set zpool properties on `name`; each property accepts either a
        raw 'value' or a 'parsed' (native-typed) value."""
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                for k, v in options['properties'].items():
                    prop = pool.properties[k]
                    if 'value' in v:
                        prop.value = v['value']
                    elif 'parsed' in v:
                        prop.parsed = v['parsed']
        except libzfs.ZFSException as e:
            raise CallError(str(e))
        else:
            return options
    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def do_delete(self, name, options):
        """Destroy zpool `name`. A failed unmount is surfaced as EBUSY."""
        try:
            with libzfs.ZFS() as zfs:
                zfs.destroy(name, force=options['force'])
        except libzfs.ZFSException as e:
            errno_ = errno.EFAULT
            if e.code == libzfs.Error.UMOUNTFAILED:
                errno_ = errno.EBUSY
            raise CallError(str(e), errno_)
        else:
            return True
    @accepts(
        Str('name'),
        List('new', default=None, null=True),
        List('existing', items=[
            Dict(
                'attachvdev',
                Str('target'),
                Str('type', enum=['DISK']),
                Str('path'),
            ),
        ], null=True, default=None),
    )
    @job()
    def extend(self, job, name, new, existing):
        """
        Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.
        """
        if new is None and existing is None:
            raise CallError('New or existing vdevs must be provided', errno.EINVAL)
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                if new:
                    topology = convert_topology(zfs, new)
                    pool.attach_vdevs(topology)
                # Make sure we can find all target vdev
                for i in (existing or []):
                    target = find_vdev(pool, i['target'])
                    if target is None:
                        raise CallError(f"Failed to find vdev for {i['target']}", errno.EINVAL)
                    i['target'] = target
                # all targets resolved; now perform the attaches
                for i in (existing or []):
                    newvdev = libzfs.ZFSVdev(zfs, i['type'].lower())
                    newvdev.path = i['path']
                    i['target'].attach(newvdev)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)
    def query_imported_fast(self, name_filters=None):
        """Lockless listing of imported pools (guid + name only)."""
        # the equivalent of running `zpool list -H -o guid,name` from cli
        # name_filters will be a list of pool names
        return query_imported_fast_impl(name_filters)
    @accepts(Str('pool'))
    def guid_fast(self, pool):
        """
        Lockless read of zpool guid. Raises FileNotFoundError
        if pool not imported.
        """
        return guid_fast_impl(pool)
    @accepts(Str('pool'))
    def state_fast(self, pool):
        """
        Lockless read of zpool state. Raises FileNotFoundError
        if pool not imported.
        """
        return state_fast_impl(pool)
    def validate_draid_configuration(self, topology_type, numdisks, nparity, vdev):
        """Validate a draid vdev layout via libzfs, returning a
        ValidationErrors collection (empty when the layout is valid)."""
        verrors = ValidationErrors()
        try:
            libzfs.validate_draid_configuration(
                numdisks, nparity, vdev['draid_spare_disks'], vdev['draid_data_disks'],
            )
        except libzfs.ZFSException as e:
            verrors.add(
                f'topology.{topology_type}.type',
                str(e),
            )
        return verrors
| 6,112 | Python | .py | 153 | 27.751634 | 107 | 0.530313 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,757 | validation_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/validation_utils.py | import libzfs
from .utils import zvol_name_to_path
def check_zvol_in_boot_pool_using_name(zvol_name: str) -> bool:
    """Return whether the named zvol lives on the boot pool."""
    zvol_path = zvol_name_to_path(zvol_name)
    return check_zvol_in_boot_pool_using_path(zvol_path)
def check_zvol_in_boot_pool_using_path(zvol_path: str) -> bool:
    """Return whether the given /dev/zvol path belongs to the boot pool."""
    # local import — presumably avoids a circular module dependency; confirm
    from middlewared.plugins.boot import BOOT_POOL_NAME
    boot_zvol_prefix = f'/dev/zvol/{BOOT_POOL_NAME}/'
    return zvol_path.startswith(boot_zvol_prefix)
def validate_pool_name(name: str) -> bool:
    """Return whether `name` is a valid ZFS pool name (delegates to libzfs)."""
    return libzfs.validate_pool_name(name)
def validate_dataset_name(name: str) -> bool:
    """Return whether `name` is a valid ZFS dataset name (delegates to libzfs)."""
    return libzfs.validate_dataset_name(name)
def validate_snapshot_name(name: str) -> bool:
    """Return whether `name` is a valid ZFS snapshot name (delegates to libzfs)."""
    return libzfs.validate_snapshot_name(name)
| 658 | Python | .py | 13 | 46.923077 | 75 | 0.752366 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,758 | zfs_events.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/zfs_events.py | import threading
import libzfs
from middlewared.alert.base import (
Alert, AlertCategory, AlertClass, AlertLevel, OneShotAlertClass, SimpleOneShotAlertClass
)
from middlewared.plugins.boot import BOOT_POOL_NAME
from middlewared.utils.threading import start_daemon_thread
# cache key holding the pool-health summary; invalidated on zpool state changes
CACHE_POOLS_STATUSES = 'system.system_health_pools'
# pool name -> running ScanWatch instance for in-progress scrubs/resilvers
SCAN_THREADS = {}
class ScanWatch(object):
    """Poll a pool's scrub/resilver progress every 2 seconds and publish
    `zfs.pool.scan` events until the scan completes or `cancel` is called."""
    def __init__(self, middleware, pool):
        self.middleware = middleware
        self.pool = pool
        self._cancel = threading.Event()
    def run(self):
        """Poll loop; runs in a daemon thread."""
        while not self._cancel.wait(2):
            with libzfs.ZFS() as zfs:
                scan = zfs.get(self.pool).scrub.asdict()
            state = scan['state']
            if state == 'SCANNING':
                self.send_scan(scan)
            elif state == 'FINISHED':
                # the final FINISHED event is emitted on devd event arrival,
                # not here — this thread simply stops polling
                break
    def send_scan(self, scan=None):
        """Publish the current (or given) scan state as an event."""
        if not scan:
            with libzfs.ZFS() as zfs:
                scan = zfs.get(self.pool).scrub.asdict()
        fields = {'scan': scan, 'name': self.pool}
        self.middleware.send_event('zfs.pool.scan', 'CHANGED', fields=fields)
    def cancel(self):
        """Signal the poll loop to stop."""
        self._cancel.set()
class ScrubNotStartedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert raised when a scrub fails to start; keyed by pool."""
    category = AlertCategory.TASKS
    level = AlertLevel.WARNING
    title = "Scrub Failed to Start"
    text = "%s."
    deleted_automatically = False
    async def create(self, args):
        # key on the pool so one alert exists per pool
        return Alert(self.__class__, args["text"], _key=args["pool"])
    async def delete(self, alerts, query):
        # drop only the alert(s) whose key matches the queried pool
        return [alert for alert in alerts if alert.key != query]
class ScrubStartedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Informational one-shot alert emitted when a pool scrub starts."""
    category = AlertCategory.TASKS
    level = AlertLevel.INFO
    title = "Scrub Started"
    text = "Scrub of pool %r started."
    deleted_automatically = False
class ScrubFinishedAlertClass(AlertClass, SimpleOneShotAlertClass):
    """Informational one-shot alert emitted when a pool scrub finishes."""
    category = AlertCategory.TASKS
    level = AlertLevel.INFO
    title = "Scrub Finished"
    text = "Scrub of pool %r finished."
    deleted_automatically = False
async def resilver_scrub_start(middleware, pool_name):
    """Start a ScanWatch poller thread for `pool_name` unless one is already
    running (or no pool name was supplied)."""
    if not pool_name or pool_name in SCAN_THREADS:
        return
    watcher = ScanWatch(middleware, pool_name)
    SCAN_THREADS[pool_name] = watcher
    start_daemon_thread(target=watcher.run)
async def resilver_scrub_stop_abort(middleware, pool_name):
    """Stop the ScanWatch poller for `pool_name` (if any) and emit one last
    event carrying the scan's final (FINISHED/aborted) state."""
    if not pool_name:
        return
    watcher = SCAN_THREADS.pop(pool_name, None)
    if watcher is None:
        return
    await middleware.run_in_thread(watcher.cancel)
    # Send the last event with SCRUB/RESILVER as FINISHED
    await middleware.run_in_thread(watcher.send_scan)
async def scrub_finished(middleware, pool_name):
    """Re-raise the ScrubFinished alert for `pool_name` (delete then create
    so a stale alert for the same pool is replaced)."""
    for method in ('alert.oneshot_delete', 'alert.oneshot_create'):
        await middleware.call(method, 'ScrubFinished', pool_name)
async def retrieve_pool_from_db(middleware, pool_name):
    """Return the pool.query record for `pool_name`, or None when the
    database has no record of it (callers then skip sending events)."""
    matches = await middleware.call('pool.query', [['name', '=', pool_name]])
    if matches:
        return matches[0]
    return None
# one-shot alert classes refreshed whenever a pool is imported/destroyed/synced
POOL_ALERTS = ('PoolUSBDisks', 'PoolUpgraded')
async def pool_alerts_args(middleware, pool_name):
    """Build the args dict the POOL_ALERTS one-shot alert sources expect."""
    return {
        'pool_name': pool_name,
        'disks': await middleware.call('device.get_disks'),
    }
async def zfs_events(middleware, data):
    """Dispatch a single ZFS kernel/ZED event.

    `data` is the event payload — keys used here: 'class' (event id), 'pool',
    'guid', 'history_dsname', 'history_internal_name'. Depending on the event
    this starts/stops scan watchers, invalidates caches, refreshes pool
    alerts, and emits pool/dataset change events.
    """
    event_id = data['class']
    if event_id in ('sysevent.fs.zfs.resilver_start', 'sysevent.fs.zfs.scrub_start'):
        # begin polling scan progress for this pool
        await resilver_scrub_start(middleware, data.get('pool'))
    elif event_id in (
        'sysevent.fs.zfs.resilver_finish', 'sysevent.fs.zfs.scrub_finish', 'sysevent.fs.zfs.scrub_abort'
    ):
        # stop polling and emit the final scan state
        await resilver_scrub_stop_abort(middleware, data.get('pool'))
        if event_id == 'sysevent.fs.zfs.scrub_finish':
            await scrub_finished(middleware, data.get('pool'))
    elif event_id == 'resource.fs.zfs.statechange':
        # pool health may have changed; drop the cached summary and notify
        await middleware.call('cache.pop', CACHE_POOLS_STATUSES)
        pool = await retrieve_pool_from_db(middleware, data.get('pool'))
        if not pool:
            return
        middleware.send_event('pool.query', 'CHANGED', id=pool['id'], fields=pool)
    elif event_id in (
        'ereport.fs.zfs.checksum',
        'ereport.fs.zfs.io',
        'ereport.fs.zfs.data',
        'ereport.fs.zfs.vdev.clear',
    ):
        # device error (or error-counter clear): volume status alerts are stale
        await middleware.call('cache.pop', 'VolumeStatusAlerts')
    elif event_id in (
        'sysevent.fs.zfs.config_sync',
        'sysevent.fs.zfs.pool_destroy',
        'sysevent.fs.zfs.pool_import',
    ):
        pool_name = data.get('pool')
        pool_guid = data.get('guid')
        if pool_name:
            await middleware.call('cache.pop', 'VolumeStatusAlerts')
            if pool_name == BOOT_POOL_NAME:
                # a change was made to the boot drive, so let's clear
                # the disk mapping for this pool
                await middleware.call('boot.clear_disks_cache')
            args = await pool_alerts_args(middleware, pool_name)
            if event_id.endswith('pool_import'):
                # refresh (delete then recreate) the per-pool one-shot alerts
                for i in POOL_ALERTS:
                    await middleware.call('alert.oneshot_delete', i, pool_name)
                    await middleware.call('alert.oneshot_create', i, args)
            elif event_id.endswith('pool_destroy'):
                for i in POOL_ALERTS:
                    await middleware.call('alert.oneshot_delete', i, pool_name)
            elif event_id.endswith('config_sync'):
                if pool_guid and (pool := await retrieve_pool_from_db(middleware, pool_name)):
                    # This event is issued whenever a vdev change is done to a pool
                    # Checking pool_guid ensures that we do not do this on creation/deletion
                    # of pool as we expect the relevant event to be handled from the service
                    # endpoints because there are other operations related to create/delete
                    # which when done, we consider the create/delete operation as complete
                    middleware.send_event('pool.query', 'CHANGED', id=pool['id'], fields=pool)
                    for i in POOL_ALERTS:
                        await middleware.call('alert.oneshot_delete', i, pool_name)
                        await middleware.call('alert.oneshot_create', i, args)
    elif (
        event_id == 'sysevent.fs.zfs.history_event' and data.get('history_dsname') and data.get('history_internal_name')
    ):
        # we need to send events for dataset creation/updating/deletion in case it's done via cli
        event_type = data['history_internal_name']
        ds_id = data['history_dsname']
        if await middleware.call('pool.dataset.is_internal_dataset', ds_id):
            # We should not raise any event for system internal datasets
            return
        # We are not handling create/changed events because it takes a toll on middleware when we are replicating
        # datasets and repeated calls to the process pool can result in tasks getting blocked for longer periods
        # of time and middleware itself getting slow as well to process requests in a timely manner
        # We are now handling create/changed events whenever changes are made via our API
        if event_type == 'destroy':
            if ds_id.split('/')[-1].startswith('%'):
                # Ignore deletion of hidden clones such as `%recv` dataset created by replication
                return
            middleware.send_event('pool.dataset.query', 'REMOVED', id=ds_id)
            # purge any stored encryption keys for the dataset and its children
            await middleware.call(
                'pool.dataset.delete_encrypted_datasets_from_db', [
                    ['OR', [['name', '=', data['history_dsname']], ['name', '^', f'{data["history_dsname"]}/']]]
                ]
            )
            await middleware.call_hook('dataset.post_delete', data['history_dsname'])
async def setup(middleware):
    """Plugin setup: register the scan event/hook and seed boot-pool alerts."""
    middleware.event_register('zfs.pool.scan', 'Progress of pool resilver/scrub.', roles=['POOL_SCRUB_READ'])
    middleware.register_hook('zfs.pool.events', zfs_events, sync=False)
    # middleware does not receive `sysevent.fs.zfs.pool_import` or `sysevent.fs.zfs.config_sync` events on the boot pool
    # import because it happens before middleware is started. We have to manually process these alerts for the boot pool
    pool_name = await middleware.call('boot.pool_name')
    args = await pool_alerts_args(middleware, pool_name)
    for i in POOL_ALERTS:
        await middleware.call('alert.oneshot_delete', i, pool_name)
        await middleware.call('alert.oneshot_create', i, args)
| 8,798 | Python | .py | 178 | 40.320225 | 120 | 0.649364 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,759 | dataset_encryption.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/dataset_encryption.py | import libzfs
from middlewared.schema import accepts, Any, Bool, Dict, Int, List, Ref, Str
from middlewared.service import CallError, job, Service
from middlewared.utils import filter_list
from .dataset_utils import flatten_datasets
from .utils import unlocked_zvols_fast, zvol_path_to_name
class ZFSDatasetService(Service):
    """ZFS dataset encryption/key management and fast zvol queries."""
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True
    @accepts(
        Ref('query-filters'),
        Ref('query-options'),
        List(
            'additional_information',
            items=[Str('desideratum', enum=['SIZE', 'RO', 'DEVID', 'ATTACHMENT'])]
        )
    )
    def unlocked_zvols_fast(self, filters, options, additional_information):
        """
        Fast check for zvol information. Supports `additional_information` to
        expand output on an as-needed basis. Adding additional_information to
        the output may impact performance of 'fast' method.
        """
        def get_attachments():
            # map zvol name -> consumer for each attachment type (iSCSI
            # extents, VM disks, virt instance disks)
            extents = self.middleware.call_sync(
                'iscsi.extent.query', [('type', '=', 'DISK')], {'select': ['path', 'type']}
            )
            iscsi_zvols = {
                zvol_path_to_name('/dev/' + i['path']): i for i in extents
            }
            vm_devices = self.middleware.call_sync('vm.device.query', [['dtype', '=', 'DISK']])
            vm_zvols = {
                zvol_path_to_name(i['attributes']['path']): i for i in vm_devices
            }
            instance_zvols = {}
            for instance in self.middleware.call_sync('virt.instance.query'):
                for device in self.middleware.call_sync('virt.instance.device_list', instance['id']):
                    if device['dev_type'] != 'DISK':
                        continue
                    if not device['source'] or not device['source'].startswith('/dev/zvol/'):
                        continue
                    # Remove /dev/zvol/ from source
                    instance_zvols[device['source'][10:]] = instance
            return {
                'iscsi.extent.query': iscsi_zvols,
                'vm.devices.query': vm_zvols,
                'virt.instance.query': instance_zvols,
            }
        data = {}
        if 'ATTACHMENT' in additional_information:
            # attachment lookups require extra middleware queries, hence opt-in
            data['attachments'] = get_attachments()
        zvol_list = list(unlocked_zvols_fast(additional_information, data).values())
        return filter_list(zvol_list, filters, options)
    def locked_datasets(self, names=None):
        """Return [{'id', 'mountpoint'}] for encrypted datasets whose keys are
        not loaded (plus any dataset currently being locked).

        `names`, when given, restricts the query; redundant child names are
        dropped since querying a parent also returns its children.
        """
        query_filters = []
        if names is not None:
            names_optimized = []
            # shortest names first so parents are kept and their children skipped
            for name in sorted(names, key=len):
                if not any(name.startswith(f'{existing_name}/') for existing_name in names_optimized):
                    names_optimized.append(name)
            query_filters.append(['id', 'in', names_optimized])
        result = flatten_datasets(self.middleware.call_sync('zfs.dataset.query', query_filters, {
            'extra': {
                'flat': False,  # So child datasets are also queried
                'properties': ['encryption', 'keystatus', 'mountpoint']
            },
        }))
        post_filters = [['encrypted', '=', True]]
        try:
            # a dataset in the middle of being locked still has its key loaded;
            # include it (and its children) so callers treat it as locked
            about_to_lock_dataset = self.middleware.call_sync('cache.get', 'about_to_lock_dataset')
        except KeyError:
            about_to_lock_dataset = None
        post_filters.append([
            'OR', [['key_loaded', '=', False]] + (
                [['id', '=', about_to_lock_dataset], ['id', '^', f'{about_to_lock_dataset}/']]
                if about_to_lock_dataset else []
            )
        ])
        return [
            {
                'id': dataset['id'],
                'mountpoint': dataset['properties'].get('mountpoint', {}).get('value'),
            }
            for dataset in filter_list(result, post_filters)
        ]
    def common_load_dataset_checks(self, id_, ds):
        """Raise CallError unless `ds` is encrypted and its key is not yet loaded."""
        self.common_encryption_checks(id_, ds)
        if ds.key_loaded:
            raise CallError(f'{id_} key is already loaded')
    def common_encryption_checks(self, id_, ds):
        """Raise CallError unless `ds` is an encrypted dataset."""
        if not ds.encrypted:
            raise CallError(f'{id_} is not encrypted')
@accepts(
Str('id'),
Dict(
'load_key_options',
Bool('mount', default=True),
Bool('recursive', default=False),
Any('key', default=None, null=True),
Str('key_location', default=None, null=True),
),
)
def load_key(self, id_, options):
mount_ds = options.pop('mount')
recursive = options.pop('recursive')
try:
with libzfs.ZFS() as zfs:
ds = zfs.get_dataset(id_)
self.common_load_dataset_checks(id_, ds)
ds.load_key(**options)
except libzfs.ZFSException as e:
self.logger.error(f'Failed to load key for {id_}', exc_info=True)
raise CallError(f'Failed to load key for {id_}: {e}')
else:
if mount_ds:
self.middleware.call_sync('zfs.dataset.mount', id_, {'recursive': recursive})
    @accepts(
        Str('id'),
        Dict(
            'check_key',
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        )
    )
    def check_key(self, id_, options):
        """
        Returns `true` if the `key` is valid, `false` otherwise.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id_)
                # only meaningful for encrypted datasets
                self.common_encryption_checks(id_, ds)
                return ds.check_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to check key for {id_}', exc_info=True)
            raise CallError(f'Failed to check key for {id_}: {e}')
    @accepts(
        Str('id'),
        Dict(
            'unload_key_options',
            Bool('recursive', default=False),
            Bool('force_umount', default=False),
            Bool('umount', default=False),
        )
    )
    def unload_key(self, id_, options):
        """
        Unload the encryption key of dataset `id_`, optionally unmounting it first.

        `umount` triggers a preliminary unmount (forced when `force_umount` is
        set) but only when the dataset has a real mountpoint (not 'none').
        """
        force = options.pop('force_umount')
        # Unmount first if requested -- ZFS refuses to unload the key of a
        # mounted dataset.
        if options.pop('umount') and self.middleware.call_sync(
            'zfs.dataset.query', [['id', '=', id_]], {'extra': {'retrieve_children': False}, 'get': True}
        )['properties'].get('mountpoint', {}).get('value', 'none') != 'none':
            self.middleware.call_sync('zfs.dataset.umount', id_, {'force': force})
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id_)
                self.common_encryption_checks(id_, ds)
                if not ds.key_loaded:
                    raise CallError(f'{id_}\'s key is not loaded')
                ds.unload_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to unload key for {id_}', exc_info=True)
            raise CallError(f'Failed to unload key for {id_}: {e}')
    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Dict(
                'encryption_properties',
                Str('keyformat'),
                Str('keylocation'),
                Int('pbkdf2iters')
            ),
            Bool('load_key', default=True),
            Any('key', default=None, null=True),
        ),
    )
    def change_key(self, id_, options):
        """
        Change the encryption key (and related properties) of dataset `id_`.

        `encryption_properties` may set keyformat/keylocation/pbkdf2iters;
        `load_key` controls whether the new key is loaded afterwards.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id_)
                # Only valid for encrypted datasets; raises otherwise.
                self.common_encryption_checks(id_, ds)
                ds.change_key(props=options['encryption_properties'], load_key=options['load_key'], key=options['key'])
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to change key for {id_}', exc_info=True)
            raise CallError(f'Failed to change key for {id_}: {e}')
@accepts(
Str('id'),
Dict(
'change_encryption_root_options',
Bool('load_key', default=True),
)
)
def change_encryption_root(self, id_, options):
try:
with libzfs.ZFS() as zfs:
ds = zfs.get_dataset(id_)
ds.change_key(load_key=options['load_key'], inherit=True)
except libzfs.ZFSException as e:
raise CallError(f'Failed to change encryption root for {id_}: {e}')
@accepts(Str('name'), List('params', private=True))
@job()
def bulk_process(self, job, name, params):
f = getattr(self, name, None)
if not f:
raise CallError(f'{name} method not found in zfs.dataset')
statuses = []
for i in params:
result = error = None
try:
result = f(*i)
except Exception as e:
error = str(e)
finally:
statuses.append({'result': result, 'error': error})
return statuses
| 8,907 | Python | .py | 216 | 29.680556 | 119 | 0.538382 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,760 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/utils.py | # -*- coding=utf-8 -*-
import enum
import logging
import os
import re
from middlewared.service_exception import MatchNotFound
from middlewared.utils.filesystem.constants import ZFSCTL
from middlewared.plugins.audit.utils import (
AUDIT_DEFAULT_FILL_CRITICAL, AUDIT_DEFAULT_FILL_WARNING
)
from middlewared.utils.tdb import (
get_tdb_handle,
TDBBatchAction,
TDBBatchOperation,
TDBDataType,
TDBOptions,
TDBPathType,
)
logger = logging.getLogger(__name__)
__all__ = ["zvol_name_to_path", "zvol_path_to_name", "get_snapshot_count_cached"]
LEGACY_USERPROP_PREFIX = 'org.freenas'
USERPROP_PREFIX = 'org.truenas'
ZD_PARTITION = re.compile(r'zd[0-9]+p[0-9]+$')
SNAP_COUNT_TDB_NAME = 'snapshot_count'
SNAP_COUNT_TDB_OPTIONS = TDBOptions(TDBPathType.PERSISTENT, TDBDataType.JSON)
class TNUserProp(enum.Enum):
DESCRIPTION = f'{LEGACY_USERPROP_PREFIX}:description'
QUOTA_WARN = f'{LEGACY_USERPROP_PREFIX}:quota_warning'
QUOTA_CRIT = f'{LEGACY_USERPROP_PREFIX}:quota_critical'
REFQUOTA_WARN = f'{LEGACY_USERPROP_PREFIX}:refquota_warning'
REFQUOTA_CRIT = f'{LEGACY_USERPROP_PREFIX}:refquota_critical'
MANAGED_BY = f'{USERPROP_PREFIX}:managedby'
def default(self):
match self:
case TNUserProp.QUOTA_WARN:
return AUDIT_DEFAULT_FILL_WARNING
case TNUserProp.QUOTA_CRIT:
return AUDIT_DEFAULT_FILL_CRITICAL
case TNUserProp.REFQUOTA_WARN:
return AUDIT_DEFAULT_FILL_WARNING
case TNUserProp.REFQUOTA_CRIT:
return AUDIT_DEFAULT_FILL_CRITICAL
case _:
raise ValueError(f'{self.value}: no default value is set')
def quotas():
return [(a.value, a.default()) for a in [
TNUserProp.QUOTA_WARN,
TNUserProp.QUOTA_CRIT,
TNUserProp.REFQUOTA_WARN,
TNUserProp.REFQUOTA_CRIT
]]
def values():
return [a.value for a in TNUserProp]
def zvol_name_to_path(name):
    """Map a zvol dataset name to its /dev/zvol device path (spaces encoded as '+')."""
    encoded = name.replace(" ", "+")
    return os.path.join("/dev/zvol", encoded)
def zvol_path_to_name(path):
    """Inverse of zvol_name_to_path(): /dev/zvol device path -> zvol dataset name."""
    prefix = "/dev/zvol/"
    if not path.startswith(prefix):
        raise ValueError(f"Invalid zvol path: {path!r}")
    # '+' is the on-disk encoding for spaces in the dataset name.
    return path[len(prefix):].replace("+", " ")
def unlocked_zvols_fast(options=None, data=None):
    """
    Get zvol information from /sys/block and /dev/zvol.
    This is quite a bit faster than using py-libzfs.

    supported options:
    `SIZE` - size of zvol
    `DEVID` - the device id of the zvol
    `RO` - whether zvol is flagged as ro (snapshot)
    `ATTACHMENT` - where zvol is currently being used

    If 'ATTACHMENT' is used, then a dict of attachments
    should be provided under `data` key `attachments`.
    """
    def get_size(zvol_dev):
        # /sys/block/<dev>/size is in 512-byte sectors; convert to bytes.
        with open(f'/sys/block/{zvol_dev}/size', 'r') as f:
            nblocks = f.readline()
        return int(nblocks[:-1]) * 512

    def get_devid(zvol_dev):
        # major:minor device number string, newline stripped.
        with open(f'/sys/block/{zvol_dev}/dev', 'r') as f:
            devid = f.readline()
        return devid[:-1]

    def get_ro(zvol_dev):
        # '1' means the block device is read-only (e.g. a snapshot dev).
        with open(f'/sys/block/{zvol_dev}/ro', 'r') as f:
            ro = f.readline()
        return ro[:-1] == '1'

    def get_attachment(zvol_vdev, data):
        # Return first attachment entry matching this zvol; pops the entry so
        # each attachment is reported at most once.
        out = None
        for method, attachment in data.items():
            val = attachment.pop(zvol_vdev, None)
            if val is not None:
                out = {
                    'method': method,
                    'data': val
                }
                break
        return out

    def get_zvols(info_level, data):
        # Walk /dev/zvol; each regular symlink there points at the zd* device.
        out = {}
        zvol_path = '/dev/zvol/'
        do_get_size = 'SIZE' in info_level
        do_get_dev = 'DEVID' in info_level
        do_get_ro = 'RO' in info_level
        do_get_attachment = 'ATTACHMENT' in info_level
        for root, dirs, files in os.walk(zvol_path):
            if not files:
                continue
            for file in files:
                path = root + '/' + file
                zvol_name = zvol_path_to_name(path)
                try:
                    dev_name = os.readlink(path).split('/')[-1]
                except Exception:
                    # this happens if the file is a regular file
                    # saw this happend when a user logged into a system
                    # via ssh and tried to "copy" a zvol using "dd" on
                    # the cli and made a typo in the command. This created
                    # a regular file. When we readlink() that file, it
                    # crashed with OSError 22 Invalid Argument so we just
                    # skip this file
                    continue
                if ZD_PARTITION.match(dev_name):
                    # skip partitions on zvols (zd0p1 etc.); only whole devices
                    continue
                out.update({
                    zvol_name: {
                        'path': path,
                        'name': zvol_name,
                        'dev': dev_name,
                    }
                })
                if do_get_size is True:
                    out[zvol_name]['size'] = get_size(dev_name)
                if do_get_dev is True:
                    out[zvol_name]['devid'] = get_devid(dev_name)
                if do_get_ro is True:
                    out[zvol_name]['ro'] = get_ro(dev_name)
                if do_get_attachment:
                    out[zvol_name]['attachment'] = get_attachment(zvol_name, data.get('attachments', {}))
        return out

    info_level = options or []
    zvols = get_zvols(info_level, data or {})
    return zvols
def get_snapshot_count_cached(middleware, lz, datasets, update_datasets=False, remove_snapshots_changed=False):
    """
    Try retrieving snapshot count for dataset from cache if the
    `snapshots_changed` timestamp hasn't changed. If it has,
    then retrieve new snapshot count in most optimized way possible
    and cache new value

    Parameters:
    ----------
    middleware - middleware object
    lz - libzfs handle, e.g. libzfs.ZFS()
    datasets - iterable containing dataset information as returned by
        libzfs.datasets_serialized
    update_datasets - bool - optional - insert `snapshot_count` key into datasets passed
        into this method
    remove_snapshots_changed - bool - remove the snapshots_changed key from dataset properties
        after processing. This is to hide the fact that we had to retrieve this property to
        determine whether to return cached value.

    Returns:
    -------
    Dict containing following:
    key (dataset name) : value (int - snapshot count)
    """
    def get_mountpoint(zhdl):
        # Return the usable mountpoint or None ('legacy' and unset don't count).
        mp = zhdl['properties'].get('mountpoint')
        if mp is None:
            return None
        if mp['parsed'] and mp['parsed'] != 'legacy':
            return mp['parsed']
        return None

    def entry_get_cnt(zhdl):
        """
        Retrieve snapshot count in most efficient way possible. If dataset is mounted, then
        retrieve from st_nlink otherwise, iter snapshots from dataset handle
        """
        if mp := get_mountpoint(zhdl):
            try:
                st = os.stat(f'{mp}/.zfs/snapshot')
            except Exception:
                # Fall through to the (slower) libzfs enumeration below.
                pass
            else:
                if st.st_ino == ZFSCTL.INO_SNAPDIR.value:
                    # Each snapshot is a subdir of .zfs/snapshot; subtract
                    # the '.' and '..' link count.
                    return st.st_nlink - 2
        return len(lz.snapshots_serialized(['name'], datasets=[zhdl['name']], recursive=False))

    def get_entry_fetch(key):
        """ retrieve cached snapshot count from persistent key-value store """
        try:
            with get_tdb_handle(SNAP_COUNT_TDB_NAME, SNAP_COUNT_TDB_OPTIONS) as hdl:
                entry = hdl.get(key)
        except MatchNotFound:
            # Sentinel entry forces a recount below (cnt -1 never matches).
            entry = {
                'changed_ts': None,
                'cnt': -1
            }
        return entry

    def process_entry(out, zhdl, batch_ops):
        """
        This method processes the dataset entry and
        sets new value in tdb file if necessary. Since
        we may be consuming "flattened" datasets here, there
        is potential for duplicate entries. Hence, check for
        whether we've already handled the dataset for this run.
        """
        existing_entry = out.get(zhdl['name'])
        if existing_entry:
            if update_datasets:
                zhdl['snapshot_count'] = existing_entry
            if remove_snapshots_changed:
                zhdl['properties'].pop('snapshots_changed', None)
            return
        changed_ts = zhdl['properties']['snapshots_changed']['parsed']
        cache_key = f'SNAPCNT%{zhdl["name"]}'
        entry = get_entry_fetch(cache_key)
        if entry['changed_ts'] != changed_ts:
            # Cache is stale (or missing) -- recount and queue the update.
            entry['cnt'] = entry_get_cnt(zhdl)
            entry['changed_ts'] = changed_ts
            # There are circumstances in which legacy datasets
            # may not have this property populated. We don't
            # want cache insertion with NULL key to avoid
            # collisions
            if changed_ts:
                batch_ops.append(TDBBatchOperation(
                    action=TDBBatchAction.SET,
                    key=cache_key,
                    value=entry
                ))
        out[zhdl['name']] = entry['cnt']
        if update_datasets:
            zhdl['snapshot_count'] = entry['cnt']
        if remove_snapshots_changed:
            zhdl['properties'].pop('snapshots_changed', None)

    def iter_datasets(out, datasets_in, batch_ops):
        # Depth-first traversal of the serialized dataset tree.
        for ds in datasets_in:
            process_entry(out, ds, batch_ops)
            iter_datasets(out, ds.get('children', []), batch_ops)

    out = {}
    batch_ops = []
    iter_datasets(out, datasets, batch_ops)
    if batch_ops:
        # Commit changes to snapshot counts under a transaction lock
        try:
            with get_tdb_handle(SNAP_COUNT_TDB_NAME, SNAP_COUNT_TDB_OPTIONS) as hdl:
                hdl.batch_op(batch_ops)
        except Exception:
            # Best effort: a failed cache write only costs a recount next time.
            logger.warning('Failed to update cached snapshot counts', exc_info=True)
    return out
| 10,001 | Python | .py | 244 | 30.606557 | 111 | 0.586392 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,761 | dataset_actions.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/dataset_actions.py | import errno
import libzfs
import os
from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import CallError, Service
from middlewared.utils.mount import getmntinfo
from middlewared.utils.path import is_child
def handle_ds_not_found(error_code: int, ds_name: str):
    """Translate a libzfs NOENT error code into a CallError with ENOENT; no-op otherwise."""
    if error_code != libzfs.Error.NOENT.value:
        return
    raise CallError(f'Dataset {ds_name!r} not found', errno.ENOENT)
class ZFSDatasetService(Service):
    """libzfs-backed helpers for dataset lookup, mount/umount, rename and property inheritance."""

    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True

    def path_to_dataset(self, path, mntinfo=None):
        """
        Convert `path` to a ZFS dataset name. This
        performs lookup through mountinfo.

        Anticipated error conditions are that path is not
        on ZFS or if the boot pool underlies the path. In
        addition to this, all the normal exceptions that
        can be raised by a failed call to os.stat() are
        possible.
        """
        boot_pool = self.middleware.call_sync("boot.pool_name")
        st = os.stat(path)
        if mntinfo is None:
            # Look up mount information for the device backing `path`.
            mntinfo = getmntinfo(st.st_dev)[st.st_dev]
        else:
            mntinfo = mntinfo[st.st_dev]
        ds_name = mntinfo['mount_source']
        if mntinfo['fs_type'] != 'zfs':
            raise CallError(f'{path}: path is not a ZFS filesystem')
        if is_child(ds_name, boot_pool):
            raise CallError(f'{path}: path is on boot pool')
        return ds_name

    def child_dataset_names(self, path):
        """Return names of the child datasets of the dataset mounted at `path`."""
        try:
            with libzfs.ZFS() as zfs:
                return [child.name for child in zfs.get_dataset_by_path(path).children]
        except libzfs.ZFSException as e:
            # "datsets" typo in the error message fixed.
            raise CallError(f'Failed retrieving child datasets for {path} with error {e}')

    @accepts(
        Str('name'),
        Dict(
            'options',
            Bool('recursive', default=False),
            Bool('force_mount', default=False),
        )
    )
    def mount(self, name, options):
        """Mount dataset `name`; `recursive` mounts its children too."""
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                if options['recursive']:
                    # Ignore per-child errors so a single failing child does
                    # not abort mounting of the remaining datasets.
                    dataset.mount_recursive(ignore_errors=True, force_mount=options['force_mount'])
                else:
                    dataset.mount()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to mount dataset', exc_info=True)
            handle_ds_not_found(e.code, name)
            raise CallError(f'Failed to mount dataset: {e}')

    @accepts(Str('name'), Dict('options', Bool('force', default=False)))
    def umount(self, name, options):
        """Unmount dataset `name` (`options.force` forces the unmount)."""
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.umount(force=options['force'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to umount dataset', exc_info=True)
            handle_ds_not_found(e.code, name)
            raise CallError(f'Failed to umount dataset: {e}')

    @accepts(
        Str('dataset'),
        Dict(
            'options',
            Str('new_name', required=True, empty=False),
            Bool('recursive', default=False)
        )
    )
    def rename(self, name, options):
        """Rename dataset `name` to `options.new_name`; `recursive` renames snapshots too."""
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.rename(options['new_name'], recursive=options['recursive'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to rename dataset', exc_info=True)
            handle_ds_not_found(e.code, name)
            raise CallError(f'Failed to rename dataset: {e}')

    def promote(self, name):
        """Promote clone dataset `name` so it no longer depends on its origin snapshot."""
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.promote()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to promote dataset', exc_info=True)
            handle_ds_not_found(e.code, name)
            raise CallError(f'Failed to promote dataset: {e}')

    def inherit(self, name, prop, recursive=False):
        """
        Reset property `prop` on dataset `name` to its inherited value.

        Raises CallError(EPROTONOSUPPORT) carrying the libzfs error name when
        inheriting `mountpoint` fails due to NFS/SMB share errors, so the
        caller can fall back to setting the property to "off".
        """
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                zprop = dataset.properties.get(prop)
                if not zprop:
                    raise CallError(f'Property {prop!r} not found.', errno.ENOENT)
                zprop.inherit(recursive=recursive)
        except libzfs.ZFSException as e:
            handle_ds_not_found(e.code, name)

            if prop != 'mountpoint':
                raise CallError(str(e))

            err = e.code.name
            if err not in ("SHARENFSFAILED", "SHARESMBFAILED"):
                raise CallError(str(e))

            # We set /etc/exports.d to be immutable, which
            # results on inherit of mountpoint failing with
            # SHARENFSFAILED. We give special return in this case
            # so that caller can set this property to "off"
            raise CallError(err, errno.EPROTONOSUPPORT)
| 5,066 | Python | .py | 120 | 31.475 | 99 | 0.593503 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,762 | snapshot.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/snapshot.py | import copy
import errno
import libzfs
from middlewared.schema import accepts, Bool, Dict, List, Str
from middlewared.service import CallError, CRUDService, filterable, private, ValidationErrors
from middlewared.service_exception import InstanceNotFound
from middlewared.utils import filter_list, filter_getattrs
from middlewared.validators import Match, ReplicationSnapshotNamingSchema
from .utils import get_snapshot_count_cached
from .validation_utils import validate_snapshot_name
class ZFSSnapshot(CRUDService):
    """CRUD service over ZFS snapshots (query/create/update/delete via libzfs)."""

    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'zfs.snapshot'
        process_pool = True
        cli_namespace = 'storage.snapshot'
        role_prefix = 'SNAPSHOT'
        role_separate_delete = True

    @private
    def count(self, dataset_names='*', recursive=False):
        """Return {dataset name: snapshot count}, using the persistent snapshot-count cache."""
        kwargs = {
            'user_props': False,
            'props': ['snapshots_changed'],
            'retrieve_children': (dataset_names == '*' or recursive)
        }
        if dataset_names != '*':
            if not isinstance(dataset_names, list):
                raise ValueError("dataset_names must be '*' or a list")
            kwargs['datasets'] = dataset_names
        try:
            with libzfs.ZFS() as zfs:
                datasets = zfs.datasets_serialized(**kwargs)
                return get_snapshot_count_cached(self.middleware, zfs, datasets)
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @filterable
    def query(self, filters, options):
        """
        Query all ZFS Snapshots with `query-filters` and `query-options`.

        `query-options.extra.holds` specifies whether hold tags for snapshots should be retrieved (false by default)

        `query-options.extra.min_txg` can be specified to limit snapshot retrieval based on minimum transaction group.

        `query-options.extra.max_txg` can be specified to limit snapshot retrieval based on maximum transaction group.
        """
        # Special case for faster listing of snapshot names (#53149)
        filters_attrs = filter_getattrs(filters)
        extra = copy.deepcopy(options['extra'])
        min_txg = extra.get('min_txg', 0)
        max_txg = extra.get('max_txg', 0)
        if (
            (
                options.get('select') == ['name'] or
                options.get('count')
            ) and filters_attrs.issubset({'name', 'pool', 'dataset'})
        ):
            kwargs = {}
            other_filters = []

            # A bare count with no filters can be answered entirely from the
            # cached per-dataset counts -- no snapshot enumeration needed.
            if not filters and options.get('count'):
                snaps = self.count()
                cnt = 0
                for entry in snaps.values():
                    cnt += entry
                return cnt

            # Translate pool/dataset filters into libzfs `datasets` kwargs so
            # enumeration is restricted at the source.
            for f in filters:
                if len(f) == 3 and f[0] in ['pool', 'dataset'] and f[1] in ['=', 'in']:
                    if f[1] == '=':
                        kwargs['datasets'] = [f[2]]
                    else:
                        kwargs['datasets'] = f[2]

                    if f[0] == 'dataset':
                        kwargs['recursive'] = False
                else:
                    other_filters.append(f)

            filters = other_filters
            with libzfs.ZFS() as zfs:
                snaps = zfs.snapshots_serialized(['name'], min_txg=min_txg, max_txg=max_txg, **kwargs)

            if filters or len(options) > 1:
                return filter_list(snaps, filters, options)

            return snaps

        if options['extra'].get('retention'):
            if 'id' not in filter_getattrs(filters) and not options.get('limit'):
                raise CallError('`id` or `limit` is required if `retention` is requested', errno.EINVAL)

        holds = extra.get('holds', False)
        properties = extra.get('properties')
        with libzfs.ZFS() as zfs:
            # Handle `id` filter to avoid getting all snapshots first
            kwargs = dict(holds=holds, mounted=False, props=properties, min_txg=min_txg, max_txg=max_txg)
            if filters and len(filters) == 1 and len(filters[0]) == 3 and filters[0][0] in (
                'id', 'name'
            ) and filters[0][1] == '=':
                kwargs['datasets'] = [filters[0][2]]

            snapshots = zfs.snapshots_serialized(**kwargs)

        # FIXME: awful performance with hundreds/thousands of snapshots
        select = options.pop('select', None)
        result = filter_list(snapshots, filters, options)

        if options['extra'].get('retention'):
            if isinstance(result, list):
                result = self.middleware.call_sync('zettarepl.annotate_snapshots', result)
            elif isinstance(result, dict):
                result = self.middleware.call_sync('zettarepl.annotate_snapshots', [result])[0]

        # `select` is applied manually because it was popped before
        # filter_list so retention annotation could see full entries.
        if select:
            if isinstance(result, list):
                result = [{k: v for k, v in item.items() if k in select} for item in result]
            elif isinstance(result, dict):
                result = {k: v for k, v in result.items() if k in select}

        return result

    @accepts(Dict(
        'snapshot_create',
        Str('dataset', required=True, empty=False),
        Str('name', empty=False),
        Str('naming_schema', empty=False, validators=[ReplicationSnapshotNamingSchema()]),
        Bool('recursive', default=False),
        List('exclude', items=[Str('dataset')]),
        Bool('suspend_vms', default=False),
        Bool('vmware_sync', default=False),
        Dict('properties', additional_attrs=True),
    ))
    def do_create(self, data):
        """
        Take a snapshot from a given dataset.

        Exactly one of `name` / `naming_schema` must be supplied. `exclude`
        (recursive only) skips child datasets; `vmware_sync` / `suspend_vms`
        coordinate with VMware hosts / local VMs around the snapshot.
        """
        dataset = data['dataset']
        recursive = data['recursive']
        exclude = data['exclude']
        properties = data['properties']

        verrors = ValidationErrors()

        name = None
        if 'name' in data and 'naming_schema' in data:
            verrors.add('snapshot_create.naming_schema', 'You can\'t specify name and naming schema at the same time')
        elif 'name' in data:
            name = data['name']
        elif 'naming_schema' in data:
            # We can't do `strftime` here because we are in the process pool and `TZ` environment variable update
            # is not propagated here.
            name = self.middleware.call_sync('replication.new_snapshot_name', data['naming_schema'])
        else:
            verrors.add('snapshot_create.naming_schema', 'You must specify either name or naming schema')

        if exclude:
            if not recursive:
                verrors.add('snapshot_create.exclude', 'This option has no sense for non-recursive snapshots')
            for k in ['vmware_sync', 'properties']:
                if data[k]:
                    verrors.add(f'snapshot_create.{k}', 'This option is not supported when excluding datasets')

        if name and not validate_snapshot_name(f'{dataset}@{name}'):
            verrors.add('snapshot_create.name', 'Invalid snapshot name')

        verrors.check()

        vmware_context = None
        if data['vmware_sync']:
            vmware_context = self.middleware.call_sync('vmware.snapshot_begin', dataset, recursive)

        affected_vms = {}
        if data['suspend_vms']:
            if affected_vms := self.middleware.call_sync('vm.query_snapshot_begin', dataset, recursive):
                self.middleware.call_sync('vm.suspend_vms', list(affected_vms))

        try:
            if not exclude:
                with libzfs.ZFS() as zfs:
                    ds = zfs.get_dataset(dataset)
                    ds.snapshot(f'{dataset}@{name}', recursive=recursive, fsopts=properties)

                    if vmware_context and vmware_context['vmsynced']:
                        ds.properties['freenas:vmsynced'] = libzfs.ZFSUserProperty('Y')
            else:
                # Exclusions require snapshotting each non-excluded child
                # individually; delegated to zettarepl.
                self.middleware.call_sync('zettarepl.create_recursive_snapshot_with_exclude', dataset, name, exclude)

            self.logger.info(f"Snapshot taken: {dataset}@{name}")
        except libzfs.ZFSException as err:
            errno_ = errno.EFAULT
            if 'already exists' in str(err):
                errno_ = errno.EEXIST

            self.logger.error(f'Failed to snapshot {dataset}@{name}: {err}')
            raise CallError(f'Failed to snapshot {dataset}@{name}: {err}', errno_)
        else:
            instance = self.middleware.call_sync('zfs.snapshot.get_instance', f'{dataset}@{name}')
            self.middleware.send_event(f'{self._config.namespace}.query', 'ADDED', id=instance['id'], fields=instance)
            return instance
        finally:
            # Always resume VMs / finish the VMware transaction, even on failure.
            if affected_vms:
                self.middleware.call_sync('vm.resume_suspended_vms', list(affected_vms))
            if vmware_context:
                self.middleware.call_sync('vmware.snapshot_end', vmware_context)

    @accepts(
        Str('id'), Dict(
            'snapshot_update',
            List(
                'user_properties_update',
                items=[Dict(
                    'user_property',
                    Str('key', required=True, validators=[Match(r'.*:.*')]),
                    Str('value'),
                    Bool('remove'),
                )],
            ),
        )
    )
    def do_update(self, snap_id, data):
        """Update user properties of snapshot `snap_id` (set values or remove keys)."""
        verrors = ValidationErrors()
        props = data['user_properties_update']
        for index, prop in enumerate(props):
            if prop.get('remove') and 'value' in prop:
                verrors.add(
                    f'snapshot_update.user_properties_update.{index}.remove',
                    'Must not be set when value is specified'
                )
        verrors.check()

        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(snap_id)
                user_props = self.middleware.call_sync('pool.dataset.get_create_update_user_props', props, True)
                self.middleware.call_sync('zfs.dataset.update_zfs_object_props', user_props, snap)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
        else:
            return self.middleware.call_sync('zfs.snapshot.get_instance', snap_id)

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('defer', default=False),
            Bool('recursive', default=False),
        ),
    )
    def do_delete(self, id_, options):
        """
        Delete snapshot of name `id`.

        `options.defer` will defer the deletion of snapshot.
        """
        verrors = ValidationErrors()

        try:
            with libzfs.ZFS() as zfs:
                snap = zfs.get_snapshot(id_)
                snap.delete(defer=options['defer'], recursive=options['recursive'])
        except libzfs.ZFSException as e:
            if e.code == libzfs.Error.NOENT:
                raise InstanceNotFound(str(e))

            # When deletion failed because of dependent clones, surface the
            # clone names and suggest `defer` to the caller.
            if e.args and isinstance(e.args[0], str) and 'snapshot has dependent clones' in e.args[0]:
                with libzfs.ZFS() as zfs:
                    dep = list(zfs.get_snapshot(id_).dependents)
                if len(dep) and not options['defer']:
                    verrors.add(
                        'options.defer',
                        f'Please set this attribute as {snap.name!r} snapshot has dependent clones: '
                        f'{", ".join([i.name for i in dep])}'
                    )
                    verrors.check()
            raise CallError(str(e))
        else:
            # TODO: Events won't be sent for child snapshots in recursive delete
            self.middleware.send_event(
                f'{self._config.namespace}.query', 'REMOVED', id=id_, recursive=options['recursive'],
            )
            return True
| 11,738 | Python | .py | 248 | 34.778226 | 118 | 0.570293 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,763 | pool_actions.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/pool_actions.py | import errno
import libzfs
import subprocess
import functools
from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import CallError, Service
from .pool_utils import find_vdev, SEARCH_PATHS
class ZFSPoolService(Service):
    """libzfs-backed zpool operations: import/export, vdev manipulation, scrub control."""

    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True

    # NOTE(review): functools.cache on an instance method keeps `self` alive
    # for the cache's lifetime (ruff B019); presumably acceptable here because
    # services are long-lived singletons -- confirm before reusing elsewhere.
    @functools.cache
    def get_search_paths(self):
        """Return device directories libzfs should search when importing pools (cached)."""
        if self.middleware.call_sync('system.is_ha_capable'):
            # HA capable hardware which means we _ALWAYS_ expect
            # the zpool to have been created with disks that have
            # been formatted with gpt type labels on them
            return ['/dev/disk/by-partuuid']
        return SEARCH_PATHS

    def is_upgraded(self, pool_name):
        """Return True when every feature flag on `pool_name` is enabled or active."""
        enabled = (libzfs.FeatureState.ENABLED, libzfs.FeatureState.ACTIVE)
        with libzfs.ZFS() as zfs:
            try:
                pool = zfs.get(pool_name)
            except libzfs.ZFSException:
                raise CallError(f'{pool_name!r} not found', errno.ENOENT)
            return all((i.state in enabled for i in pool.features))

    @accepts(Str('pool', required=True))
    def upgrade(self, pool):
        """Enable all supported feature flags on `pool`."""
        try:
            with libzfs.ZFS() as zfs:
                zfs.get(pool).upgrade()
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def export(self, name, options):
        """Export pool `name`."""
        try:
            with libzfs.ZFS() as zfs:
                # FIXME: force not yet implemented
                pool = zfs.get(name)
                zfs.export_pool(pool)
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'))
    def get_devices(self, name):
        """Return device names (without /dev/ prefix) that make up pool `name`."""
        try:
            with libzfs.ZFS() as zfs:
                return [i.replace('/dev/', '') for i in zfs.get(name).disks]
        except libzfs.ZFSException as e:
            raise CallError(str(e), errno.ENOENT)

    def __zfs_vdev_operation(self, name, label, op, *args):
        # Common wrapper: resolve vdev `label` in pool `name`, then apply `op`.
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}', errno.EINVAL)
                op(target, *args)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('pool'), Str('label'), Dict('options', Bool('clear_label', default=False)))
    def detach(self, name, label, options):
        """
        Detach device `label` from the pool `pool`.
        """
        self.detach_remove_impl('detach', name, label, options)

    def detach_remove_impl(self, op, name, label, options):
        # Shared implementation of detach/remove; optionally wipes the ZFS
        # label from the device afterwards.
        def impl(target):
            getattr(target, op)()
            if options['clear_label']:
                self.clear_label(target.path)
        self.__zfs_vdev_operation(name, label, impl)

    @accepts(Str('device'))
    def clear_label(self, device):
        """
        Clear label from `device`.
        """
        try:
            libzfs.clear_label(device)
        except (libzfs.ZFSException, OSError) as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Str('label'))
    def offline(self, name, label):
        """
        Offline device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target: target.offline())

    @accepts(
        Str('pool'), Str('label'), Bool('expand', default=False)
    )
    def online(self, name, label, expand):
        """
        Online device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target, *args: target.online(*args), expand)

    @accepts(Str('pool'), Str('label'), Dict('options', Bool('clear_label', default=False)))
    def remove(self, name, label, options):
        """
        Remove device `label` from the pool `pool`.
        """
        self.detach_remove_impl('remove', name, label, options)

    @accepts(Str('pool'), Str('label'), Str('dev'))
    def replace(self, name, label, dev):
        """
        Replace device `label` with `dev` in pool `name`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label!r}', errno.EINVAL)
                newvdev = libzfs.ZFSVdev(zfs, 'disk')
                newvdev.path = f'/dev/{dev}'
                # FIXME: Replace using old path is not working for some reason
                # Lets use guid for now.
                target.path = str(target.guid)
                target.replace(newvdev)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(
        Str('name', required=True),
        Str('action', enum=['START', 'STOP', 'PAUSE'], default='START')
    )
    def scrub_action(self, name, action):
        """
        Start/Stop/Pause a scrub on pool `name`.
        """
        if action != 'PAUSE':
            try:
                with libzfs.ZFS() as zfs:
                    pool = zfs.get(name)

                    if action == 'START':
                        # Cap concurrent scrubs to keep the system responsive.
                        running_scrubs = len([
                            pool for pool in zfs.pools
                            if pool.scrub.state == libzfs.ScanState.SCANNING
                        ])
                        if running_scrubs >= 10:
                            raise CallError(
                                f'{running_scrubs} scrubs are already running. Running too many scrubs simultaneously '
                                'will result in an unresponsive system. Refusing to start scrub.'
                            )

                        pool.start_scrub()
                    else:
                        pool.stop_scrub()
            except libzfs.ZFSException as e:
                raise CallError(str(e), e.code)
        else:
            # Pause is not exposed by the libzfs bindings here; shell out.
            proc = subprocess.Popen(
                f'zpool scrub -p {name}'.split(' '),
                stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            proc.communicate()

            if proc.returncode != 0:
                raise CallError('Unable to pause scrubbing')

    def scrub_state(self, name):
        """Return the scrub status of pool `name` as a dict."""
        with libzfs.ZFS() as zfs:
            return zfs.get(name).scrub.asdict()

    def expand_state(self, name):
        """Return the raidz expansion status of pool `name` as a dict."""
        with libzfs.ZFS() as zfs:
            return zfs.get(name).expand.asdict()

    @accepts()
    def find_import(self):
        """Return serialized info for pools available to import from the search paths."""
        sp = self.get_search_paths()
        with libzfs.ZFS() as zfs:
            return [i.asdict() for i in zfs.find_import(search_paths=sp)]

    @accepts(
        Str('name_or_guid'),
        Dict('properties', additional_attrs=True),
        Bool('any_host', default=True),
        Str('cachefile', null=True, default=None),
        Str('new_name', null=True, default=None),
        Dict(
            'import_options',
            Bool('missing_log', default=False),
        ),
    )
    def import_pool(self, name_or_guid, properties, any_host, cachefile, new_name, import_options):
        """
        Import the pool matching `name_or_guid`, optionally renaming it to `new_name`.

        Mount failures after a successful import are logged, not raised.
        """
        with libzfs.ZFS() as zfs:
            found = None
            sp = self.get_search_paths()
            try:
                for pool in zfs.find_import(cachefile=cachefile, search_paths=sp):
                    if pool.name == name_or_guid or str(pool.guid) == name_or_guid:
                        found = pool
                        break
            except libzfs.ZFSInvalidCachefileException:
                raise CallError('Invalid or missing cachefile', errno.ENOENT)
            except libzfs.ZFSException as e:
                code = errno.ENOENT if e.code == libzfs.Error.NOENT.value else e.code
                raise CallError(str(e), code)
            else:
                if found is None:
                    raise CallError(f'Pool {name_or_guid} not found.', errno.ENOENT)

            missing_log = import_options['missing_log']
            pool_name = new_name or found.name
            try:
                zfs.import_pool(found, pool_name, properties, missing_log=missing_log, any_host=any_host)
            except libzfs.ZFSException as e:
                # We only log if some datasets failed to mount after pool import
                if e.code != libzfs.Error.MOUNTFAILED:
                    raise CallError(f'Failed to import {pool_name!r} pool: {e}', e.code)
                else:
                    self.logger.error(
                        'Failed to mount datasets after importing "%s" pool: %s', name_or_guid, str(e), exc_info=True
                    )

    @accepts(Str('pool'))
    def find_not_online(self, pool):
        """Return leaf vdev nodes of `pool` whose status is not ONLINE/AVAIL."""
        pool = self.middleware.call_sync('zfs.pool.query', [['id', '=', pool]], {'get': True})

        unavails = []
        for nodes in pool['groups'].values():
            for node in nodes:
                unavails.extend(self.__find_not_online(node))
        return unavails

    def __find_not_online(self, node):
        # Recursive helper: collect leaf nodes that are not healthy.
        if len(node['children']) == 0 and node['status'] not in ('ONLINE', 'AVAIL'):
            return [node]

        unavails = []
        for child in node['children']:
            unavails.extend(self.__find_not_online(child))
        return unavails

    def get_vdev(self, name, vname):
        """Return the vdev `vname` (guid or device path) of pool `name` as a dict."""
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                vdev = find_vdev(pool, vname)
                if not vdev:
                    raise CallError(f'{vname} not found in {name}', errno.ENOENT)
                return vdev.asdict()
        except libzfs.ZFSException as e:
            raise CallError(str(e))
| 9,791 | Python | .py | 235 | 29.659574 | 119 | 0.547049 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,764 | events.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/events.py | import libzfs
import multiprocessing
import time
from middlewared.utils.threading import set_thread_name, start_daemon_thread
def zfs_events(child_conn):
    """Child-process entry point: stream zpool events into `child_conn` forever."""
    with libzfs.ZFS() as zfs:
        event_stream = zfs.zpool_events(blocking=True, skip_existing_events=True)
        for event in event_stream:
            child_conn.send(event)
def setup_zfs_events_process(middleware):
    """Supervisor loop: keep a child process alive that relays zpool events to middleware hooks."""
    set_thread_name('retrieve_zfs_events_thread')
    while True:
        try:
            # One-directional pipe: child writes events, we read them here.
            parent_conn, child_conn = multiprocessing.Pipe(duplex=False)
            events_process = multiprocessing.Process(
                daemon=True, target=zfs_events, args=(child_conn,), name='retrieve_zfs_events_process'
            )
        except Exception as e:
            middleware.logger.error('Failed to spawn process for retrieving ZFS events %s', str(e))
            time.sleep(3)
            continue
        try:
            events_process.start()
            while True:
                # Blocks until the child sends the next event; raises when the
                # child dies, which drops us into the respawn path below.
                middleware.call_hook_sync('zfs.pool.events', data=parent_conn.recv())
        except Exception as e:
            # During shutdown the child is expected to die; don't log or respawn.
            if middleware.call_sync('system.state') != 'SHUTTING_DOWN':
                middleware.logger.error('Failed to retrieve ZFS events: %s', str(e))
            else:
                break
        time.sleep(1)
async def setup(middleware):
    """Middleware plugin entry point: run the ZFS event relay loop in a daemon thread."""
    start_daemon_thread(target=setup_zfs_events_process, args=(middleware,))
| 1,366 | Python | .py | 32 | 33.28125 | 102 | 0.644528 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,765 | pool_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/pool_utils.py | import libzfs
from collections import defaultdict
SEARCH_PATHS = ['/dev/disk/by-partuuid', '/dev']
def convert_topology(zfs, vdevs):
    """Convert middleware vdev specifications into a libzfs topology mapping.

    `vdevs` is a list of dicts carrying `type`, `devices`, `root` (and, for
    DRAID types, `draid_spare_disks`/`draid_data_disks`). Returns a dict keyed
    by the lowercased root name (plus a special 'draid' key) whose values are
    libzfs.ZFSVdev objects (or, for draid, dicts of vdev + layout parameters).
    """
    topology = defaultdict(list)
    for vdev in vdevs:
        # one leaf ZFSVdev per backing device
        children = []
        for device in vdev['devices']:
            z_cvdev = libzfs.ZFSVdev(zfs, 'disk')
            z_cvdev.type = 'disk'
            z_cvdev.path = device
            children.append(z_cvdev)
        if vdev['type'] == 'STRIPE':
            # stripes have no grouping vdev; leaves go straight into the root
            topology[vdev['root'].lower()].extend(children)
        else:
            z_vdev = libzfs.ZFSVdev(zfs, 'disk')
            z_vdev.children = children
            if vdev['type'].startswith('DRAID'):
                # e.g. 'DRAID2' -> parity level 2; draid entries also carry the
                # extra layout parameters libzfs needs
                z_vdev.type = 'draid'
                topology['draid'].append({
                    'disk': z_vdev,
                    'parameters': {
                        'children': len(children),
                        'draid_parity': int(vdev['type'][-1]),
                        'draid_spare_disks': vdev['draid_spare_disks'],
                        'draid_data_disks': vdev['draid_data_disks'],
                    }
                })
            else:
                z_vdev.type = vdev['type'].lower()
                topology[vdev['root'].lower()].append(z_vdev)
    return topology
def find_vdev(pool, vname):
    """
    Find a vdev in the given `pool` whose guid or device path matches `vname`.

    Returns:
        libzfs.ZFSVdev object, or None when nothing matches.
    """
    target = str(vname)
    stack = [vdev for group in pool.groups.values() for vdev in group]
    while stack:
        candidate = stack.pop()
        if target == str(candidate.guid):
            return candidate
        # disks also match on their path with the '/dev/' prefix stripped
        if candidate.type == 'disk' and candidate.path.replace('/dev/', '') == vname:
            return candidate
        stack.extend(candidate.children)
    return None
| 1,817 | Python | .py | 51 | 24.196078 | 71 | 0.510262 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,766 | dataset.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/dataset.py | import errno
import libzfs
import subprocess
from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import CallError, CRUDService, filterable, ValidationErrors
from middlewared.utils import filter_list
from .dataset_utils import flatten_datasets
from .utils import get_snapshot_count_cached
class ZFSDatasetService(CRUDService):
    """Private CRUD wrapper around libzfs datasets (runs in the middleware process pool)."""
    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True
    @filterable
    def query(self, filters, options):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.snapshots` is a boolean which when set will retrieve snapshots for the dataset in question
        by adding a snapshots key to the dataset data.

        `query-options.extra.snapshots_count` is a boolean key which when set will retrieve snapshot counts for the
        datasets returned by adding a snapshot_count key to the dataset data.

        `query-options.extra.retrieve_children` is a boolean set to true by default. When set to true, will retrieve
        all children datasets which can cause a performance penalty. When set to false, will not retrieve children
        datasets which does not incur the performance penalty.

        `query-options.extra.properties` is a list of properties which should be retrieved. If null ( by default ),
        it would retrieve all properties, if empty, it will retrieve no property.

        We provide 2 ways how zfs.dataset.query returns dataset's data. First is a flat structure ( default ), which
        means that all the datasets in the system are returned as separate objects which also contain all the data
        their is for their children. This retrieval type is slightly slower because of duplicates which exist in
        each object.
        Second type is hierarchical where only top level datasets are returned in the list and they contain all the
        children there are for them in `children` key. This retrieval type is slightly faster.
        These options are controlled by `query-options.extra.flat` attribute which defaults to true.

        `query-options.extra.user_properties` controls if user defined properties of datasets should be retrieved
        or not.

        While we provide a way to exclude all properties from data retrieval, we introduce a single attribute
        `query-options.extra.retrieve_properties` which if set to false will make sure that no property is retrieved
        whatsoever and overrides any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        retrieve_children = extra.get('retrieve_children', True)
        snapshots = extra.get('snapshots')
        snapshots_count = extra.get('snapshots_count')
        snapshots_recursive = extra.get('snapshots_recursive')
        snapshots_properties = extra.get('snapshots_properties', [])
        if not retrieve_properties:
            # This is a short hand version where consumer can specify that they don't want any property to
            # be retrieved
            user_properties = False
            props = []
        with libzfs.ZFS() as zfs:
            # Handle `id` or `name` filter specially to avoiding getting all datasets
            pop_snapshots_changed = False
            if snapshots_count and props is not None and 'snapshots_changed' not in props:
                # snapshot counting relies on the `snapshots_changed` property;
                # fetch it temporarily and have the helper strip it afterwards
                props.append('snapshots_changed')
                pop_snapshots_changed = True
            kwargs = dict(
                props=props, user_props=user_properties, snapshots=snapshots, retrieve_children=retrieve_children,
                snapshots_recursive=snapshots_recursive, snapshot_props=snapshots_properties
            )
            if filters and filters[0][0] in ('id', 'name'):
                if filters[0][1] == '=':
                    kwargs['datasets'] = [filters[0][2]]
                if filters[0][1] == 'in':
                    kwargs['datasets'] = filters[0][2]
            datasets = zfs.datasets_serialized(**kwargs)
            if flat:
                datasets = flatten_datasets(datasets)
            else:
                datasets = list(datasets)
            if snapshots_count:
                get_snapshot_count_cached(
                    self.middleware,
                    zfs,
                    datasets,
                    True,
                    pop_snapshots_changed
                )
        return filter_list(datasets, filters, options)
    @accepts(Dict(
        'dataset_create',
        Bool('create_ancestors', default=False),
        Str('name', required=True),
        Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
        Dict(
            'properties',
            Bool('sparse'),
            additional_attrs=True,
        ),
    ))
    def do_create(self, data):
        """
        Creates a ZFS dataset.
        """
        verrors = ValidationErrors()
        if '/' not in data['name']:
            verrors.add('name', 'You need a full name, e.g. pool/newdataset')
        verrors.check()
        properties = data.get('properties') or {}
        # `sparse` is a volume-creation flag consumed here, not a zfs property
        sparse = properties.pop('sparse', False)
        params = {}
        for k, v in data['properties'].items():
            params[k] = v
        # it's important that we set xattr=sa for various
        # performance reasons related to ea handling
        if data['type'] == 'FILESYSTEM' and 'xattr' not in params:
            params['xattr'] = 'sa'
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(data['name'].split('/')[0])
                pool.create(
                    data['name'], params, fstype=getattr(libzfs.DatasetType, data['type']),
                    sparse_vol=sparse, create_ancestors=data['create_ancestors'],
                )
        except libzfs.ZFSException as e:
            self.logger.error('Failed to create dataset', exc_info=True)
            raise CallError(f'Failed to create dataset: {e}')
        else:
            return data
    @accepts(
        Str('id'),
        Dict(
            'dataset_update',
            Dict(
                'properties',
                additional_attrs=True,
            ),
        ),
    )
    def do_update(self, id_, data):
        """Update properties of dataset `id_`; returns `data` on success."""
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(id_)
                if 'properties' in data:
                    properties = data['properties'].copy()
                    # Set these after reservations
                    for k in ['quota', 'refquota']:
                        if k in properties:
                            # pop + reassign moves the key to the end of the
                            # insertion-ordered dict so it is applied last
                            properties[k] = properties.pop(k)  # Set them last
                    self.update_zfs_object_props(properties, dataset)
        except libzfs.ZFSException as e:
            self.logger.error('Failed to update dataset', exc_info=True)
            raise CallError(f'Failed to update dataset: {e}')
        else:
            return data
    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('force', default=False),
            Bool('recursive', default=False),
        )
    )
    def do_delete(self, id_, options):
        """Destroy dataset `id_` via the `zfs destroy` CLI.

        Raises CallError (EBUSY when the dataset is busy, EFAULT otherwise)
        on failure; returns True on success.
        """
        force = options['force']
        recursive = options['recursive']
        args = []
        if force:
            args += ['-f']
        if recursive:
            args += ['-r']
        # If dataset is mounted and has receive_resume_token, we should destroy it or ZFS will say
        # "cannot destroy 'pool/dataset': dataset already exists"
        recv_run = subprocess.run(['zfs', 'recv', '-A', id_], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Destroying may take a long time, lets not use py-libzfs as it will block
        # other ZFS operations.
        try:
            subprocess.run(
                ['zfs', 'destroy'] + args + [id_], text=True, capture_output=True, check=True,
            )
        except subprocess.CalledProcessError as e:
            if recv_run.returncode == 0 and e.stderr.strip().endswith('dataset does not exist'):
                # This operation might have deleted this dataset if it was created by `zfs recv` operation
                return
            error = e.stderr.strip()
            errno_ = errno.EFAULT
            if 'Device busy' in error or 'dataset is busy' in error:
                errno_ = errno.EBUSY
            raise CallError(f'Failed to delete dataset: {error}', errno_)
        return True
    def destroy_snapshots(self, name, snapshot_spec):
        """Delete snapshots of dataset `name` matching the libzfs `snapshot_spec`."""
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                return dataset.delete_snapshots(snapshot_spec)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
    def update_zfs_object_props(self, properties, zfs_object):
        """Validate and apply a mapping of property updates to `zfs_object`.

        Each value is a dict: {'source': 'INHERIT'} to inherit an existing
        property, or one containing 'parsed'/'value' to set it. Names unknown
        to ZFS must contain a colon (user properties).
        """
        verrors = ValidationErrors()
        for k, v in properties.items():
            # If prop already exists we just update it,
            # otherwise create a user property
            prop = zfs_object.properties.get(k)
            if v.get('source') == 'INHERIT':
                if not prop:
                    verrors.add(f'properties.{k}', 'Property does not exist and cannot be inherited')
            else:
                if not any(i in v for i in ('parsed', 'value')):
                    verrors.add(f'properties.{k}', '"value" or "parsed" must be specified when setting a property')
                if not prop and ':' not in k:
                    verrors.add(f'properties.{k}', 'User property needs a colon (:) in its name')
        verrors.check()
        try:
            zfs_object.update_properties(properties)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to update properties: {e!r}')
| 10,114 | Python | .py | 212 | 36.103774 | 119 | 0.600547 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,767 | exceptions.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/exceptions.py | from middlewared.service import CallError
class ZFSSetPropertyError(CallError):
    """CallError specialization raised when setting a single ZFS property fails."""

    def __init__(self, property_, error):
        self.property = property_
        self.error = error
        message = f'Failed to update dataset: failed to set property {self.property}: {self.error}'
        super().__init__(message)
| 293 | Python | .py | 6 | 42.833333 | 107 | 0.698246 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,768 | snapshot_actions.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/snapshot_actions.py | import libzfs
import subprocess
from middlewared.schema import accepts, Bool, Dict, returns, Str
from middlewared.service import CallError, private, Service
class ZFSSnapshot(Service):
    """Snapshot operations (clone/rollback/hold/release) via libzfs and the zfs CLI."""
    class Config:
        namespace = 'zfs.snapshot'
        process_pool = True
    @accepts(Dict(
        'snapshot_clone',
        Str('snapshot', required=True, empty=False),
        Str('dataset_dst', required=True, empty=False),
        Dict(
            'dataset_properties',
            additional_attrs=True,
        )
    ))
    def clone(self, data):
        """
        Clone a given snapshot to a new dataset.

        Returns:
            bool: True if succeed otherwise False.
        """
        snapshot = data.get('snapshot', '')
        dataset_dst = data.get('dataset_dst', '')
        props = data['dataset_properties']
        try:
            with libzfs.ZFS() as zfs:
                snp = zfs.get_snapshot(snapshot)
                snp.clone(dataset_dst, props)
                dataset = zfs.get_dataset(dataset_dst)
                if dataset.type.name == 'FILESYSTEM':
                    # volumes have no mountpoint; only filesystem clones get mounted
                    dataset.mount_recursive()
            self.logger.info("Cloned snapshot {0} to dataset {1}".format(snapshot, dataset_dst))
            return True
        except libzfs.ZFSException as err:
            self.logger.error("{0}".format(err))
            raise CallError(f'Failed to clone snapshot: {err}')
    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('recursive', default=False),
            Bool('recursive_clones', default=False),
            Bool('force', default=False),
            Bool('recursive_rollback', default=False),
        ),
    )
    def rollback(self, id_, options):
        """
        Rollback to a given snapshot `id`.

        `options.recursive` will destroy any snapshots and bookmarks more recent than the one
        specified.

        `options.recursive_clones` is just like `recursive` but will also destroy any clones.

        `options.force` will force unmount of any clones.

        `options.recursive_rollback` will do a complete recursive rollback of each child snapshots for `id`. If
        any child does not have specified snapshot, this operation will fail.
        """
        args = []
        if options['force']:
            args += ['-f']
        if options['recursive']:
            args += ['-r']
        if options['recursive_clones']:
            args += ['-R']
        if options['recursive_rollback']:
            dataset, snap_name = id_.rsplit('@', 1)
            # snapshot names of `dataset` and every descendant dataset
            datasets = set({
                f'{ds["id"]}@{snap_name}' for ds in self.middleware.call_sync(
                    'zfs.dataset.query', [['OR', [['id', '^', f'{dataset}/'], ['id', '=', dataset]]]]
                )
            })
            # only roll back datasets that actually have this snapshot
            for snap in filter(lambda sn: self.middleware.call_sync('zfs.snapshot.query', [['id', '=', sn]]), datasets):
                self.rollback_impl(args, snap)
        else:
            self.rollback_impl(args, id_)
    @private
    def rollback_impl(self, args, id_):
        """Run `zfs rollback` with `args` on snapshot `id_`, wrapping failures in CallError."""
        try:
            subprocess.run(
                ['zfs', 'rollback'] + args + [id_], text=True, capture_output=True, check=True,
            )
        except subprocess.CalledProcessError as e:
            raise CallError(f'Failed to rollback snapshot: {e.stderr.strip()}')
    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('recursive', default=False),
        ),
    )
    @returns()
    def hold(self, id_, options):
        """
        Holds snapshot `id`.

        `truenas` tag will be added to the snapshot's tag namespace.

        `options.recursive` will hold snapshots recursively.
        """
        try:
            with libzfs.ZFS() as zfs:
                snapshot = zfs.get_snapshot(id_)
                snapshot.hold('truenas', options['recursive'])
        except libzfs.ZFSException as err:
            raise CallError(f'Failed to hold snapshot: {err}')
    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('recursive', default=False),
        ),
    )
    @returns()
    def release(self, id_, options):
        """
        Release held snapshot `id`.

        Will remove all hold tags from the specified snapshot.

        `options.recursive` will release snapshots recursively. Please note that only the tags that are present on the
        parent snapshot will be removed.
        """
        try:
            with libzfs.ZFS() as zfs:
                snapshot = zfs.get_snapshot(id_)
                # drop every tag currently held on this snapshot
                for tag in snapshot.holds:
                    snapshot.release(tag, options['recursive'])
        except libzfs.ZFSException as err:
            raise CallError(f'Failed to release snapshot: {err}')
| 4,784 | Python | .py | 126 | 27.579365 | 120 | 0.561165 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,769 | dataset_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/dataset_utils.py | from copy import deepcopy
def flatten_datasets(datasets):
    """Flatten a nested dataset tree into a single pre-order list.

    Every entry is an independent ``deepcopy`` of the original node, so a
    child appears both nested under its parent's copy and again as its own
    top-level entry — identical semantics to the previous
    ``sum(..., [])`` implementation, but in linear rather than quadratic
    time (``sum`` on lists re-concatenates the accumulator on every step).
    """
    flat = []
    for ds in datasets:
        flat.append(deepcopy(ds))
        flat.extend(flatten_datasets(ds.get('children') or []))
    return flat
| 161 | Python | .py | 3 | 50.666667 | 100 | 0.717949 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,770 | disks.py | truenas_middleware/src/middlewared/middlewared/plugins/zfs_/disks.py | import pyudev
from middlewared.service import Service
class ZFSPoolService(Service):

    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True

    def get_disks(self, name):
        """Return the whole-disk names (e.g. 'sda') backing zpool `name`.

        Builds a udev lookup table mapping every block device name, partition
        name and /dev/disk/by-* symlink target to its parent disk, then
        resolves each device the pool reports through that table. Pool members
        with no matching udev entry are silently skipped.
        """
        sys_devices = {}
        for dev in pyudev.Context().list_devices(subsystem='block'):
            if dev.sys_name.startswith('sr'):
                # ignore optical drives
                continue
            devtype = dev.properties.get('DEVTYPE', '')
            if devtype not in ('disk', 'partition'):
                continue
            # this is "sda/sda1/sda2/sda3" etc
            sys_devices[dev.sys_name] = dev.sys_name
            # zpool could have been created using the raw partition
            # (i.e. "sda3"). This happens on the "boot-pool" for example.
            # We need to get the parent device name when this occurs.
            if dev.sys_number and (parent := dev.find_parent('block')):
                sys_devices[dev.sys_name] = parent.sys_name
            # these are the various by-{partuuid/label/id/path} labels;
            # reuse `devtype` fetched above rather than re-indexing properties
            if devtype == 'partition':
                for link in (dev.properties.get('DEVLINKS') or '').split():
                    sys_devices[link.removeprefix('/dev/')] = dev.find_parent('block').sys_name
        # the old single-key `mapping = {name: set()}` dict was pointless;
        # a plain set produces the same result
        disks = set()
        for disk in self.middleware.call_sync('zfs.pool.get_devices', name):
            try:
                disks.add(sys_devices[disk])
            except KeyError:
                continue
        return list(disks)
| 1,538 | Python | .py | 33 | 34.848485 | 95 | 0.578983 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,771 | general.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/general.py | from collections import defaultdict
from middlewared.service import Service
from middlewared.schema import Dict, List, IPAddr, returns, accepts
class NetworkGeneralService(Service):

    class Config:
        namespace = 'network.general'
        cli_namespace = 'network.general'

    @accepts(roles=['NETWORK_GENERAL_READ'])
    @returns(
        Dict(
            'network_summary',
            Dict('ips', additional_attrs=True, required=True),
            List('default_routes', items=[IPAddr('default_route')], required=True),
            List('nameservers', items=[IPAddr('nameserver')], required=True),
        )
    )
    async def summary(self):
        """
        Retrieve general information for current Network.

        Returns a dictionary. For example:

        .. examples(websocket)::

            :::javascript

            {
                "ips": {
                    "vtnet0": {
                        "IPV4": [
                            "192.168.0.15/24"
                        ]
                    }
                },
                "default_routes": [
                    "192.168.0.1"
                ],
                "nameservers": [
                    "192.168.0.1"
                ]
            }
        """
        family_map = {'INET': 'IPV4', 'INET6': 'IPV6'}
        ips = defaultdict(lambda: defaultdict(list))
        for iface in await self.middleware.call('interface.query'):
            for alias in iface['state']['aliases']:
                key = family_map.get(alias['type'])
                if key is None:
                    continue
                ips[iface['name']][key].append(f'{alias["address"]}/{alias["netmask"]}')
        # plain dicts double as insertion-ordered sets: first-seen order, no dups
        default_routes = {}
        for route in await self.middleware.call('route.system_routes', [('netmask', 'in', ['0.0.0.0', '::'])]):
            # IPv6 have local addresses that don't have gateways. Make sure we only return a gateway
            # if there is one.
            if route['gateway']:
                default_routes[route['gateway']] = None
        nameservers = dict.fromkeys(ns['nameserver'] for ns in await self.middleware.call('dns.query'))
        return {
            'ips': ips,
            'default_routes': list(default_routes),
            'nameservers': list(nameservers),
        }
| 2,390 | Python | .py | 62 | 25.629032 | 111 | 0.502374 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,772 | activity.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/activity.py | # -*- coding=utf-8 -*-
import logging
from middlewared.schema import List, returns, Str
from middlewared.service import accepts, CallError, private, Service
logger = logging.getLogger(__name__)
class NetworkConfigurationService(Service):
    class Config:
        namespace = 'network.configuration'
    @accepts()
    @returns(List('activity_choices', items=[List('activity_choice', items=[Str('activity')])]))
    async def activity_choices(self):
        """
        Returns allowed/forbidden network activity choices.
        """
        # Thin public wrapper; the activity registry itself lives on network.general.
        return await self.middleware.call('network.general.activity_choices')
class NetworkGeneralService(Service):

    class Config:
        namespace = 'network.general'

    # Registry of known network activities: name -> human readable description.
    # Class-level mapping shared by all instances; populated via register_activity.
    activities = {}

    @private
    def register_activity(self, name, description):
        """Register a network activity; duplicate registration is a programming error."""
        if name in self.activities:
            raise RuntimeError(f'Network activity {name} is already registered')
        self.activities[name] = description

    @private
    def activity_choices(self):
        """Return [name, description] pairs sorted case-insensitively by description."""
        choices = [[name, description] for name, description in self.activities.items()]
        choices.sort(key=lambda pair: pair[1].lower())
        return choices

    @private
    async def can_perform_activity(self, name):
        """Whether activity `name` is permitted by the configured ALLOW/DENY list."""
        if name not in self.activities:
            raise RuntimeError(f'Unknown network activity {name}')
        config = await self.middleware.call('network.configuration.config')
        listed = name in config['activity']['activities']
        return listed if config['activity']['type'] == 'ALLOW' else not listed

    @private
    async def will_perform_activity(self, name):
        """Raise CallError unless activity `name` is currently permitted."""
        if not await self.middleware.call('network.general.can_perform_activity', name):
            raise CallError(f'Network activity "{self.activities[name]}" is disabled')
| 1,777 | Python | .py | 40 | 37.375 | 96 | 0.683111 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,773 | route.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/route.py | import re
import ipaddress
import os
import contextlib
import signal
import asyncio
from pyroute2.netlink.exceptions import NetlinkError
from middlewared.service import Service, filterable, filterable_returns, private
from middlewared.schema import Dict, List, Str, Int, IPAddr, accepts, returns, Bool
from middlewared.plugins.interface.netif import netif
from middlewared.utils import filter_list
# Parsers for `rtsold` status-dump output: per-interface section headers...
RE_RTSOLD_INTERFACE = re.compile(r'Interface (.+)')
# ...and the count of valid router advertisements received on that interface.
RE_RTSOLD_NUMBER_OF_VALID_RAS = re.compile(r'number of valid RAs: ([0-9]+)')
class RouteService(Service):
    """Query the kernel routing table and synchronize it with the database config."""
    class Config:
        namespace_alias = 'routes'
        cli_namespace = 'network.route'
    @filterable
    @filterable_returns(Dict(
        'system_route',
        IPAddr('network', required=True),
        IPAddr('netmask', required=True),
        IPAddr('gateway', null=True, required=True),
        Str('interface', required=True),
        List('flags', required=True),
        Int('table_id', required=True),
        Int('scope', required=True),
        Str('preferred_source', null=True, required=True),
    ))
    def system_routes(self, filters, options):
        """
        Get current/applied network routes.
        """
        rtable = netif.RoutingTable()
        return filter_list([r.asdict() for r in rtable.routes], filters, options)
    @private
    async def configured_default_ipv4_route(self):
        """True when an IPv4 default route is applied OR one is set in the db config."""
        route = netif.RoutingTable().default_route_ipv4
        return bool(route or (await self.middleware.call('network.configuration.config'))['ipv4gateway'])
    @private
    async def sync(self):
        """Apply the database's IPv4/IPv6 default gateways to the kernel routing table.

        Adds, changes or removes the default routes so the applied state matches
        the configuration; when no IPv4 gateway is configured, falls back to one
        discovered from dhclient leases.
        """
        config = await self.middleware.call('datastore.query', 'network.globalconfiguration', [], {'get': True})
        # Generate dhclient.conf so we can ignore routes (def gw) option
        # in case there is one explicitly set in network config
        await self.middleware.call('etc.generate', 'dhclient')
        ipv4_gateway = config['gc_ipv4gateway'] or None
        if not ipv4_gateway:
            # no explicit gateway configured: look for one handed out by DHCP
            interfaces = await self.middleware.call('datastore.query', 'network.interfaces')
            if interfaces:
                interfaces = [interface['int_interface'] for interface in interfaces if interface['int_dhcp']]
            else:
                ignore = tuple(await self.middleware.call('interface.internal_interfaces'))
                interfaces = list(filter(lambda x: not x.startswith(ignore), netif.list_interfaces().keys()))
            for interface in interfaces:
                dhclient_running, dhclient_pid = await self.middleware.call('interface.dhclient_status', interface)
                if dhclient_running:
                    leases = await self.middleware.call('interface.dhclient_leases', interface)
                    reg_routers = re.search(r'option routers (.+);', leases or '')
                    if reg_routers:
                        # Make sure to get first route only
                        ipv4_gateway = reg_routers.group(1).split(' ')[0]
                        break
        routing_table = netif.RoutingTable()
        if ipv4_gateway:
            ipv4_gateway = netif.Route('0.0.0.0', '0.0.0.0', ipaddress.ip_address(str(ipv4_gateway)))
            ipv4_gateway.flags.add(netif.RouteFlags.STATIC)
            ipv4_gateway.flags.add(netif.RouteFlags.GATEWAY)
            # If there is a gateway but there is none configured, add it
            # Otherwise change it
            if not routing_table.default_route_ipv4:
                self.logger.info('Adding IPv4 default route to {}'.format(ipv4_gateway.gateway))
                try:
                    routing_table.add(ipv4_gateway)
                except NetlinkError as e:
                    # Error could be (101, Network host unreachable)
                    # This error occurs in random race conditions.
                    # For example, can occur in the following scenario:
                    #   1. delete all configured interfaces on system
                    #   2. interface.sync() gets called and starts dhcp
                    #       on all interfaces detected on the system
                    #   3. route.sync() gets called which eventually
                    #       calls dhclient_leases which reads a file on
                    #       disk to see if we have any previously
                    #       defined default gateways from dhclient.
                    #       However, by the time we read this file,
                    #       dhclient could still be requesting an
                    #       address from the DHCP server
                    #   4. so when we try to install our own default
                    #       gateway manually (even though dhclient will
                    #       do this for us) it will fail expectedly here.
                    # Either way, let's log the error.
                    gw = ipv4_gateway.asdict()['gateway']
                    self.logger.error('Failed adding %s as default gateway: %r', gw, e)
            elif ipv4_gateway != routing_table.default_route_ipv4:
                _from = routing_table.default_route_ipv4.gateway
                _to = ipv4_gateway.gateway
                self.logger.info(f'Changing IPv4 default route from {_from} to {_to}')
                routing_table.change(ipv4_gateway)
        elif routing_table.default_route_ipv4:
            # If there is no gateway in database but one is configured
            # remove it
            self.logger.info('Removing IPv4 default route')
            routing_table.delete(routing_table.default_route_ipv4)
        ipv6_gateway = config['gc_ipv6gateway'] or None
        if ipv6_gateway:
            # a "%" suffix pins the gateway to a specific interface (zone index)
            if ipv6_gateway.count("%") == 1:
                ipv6_gateway, ipv6_gateway_interface = ipv6_gateway.split("%")
            else:
                ipv6_gateway_interface = None
            ipv6_gateway = netif.Route('::', '::', ipaddress.ip_address(str(ipv6_gateway)), ipv6_gateway_interface)
            ipv6_gateway.flags.add(netif.RouteFlags.STATIC)
            ipv6_gateway.flags.add(netif.RouteFlags.GATEWAY)
            # If there is a gateway but there is none configured, add it
            # Otherwise change it
            if not routing_table.default_route_ipv6:
                self.logger.info(f'Adding IPv6 default route to {ipv6_gateway.gateway}')
                routing_table.add(ipv6_gateway)
            elif ipv6_gateway != routing_table.default_route_ipv6:
                _from = routing_table.default_route_ipv6.gateway
                _to = ipv6_gateway.gateway
                self.logger.info(f'Changing IPv6 default route from {_from} to {_to}')
                routing_table.change(ipv6_gateway)
        elif routing_table.default_route_ipv6:
            # If there is no gateway in database but one is configured
            # remove it
            interface = routing_table.default_route_ipv6.interface
            autoconfigured_interface = await self.middleware.call(
                'datastore.query', 'network.interfaces', [
                    ['int_interface', '=', interface],
                    ['int_ipv6auto', '=', True],
                ]
            )
            remove = False
            if not autoconfigured_interface:
                self.logger.info('Removing IPv6 default route as there is no IPv6 autoconfiguration')
                remove = True
            elif not await self.middleware.call('route.has_valid_router_announcements', interface):
                self.logger.info('Removing IPv6 default route as IPv6 autoconfiguration has not succeeded')
                remove = True
            if remove:
                routing_table.delete(routing_table.default_route_ipv6)
    @accepts(Str('ipv4_gateway'))
    @returns(Bool())
    def ipv4gw_reachable(self, ipv4_gateway):
        """
        Get the IPv4 gateway and verify if it is reachable by any interface.

        Returns:
            bool: True if the gateway is reachable or otherwise False.
        """
        ignore_nics = ('lo', 'tap', 'epair')
        for if_name, iface in list(netif.list_interfaces().items()):
            if not if_name.startswith(ignore_nics):
                for nic_address in iface.addresses:
                    if nic_address.af == netif.AddressFamily.INET:
                        # reachable == directly on some interface's IPv4 subnet
                        ipv4_nic = ipaddress.IPv4Interface(nic_address)
                        if ipaddress.ip_address(ipv4_gateway) in ipv4_nic.network:
                            return True
        return False
    @private
    async def has_valid_router_announcements(self, interface):
        """True when rtsold reports at least one valid router advertisement on `interface`.

        Signals rtsold (SIGUSR1) to dump its status, polls briefly for the dump
        file, then parses the per-interface RA counts out of it.
        """
        rtsold_dump_path = '/var/run/rtsold.dump'
        try:
            with open('/var/run/rtsold.pid') as f:
                rtsold_pid = int(f.read().strip())
        except (FileNotFoundError, ValueError):
            self.logger.warning('rtsold pid file does not exist')
            return False
        # remove any stale dump so we only read a freshly generated one
        with contextlib.suppress(FileNotFoundError):
            os.unlink(rtsold_dump_path)
        try:
            os.kill(rtsold_pid, signal.SIGUSR1)
        except ProcessLookupError:
            self.logger.warning('rtsold is not running')
            return False
        # poll up to ~2 seconds for rtsold to write the dump file
        for i in range(10):
            await asyncio.sleep(0.2)
            try:
                with open(rtsold_dump_path) as f:
                    dump = f.readlines()
                break
            except FileNotFoundError:
                continue
        else:
            self.logger.warning('rtsold has not dumped status')
            return False
        current_interface = None
        for line in dump:
            line = line.strip()
            m = RE_RTSOLD_INTERFACE.match(line)
            if m:
                current_interface = m.group(1)
            if current_interface == interface:
                m = RE_RTSOLD_NUMBER_OF_VALID_RAS.match(line)
                if m:
                    return int(m.group(1)) > 0
        self.logger.warning('Have not found %s status in rtsold dump', interface)
        return False
| 9,999 | Python | .py | 197 | 37.736041 | 115 | 0.592043 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,774 | common.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/common.py | from middlewared.service import Service
class NetworkCommonService(Service):

    class Config:
        namespace = 'network.common'
        private = True

    async def check_failover_disabled(self, schema, verrors):
        """Add a validation error to `verrors` unless HA failover is absent or disabled.

        No-op when the system is not licensed for failover or currently in
        SINGLE (non-HA) status.
        """
        if not await self.middleware.call('failover.licensed'):
            return
        elif await self.middleware.call('failover.status') == 'SINGLE':
            return
        elif not (await self.middleware.call('failover.config'))['disabled']:
            verrors.add(schema, 'Failover must be disabled.')

    async def check_dhcp_or_aliases(self, schema, verrors):
        """Validate that at least one interface has DHCP, IPv6 autoconfig or a static alias.

        Bug fix: the previous check was
        ``any([i[key] for key in keys] for i in interfaces)`` — the inner list
        comprehension always yields a three-element (hence truthy) list, so
        the validation passed whenever *any* interface existed, regardless of
        its configuration. The inner aggregation must be ``any(...)`` as well.
        """
        keys = ('ipv4_dhcp', 'ipv6_auto', 'aliases')
        interfaces = await self.middleware.call('interface.query')
        if not any(any(iface[key] for key in keys) for iface in interfaces):
            verrors.add(
                schema, 'At least one interface must be configured with IPv4 DHCP, IPv6 Autoconfig or a static IP.'
            )
| 905 | Python | .py | 18 | 41 | 115 | 0.651927 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,775 | global_config.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/global_config.py | import ipaddress
import middlewared.sqlalchemy as sa
from middlewared.service import ConfigService, private
from middlewared.schema import accepts, Patch, List, Dict, Int, Str, Bool, IPAddr, Ref, URI, ValidationErrors
from middlewared.utils.directoryservices.constants import DSType
from middlewared.validators import Match, Hostname
HOSTS_FILE_EARMARKER = '# STATIC ENTRIES'
class NetworkConfigurationModel(sa.Model):
    """Single-row table backing the global network configuration."""
    __tablename__ = 'network_globalconfiguration'
    id = sa.Column(sa.Integer(), primary_key=True)
    gc_hostname = sa.Column(sa.String(120), default='nas')
    # hostname used by the standby controller on HA systems
    gc_hostname_b = sa.Column(sa.String(120), nullable=True)
    gc_domain = sa.Column(sa.String(120), default='local')
    gc_ipv4gateway = sa.Column(sa.String(42), default='')
    gc_ipv6gateway = sa.Column(sa.String(45), default='')
    gc_nameserver1 = sa.Column(sa.String(45), default='')
    gc_nameserver2 = sa.Column(sa.String(45), default='')
    gc_nameserver3 = sa.Column(sa.String(45), default='')
    gc_httpproxy = sa.Column(sa.String(255))
    # newline-separated extra /etc/hosts entries (split in network_config_extend)
    gc_hosts = sa.Column(sa.Text(), default='')
    # whitespace-separated DNS search domains (split in network_config_extend)
    gc_domains = sa.Column(sa.Text(), default='')
    gc_service_announcement = sa.Column(sa.JSON(dict), default={'mdns': True, 'wsdd': True, "netbios": False})
    gc_hostname_virtual = sa.Column(sa.String(120), nullable=True)
    gc_activity = sa.Column(sa.JSON(dict))
class NetworkConfigurationService(ConfigService):
    """Public config service exposing/extending the global network configuration row."""
    class Config:
        namespace = 'network.configuration'
        datastore = 'network.globalconfiguration'
        datastore_prefix = 'gc_'
        datastore_extend = 'network.configuration.network_config_extend'
        cli_namespace = 'network.configuration'
    # Schema of the (singleton) configuration entry returned/accepted by this service.
    ENTRY = Dict(
        'network_configuration_entry',
        Int('id', required=True),
        Str('hostname', required=True, validators=[Hostname()]),
        # NOTE(review): the `\0-9` in this character class is a NUL-to-'9' range,
        # not the digits `0-9` — looks unintended; confirm before changing.
        Str('domain', validators=[Match(r'^[a-zA-Z\.\-\0-9]*$')],),
        IPAddr('ipv4gateway', required=True),
        IPAddr('ipv6gateway', required=True, allow_zone_index=True),
        IPAddr('nameserver1', required=True),
        IPAddr('nameserver2', required=True),
        IPAddr('nameserver3', required=True),
        URI('httpproxy', required=True),
        List('hosts', required=True, items=[Str('host')]),
        List('domains', required=True, items=[Str('domain')]),
        Dict(
            'service_announcement',
            Bool('netbios'),
            Bool('mdns'),
            Bool('wsd'),
            register=True,
        ),
        Dict(
            'activity',
            Str('type', enum=['ALLOW', 'DENY'], required=True),
            List('activities', items=[Str('activity')]),
            strict=True
        ),
        Str('hostname_local', required=True, validators=[Hostname()]),
        Str('hostname_b', validators=[Hostname()], null=True),
        Str('hostname_virtual', validators=[Hostname()], null=True),
        # `state` mirrors what is currently applied on the OS (vs stored config)
        Dict(
            'state',
            IPAddr('ipv4gateway', required=True),
            IPAddr('ipv6gateway', required=True, allow_zone_index=True),
            IPAddr('nameserver1', required=True),
            IPAddr('nameserver2', required=True),
            IPAddr('nameserver3', required=True),
        ),
    )
@private
def read_etc_hosts_file(self):
rv = []
try:
with open('/etc/hosts') as f:
lines = f.read().splitlines()
except FileNotFoundError:
return rv
try:
start_pos = lines.index(HOSTS_FILE_EARMARKER) + 1
except ValueError:
# someone has manually modified file potentially
return rv
try:
for idx in range(start_pos, len(lines)):
if (entry := lines[idx].strip()):
rv.append(entry)
except IndexError:
# mako program should write file with an empty newline
# but if someone manually removes it, make sure we dont
# crash here
return rv
return rv
    @private
    def network_config_extend(self, data):
        """Datastore extend hook: derive `hostname_local`, normalize list
        fields, and attach the live `state` (effective gateways/nameservers)
        gathered from `network.general.summary`."""
        # hostname_local will be used when the hostname of the current machine
        # needs to be used so it works with either TrueNAS SCALE or SCALE_ENTERPRISE
        data['hostname_local'] = data['hostname']
        if not self.middleware.call_sync('system.is_enterprise'):
            # HA-only fields are meaningless on non-enterprise systems
            data.pop('hostname_b')
            data.pop('hostname_virtual')
        else:
            if self.middleware.call_sync('failover.node') == 'B':
                # on the standby node the local hostname is hostname_b
                data['hostname_local'] = data['hostname_b']
        # stored as whitespace-separated string in the db; expose as a list
        data['domains'] = data['domains'].split()
        # stored as a newline-separated blob in the db; expose as a list
        if (hosts := data['hosts'].strip()):
            data['hosts'] = hosts.split('\n')
        else:
            data['hosts'] = []
        data['state'] = {
            'ipv4gateway': '',
            'ipv6gateway': '',
            'nameserver1': '',
            'nameserver2': '',
            'nameserver3': '',
            'hosts': self.read_etc_hosts_file(),
        }
        summary = self.middleware.call_sync('network.general.summary')
        # classify each default route by address family; keep only the first
        # of each family that is seen
        for default_route in summary['default_routes']:
            try:
                ipaddress.IPv4Address(default_route)
            except ValueError:
                # not parseable as IPv4 -> treat as the IPv6 default route
                if not data['state']['ipv6gateway']:
                    data['state']['ipv6gateway'] = default_route
            else:
                if not data['state']['ipv4gateway']:
                    data['state']['ipv4gateway'] = default_route
        # at most three nameservers are reported, mapped to nameserver1..3
        for i, nameserver in enumerate(summary['nameservers'][:3]):
            data['state'][f'nameserver{i + 1}'] = nameserver
        return data
@private
async def validate_nameservers(self, verrors, data, schema):
verrors = ValidationErrors()
ns_ints = []
for ns, ns_value in filter(lambda x: x[0].startswith('nameserver') and x[1], data.items()):
schema = f'{schema}.{ns}'
ns_ints.append(int(ns[-1]))
try:
nameserver_ip = ipaddress.ip_address(ns_value)
except ValueError as e:
verrors.add(schema, str(e))
else:
if nameserver_ip.is_loopback:
verrors.add(schema, 'Loopback is not a valid nameserver')
elif nameserver_ip.is_unspecified:
verrors.add(schema, 'Unspecified addresses are not valid as nameservers')
elif nameserver_ip.version == 4:
if ns_value == '255.255.255.255':
verrors.add(schema, 'This is not a valid nameserver address')
elif ns_value.startswith('169.254'):
verrors.add(schema, '169.254/16 subnet is not valid for nameserver')
len_ns_ints = len(ns_ints)
if len_ns_ints >= 2:
ns_ints = sorted(ns_ints)
for i in range(len_ns_ints - 1):
if ns_ints[i - 1] - ns_ints[i] != 1:
verrors.add(
f'{schema}.nameserver{i}',
'When providing nameservers, they must be provided in consecutive order '
'(i.e. nameserver1, nameserver2, nameserver3)'
)
@private
async def validate_general_settings(self, data, schema):
verrors = ValidationErrors()
await self.validate_nameservers(verrors, data, schema)
if (ipv4_gateway_value := data.get('ipv4gateway')):
if not await self.middleware.call(
'route.ipv4gw_reachable',
ipaddress.ip_address(ipv4_gateway_value).exploded
):
verrors.add(f'{schema}.ipv4gateway', f'Gateway {ipv4_gateway_value} is unreachable')
if (domains := data.get('domains', [])) and len(domains) > 5:
verrors.add(f'{schema}.domains', 'No more than 5 additional domains are allowed')
return verrors
@accepts(Ref('service_announcement'))
@private
async def toggle_announcement(self, data):
announce_srv = {'mdns': 'mdns', 'netbios': 'nmbd', 'wsd': 'wsdd'}
for srv, enabled in data.items():
service_name = announce_srv[srv]
started = await self.middleware.call('service.started', service_name)
verb = None
if enabled:
verb = 'restart' if started else 'start'
else:
verb = 'stop' if started else None
if not verb:
continue
await self.middleware.call(f'service.{verb}', service_name)
    @accepts(
        Patch(
            'network_configuration_entry', 'global_configuration_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'hostname_local'}),
            ('rm', {'name': 'state'}),
            ('attr', {'update': True}),
        ),
    )
    async def do_update(self, data):
        """
        Update Network Configuration Service configuration.

        `ipv4gateway` if set is used instead of the default gateway provided by DHCP.

        `nameserver1` is primary DNS server.

        `nameserver2` is secondary DNS server.

        `nameserver3` is tertiary DNS server.

        `httpproxy` attribute must be provided if a proxy is to be used for network operations.

        `service_announcement` determines the broadcast protocols that will be used to advertise the server.
        `netbios` enables the NetBIOS name server (NBNS), which starts concurrently with the SMB service. SMB clients
        will only perform NBNS lookups if SMB1 is enabled. NBNS may be required for legacy SMB clients.
        `mdns` enables multicast DNS service announcements for enabled services. `wsd` enables Web Service
        Discovery support.
        """
        config = await self.config()
        # `state` is synthesized by the extend method; it is not part of the
        # stored configuration and must not take part in the diff below
        config.pop('state')
        new_config = config.copy()
        new_config.update(data)
        # merge the partial service_announcement dict over the stored one
        new_config['service_announcement'] = config['service_announcement'] | data.get('service_announcement', {})
        if new_config == config:
            # nothing changed so return early
            return await self.config()
        verrors = await self.validate_general_settings(data, 'global_configuration_update')
        # mDNS cannot be disabled while an SMB share is advertised as a
        # time machine target
        filters = [('timemachine', '=', True), ('enabled', '=', True)]
        if not new_config['service_announcement']['mdns'] and await self.middleware.call(
            'sharing.smb.query', filters, {'select': ['enabled', 'timemachine']}
        ):
            verrors.add(
                'global_configuration_update.service_announcement.mdns',
                'NAS is configured as a time machine target. mDNS is required.'
            )
        # figure out whether the local and/or remote controller hostname
        # changed; which db column maps to "local" depends on the HA node
        lhost_changed = rhost_changed = False
        this_node = await self.middleware.call('failover.node')
        if this_node in ('MANUAL', 'A'):
            lhost_changed = config['hostname'] != new_config['hostname']
            rhost_changed = config.get('hostname_b') and config['hostname_b'] != new_config['hostname_b']
        elif this_node == 'B':
            lhost_changed = config['hostname_b'] != new_config['hostname_b']
            rhost_changed = config['hostname'] != new_config['hostname']
        vhost_changed = config.get('hostname_virtual') and config['hostname_virtual'] != new_config['hostname_virtual']
        if vhost_changed:
            # the virtual hostname is baked into the machine account of a
            # joined directory service; changing it would break the join
            ds = await self.middleware.call('directoryservices.status')
            if ds['type'] in (DSType.AD.value, DSType.IPA.value):
                verrors.add(
                    'global_configuration_update.hostname_virtual',
                    'This parameter may not be changed after joining a directory service. '
                    'If it must be changed, the proper procedure is to cleanly leave the domain '
                    'and then alter the parameter before re-joining the domain.'
                )
        verrors.check()
        # pop the `hostname_local` key since that's created in the _extend method
        # and doesn't exist in the database
        new_config.pop('hostname_local', None)
        # lists are flattened back into the db storage format
        new_config['domains'] = ' '.join(new_config.get('domains', []))
        new_config['hosts'] = '\n'.join(new_config.get('hosts', []))
        # update the db
        await self.middleware.call(
            'datastore.update', 'network.globalconfiguration', config['id'], new_config, {'prefix': 'gc_'}
        )
        # (service, verb) pairs to run once all config files are regenerated;
        # a set so the same action is not performed twice
        service_actions = set()
        if lhost_changed:
            await self.middleware.call('etc.generate', 'hostname')
            service_actions.add(('nscd', 'reload'))
        if rhost_changed:
            try:
                await self.middleware.call('failover.call_remote', 'etc.generate', ['hostname'])
            except Exception:
                # best-effort: the standby controller may be unreachable
                self.logger.warning('Failed to set hostname on standby storage controller', exc_info=True)
        # dns domain name changed or /etc/hosts table changed
        licensed = await self.middleware.call('failover.licensed')
        domainname_changed = new_config['domain'] != config['domain']
        hosts_table_changed = new_config['hosts'] != config['hosts']
        if domainname_changed or hosts_table_changed:
            await self.middleware.call('etc.generate', 'hosts')
            service_actions.add(('nscd', 'reload'))
            if licensed:
                try:
                    await self.middleware.call('failover.call_remote', 'etc.generate', ['hosts'])
                except Exception:
                    self.logger.warning(
                        'Unexpected failure updating domain name and/or hosts table on standby controller',
                        exc_info=True
                    )
        # anything related to resolv.conf changed
        dnssearch_changed = new_config['domains'] != config['domains']
        dns1_changed = new_config['nameserver1'] != config['nameserver1']
        dns2_changed = new_config['nameserver2'] != config['nameserver2']
        dns3_changed = new_config['nameserver3'] != config['nameserver3']
        dnsservers_changed = any((dns1_changed, dns2_changed, dns3_changed))
        if dnssearch_changed or dnsservers_changed:
            await self.middleware.call('dns.sync')
            service_actions.add(('nscd', 'reload'))
            if licensed:
                try:
                    await self.middleware.call('failover.call_remote', 'dns.sync')
                except Exception:
                    self.logger.warning('Failed to generate resolv.conf on standby storage controller', exc_info=True)
            await self.middleware.call('system.reload_cli')
        # default gateways changed
        ipv4gw_changed = new_config['ipv4gateway'] != config['ipv4gateway']
        ipv6gw_changed = new_config['ipv6gateway'] != config['ipv6gateway']
        if ipv4gw_changed or ipv6gw_changed:
            await self.middleware.call('route.sync')
            if licensed:
                try:
                    await self.middleware.call('failover.call_remote', 'route.sync')
                except Exception:
                    self.logger.warning('Failed to generate routes on standby storage controller', exc_info=True)
        # kerberized NFS needs to be restarted if these change
        if lhost_changed or vhost_changed or domainname_changed:
            if await self.middleware.call('kerberos.keytab.has_nfs_principal'):
                service_actions.add(('nfs', 'restart'))
        # proxy server has changed
        if new_config['httpproxy'] != config['httpproxy']:
            await self.middleware.call(
                'core.event_send',
                'network.config',
                'CHANGED',
                {'data': {'httpproxy': new_config['httpproxy']}}
            )
        # allowing outbound network activity has been changed
        if new_config['activity'] != config['activity']:
            await self.middleware.call('zettarepl.update_tasks')
        # handle the various service announcement daemons
        announce_changed = new_config['service_announcement'] != config['service_announcement']
        announce_srv = {'mdns': 'mdns', 'netbios': 'nmbd', 'wsd': 'wsdd'}
        if any((lhost_changed, vhost_changed)) or announce_changed:
            # lhost_changed is the local hostname and vhost_changed is the virtual hostname
            # and if either of these change then we need to toggle the service announcement
            # daemons irregardless whether or not these were toggled on their own
            for srv, enabled in new_config['service_announcement'].items():
                service_name = announce_srv[srv]
                started = await self.middleware.call('service.started', service_name)
                verb = None
                if enabled:
                    verb = 'restart' if started else 'start'
                else:
                    verb = 'stop' if started else None
                if not verb:
                    continue
                service_actions.add((service_name, verb))
        for service, verb in service_actions:
            await self.middleware.call(f'service.{verb}', service)
        # NOTE(review): this call re-applies the same restart/start/stop logic
        # that may already be queued in service_actions above, so enabled
        # announcement daemons can be restarted twice — presumably harmless,
        # but worth confirming whether the duplication is intentional
        await self.middleware.call('network.configuration.toggle_announcement', new_config['service_announcement'])
        return await self.config()
| 17,214 | Python | .py | 342 | 38.511696 | 119 | 0.595027 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,776 | static_routes.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/static_routes.py | from ipaddress import ip_interface
from middlewared.api import api_method
from middlewared.api.current import (
StaticRouteEntry,
StaticRouteUpdateArgs,
StaticRouteUpdateResult,
StaticRouteCreateArgs,
StaticRouteCreateResult,
StaticRouteDeleteArgs,
StaticRouteDeleteResult,
)
import middlewared.sqlalchemy as sa
from middlewared.service import CRUDService, private
from middlewared.service_exception import ValidationError
from middlewared.plugins.interface.netif import netif
class StaticRouteModel(sa.Model):
    # ORM model backing the `network.staticroute` datastore
    __tablename__ = 'network_staticroute'
    id = sa.Column(sa.Integer(), primary_key=True)
    sr_destination = sa.Column(sa.String(120))  # destination network (CIDR form)
    sr_gateway = sa.Column(sa.String(42))  # gateway address (IPv4 or IPv6)
    sr_description = sa.Column(sa.String(120))  # free-form user notes
class StaticRouteService(CRUDService):

    class Config:
        datastore = 'network.staticroute'
        datastore_prefix = 'sr_'
        cli_namespace = 'network.static_route'
        entry = StaticRouteEntry

    @api_method(StaticRouteCreateArgs, StaticRouteCreateResult)
    async def do_create(self, data):
        """
        Create a Static Route.

        Address families of `gateway` and `destination` should match when creating a static route.

        `description` is an optional attribute for any notes regarding the static route.
        """
        self._validate('staticroute_create', data)
        id_ = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix},
        )
        # routes are applied by regenerating the routing configuration
        await self.middleware.call('service.restart', 'routing')
        return await self.get_instance(id_)

    @api_method(StaticRouteUpdateArgs, StaticRouteUpdateResult)
    async def do_update(self, id_, data):
        """
        Update Static Route of `id`.
        """
        old = await self.get_instance(id_)
        new = {**old, **data}
        self._validate('staticroute_update', new)
        await self.middleware.call(
            'datastore.update', self._config.datastore, id_, new,
            {'prefix': self._config.datastore_prefix},
        )
        await self.middleware.call('service.restart', 'routing')
        return await self.get_instance(id_)

    @api_method(StaticRouteDeleteArgs, StaticRouteDeleteResult)
    def do_delete(self, id_):
        """
        Delete Static Route of `id`.
        """
        route = self.middleware.call_sync('staticroute.get_instance', id_)
        result = self.middleware.call_sync('datastore.delete', self._config.datastore, id_)
        try:
            # best-effort removal from the live routing table; the db entry
            # is gone either way
            netif.RoutingTable().delete(self._netif_route(route))
        except Exception:
            self.logger.exception('Failed to delete static route %r', route['destination'])
        return result

    @private
    def sync(self):
        """Reconcile the kernel routing table with the configured static routes."""
        wanted = [self._netif_route(r) for r in self.middleware.call_sync('staticroute.query')]
        rt = netif.RoutingTable()
        # snapshot the default routes so they are never treated as stale
        defaults = [rt.default_route_ipv4, rt.default_route_ipv6]
        for route in rt.routes:
            if route in wanted:
                # already present in the kernel; nothing to add later
                wanted.remove(route)
                continue
            if route not in defaults and route.gateway is not None:
                self.logger.debug('Removing route %r', route.asdict())
                try:
                    rt.delete(route)
                except Exception:
                    self.logger.exception('Failed to remove route')
        for route in wanted:
            self.logger.debug('Adding route %r', route.asdict())
            try:
                rt.add(route)
            except Exception:
                self.logger.exception('Failed to add route')

    def _validate(self, schema_name, data):
        # normalize destination/gateway in place, enforcing matching families
        dst = ip_interface(data.pop('destination'))
        gw = ip_interface(data.pop('gateway'))
        if dst.version != gw.version:
            raise ValidationError(
                f'{schema_name}.destination',
                'Destination and gateway address families must match'
            )
        data['destination'] = dst.exploded
        data['gateway'] = gw.ip.exploded

    def _netif_route(self, staticroute):
        # translate a datastore row into a netif.Route object
        dest = ip_interface(staticroute['destination'])
        return netif.Route(
            str(dest.ip), str(dest.netmask), gateway=staticroute['gateway']
        )
| 4,501 | Python | .py | 107 | 32.869159 | 99 | 0.639963 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,777 | dns.py | truenas_middleware/src/middlewared/middlewared/plugins/network_/dns.py | import contextlib
import ipaddress
import re
import subprocess
import tempfile
from middlewared.service import Service, filterable, filterable_returns, private
from middlewared.schema import accepts, Bool, Dict, Int, IPAddr, List, Str, ValidationErrors
from middlewared.utils import filter_list, MIDDLEWARE_RUN_DIR
from middlewared.plugins.interface.netif import netif
from middlewared.service_exception import CallError
class DNSService(Service):

    class Config:
        cli_namespace = 'network.dns'

    @filterable
    @filterable_returns(Dict('nameserver', IPAddr('nameserver', required=True)))
    def query(self, filters, options):
        """
        Query Name Servers with `query-filters` and `query-options`.
        """
        ips = []
        with contextlib.suppress(Exception):
            with open('/etc/resolv.conf') as f:
                for line in filter(lambda x: x.startswith('nameserver'), f):
                    ip = line[len('nameserver'):].strip()
                    try:
                        IPAddr().validate(ip)  # make sure it's a valid IP (better safe than sorry)
                    except ValidationErrors:
                        self.logger.warning('IP %r in resolv.conf does not seem to be valid', ip)
                    else:
                        ip = {'nameserver': ip}
                        if ip not in ips:
                            ips.append(ip)
        return filter_list(ips, filters, options)

    @private
    def sync(self):
        """Regenerate /etc/resolv.conf from the global network configuration."""
        domain = ''
        domains = []
        nameservers = []
        gc = self.middleware.call_sync('datastore.query', 'network.globalconfiguration')[0]
        if gc['gc_domain']:
            domain = gc['gc_domain']
        if gc['gc_domains']:
            domains = gc['gc_domains'].split()
        if gc['gc_nameserver1']:
            nameservers.append(gc['gc_nameserver1'])
        if gc['gc_nameserver2']:
            nameservers.append(gc['gc_nameserver2'])
        if gc['gc_nameserver3']:
            nameservers.append(gc['gc_nameserver3'])
        resolvconf = ''
        if domain:
            resolvconf += 'domain {}\n'.format(domain)
        if domains:
            resolvconf += 'search {}\n'.format(' '.join(domains))
        resolvconf += self.middleware.call_sync('dns.configure_nameservers', nameservers)
        try:
            with open('/etc/resolv.conf', 'w') as f:
                f.write(resolvconf)
        except Exception:
            self.logger.error('Failed to write /etc/resolv.conf', exc_info=True)

    @private
    def configure_nameservers(self, nameservers):
        """Return the `nameserver ...` lines for resolv.conf.

        Explicitly configured nameservers win; otherwise fall back to any
        nameservers handed out via DHCP on the relevant interfaces.
        """
        result = ''
        if nameservers:
            # means nameservers are configured explicitly so add them
            for i in nameservers:
                result += f'nameserver {i}\n'
        else:
            # means there aren't any nameservers configured so let's
            # check to see if dhcp is running on any of the interfaces
            # and if there are, then check dhclient leases file for
            # nameservers that were handed to us via dhcp
            interfaces = self.middleware.call_sync('datastore.query', 'network.interfaces')
            if interfaces:
                interfaces = [i['int_interface'] for i in interfaces if i['int_dhcp']]
            else:
                # no configured interfaces; probe every non-internal one
                ignore = tuple(self.middleware.call_sync('interface.internal_interfaces'))
                interfaces = list(filter(lambda x: not x.startswith(ignore), netif.list_interfaces().keys()))
            dns_from_dhcp = set()
            for iface in interfaces:
                dhclient_running, dhclient_pid = self.middleware.call_sync('interface.dhclient_status', iface)
                if dhclient_running:
                    leases = self.middleware.call_sync('interface.dhclient_leases', iface)
                    for dns_srvs in re.findall(r'option domain-name-servers (.+)', leases or ''):
                        for dns in dns_srvs.split(';')[0].split(','):
                            dns_from_dhcp.add(f'nameserver {dns.strip()}\n')
            for dns in dns_from_dhcp:
                result += dns
        return result

    @accepts(Dict(
        'nsupdate_info',
        Bool('use_kerberos', default=True),
        List(
            'ops',
            required=True,
            unique=True,
            items=[
                Dict(
                    Str('command', enum=['ADD', 'DELETE'], required=True),
                    Str('name', required=True),
                    Str('type', enum=['A', 'AAAA'], default='A'),
                    Int('ttl', default=3600),
                    IPAddr('address', required=True, excluded_address_types=[
                        'MULTICAST', 'LOOPBACK', 'LINK_LOCAL', 'RESERVED'
                    ]),
                    Bool('do_ptr', default=True)
                )
            ],
        ),
        Int('timeout', default=30),
    ))
    @private
    def nsupdate(self, data):
        """Apply the requested dynamic DNS updates via nsupdate(1).

        A/AAAA updates are sent first, then (after an implicit `send`) the
        matching PTR updates for entries with `do_ptr`.
        """
        if data['use_kerberos']:
            self.middleware.call_sync('kerberos.check_ticket')
        if len(data['ops']) == 0:
            raise CallError('At least one nsupdate command must be specified')
        with tempfile.NamedTemporaryFile(dir=MIDDLEWARE_RUN_DIR) as tmpfile:
            ptrs = []
            for entry in data['ops']:
                addr = ipaddress.ip_address(entry['address'])
                if entry['type'] == 'A' and not addr.version == 4:
                    raise CallError(f'{addr.compressed}: not an IPv4 address')
                if entry['type'] == 'AAAA' and not addr.version == 6:
                    raise CallError(f'{addr.compressed}: not an IPv6 address')
                directive = ' '.join([
                    'update',
                    entry['command'].lower(),
                    entry['name'],
                    str(entry['ttl']),
                    entry['type'],
                    addr.compressed,
                    '\n'
                ])
                tmpfile.write(directive.encode())
                if entry['do_ptr']:
                    # BUGFIX: remember this entry's command and ttl alongside
                    # the pointer data; previously the PTR loop below reused
                    # the leaked loop variable `entry`, so every PTR record
                    # was written with the *last* op's command and ttl.
                    ptrs.append((
                        entry['command'].lower(), str(entry['ttl']),
                        addr.reverse_pointer, entry['name']
                    ))
            if ptrs:
                # additional newline means "send"
                # in this case we send our A and AAAA changes
                # prior to sending our PTR changes
                tmpfile.write(b'\n')
            for command, ttl, reverse_pointer, name in ptrs:
                directive = ' '.join([
                    'update',
                    command,
                    reverse_pointer,
                    ttl,
                    'PTR',
                    name,
                    '\n'
                ])
                tmpfile.write(directive.encode())
            tmpfile.write(b'send\n')
            tmpfile.file.flush()
            cmd = ['nsupdate', '-t', str(data['timeout'])]
            if data['use_kerberos']:
                cmd.append('-g')
            cmd.append(tmpfile.name)
            nsupdate_proc = subprocess.run(cmd, capture_output=True)
            # tsig verify failure is possible if reverse zone is misconfigured
            # Unfortunately, this is quite common and so we have to skip it.
            #
            # Future enhancement can be to perform forward-lookups to validate
            # changes were applied properly
            if nsupdate_proc.returncode and 'tsig verify failure' not in nsupdate_proc.stderr.decode():
                raise CallError(f'nsupdate failed: {nsupdate_proc.stderr.decode()}')
| 7,600 | Python | .py | 168 | 31.238095 | 110 | 0.534711 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,778 | on_config_upload.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/on_config_upload.py | import subprocess
import sqlite3
from middlewared.service_exception import MatchNotFound
def on_config_upload(middleware, path):
    """Sync systemd unit enable/disable state with the services table of an
    uploaded configuration database at `path`.

    For SCALE, services are enabled/disabled at the systemd level, so after a
    database upload the units' enablement must be reconciled with the
    `services_services` table in the uploaded db.
    """
    enable_disable_units = {'enable': [], 'disable': []}
    conn = sqlite3.connect(path)
    try:
        cursor = conn.cursor()
        for service, enabled in cursor.execute('SELECT srv_service, srv_enable FROM services_services').fetchall():
            try:
                units = middleware.call_sync('service.systemd_units', service)
            except MatchNotFound:
                # An old service which we don't have currently
                continue
            if enabled:
                enable_disable_units['enable'].extend(units)
            else:
                enable_disable_units['disable'].extend(units)
    finally:
        conn.close()

    # only touch units whose current enablement differs from the desired one
    need_enabled = []
    need_disabled = []
    for action, services in enable_disable_units.items():
        if not services:
            # avoid invoking `systemctl is-enabled` with no unit arguments
            continue
        cp = subprocess.run(['systemctl', 'is-enabled'] + services, stdout=subprocess.PIPE, encoding='utf8')
        for service, line in zip(services, cp.stdout.split('\n')):
            if (line := line.strip()):
                if line == 'disabled' and action == 'enable':
                    need_enabled.append(service)
                elif line == 'enabled' and action == 'disable':
                    need_disabled.append(service)

    if need_enabled:
        cp = subprocess.run(
            ['systemctl', 'enable'] + need_enabled,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        if cp.returncode:
            middleware.logger.error(
                'Failed to enable systemd units %r with error %r',
                ', '.join(need_enabled), cp.stdout.decode()
            )

    if need_disabled:
        cp = subprocess.run(
            ['systemctl', 'disable'] + need_disabled,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        if cp.returncode:
            # BUGFIX: this message previously joined `need_enabled`, logging
            # the wrong unit list for disable failures
            middleware.logger.error(
                'Failed to disable systemd units %r with error %r',
                ', '.join(need_disabled), cp.stdout.decode()
            )
async def setup(middleware):
    """Plugin entry point: register the config-upload hook."""
    # sync=True: the unit reconciliation must finish before the upload
    # request is considered complete
    middleware.register_hook('config.on_upload', on_config_upload, sync=True)
| 2,293 | Python | .py | 53 | 32.566038 | 115 | 0.595067 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,779 | systemd_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/systemd_linux.py | from middlewared.service import private, Service
class ServiceService(Service):

    @private
    async def systemd_units(self, name):
        """Return the systemd unit names backing service `name`.

        Returns an empty list when the service object does not map to a
        systemd unit (its `systemd_unit` attribute was left as the
        `NotImplemented` sentinel).
        """
        service = await self.middleware.call('service.object', name)
        # PEP 8: comparison against a singleton sentinel uses identity,
        # not equality
        if service.systemd_unit is NotImplemented:
            return []
        else:
            return [service.systemd_unit] + await service.systemd_extra_units()
| 373 | Python | .py | 9 | 33.888889 | 79 | 0.684211 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,780 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/utils.py | import enum
from middlewared.utils.privilege import credential_has_full_admin
class ServiceWriteRole(enum.Enum):
    # maps an upper-cased service name to the RBAC role that grants
    # permission to configure that service
    CIFS = 'SHARING_SMB_WRITE'
    NFS = 'SHARING_NFS_WRITE'
    ISCSITARGET = 'SHARING_ISCSI_WRITE'
    FTP = 'SHARING_FTP_WRITE'
def app_has_write_privilege_for_service(
app: object | None,
service: str
) -> bool:
if app is None:
# Internal middleware call
return True
if app.authenticated_credentials is None:
return False
if not app.authenticated_credentials.is_user_session:
return True
if credential_has_full_admin(app.authenticated_credentials):
return True
if app.authenticated_credentials.has_role('SERVICES_WRITE'):
return True
try:
required_role = ServiceWriteRole[service.upper()]
except KeyError:
return False
return app.authenticated_credentials.has_role(required_role.value)
| 917 | Python | .py | 27 | 28.148148 | 70 | 0.713636 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,781 | snmp.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/snmp.py | from .base import SimpleService
class SNMPService(SimpleService):
    """snmpd plus the middleware AgentX sub-agent (`snmp-agent`)."""
    name = "snmp"
    etc = ["snmpd"]
    systemd_unit = "snmpd"
    async def systemd_extra_units(self):
        # the sub-agent unit is managed together with snmpd
        return ["snmp-agent"]
    async def start(self):
        # snmpd must be up before the AgentX sub-agent connects to it
        await super().start()
        await self._systemd_unit("snmp-agent", "start")
    async def stop(self):
        # tear down in reverse order: sub-agent first, then snmpd
        await self._systemd_unit("snmp-agent", "stop")
        await super().stop()
| 432 | Python | .py | 13 | 26.769231 | 55 | 0.633495 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,782 | ssh.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/ssh.py | from .base import SimpleService
class SSHService(SimpleService):
    """sshd wrapper; host keys are persisted after every start/reload."""
    name = "ssh"
    reloadable = True
    etc = ["ssh"]
    systemd_unit = "ssh"
    async def after_start(self):
        # persist (possibly freshly generated) host keys to the database
        await self.middleware.call("ssh.save_keys")
    async def after_reload(self):
        await self.middleware.call("ssh.save_keys")
| 324 | Python | .py | 10 | 26.8 | 51 | 0.678571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,783 | incus.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/incus.py | import os
import re
import signal
from pystemd.systemd1 import Unit
from middlewared.plugins.service_.services.base import SimpleService
from middlewared.plugins.virt.websocket import IncusWS
# matches the `pid: <n>` line in incus' dnsmasq pid file
RE_DNSMASQ_PID = re.compile(r'^pid: (\d+)', flags=re.M)


class IncusService(SimpleService):
    name = "incus"
    etc = ["subids"]
    systemd_unit = "incus"

    async def start(self):
        await super().start()
        # bring up the incus websocket event bridge once the daemon runs
        await IncusWS().start()

    async def stop(self):
        await IncusWS().stop()
        await self._unit_action("Stop")
        # incus.socket needs to be stopped in addition to the service,
        # otherwise socket activation would relaunch the daemon
        socket_unit = Unit("incus.socket")
        socket_unit.load()
        await self._unit_action("Stop", unit=socket_unit)
        await self.middleware.run_in_thread(self._stop_dnsmasq)

    def _stop_dnsmasq(self):
        # Incus runs dnsmasq for its managed network and does not stop it
        # when the service is stopped; kill it via the recorded pid.
        pid_path = '/var/lib/incus/networks/incusbr0/dnsmasq.pid'
        if not os.path.exists(pid_path):
            return
        try:
            with open(pid_path) as f:
                contents = f.read()
        except FileNotFoundError:
            # raced with removal between the exists() check and open()
            return
        match = RE_DNSMASQ_PID.search(contents)
        if match:
            os.kill(int(match.group(1)), signal.SIGTERM)
| 1,320 | Python | .py | 34 | 30.235294 | 72 | 0.626959 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,784 | netbios.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/netbios.py | from .base import SimpleService
class NetBIOSService(SimpleService):
    """NetBIOS name server (nmbd), used for legacy SMB name resolution."""
    name = "nmbd"
    systemd_unit = "nmbd"
| 116 | Python | .py | 4 | 25.25 | 36 | 0.752294 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,785 | sssd.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/sssd.py | from .base import SimpleService
class SSSDService(SimpleService):
    """System Security Services Daemon, used for LDAP-backed accounts."""
    name = "sssd"
    systemd_unit = "sssd"
    async def before_start(self):
        # sssd requires its runtime/cache directories to exist before launch
        await self.middleware.call('ldap.create_sssd_dirs')
| 208 | Python | .py | 6 | 29.666667 | 59 | 0.722222 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,786 | nscd.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/nscd.py | from .base import SimpleService
class NSCDService(SimpleService):
    """Name service cache daemon (caches passwd/group/host lookups)."""
    name = "nscd"
    reloadable = True
    etc = ["nscd"]
    systemd_unit = "nscd"
| 155 | Python | .py | 6 | 21.5 | 33 | 0.689655 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,787 | wsd.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/wsd.py | from .base import SimpleService
class WSDService(SimpleService):
    """Web Service Discovery daemon (Windows network neighborhood)."""
    name = "wsdd"
    etc = ["wsd"]
    systemd_unit = "wsdd"
| 131 | Python | .py | 5 | 22 | 32 | 0.688525 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,788 | netdata.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/netdata.py | from middlewared.plugins.service_.services.base import SimpleService
class NetdataService(SimpleService):
    """netdata metrics collector backing the reporting subsystem."""
    name = 'netdata'
    etc = ['netdata']
    restartable = True
    systemd_unit = 'netdata'
| 205 | Python | .py | 6 | 29.833333 | 68 | 0.748718 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,789 | base_state.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/base_state.py | from collections import namedtuple
import logging
logger = logging.getLogger(__name__)
# (running: bool, pids: list[int]) — snapshot of a service's run state
ServiceState = namedtuple("ServiceState", ["running", "pids"])
| 152 | Python | .py | 4 | 36.5 | 62 | 0.787671 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,790 | mdns.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/mdns.py | from .base import SimpleService
class MDNSService(SimpleService):
    name = "mdns"
    reloadable = True
    etc = ["mdns"]
    systemd_unit = "avahi-daemon"

    async def start(self):
        return await self._systemd_unit("avahi-daemon", "start")

    async def reload(self):
        """Reload avahi, but only when mDNS announcement is enabled."""
        config = await self.middleware.call("network.configuration.config")
        if not config["service_announcement"]["mdns"]:
            return
        # a stopped daemon cannot be reloaded; start it instead
        state = await self.get_state()
        verb = "reload" if state.running else "start"
        return await self._systemd_unit("avahi-daemon", verb)
| 595 | Python | .py | 15 | 32.6 | 103 | 0.657941 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,791 | ftp.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/ftp.py | from .base import SimpleService
class FTPService(SimpleService):
    """proftpd-backed FTP service."""
    name = "ftp"
    reloadable = True
    etc = ["ftp"]
    systemd_unit = "proftpd"
| 155 | Python | .py | 6 | 21.5 | 32 | 0.689655 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,792 | base.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/base.py | import logging
import select
import subprocess
from pystemd.base import SDObject
from pystemd.dbusexc import DBusUnknownObjectError
from pystemd.dbuslib import DBus
from pystemd.systemd1 import Unit
from systemd import journal
from middlewared.utils import run
from .base_interface import ServiceInterface, IdentifiableServiceInterface
from .base_state import ServiceState
logger = logging.getLogger(__name__)
class Job(SDObject):
    # Thin pystemd wrapper around a transient systemd job object
    # (an /org/freedesktop/systemd1/job/... DBus path) so that the job's
    # existence/completion can be queried.
    def __init__(self, job, bus=None, _autoload=False):
        super().__init__(
            destination=b"org.freedesktop.systemd1",
            path=job,
            bus=bus,
            _autoload=_autoload,
        )
class SimpleService(ServiceInterface, IdentifiableServiceInterface):
    """Base class for middleware services that map 1:1 onto a systemd unit."""
    # unit name without the `.service` suffix; subclasses must set this
    systemd_unit = NotImplemented
    # when True, the `activating` unit state is also treated as running
    systemd_async_start = False
    # seconds to wait for a start/stop/restart/reload job to complete
    systemd_unit_timeout = 5
    async def systemd_extra_units(self):
        # extra units managed alongside the main one (e.g. sub-agents)
        return []
    async def get_state(self):
        # pystemd/DBus calls are blocking, so run them in a thread
        return await self.middleware.run_in_thread(self._get_state_sync)
    def _get_state_sync(self):
        unit = self._get_systemd_unit()
        state = unit.Unit.ActiveState
        if state == b"active" or (self.systemd_async_start and state == b"activating"):
            # filter(None, ...) drops MainPID == 0 (no main process)
            return ServiceState(True, list(filter(None, [unit.MainPID])))
        else:
            return ServiceState(False, [])
    async def get_unit_state(self):
        return await self.middleware.run_in_thread(self._get_unit_state_sync)
    def _get_unit_state_sync(self):
        # raw systemd ActiveState string, e.g. "active", "failed"
        unit = self._get_systemd_unit()
        state = unit.Unit.ActiveState
        return state.decode("utf-8")
    async def start(self):
        await self._unit_action("Start")
    async def stop(self):
        await self._unit_action("Stop")
    async def restart(self):
        await self._unit_action("Restart")
    async def reload(self):
        await self._unit_action("Reload")
    async def identify(self, procname):
        # systemd-backed services are identified via their unit, not procname
        pass
    async def failure_logs(self):
        return await self.middleware.run_in_thread(self._unit_failure_logs)
    def _get_systemd_unit(self):
        unit = Unit(self._get_systemd_unit_name())
        unit.load()
        return unit
    def _get_systemd_unit_name(self):
        # pystemd expects bytes, e.g. b"sshd.service"
        return f"{self.systemd_unit}.service".encode()
    async def _unit_action(self, action, wait=True, unit=None):
        return await self.middleware.run_in_thread(self._unit_action_sync, action, wait, self.systemd_unit_timeout, unit=unit)
    def _unit_action_sync(self, action, wait, timeout, unit=None):
        # Invoke a unit method (Start/Stop/Restart/Reload) and, if `wait`,
        # block until systemd signals the resulting job has been removed
        # (i.e. completed) or `timeout` seconds elapse.
        if unit is None:
            unit = self._get_systemd_unit()
        # b"replace" is the systemd job mode: replace conflicting queued jobs
        job = getattr(unit.Unit, action)(b"replace")
        if wait:
            with DBus() as bus:
                done = False
                def callback(msg, error=None, userdata=None):
                    nonlocal done
                    msg.process_reply(True)
                    # JobRemoved signal body: (id, job_path, unit, result);
                    # only react to the job we started
                    if msg.body[1] == job:
                        done = True
                bus.match_signal(
                    b"org.freedesktop.systemd1",
                    b"/org/freedesktop/systemd1",
                    b"org.freedesktop.systemd1.Manager",
                    b"JobRemoved",
                    callback,
                    None,
                )
                job_object = Job(job, bus)
                try:
                    job_object.load()
                except DBusUnknownObjectError:
                    # Job has already completed
                    return
                fd = bus.get_fd()
                while True:
                    # wait for DBus traffic; bail out on timeout
                    fds = select.select([fd], [], [], timeout)
                    if not any(fds):
                        break
                    bus.process()
                    if done:
                        break
    async def _systemd_unit(self, unit, verb):
        # escape hatch: drive an arbitrary unit via `systemctl <verb> <unit>`
        await systemd_unit(unit, verb)
    def _unit_failure_logs(self):
        # Collect journal records relevant to this unit since it last left
        # the inactive state, mirroring what `journalctl -u <unit>` shows.
        unit = self._get_systemd_unit()
        unit_name = self._get_systemd_unit_name()
        j = journal.Reader()
        # InactiveExitTimestampMonotonic is in microseconds
        j.seek_monotonic(unit.Unit.InactiveExitTimestampMonotonic / 1e6)
        # copied from `https://github.com/systemd/systemd/blob/main/src/shared/logs-show.c`,
        # `add_matches_for_unit` function
        # Look for messages from the service itself
        j.add_match(_SYSTEMD_UNIT=unit_name)
        # Look for coredumps of the service
        j.add_disjunction()
        j.add_match(MESSAGE_ID=b"fc2e22bc6ee647b6b90729ab34a250b1")
        j.add_match(_UID=0)
        j.add_match(COREDUMP_UNIT=unit_name)
        # Look for messages from PID 1 about this service
        j.add_disjunction()
        j.add_match(_PID=1)
        j.add_match(UNIT=unit_name)
        # Look for messages from authorized daemons about this service
        j.add_disjunction()
        j.add_match(_UID=0)
        j.add_match(OBJECT_SYSTEMD_UNIT=unit_name)
        return "\n".join([
            f"{record['__REALTIME_TIMESTAMP'].strftime('%b %d %H:%M:%S')} "
            f"{record.get('SYSLOG_IDENTIFIER')}[{record.get('_PID', 0)}]: {record['MESSAGE']}"
            for record in j
        ])
async def systemd_unit(unit, verb):
    """Execute ``systemctl <verb> <unit>`` and return the completed process.

    Failures are logged (with captured output) rather than raised, so callers
    can inspect the returncode themselves.
    """
    proc = await run(
        "systemctl", verb, unit,
        check=False, encoding="utf-8", stderr=subprocess.STDOUT,
    )
    if proc.returncode:
        logger.warning("%s %s failed with code %d: %r", unit, verb, proc.returncode, proc.stdout)
    return proc
| 5,372 | Python | .py | 129 | 31.209302 | 126 | 0.601578 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,793 | all.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/all.py | from .cifs import CIFSService
from .docker import DockerService
from .ftp import FTPService
from .incus import IncusService
from .iscsitarget import ISCSITargetService
from .mdns import MDNSService
from .netbios import NetBIOSService
from .netdata import NetdataService
from .nfs import NFSService
from .nscd import NSCDService
from .smartd import SMARTDService
from .snmp import SNMPService
from .ssh import SSHService
from .sssd import SSSDService
from .truecommand import TruecommandService
from .ups import UPSService
from .wsd import WSDService
from .keepalived import KeepalivedService
from .idmap import IdmapService
from .openipmi import OpenIpmiService
from .pseudo.libvirtd import LibvirtdService, LibvirtGuestService
from .pseudo.misc import (
CronService,
KmipService,
LoaderService,
HostnameService,
HttpService,
NetworkService,
NetworkGeneralService,
NfsMountdService,
NtpdService,
OpenVmToolsService,
PowerdService,
RcService,
ResolvConfService,
RoutingService,
SslService,
SyslogdService,
TimeservicesService,
UserService,
)
# Master registry of every service definition the service plugin manages.
# Each entry is a ServiceInterface subclass imported above; order here is the
# order in which they are registered.
all_services = [
    CIFSService,
    DockerService,
    FTPService,
    ISCSITargetService,
    MDNSService,
    NetBIOSService,
    NFSService,
    NSCDService,
    SMARTDService,
    SNMPService,
    SSHService,
    SSSDService,
    UPSService,
    WSDService,
    NetdataService,
    IdmapService,
    OpenIpmiService,
    KeepalivedService,
    OpenVmToolsService,
    LibvirtdService,
    LibvirtGuestService,
    CronService,
    KmipService,
    IncusService,
    LoaderService,
    HostnameService,
    HttpService,
    NetworkService,
    NetworkGeneralService,
    NfsMountdService,
    NtpdService,
    PowerdService,
    RcService,
    ResolvConfService,
    RoutingService,
    SslService,
    SyslogdService,
    TimeservicesService,
    TruecommandService,
    UserService,
]
| 1,907 | Python | .py | 83 | 19.156627 | 65 | 0.78595 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,794 | iscsitarget.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/iscsitarget.py | import asyncio
from middlewared.utils import run
from .base import SimpleService
class ISCSITargetService(SimpleService):
    """SCST-based iSCSI target service, including HA/ALUA coordination hooks."""

    name = "iscsitarget"
    reloadable = True
    # scst can take a while to come up; "activating" counts as running
    systemd_async_start = True
    etc = ["scst", "scst_targets"]
    systemd_unit = "scst"

    async def _wait_to_avoid_states(self, states, retries=10):
        """Poll the unit state up to `retries` times (one second apart) until it
        is no longer in any of `states`.

        Logs the outcome only when we actually had to wait.
        """
        initial_retries = retries
        curstate = None  # defensive: stays None if called with retries <= 0
        while retries > 0:
            curstate = await self.middleware.call("service.get_unit_state", self.name)
            if curstate not in states:
                break
            retries -= 1
            await asyncio.sleep(1)
        if retries != initial_retries:
            # Spelling fixed ("unsucessfully"/"sucessfully" in the original).
            if curstate in states:
                self.middleware.logger.debug(f'Waited unsuccessfully for {self.name} to enter {curstate} state')
            else:
                self.middleware.logger.debug(f'Waited successfully for {self.name} to enter {curstate} state')

    async def before_start(self):
        await self.middleware.call("iscsi.alua.before_start")
        # Because we are a systemd_async_start service, it is possible that
        # a start could be requested while a stop is still in progress.
        if await self.middleware.call("failover.in_progress"):
            # Shorter wait during failover so we don't stall the event
            await self._wait_to_avoid_states(['deactivating'], 5)
        else:
            await self._wait_to_avoid_states(['deactivating'])

    async def after_start(self):
        await self.middleware.call("iscsi.host.injection.start")
        await self.middleware.call("iscsi.alua.after_start")

    async def before_stop(self):
        await self.middleware.call("iscsi.alua.before_stop")
        await self.middleware.call("iscsi.host.injection.stop")

    async def reload(self):
        """Reapply /etc/scst.conf via scstadmin; return True on success."""
        return (await run(
            ["scstadmin", "-noprompt", "-force", "-config", "/etc/scst.conf"], check=False
        )).returncode == 0

    async def become_active(self):
        """If we are becoming the ACTIVE node on a HA system, and if SCST was already loaded
        then we can perform a shortcut operation to switch from being the STANDBY node to the
        ACTIVE one, *without* restarting SCST, but just by reconfiguring it."""
        if await self.middleware.call('iscsi.global.alua_enabled'):
            if await self.middleware.call('iscsi.scst.is_kernel_module_loaded'):
                try:
                    return await self.middleware.call("iscsi.alua.become_active")
                except Exception:
                    # Bug fix: SimpleService/ServiceInterface define no `logger`
                    # attribute, so the original `self.logger.warning(...)` would
                    # itself raise; use the middleware logger like the rest of
                    # this class does.
                    self.middleware.logger.warning('Failover exception', exc_info=True)
                    # Fall through
        # Fallback to doing a regular restart
        return await self.middleware.call('service.restart', self.name, {'ha_propagate': False})
| 2,702 | Python | .py | 53 | 41 | 111 | 0.647706 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,795 | idmap.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/idmap.py | from .base import SimpleService
class IdmapService(SimpleService):
    """Identity-mapping service backed by the winbind systemd unit."""

    name = "idmap"
    reloadable = True
    restartable = True
    systemd_unit = "winbind"

    async def healthy(self):
        """True once SMB configuration has completed and winbind may be touched."""
        return await self.middleware.call("smb.configure_wait")

    async def _winbind(self, verb):
        # Gate every winbind unit action on SMB configuration having settled;
        # returns None without acting when it has not.
        if not await self.healthy():
            return None
        return await self._systemd_unit("winbind", verb)

    async def start(self):
        await self._winbind("start")

    async def restart(self):
        return await self._winbind("restart")

    async def reload(self):
        return await self._winbind("reload")
| 691 | Python | .py | 20 | 26.85 | 63 | 0.639939 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,796 | openipmi.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/openipmi.py | from middlewared.plugins.service_.services.base import SimpleService
class OpenIpmiService(SimpleService):
    """OpenIPMI daemon service; a thin mapping onto the `openipmi` systemd unit."""

    name = "openipmi"
    systemd_unit = "openipmi"
| 161 | Python | .py | 4 | 36.75 | 68 | 0.8 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
class ServiceInterface:
    """Abstract base every middleware-managed service implements.

    Subclasses override the lifecycle coroutines they support and advertise
    their capabilities through the class attributes below. The before_*/after_*
    methods are optional hooks and default to doing nothing.
    """

    # Short service identifier, e.g. "nfs".
    name = NotImplemented
    etc = []
    # Implements `restart` method instead of `stop` + `start`
    restartable = False
    # Implements `reload` method
    reloadable = False
    # Alert if service is running
    deprecated = False

    def __init__(self, middleware):
        self.middleware = middleware

    async def get_state(self):
        raise NotImplementedError

    async def get_unit_state(self):
        raise NotImplementedError

    async def become_active(self):
        raise NotImplementedError

    async def become_standby(self):
        raise NotImplementedError

    async def check_configuration(self):
        """Optional pre-flight configuration check; no-op unless overridden."""

    async def start(self):
        raise NotImplementedError

    async def before_start(self):
        """Hook run immediately before `start`."""

    async def after_start(self):
        """Hook run immediately after `start`."""

    async def stop(self):
        raise NotImplementedError

    async def before_stop(self):
        """Hook run immediately before `stop`."""

    async def after_stop(self):
        """Hook run immediately after `stop`."""

    async def restart(self):
        raise NotImplementedError

    async def before_restart(self):
        """Hook run immediately before `restart`."""

    async def after_restart(self):
        """Hook run immediately after `restart`."""

    async def reload(self):
        raise NotImplementedError

    async def before_reload(self):
        """Hook run immediately before `reload`."""

    async def after_reload(self):
        """Hook run immediately after `reload`."""
class IdentifiableServiceInterface:
    """Mixin for services that can tell whether a process name belongs to them."""

    async def identify(self, procname):
        # Must be overridden by implementers.
        raise NotImplementedError
| 1,407 | Python | .py | 45 | 24.288889 | 82 | 0.677852 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,798 | truecommand.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/truecommand.py | from middlewared.plugins.truecommand.utils import WIREGUARD_INTERFACE_NAME
from .base import SimpleService
class TruecommandService(SimpleService):
    """TrueCommand connection service, backed by a WireGuard tunnel unit."""

    name = 'truecommand'
    etc = ['rc', 'truecommand']
    # wg-quick template unit instantiated with the TrueCommand wireguard interface
    systemd_unit = f'wg-quick@{WIREGUARD_INTERFACE_NAME}'
| 268 | Python | .py | 6 | 40.833333 | 74 | 0.782101 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,799 | nfs.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/nfs.py | import os
from .base import SimpleService
class NFSService(SimpleService):
    """NFS server service (the nfs-server unit plus its rpc helper units)."""

    name = "nfs"
    reloadable = True
    systemd_unit_timeout = 10
    etc = ["nfsd"]
    systemd_unit = "nfs-server"

    def check_exportsd_dir(self):
        """Return the names of regular, non-hidden files in /etc/exports.d.

        A missing /etc/exports.d (or a non-directory at that path) yields an
        empty list.
        """
        found = []
        try:
            with os.scandir('/etc/exports.d') as entries:
                found = [
                    entry.name
                    for entry in entries
                    if entry.is_file() and not entry.name.startswith('.')
                ]
        except (FileNotFoundError, NotADirectoryError):
            pass
        return found

    async def check_configuration(self):
        # Raise an alert when /etc/exports.d contains entries that would
        # conflict with our managed exports; clear it otherwise.
        blockers = await self.middleware.run_in_thread(self.check_exportsd_dir)
        if blockers:
            await self.middleware.call('alert.oneshot_create', 'NFSblockedByExportsDir', {'entries': blockers})
        else:
            await self.middleware.call('alert.oneshot_delete', 'NFSblockedByExportsDir')

    async def before_start(self):
        # If available, make sure the procfs nfsv4recoverydir entry has the correct info.
        # Usually the update should be done _before_ nfsd is running.
        # Sometimes, after a reboot, the proc entry may not exist and that's ok.
        await self.middleware.call('nfs.update_procfs_v4recoverydir')

    async def after_start(self):
        # This is to cover the case where the proc entry did not exist pre-start.
        await self.middleware.call('nfs.update_procfs_v4recoverydir')
        await self._systemd_unit("rpc-statd", "start")

    async def stop(self):
        # Stop the server first, then its rpc helpers, in this exact order.
        for stop_unit in (self.systemd_unit, "rpc-statd", "rpcbind", "rpc-gssd"):
            await self._systemd_unit(stop_unit, "stop")
| 1,795 | Python | .py | 37 | 40 | 115 | 0.659039 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |