id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,900 | power_management_linux.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/power_management_linux.py | import asyncio
from middlewared.service import private, Service
from middlewared.utils import run
class DiskService(Service):

    @private
    async def power_management_impl(self, dev, disk):
        """Apply APM and standby-timer settings to ``/dev/{dev}`` via hdparm.

        `disk` is the database row for the disk: `advpowermgmt` holds the APM
        level ('DISABLED' maps to hdparm value 255) and `hddstandby` the
        spin-down time in minutes (or 'ALWAYS ON' to disable the timer).
        """
        apm_level = '255' if disk['advpowermgmt'] == 'DISABLED' else disk['advpowermgmt']
        self.middleware.create_task(run(
            'hdparm', '-B', apm_level, f'/dev/{dev}',
            check=False,
        ))

        if disk['hddstandby'] == 'ALWAYS ON':
            # hdparm -S 0 disables the standby timer entirely
            standby = 0
        else:
            minutes = int(disk['hddstandby'])
            if minutes <= 20:
                # hdparm -S values from 1 to 240 specify multiples of 5 seconds
                standby = minutes * 60 // 5
            else:
                # hdparm -S values from 241 to 251 specify multiples of 30 minutes
                standby = 240 + minutes // 30

        async def apply_standby():
            # We wait a minute before applying the standby timer because this is
            # likely happening during system boot or some activity is happening
            # very soon.
            await asyncio.sleep(60)
            self.middleware.create_task(run('hdparm', '-S', str(standby), f'/dev/{dev}', check=False))

        self.middleware.create_task(apply_standby())
| 1,171 | Python | .py | 25 | 36.44 | 113 | 0.597893 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,901 | disk_events.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/disk_events.py | from middlewared.utils.disks import DISKS_TO_IGNORE
async def added_disk(middleware, disk_name):
    """Handle a newly attached disk: sync the db cache, SED-unlock it, clear any stale SMART alert."""
    for method, *args in (
        ('disk.sync', disk_name),
        ('disk.sed_unlock', disk_name),
        ('alert.oneshot_delete', 'SMART', disk_name),
    ):
        await middleware.call(method, *args)
async def remove_disk(middleware, disk_name):
    """Handle a detached disk: resync the whole disk cache, then clear its SMART alert."""
    sync_job = await middleware.call('disk.sync_all')
    await sync_job.wait()
    await middleware.call('alert.oneshot_delete', 'SMART', disk_name)
async def udev_block_devices_hook(middleware, data):
    """Dispatch udev block-device add/remove events to the disk handlers."""
    # Only whole-disk events from the block subsystem are of interest, and
    # device names we deliberately ignore are skipped.
    is_disk_event = (
        data.get('SUBSYSTEM') == 'block'
        and data.get('DEVTYPE') == 'disk'
        and not data['SYS_NAME'].startswith(DISKS_TO_IGNORE)
    )
    if not is_disk_event:
        return

    action = data['ACTION']
    if action == 'add':
        await added_disk(middleware, data['SYS_NAME'])
    elif action == 'remove':
        await remove_disk(middleware, data['SYS_NAME'])
def setup(middleware):
    """Register the udev hook that keeps the disk cache in sync with hotplug events."""
    middleware.register_hook('udev.block', udev_block_devices_hook)
| 961 | Python | .py | 21 | 40.333333 | 69 | 0.694952 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,902 | wipe.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/wipe.py | import asyncio
import os
import pathlib
import threading
import time
from middlewared.schema import accepts, Bool, Ref, Str, returns
from middlewared.service import job, Service, private
CHUNK = 1048576 # 1MB binary
# Maximum number of attempts to request partition table update
MAX_NUM_PARTITION_UPDATE_RETRIES = 4
class DiskService(Service):
@private
def get_partitions_quick(self, dev_name, tries=None):
    """Return a mapping of partition number -> partition start offset in bytes.

    `tries` (int) is how many passes over sysfs we will make. This is often
    called right after a drive has been formatted, so the caller may want to
    wait for udev to become aware of the new partitions; each pass is preceded
    by a short sleep for that reason.
    """
    # Normalize `tries` into the 1..10 range; anything that is not a sane
    # integer collapses to a single attempt.
    if not isinstance(tries, int) or tries in (0, 1):
        attempts = 1
    else:
        attempts = min(tries, 10)

    partitions = {}
    block_size = 0
    sysfs = pathlib.Path(f"/sys/block/{dev_name}")
    for attempt in range(attempts):
        if partitions:
            # populated on a previous pass; no need to re-read sysfs
            return partitions
        # give udev a moment to surface the partition files
        time.sleep(0.5)
        try:
            block_size = int((sysfs / 'queue/logical_block_size').read_text().strip())
            with os.scandir(sysfs) as entries:
                for entry in entries:
                    if not (entry.is_dir() and entry.name.startswith(dev_name)):
                        continue
                    part_path = pathlib.Path(entry.path)
                    number = int((part_path / 'partition').read_text().strip())
                    # 'start' is in units of logical blocks; convert to bytes
                    partitions[number] = int((part_path / 'start').read_text().strip()) * block_size
        except (FileNotFoundError, ValueError):
            # sysfs entries not (fully) there yet; retry if attempts remain
            continue
        except Exception:
            if attempt + 1 == attempts:  # range() built-in is half-open
                self.logger.error('Unexpected failure gathering partition info', exc_info=True)
    return partitions
def _wipe_impl(self, job, dev, mode, event):
    """Blocking implementation of `disk.wipe`.

    `dev` is the bare device name (e.g. `sda`), `mode` is one of
    QUICK / FULL / FULL_RANDOM and `event` is a `threading.Event` that aborts
    the wipe when set (the job was cancelled). Progress is reported via `job`.
    """
    disk_path = f'/dev/{dev}'
    with open(os.open(disk_path, os.O_WRONLY | os.O_EXCL), 'wb') as f:
        size = os.lseek(f.fileno(), 0, os.SEEK_END)
        if size == 0:
            # no size means nothing else will work
            self.logger.error('Unable to determine size of "%s"', dev)
            return
        elif size < 33554432 and mode == 'QUICK':
            # we wipe the first and last 33554432 bytes (32MB) of the
            # device when it's the "QUICK" mode so if the device is smaller
            # than that, ignore it.
            return

        # no reason to write more than 1MB at a time
        # or kernel will break them into smaller chunks
        if mode in ('QUICK', 'FULL'):
            # BUG FIX: this previously wrote b'0' (the ASCII digit, byte 0x30).
            # Zero-filling the disk, as documented, means NUL bytes.
            to_write = b'\x00' * CHUNK
        else:
            # FULL_RANDOM: one 1MB random block, written repeatedly
            to_write = os.urandom(CHUNK)

        # seek back to the beginning of the disk
        os.lseek(f.fileno(), 0, os.SEEK_SET)

        if mode == 'QUICK':
            # Get partition info before it gets destroyed
            try:
                disk_parts = self.get_partitions_quick(dev)
            except Exception:
                disk_parts = {}

            _32 = 32
            for i in range(_32):
                # wipe first 32MB
                os.write(f.fileno(), to_write)
                os.fsync(f.fileno())
                if event.is_set():
                    return
                # we * 50 since we write a total of 64MB
                # so this will be 50% of the total
                job.set_progress(round(((i / _32) * 50), 2))

            # seek to 32MB before end of drive
            os.lseek(f.fileno(), (size - (CHUNK * _32)), os.SEEK_SET)
            _64 = _32 * 2
            for i in range(_32, _64):  # this is done to have accurate reporting
                # wipe last 32MB
                os.write(f.fileno(), to_write)
                os.fsync(f.fileno())
                if event.is_set():
                    return
                job.set_progress(round(((i / _64) * 100), 2))

            # The middle partitions often contain old cruft. Clean those.
            if len(disk_parts) > 1:
                _30MiB = 30 * CHUNK
                _30MiB_from_end = size - _30MiB
                for sector_start in disk_parts.values():
                    # Skip any that start under 30 MiB or 30MiB from the end
                    if (sector_start < _30MiB) or (_30MiB_from_end < sector_start):
                        continue
                    # Start 2 MiB back from the start and 'clean' 2 MiB past, 4 MiB total
                    os.lseek(f.fileno(), sector_start - (2 * CHUNK), os.SEEK_SET)
                    for i in range(4):
                        os.write(f.fileno(), to_write)
                        os.fsync(f.fileno())
                        if event.is_set():
                            return
                        # This is quick. We can reasonably skip the progress update
        else:
            iterations = (size // CHUNK)
            for i in range(iterations):
                os.write(f.fileno(), to_write)
                # Linux allocates extremely large buffers for some disks. Even after everything is written and the
                # device is successfully closed, disk activity might still continue for quite a while. This will
                # give a false sense of data on the disk being completely destroyed while in reality it is still
                # not.
                # Additionally, such a behavior causes issues when aborting the disk wipe. Even after the file
                # descriptor is closed, OS will prevent any other program from opening the disk with O_EXCL until
                # all the buffers are flushed, resulting in a "Device or resource busy" error.
                os.fsync(f.fileno())
                if event.is_set():
                    return
                job.set_progress(round(((i / iterations) * 100), 2))

    # The call to update_partition_table_quick can require retries.
    # NOTE: this must run after the `with` block above: BLKRRPART needs its own
    # open of the device, which would fail while we still hold it O_EXCL.
    error = {}
    retries = MAX_NUM_PARTITION_UPDATE_RETRIES
    # Unfortunately, without a small initial sleep, the following
    # retry loop will almost certainly require two iterations.
    time.sleep(0.1)
    while retries > 0:
        # Use BLKRRPATH ioctl to update the kernel partition table
        error = self.middleware.call_sync('disk.update_partition_table_quick', disk_path)
        if not error[disk_path]:
            break
        time.sleep(0.1)
        retries -= 1

    if error[disk_path]:
        self.logger.error('Error partition table update "%s": %s', disk_path, error[disk_path])
@accepts(
    Str('dev'),
    Str('mode', enum=['QUICK', 'FULL', 'FULL_RANDOM'], required=True),
    Bool('synccache', default=True),
)
@returns()
@job(
    lock=lambda args: args[0],
    description=lambda dev, mode, *args: f'{mode.replace("_", " ").title()} wipe of disk {dev}',
    abortable=True,
)
async def wipe(self, job, dev, mode, sync):
    """
    Performs a wipe of a disk `dev`.
    It can be of the following modes:
      - QUICK: clean the first and last 32 megabytes on `dev`
      - FULL: write whole disk with zero's
      - FULL_RANDOM: write whole disk with random bytes
    """
    # The blocking wipe runs in a thread; `abort_event` lets us signal it to
    # stop when this coroutine is cancelled (job abort).
    abort_event = threading.Event()
    try:
        await self.middleware.run_in_thread(self._wipe_impl, job, dev, mode, abort_event)
    except asyncio.CancelledError:
        abort_event.set()
        raise
    if sync:
        await self.middleware.call('disk.sync', dev)
| 8,198 | Python | .py | 170 | 33.647059 | 118 | 0.53385 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,903 | temperature.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/temperature.py | import asyncio
import datetime
import time
import json
import async_timeout
from middlewared.api import api_method
from middlewared.api.current import DiskTemperatureAlertsArgs, DiskTemperatureAlertsResult
from middlewared.common.smart.smartctl import SMARTCTL_POWERMODES
from middlewared.schema import accepts, Bool, Dict, Int, List, returns, Str
from middlewared.service import private, Service
from middlewared.utils.asyncio_ import asyncio_map
from middlewared.utils.disk_temperatures import parse_smartctl_for_temperature_output
class DiskService(Service):
cache = {}
@private
async def disks_for_temperature_monitoring(self):
    """Names of disks that have a device name and S.M.A.R.T. enabled."""
    filters = [
        ['name', '!=', None],
        ['togglesmart', '=', True],
    ]
    disks = await self.middleware.call('disk.query', filters)
    return [disk['name'] for disk in disks]
@accepts(
    Str('name'),
    Dict(
        'options',
        Int('cache', default=None, null=True),
        Str('powermode', enum=SMARTCTL_POWERMODES, default=SMARTCTL_POWERMODES[0]),
    ),
    deprecated=[
        (
            lambda args: len(args) == 2 and isinstance(args[1], str),
            lambda name, powermode: [name, {'powermode': powermode}],
        ),
    ],
    roles=['REPORTING_READ']
)
@returns(Int('temperature', null=True))
async def temperature(self, name, options):
    """
    Returns temperature for device `name` using specified S.M.A.R.T. `powermode`. If `cache` is not null
    then the last cached within `cache` seconds value is used.
    """
    max_age = options['cache']
    if max_age is not None and name in self.cache:
        # serve from cache when the entry is younger than `cache` seconds
        cached_temperature, recorded_at = self.cache[name]
        if recorded_at > time.monotonic() - max_age:
            return cached_temperature

    temperature = await self.middleware.call('disk.temperature_uncached', name, options['powermode'])
    self.cache[name] = (temperature, time.monotonic())
    return temperature
@private
async def temperature_uncached(self, name, powermode):
    """Read the current temperature of `name` directly via smartctl JSON output (None on failure)."""
    args = ['-a', '-n', powermode.lower(), '--json=c']
    output = await self.middleware.call('disk.smartctl', name, args, {'silent': True})
    if output:
        return parse_smartctl_for_temperature_output(json.loads(output))
@private
async def reset_temperature_cache(self):
    """Drop all cached disk temperature readings."""
    self.cache = {}
# Limit how many smartctl processes run concurrently when polling many disks.
temperatures_semaphore = asyncio.BoundedSemaphore(8)

@accepts(
    List('names', items=[Str('name')]),
    Dict(
        'options',
        # A little less than collectd polling interval of 300 seconds to avoid returning old value when polling
        # occurs in 299.9 seconds.
        Int('cache', default=290, null=True),
        Bool('only_cached', default=False),
        Str('powermode', enum=SMARTCTL_POWERMODES, default=SMARTCTL_POWERMODES[0]),
    ),
    deprecated=[
        (
            lambda args: len(args) == 2 and isinstance(args[1], str),
            lambda name, powermode: [name, {'powermode': powermode}],
        ),
    ],
    roles=['REPORTING_READ']
)
@returns(Dict('disks_temperatures', additional_attrs=True))
async def temperatures(self, names, options):
    """
    Returns temperatures for a list of devices (runs in parallel).
    See `disk.temperature` documentation for more details.
    If `only_cached` is specified then this method only returns disk temperatures that exist in cache.
    """
    if not names:
        names = await self.disks_for_temperature_monitoring()

    if options.pop('only_cached'):
        cutoff = time.monotonic() - 610  # Double collectd polling interval + a little bit
        return {
            disk: temperature
            for disk, (temperature, cache_time) in self.cache.items()
            if disk in names and cache_time > cutoff
        }

    async def read_one(name):
        try:
            async with async_timeout.timeout(15):
                return await self.middleware.call('disk.temperature', name, options)
        except asyncio.TimeoutError:
            return None

    readings = await asyncio_map(read_one, names, semaphore=self.temperatures_semaphore)
    return dict(zip(names, readings))
@accepts(List('names', items=[Str('name')]), Int('days', default=7), roles=['REPORTING_READ'])
@returns(Dict('temperatures', additional_attrs=True))
def temperature_agg(self, names, days):
    """Returns min/max/avg temperature for `names` disks for the last `days` days"""
    # we only keep 7 days of historical data because we keep per second information
    # which adds up to lots of used disk space quickly depending on the size of the
    # system
    # BUG FIX: to aggregate the LAST `days` days the window must end now and
    # start `days` in the past. The previous code queried [now, now + days],
    # a window in the future that can never contain the requested history.
    end = datetime.datetime.now()
    start = end - datetime.timedelta(days=min(days, 7))
    opts = {'start': round(start.timestamp()), 'end': round(end.timestamp())}
    final = dict()
    for disk in self.middleware.call_sync('reporting.netdata_graph', 'disktemp', opts):
        if disk['identifier'] not in names:
            continue
        aggregations = disk['aggregations']
        final[disk['identifier']] = {
            'min': aggregations['min'].get('temperature_value', None),
            'max': aggregations['max'].get('temperature_value', None),
            'avg': aggregations['mean'].get('temperature_value', None),
        }
    return final
@api_method(DiskTemperatureAlertsArgs, DiskTemperatureAlertsResult, roles=['REPORTING_READ'])
async def temperature_alerts(self, names):
    """
    Returns existing temperature alerts for specified disk `names.`
    """
    devices = {f'/dev/{name}' for name in names}

    def is_temperature_alert(alert):
        # only SMART alerts, for one of the requested devices, whose message
        # mentions temperature
        return (
            alert['klass'] == 'SMART'
            and alert['args']['device'] in devices
            and 'temperature' in alert['args']['message'].lower()
        )

    return list(filter(is_temperature_alert, await self.middleware.call('alert.list')))
| 6,254 | Python | .py | 140 | 34.2 | 134 | 0.599573 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,904 | sync.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/sync.py | import re
from datetime import timedelta
from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import job, private, Service, ServiceChangeMixin
from middlewared.utils.disks import dev_to_ident
from middlewared.utils.time_utils import utc_now
RE_IDENT = re.compile(r'^\{(?P<type>.+?)\}(?P<value>.+)$')
class DiskService(Service, ServiceChangeMixin):
DISK_EXPIRECACHE_DAYS = 7
@private
@accepts(Str('name'))
async def sync(self, name):
    """
    Syncs a disk `name` with the database cache.
    """
    # Disk sync is a no-op on the standby controller of an HA pair.
    if await self.middleware.call('failover.licensed'):
        if await self.middleware.call('failover.status') == 'BACKUP':
            return

    disks = await self.middleware.call('device.get_disks')
    # Abort if the disk is not recognized as an available disk
    if name not in disks:
        return

    ident = await self.middleware.call('disk.device_to_identifier', name, disks)
    qs = await self.middleware.call(
        'datastore.query', 'storage.disk', [('disk_identifier', '=', ident)], {'order_by': ['disk_expiretime']}
    )
    if ident and qs:
        # already known under this identifier
        new = False
        disk = qs[0]
    else:
        # brand-new identifier: expire any stale rows still claiming this device name
        new = True
        for stale in await self.middleware.call('datastore.query', 'storage.disk', [('disk_name', '=', name)]):
            stale['disk_expiretime'] = utc_now() + timedelta(days=self.DISK_EXPIRECACHE_DAYS)
            await self.middleware.call('datastore.update', 'storage.disk', stale['disk_identifier'], stale)
        disk = {'disk_identifier': ident}

    disk.update({'disk_name': name, 'disk_expiretime': None})
    self._map_device_disk_to_db(disk, disks[name])

    if new:
        disk['disk_identifier'] = await self.middleware.call('datastore.insert', 'storage.disk', disk)
    else:
        await self.middleware.call('datastore.update', 'storage.disk', disk['disk_identifier'], disk)

    if disks[name]['dif']:
        await self.middleware.call('alert.oneshot_create', 'DifFormatted', [name])
    else:
        await self.middleware.call('alert.oneshot_delete', 'DifFormatted', None)

    await self.restart_services_after_sync()
    # We have expansion shelves that take up to 60 seconds before the disk will be mapped to the enclosure slot in
    # the sysfs structure. There is no way around it, unfortunately. We should ask `enclosure.sync_disk` to retry
    # itself if it fails to map the disk to enclosure.
    await self.middleware.call('enclosure.sync_disk', disk['disk_identifier'], None, True)
@private
def log_disk_info(self, sys_disks):
    """Log a summary of the detected disks and return how many there are.

    For small systems (<= 25 disks) each disk's lunid/serial is logged, which
    helps debug syncs that ran before all the disks resolved; for larger
    systems only the count is logged to keep middlewared.log readable.
    """
    number_of_disks = len(sys_disks)
    if number_of_disks > 25:
        self.logger.info('Found %d disks', number_of_disks)
    else:
        summary = {
            name: {key: value for key, value in info.items() if key in ('lunid', 'serial')}
            for name, info in sys_disks.items()
        }
        self.logger.info('Found disks: %r', summary)
    return number_of_disks
@private
def ident_to_dev(self, ident, sys_disks):
    """Translate a disk identifier string (e.g. '{serial}XYZ') to a device name in `sys_disks`.

    Returns None when the identifier is empty, malformed, of an unknown type,
    or matches none of the given disks.
    """
    match = RE_IDENT.search(ident) if ident else None
    if match is None:
        return

    ident_type = match.group('type')
    ident_value = match.group('value')
    type_to_key = {'uuid': 'uuid', 'devicename': 'name', 'serial_lunid': 'serial_lunid', 'serial': 'serial'}
    if ident_type not in type_to_key:
        return

    for disk, info in sys_disks.items():
        if ident_type == 'uuid':
            # uuid identifiers point at a partition; resolve it to its parent disk
            for part in info['parts']:
                if part['partition_uuid'] == ident_value:
                    return part['disk']
        elif info.get(type_to_key[ident_type]) == ident_value:
            return disk
@private
def dev_to_ident(self, name, sys_disks):
    """Thin wrapper around the shared `dev_to_ident` utility (device name -> identifier string)."""
    return dev_to_ident(name, sys_disks)
@private
@accepts(Dict(
    'options',
    Bool('zfs_guid', default=False),
))
@job(lock='disk.sync_all')
def sync_all(self, job, opts):
    """
    Synchronize all disks with the cache in database.
    """
    # Skip sync disks on standby node
    licensed = self.middleware.call_sync('failover.licensed')
    if licensed:
        status = self.middleware.call_sync('failover.status')
        if status == 'BACKUP':
            return

    job.set_progress(10, 'Enumerating system disks')
    sys_disks = self.middleware.call_sync('device.get_disks', True)
    number_of_disks = self.log_disk_info(sys_disks)

    job.set_progress(20, 'Enumerating disk information from database')
    db_disks = self.middleware.call_sync('datastore.query', 'storage.disk', [], {'order_by': ['disk_expiretime']})

    # events are emitted (and the db synced to the standby node) in bulk at the end
    options = {'send_events': False, 'ha_sync': False}
    seen_disks = {}
    changed = set()
    deleted = set()
    dif_formatted_disks = []
    increment = round((40 - 20) / number_of_disks, 3)  # 20% of the total percentage
    progress_percent = 40
    encs = self.middleware.call_sync('enclosure.query')
    for idx, disk in enumerate(db_disks, start=1):
        progress_percent += increment
        job.set_progress(progress_percent, f'Syncing disk {idx}/{number_of_disks}')
        original_disk = disk.copy()
        name = self.ident_to_dev(disk['disk_identifier'], sys_disks)
        if not name or self.dev_to_ident(name, sys_disks) != disk['disk_identifier']:
            # 1. can't translate identifier to device
            # 2. or can't translate device to identifier
            if not disk['disk_expiretime']:
                disk['disk_expiretime'] = utc_now() + timedelta(days=self.DISK_EXPIRECACHE_DAYS)
                self.middleware.call_sync(
                    'datastore.update', 'storage.disk', disk['disk_identifier'], disk, options
                )
                changed.add(disk['disk_identifier'])
            elif disk['disk_expiretime'] < utc_now():
                # Disk expire time has surpassed, go ahead and remove it
                if disk['disk_kmip_uid']:
                    # BUG FIX: the row key is 'disk_kmip_uid' (checked just above);
                    # the previous 'disk_kmip_uuid' raised KeyError here.
                    self.middleware.call_sync(
                        'kmip.reset_sed_disk_password', disk['disk_identifier'], disk['disk_kmip_uid'],
                        background=True
                    )
                self.middleware.call_sync('datastore.delete', 'storage.disk', disk['disk_identifier'], options)
                deleted.add(disk['disk_identifier'])
                continue
        else:
            disk['disk_expiretime'] = None
            disk['disk_name'] = name

        if name in sys_disks:
            if sys_disks[name]['dif']:
                dif_formatted_disks.append(name)
            self._map_device_disk_to_db(disk, sys_disks[name])

        if name not in sys_disks and not disk['disk_expiretime']:
            # If for some reason disk is not identified as a system disk mark it to expire.
            disk['disk_expiretime'] = utc_now() + timedelta(days=self.DISK_EXPIRECACHE_DAYS)

        if self._disk_changed(disk, original_disk):
            self.middleware.call_sync('datastore.update', 'storage.disk', disk['disk_identifier'], disk, options)
            changed.add(disk['disk_identifier'])

        try:
            self.middleware.call_sync('enclosure.sync_disk', disk['disk_identifier'], encs)
        except Exception:
            self.logger.error(
                'Unhandled exception in enclosure.sync_disk for %r', disk['disk_identifier'], exc_info=True
            )
        seen_disks[name] = disk

    qs = None
    progress_percent = 70
    for name in filter(lambda x: x not in seen_disks, sys_disks):
        progress_percent += increment
        disk_identifier = self.dev_to_ident(name, sys_disks)
        if qs is None:
            # lazily fetched: only needed when there are disks not seen above
            qs = self.middleware.call_sync('datastore.query', 'storage.disk')
        if disk := [i for i in qs if i['disk_identifier'] == disk_identifier]:
            new = False
            disk = disk[0]
            job.set_progress(progress_percent, f'Updating disk {name!r}')
        else:
            new = True
            disk = {'disk_identifier': disk_identifier}
            job.set_progress(progress_percent, f'Syncing new disk {name!r}')

        original_disk = disk.copy()
        disk['disk_name'] = name
        self._map_device_disk_to_db(disk, sys_disks[name])
        if sys_disks[name]['dif']:
            dif_formatted_disks.append(name)

        if not new:
            if self._disk_changed(disk, original_disk):
                self.middleware.call_sync(
                    'datastore.update', 'storage.disk', disk['disk_identifier'], disk, options
                )
                changed.add(disk['disk_identifier'])
        else:
            self.middleware.call_sync('datastore.insert', 'storage.disk', disk, options)
            changed.add(disk['disk_identifier'])

        try:
            self.middleware.call_sync('enclosure.sync_disk', disk['disk_identifier'], encs)
        except Exception:
            self.logger.error(
                'Unhandled exception in enclosure.sync_disk for %r', disk['disk_identifier'], exc_info=True
            )

    if dif_formatted_disks:
        self.middleware.call_sync('alert.oneshot_create', 'DifFormatted', dif_formatted_disks)
    else:
        self.middleware.call_sync('alert.oneshot_delete', 'DifFormatted', None)

    if changed or deleted:
        job.set_progress(92, 'Restarting necessary services')
        self.middleware.call_sync('disk.restart_services_after_sync')

        # we query the db again since we've made changes to it
        job.set_progress(94, 'Emitting disk events')
        disks = {i['disk_identifier']: i for i in self.middleware.call_sync('datastore.query', 'storage.disk')}
        for change in changed:
            self.middleware.send_event('disk.query', 'CHANGED', id=change, fields=disks[change])
        for delete in deleted:
            self.middleware.send_event('disk.query', 'REMOVED', id=delete)

    if opts['zfs_guid']:
        job.set_progress(95, 'Synchronizing ZFS GUIDs')
        self.middleware.call_sync('disk.sync_all_zfs_guid')

    if licensed and status == 'MASTER':
        job.set_progress(96, 'Synchronizing database to standby controller')
        # there could be, literally, > 1k database changes in this method on large systems
        # so we're not sync'ing these db changes synchronously. Instead we're sync'ing the
        # entire database to the remote node after we're done. The (potential) speed
        # improvement this provides is substantial
        self.middleware.call_sync('failover.datastore.force_send')

    job.set_progress(100, 'Syncing all disks complete')
    return 'OK'
def _disk_changed(self, disk, original_disk):
    """Whether `disk` differs from `original_disk` (its snapshot from the db).

    storage_disk.disk_size is stored as a string, so the in-memory value is
    normalized to a string (or None) before comparing.
    """
    size = disk.get('disk_size')
    normalized = dict(disk, disk_size=str(size) if size is not None else None)
    return normalized != original_disk
def _map_device_disk_to_db(self, db_disk, disk):
    """Copy hardware-derived fields from `disk` (device info) onto `db_disk` (db row, 'disk_' prefix).

    'size' is only copied when truthy so a transiently unreadable size does not
    clobber a previously stored value.
    """
    only_update_if_true = ('size',)
    update_keys = ('serial', 'lunid', 'rotationrate', 'type', 'size', 'subsystem', 'number', 'model', 'bus')
    for key, value in disk.items():
        if key not in update_keys:
            continue
        if key in only_update_if_true and not value:
            continue
        db_disk[f'disk_{key}'] = value
@private
async def restart_services_after_sync(self):
    """Refresh cached smartctl args and restart the services that consume disk info."""
    await self.middleware.call('disk.update_smartctl_args_for_disks')
    for service in ('smartd', 'snmp'):
        await self._service_change(service, 'restart')
# Identifiers of db rows that currently have an expiretime set; used by
# process_datastore_event to translate expiry into ADDED/REMOVED events.
expired_disks = set()

@private
async def init_datastore_events_processor(self):
    """Seed `expired_disks` from the database on startup."""
    rows = await self.middleware.call(
        "datastore.query",
        "storage.disk",
        [("expiretime", "!=", None)],
        {"prefix": "disk_"},
    )
    self.expired_disks = {row["identifier"] for row in rows}
@private
async def process_datastore_event(self, type_, kwargs):
    """Rewrite raw datastore events so that disk expiry looks like removal/addition.

    A row gaining an expiretime is surfaced as REMOVED (once only; subsequent
    changes while expired are swallowed by returning None), and a row losing
    it as ADDED. Everything else passes through unchanged.
    """
    if type_ != "CHANGED" or "fields" not in kwargs:
        return type_, kwargs

    identifier = kwargs["fields"]["identifier"]
    if kwargs["fields"]["expiretime"] is not None:
        if identifier in self.expired_disks:
            # already reported as removed; swallow the event
            return None
        self.expired_disks.add(identifier)
        return "REMOVED", {"id": kwargs["id"]}

    if identifier in self.expired_disks:
        self.expired_disks.remove(identifier)
        return "ADDED", {"id": kwargs["id"], "fields": kwargs["fields"]}
    return type_, kwargs
async def setup(middleware):
    """Initialize expiry tracking and register the storage.disk datastore event."""
    await middleware.call("disk.init_datastore_events_processor")
    await middleware.call("datastore.register_event", {
        "description": "Sent on disk changes.",
        "datastore": "storage.disk",
        "plugin": "disk",
        "prefix": "disk_",
        "extra": {"include_expired": True},
        "id": "identifier",
        "process_event": "disk.process_datastore_event",
    })
| 13,677 | Python | .py | 267 | 38.902622 | 119 | 0.582778 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,905 | retaste.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/retaste.py | import fcntl
import logging
import multiprocessing
import os
import re
from middlewared.schema import List, Str
from middlewared.service import Service, accepts, job, private
logger = logging.getLogger(__name__)
SD_PATTERN = re.compile(r"^sd[a-z]+$")
NVME_PATTERN = re.compile(r"^nvme\d+n\d+$")
def taste_it(disk, errors):
    """Force the kernel to re-read `disk`'s partition table via the BLKRRPART ioctl.

    Any failures (open or ioctl) are recorded as a list of strings under
    `errors[disk]` (an empty list means success). If the device cannot be
    opened, the ioctl is not attempted.
    """
    BLKRRPART = 0x125f  # force reread partition table
    problems = []
    fd = None
    try:
        fd = os.open(disk, os.O_WRONLY)
    except Exception as e:
        # can't open, no reason to continue
        problems.append(str(e))
    else:
        try:
            fcntl.ioctl(fd, BLKRRPART)
        except Exception as e:
            problems.append(str(e))
    finally:
        if fd is not None:
            os.close(fd)
    # BUG FIX: assign the list once, at the end. `errors` may be a
    # multiprocessing.Manager dict proxy, and in-place mutation of a list
    # stored in a proxy (errors[disk].append(...)) is NOT propagated back to
    # the parent process, so worker failures were silently lost.
    errors[disk] = problems
def retaste_disks_impl(disks: set = None):
    """Re-read partition tables for the given `/dev/...` paths (all sd*/nvme* when None)."""
    if disks is None:
        disks = set()
        with os.scandir('/dev') as entries:
            for entry in entries:
                if SD_PATTERN.match(entry.name) or NVME_PATTERN.match(entry.name):
                    disks.add(entry.path)

    with multiprocessing.Manager() as manager:
        shared_errors = manager.dict()
        with multiprocessing.Pool() as pool:
            # we use processes so that these operations are truly
            # "parallel" (side-step the GIL) since we have systems
            # with 1k+ disks. Since this runs, potentially, on failover
            # event we need to squeeze out every bit of perf we can get
            pool.starmap(taste_it, [(disk, shared_errors) for disk in disks])

        for disk, disk_errors in shared_errors.items():
            if disk_errors:
                logger.error('Failed to retaste %r with error(s): %s', disk, ', '.join(disk_errors))
        del shared_errors
class DiskService(Service):

    @private
    def update_partition_table_quick(self, devnode):
        """
        Call the BLKRRPATH ioctl to update the partition table on a single dev node
        Used by 'wipe'
        """
        errors = {}
        taste_it(devnode, errors)
        return errors

    @accepts(List('disks', required=False, default=None, items=[Str('name', required=True)]))
    @job(lock='disk_retaste', lock_queue_size=1)
    def retaste(self, job, disks):
        """Force a kernel re-read of the partition tables for `disks` (all sd*/nvme* when omitted)."""
        if disks:
            # remove duplicates and prefix '/dev' (i.e. /dev/sda, /dev/sdb, etc)
            disks = {f'/dev/{name.removeprefix("/dev/")}' for name in disks}

        job.set_progress(85, 'Retasting disks')
        retaste_disks_impl(disks)

        job.set_progress(95, 'Waiting for disk events to settle')
        self.middleware.call_sync('device.settle_udev_events')

        job.set_progress(100, 'Retasting disks done')
        return 'SUCCESS'
| 2,636 | Python | .py | 67 | 31.492537 | 93 | 0.621176 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,906 | smartctl.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/smartctl.py | import asyncio
import subprocess
from middlewared.common.smart.smartctl import get_smartctl_args, smartctl, SMARTCTX
from middlewared.schema import accepts, Bool, Dict, List, Str
from middlewared.service import CallError, private, Service
from middlewared.utils.asyncio_ import asyncio_map
class DiskService(Service):
smartctl_args_for_disk = {}
smartctl_args_for_device_lock = asyncio.Lock()
@private
async def update_smartctl_args_for_disks(self):
    """Recompute the cached per-disk smartctl argument lists in the background.

    The lock is acquired here and released by the spawned task, so readers
    (`smartctl_args`) block until the refresh has finished.
    """
    await self.smartctl_args_for_device_lock.acquire()

    async def update():
        try:
            disks = await self.middleware.call("disk.query", [["name", "!=", None]])
            devices = await self.middleware.call("device.get_disks")
            hardware = await self.middleware.call("truenas.is_ix_hardware")
            context = SMARTCTX(devices=devices, enterprise_hardware=hardware, middleware=self.middleware)
            args = await asyncio_map(
                lambda disk: get_smartctl_args(context, disk["name"], disk["smartoptions"]), disks, 8
            )
            self.smartctl_args_for_disk = dict(zip([disk["name"] for disk in disks], args))
        except Exception:
            self.logger.error("update_smartctl_args_for_disks failed", exc_info=True)
        finally:
            self.smartctl_args_for_device_lock.release()

    self.middleware.create_task(update())
@private
async def smartctl_args(self, disk):
    """Cached smartctl args for `disk` (None when unknown); waits out any refresh in flight."""
    async with self.smartctl_args_for_device_lock:
        return self.smartctl_args_for_disk.get(disk)
@accepts(
    Str('disk'),
    List('args', items=[Str('arg')]),
    Dict(
        'options',
        Bool('cache', default=True),
        Bool('silent', default=False),
    ),
)
@private
async def smartctl(self, disk, args, options):
    """Run smartctl for `disk` with extra `args` and return its stdout.

    With `cache` the precomputed per-disk argument list is used; otherwise it
    is derived on the fly. With `silent`, failures yield None instead of
    raising CallError.
    """
    try:
        if options['cache']:
            smartctl_args = await self.middleware.call('disk.smartctl_args', disk)
        else:
            devices = await self.middleware.call('device.get_disks')
            hardware = await self.middleware.call('truenas.is_ix_hardware')
            context = SMARTCTX(devices=devices, enterprise_hardware=hardware, middleware=self.middleware)
            if disks := await self.middleware.call('disk.query', [['name', '=', disk]]):
                smartoptions = disks[0]['smartoptions']
            else:
                self.middleware.logger.warning("No database row found for disk %r", disk)
                smartoptions = ''
            smartctl_args = await get_smartctl_args(context, disk, smartoptions)

        if smartctl_args is None:
            raise CallError(f'S.M.A.R.T. is unavailable for disk {disk}')

        cp = await smartctl(smartctl_args + args, check=False, stderr=subprocess.STDOUT,
                            encoding='utf8', errors='ignore')
        # smartctl exit-status bits 0/1 signal command-line/open failures
        if (cp.returncode & 0b11) != 0:
            raise CallError(f'smartctl failed for disk {disk}:\n{cp.stdout}')
    except CallError:
        if options['silent']:
            return None
        raise

    return cp.stdout
async def setup(middleware):
    """Prime the per-disk smartctl argument cache as soon as the plugin loads."""
    await middleware.call('disk.update_smartctl_args_for_disks')
| 3,363 | Python | .py | 70 | 35.9 | 109 | 0.597498 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,907 | format.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/format.py | import pathlib
import parted
from middlewared.service import CallError, private, Service
class DiskService(Service):

    @private
    def format(self, disk):
        """Format a data drive with a maximized data partition"""
        sysfs = pathlib.Path(f'/sys/class/block/{disk}')
        if not sysfs.exists():
            raise CallError(f'Unable to retrieve disk details for {disk!r}')

        # refuse drives formatted with T10 protection information enabled
        dif = next(sysfs.glob('device/scsi_disk/*/protection_type'), None)
        if dif is not None and dif.read_text().strip() != '0':
            # 0 == disabled, > 0 enabled
            raise CallError(f'Disk: {disk!r} is incorrectly formatted with Data Integrity Feature (DIF).')

        dev = parted.getDevice(f'/dev/{disk}')
        # it's important we remove this device from the global cache
        # so that libparted probes the disk for the latest up-to-date
        # information. This becomes _very_ important, for example,
        # when we overprovision disk devices. If the disk is overprovisioned
        # to a larger/smaller size, then libparted has possibility of
        # referencing the old disk size. So depending on the direction of
        # the resize operation, the `clobber()` operation can run off of
        # the end of the disk and raise an IO failure. We actually saw this
        # internally during testing
        dev._Device__device.cache_remove()

        # clobber() wipes partition label info from disk, but during testing on
        # an m40 HA system the disk had to be clobber()'ed twice before
        # `fdisk -l` stopped showing the old (dos/GPT) partition entries;
        # hence two passes.
        for attempt in range(2):
            if not dev.clobber():
                raise CallError(f'Failed on attempt #{attempt} clearing partition labels for {disk!r}')

        # create a single zfs-typed partition spanning the largest free region
        parted_disk = parted.freshDisk(dev, 'gpt')
        regions = sorted(parted_disk.getFreeSpaceRegions(), key=lambda x: x.length)[-1]
        geom = parted.Geometry(start=regions.start, end=regions.end, device=dev)
        fs = parted.FileSystem(type='zfs', geometry=geom)
        part = parted.Partition(disk=parted_disk, type=parted.PARTITION_NORMAL, fs=fs, geometry=geom)
        part.name = 'data'  # give a human readable name to the label
        parted_disk.addPartition(part, constraint=dev.optimalAlignedConstraint)
        parted_disk.commit()

        if len(self.middleware.call_sync('disk.get_partitions_quick', disk, 10)) != len(parted_disk.partitions):
            # In some rare cases udev does not re-read the partition table correctly; force it
            self.middleware.call_sync('device.trigger_udev_events', f'/dev/{disk}')
            self.middleware.call_sync('device.settle_udev_events')
| 3,347 | Python | .py | 54 | 50.814815 | 112 | 0.638246 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,908 | zfs_guid.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/zfs_guid.py | import bidict
from middlewared.service import private, Service
from middlewared.service_exception import MatchNotFound
class DiskService(Service):
@private
async def disk_by_zfs_guid(self, guid):
"""
This method returns a single disk entry with the specified
ZFS guid. The database however may contain multiple disks
with the same GUID differentiated by the `expiretime` key.
The `expiretime` key has the following special meaning depending
on value type:
`None` - disk is currently detected and in the system
`datetime` - disk was removed and will expire at the specified
time.
Since type is inconsistent for this value, it cannot be used
for ordering disks using builtin sorted() method in filter_list.
"""
disk = None
disks_with_zfs_guid = await self.middleware.call(
"disk.query",
[["zfs_guid", "=", guid]],
{"extra": {"include_expired": True}},
)
for entry in disks_with_zfs_guid:
if entry['expiretime'] is None:
disk = entry
break
if disk is None:
disk = entry
elif entry['expiretime'] > disk['expiretime']:
disk = entry
return disk
@private
async def sync_all_zfs_guid(self):
for pool in await self.middleware.call(
"zfs.pool.query",
[["name", "!=", await self.middleware.call("boot.pool_name")]],
):
try:
await self.sync_zfs_guid({
**pool,
"topology": await self.middleware.call("pool.transform_topology", pool["groups"])
})
except Exception:
self.logger.error("Error running sync_zfs_guid for pool %r", pool["name"])
@private
async def sync_zfs_guid(self, pool_id_or_pool):
if isinstance(pool_id_or_pool, dict):
pool = pool_id_or_pool
topology = pool_id_or_pool["topology"]
elif isinstance(pool_id_or_pool, str):
pool = await self.middleware.call("zfs.pool.query", [["name", "=", pool_id_or_pool]], {"get": True})
topology = await self.middleware.call("pool.transform_topology", pool["groups"])
else:
pool = await self.middleware.call("pool.get_instance", pool_id_or_pool)
topology = pool["topology"]
if topology is None:
return
disk_to_guid = bidict.bidict()
for vdev in await self.middleware.call("pool.flatten_topology", topology):
if vdev["type"] == "DISK":
if vdev["disk"] is not None:
disk_to_guid[vdev["disk"]] = vdev["guid"]
else:
self.logger.debug("Pool %r vdev %r disk is None", pool["name"], vdev["guid"])
events = set()
for disk in await self.middleware.call("disk.query", [], {"extra": {"include_expired": True}}):
guid = disk_to_guid.get(disk["devname"])
if guid is not None and guid != disk["zfs_guid"]:
if not disk["expiretime"]:
self.logger.debug(
"Setting disk %r (%r) zfs_guid %r",
disk["identifier"], disk["devname"], guid,
)
events.add(disk["identifier"])
await self.middleware.call(
"datastore.update", "storage.disk", disk["identifier"],
{"zfs_guid": guid}, {"prefix": "disk_", "send_events": False},
)
elif disk["zfs_guid"]:
devname = disk_to_guid.inv.get(disk["zfs_guid"])
if devname is not None and devname != disk["devname"]:
self.logger.debug(
"Removing disk %r (%r) zfs_guid %r as %r has it",
disk["identifier"], disk["devname"], disk["zfs_guid"], devname,
)
events.add(disk["identifier"])
await self.middleware.call(
"datastore.update", "storage.disk", disk["identifier"],
{"zfs_guid": None}, {"prefix": "disk_", "send_events": False},
)
if events:
disks = {i["identifier"]: i for i in await self.middleware.call("disk.query", [], {"prefix": "disk_"})}
for event in events:
if event in disks:
self.middleware.send_event("disk.query", "CHANGED", id=event, fields=disks[event])
async def zfs_events_hook(middleware, data):
if data["class"] == "sysevent.fs.zfs.config_sync":
try:
await middleware.call("disk.sync_zfs_guid", data["pool"])
except MatchNotFound:
pass
async def hook(middleware, pool):
await middleware.call("disk.sync_zfs_guid", pool)
async def setup(middleware):
middleware.register_hook("zfs.pool.events", zfs_events_hook)
middleware.register_hook("pool.post_create_or_update", hook)
| 5,137 | Python | .py | 108 | 34.518519 | 115 | 0.549521 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,909 | sed.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/sed.py | import re
from middlewared.service import Service, private
from middlewared.utils import run
RE_HDPARM_DRIVE_LOCKED = re.compile(r'Security.*\n\s*locked', re.DOTALL)
class DiskService(Service):
@private
async def unlock_ata_security(self, devname, _adv, password):
locked = unlocked = False
cp = await run('hdparm', '-I', devname, check=False)
if cp.returncode:
return locked, unlocked
output = cp.stdout.decode()
if RE_HDPARM_DRIVE_LOCKED.search(output):
locked = True
cmd = ['hdparm', '--user-master', _adv['sed_user'][0].lower(), '--security-unlock', password, devname]
cp = await run(cmd, check=False)
if cp.returncode == 0:
locked = False
unlocked = True
return locked, unlocked
| 840 | Python | .py | 20 | 33.2 | 114 | 0.62069 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,910 | availability.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/availability.py | from collections import defaultdict
from middlewared.service import accepts, private, Service
from middlewared.service_exception import ValidationErrors
from middlewared.schema import Bool, Dict, Str
from middlewared.utils.disks import dev_to_ident
class DiskService(Service):
@private
async def get_exported_disks(self, info, disks=None):
disks = set() if disks is None else disks
if isinstance(info, dict):
path = info.get('path')
if path and path.startswith('/dev/'):
path = path.removeprefix('/dev/')
if disk := await self.middleware.call('disk.label_to_disk', path):
disks.add(disk)
for key in info:
await self.get_exported_disks(info[key], disks)
elif isinstance(info, list):
for idx, entry in enumerate(info):
await self.get_exported_disks(info[idx], disks)
return disks
@private
async def details_impl(self, data):
# see `self.details` for arguments and their meaning
in_use_disks_imported = {}
for in_use_disk, info in (
await self.middleware.call('zpool.status', {'real_paths': True})
)['disks'].items():
in_use_disks_imported[in_use_disk] = info['pool_name']
in_use_disks_exported = {}
for i in await self.middleware.call('zfs.pool.find_import'):
for in_use_disk in await self.get_exported_disks(i['groups']):
in_use_disks_exported[in_use_disk] = i['name']
enc_info = dict()
for enc in await self.middleware.call('enclosure2.query'):
for slot, info in filter(lambda x: x[1], enc['elements']['Array Device Slot'].items()):
enc_info[info['dev']] = (int(slot), enc['id'])
used, unused = [], []
serial_to_disk = defaultdict(list)
sys_disks = await self.middleware.call('device.get_disks')
for dname, i in sys_disks.items():
if not i['size']:
# seen on an internal system during QA. The disk had actually been spun down
# by OS because it had so many errors so the size was an empty string in our db
# SMART data reported the following for the disk: "device is NOT READY (e.g. spun down, busy)"
continue
i['identifier'] = dev_to_ident(dname, sys_disks)
i['enclosure_slot'] = enc_info.get(dname, ())
serial_to_disk[(i['serial'], i['lunid'])].append(i)
# add enclosure information
i['enclosure'] = {}
if enc := i.pop('enclosure_slot'):
i['enclosure'].update({'drive_bay_number': enc[0], 'id': enc[1]})
# query partitions for the disk(s) if requested
i['partitions'] = []
if data['join_partitions']:
i['partitions'] = await self.middleware.call('disk.list_partitions', i['name'])
# TODO: UI needs to remove dependency on `devname` since `name` is sufficient
i['devname'] = i['name']
try:
i['size'] = int(i['size'])
except ValueError:
i['size'] = None
# disk is "technically" not "in use" but the zpool is exported
# and can be imported so the disk would be "in use" if the zpool
# was imported so we'll mark this disk specially so that end-user
# can be warned appropriately
i['exported_zpool'] = in_use_disks_exported.get(dname)
# disk is in use by a zpool that is currently imported
i['imported_zpool'] = in_use_disks_imported.get(dname)
if any((
i['imported_zpool'] is not None,
i['exported_zpool'] is not None,
)):
used.append(i)
else:
unused.append(i)
for i in used + unused:
# need to add a `duplicate_serial` key so that webUI can give an appropriate warning to end-user
# about disks with duplicate serial numbers (I'm looking at you USB "disks")
i['duplicate_serial'] = [
j['name'] for j in serial_to_disk[(i['serial'], i['lunid'])] if j['name'] != i['name']
]
return {'used': used, 'unused': unused}
@accepts(Bool('join_partitions', default=False), roles=['REPORTING_READ'])
async def get_unused(self, join_partitions):
"""
Return disks that are NOT in use by any zpool that is currently imported OR exported.
`join_partitions`: Bool, when True will return all partitions currently written to disk
NOTE: this is an expensive operation
"""
return (await self.details_impl({'join_partitions': join_partitions}))['unused']
@accepts(Bool('join_partitions', default=False), roles=['REPORTING_READ'])
async def get_used(self, join_partitions):
"""
Return disks that are in use by any zpool that is currently imported. It will
also return disks that are in use by any zpool that is exported.
`join_partitions`: Bool, when True will return all partitions currently written to disk
NOTE: this is an expensive operation
"""
return (await self.details_impl({'join_partitions': join_partitions}))['used']
@accepts(
Dict(
'disk_details_args',
Bool('join_partitions', default=False),
Str('type', enum=['USED', 'UNUSED', 'BOTH'], default='BOTH'),
),
roles=['REPORTING_READ'],
)
async def details(self, data):
"""Return detailed information for all disks on the system.
`data`: dict
`join_partitions`: Bool, when True will return all partitions
currently written to disk (NOTE: this is expensive)
`type`: str, what type of disk information will be returned.
If `USED`, only disks that are IN USE will be returned.
If `UNUSED`, only disks that are NOT IN USE are returned.
If `BOTH`, used and unused disks will be returned.
"""
results = await self.details_impl(data)
if data['type'] == 'BOTH':
return results
else:
return results[data['type'].lower()]
@private
async def get_reserved(self):
return await self.middleware.call('boot.get_disks') + await self.middleware.call('pool.get_disks')
@private
async def check_disks_availability(self, disks, allow_duplicate_serials):
"""
Makes sure the disks are present in the system and not reserved
by anything else (boot, pool, iscsi, etc).
Returns:
verrors, dict - validation errors (if any) and disk.query for all disks
"""
verrors = ValidationErrors()
disks_cache = dict(map(lambda x: (x['devname'], x), await self.middleware.call('disk.query')))
disks_set = set(disks)
disks_not_in_cache = disks_set - set(disks_cache.keys())
if disks_not_in_cache:
verrors.add(
'topology',
f'The following disks were not found in system: {"," .join(disks_not_in_cache)}.'
)
disks_reserved = await self.middleware.call('disk.get_reserved')
already_used = disks_set - (disks_set - set(disks_reserved))
if already_used:
verrors.add(
'topology',
f'The following disks are already in use: {"," .join(already_used)}.'
)
if not allow_duplicate_serials and not verrors:
serial_to_disk = defaultdict(set)
for disk in disks:
serial_to_disk[(disks_cache[disk]['serial'], disks_cache[disk]['lunid'])].add(disk)
for reserved_disk in disks_reserved:
reserved_disk_cache = disks_cache.get(reserved_disk)
if not reserved_disk_cache:
continue
serial_to_disk[(reserved_disk_cache['serial'], reserved_disk_cache['lunid'])].add(reserved_disk)
if duplicate_serials := {serial for serial, serial_disks in serial_to_disk.items()
if len(serial_disks) > 1}:
error = ', '.join(map(lambda serial: f'{serial[0]!r} ({", ".join(sorted(serial_to_disk[serial]))})',
duplicate_serials))
verrors.add('topology', f'Disks have duplicate serial numbers: {error}.')
return verrors
| 8,588 | Python | .py | 166 | 39.789157 | 116 | 0.586433 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,911 | identify.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/identify.py | import re
from middlewared.schema import accepts, Dict, Str
from middlewared.service import Service, private
from middlewared.utils.disks import get_disks_with_identifiers
class DiskService(Service):
RE_IDENTIFIER = re.compile(r'^\{(?P<type>.+?)\}(?P<value>.+)$')
@private
@accepts(Str('name'), Dict('disks', additional_attrs=True))
def device_to_identifier(self, name, disks):
"""
Given a device `name` (e.g. sda) returns an unique identifier string
for this device.
This identifier is in the form of {type}string, "type" can be one of
the following:
- serial_lunid - for disk serial concatenated with the lunid
- serial - disk serial
- uuid - uuid of a ZFS GPT partition
- label - label name from geom label
- devicename - name of the device if any other could not be used/found
`disks` is value returned by `device.get_disks`. This can be passed to avoid collecting system
data again if the consumer already has it.
Returns:
str - identifier
"""
return get_disks_with_identifiers([name], disks).get(name, '')
@private
async def identifier_to_device(self, ident, disks):
if not ident:
return None
search = self.RE_IDENTIFIER.search(ident)
if not search:
return None
tp = search.group('type')
value = search.group('value')
mapping = {'uuid': 'uuid', 'devicename': 'name', 'serial_lunid': 'serial_lunid', 'serial': 'serial'}
if tp not in mapping:
return None
elif tp == 'uuid':
partition = await self.middleware.call('disk.list_all_partitions', [['partition_uuid', '=', value]])
if partition:
return partition[0]['disk']
else:
disk = next(
(b for b in (
disks or await self.middleware.call('device.get_disks')
).values() if b[mapping[tp]] == value), None
)
return disk['name'] if disk else None
| 2,106 | Python | .py | 48 | 34.3125 | 112 | 0.602245 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,912 | smart.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/smart.py | from middlewared.service import private, Service
class DiskService(Service):
@private
async def toggle_smart_off(self, name):
await self.middleware.call('disk.smartctl', name, ['--smart=off'], {'silent': True})
@private
async def toggle_smart_on(self, name):
await self.middleware.call('disk.smartctl', name, ['--smart=on'], {'silent': True})
| 378 | Python | .py | 8 | 41.875 | 92 | 0.683924 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,913 | info.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/info.py | from middlewared.schema import accepts, Str
from middlewared.service import filterable, private, Service
from middlewared.utils import filter_list
class DiskService(Service):
@private
@filterable
async def list_all_partitions(self, filters, options):
"""
Returns list of all partitions present in the system
"""
disks = await self.middleware.call('device.get_disks')
parts = []
for disk in disks:
parts.extend(await self.middleware.call('disk.list_partitions', disk))
return filter_list(parts, filters, options)
@private
@accepts(Str('disk'))
async def get_partition(self, disk):
# Will retrieve zfs partition on disk if any
return await self.get_partition_with_uuids(disk, [await self.middleware.call('disk.get_zfs_part_type')])
@private
async def get_partition_with_uuids(self, disk, uuids):
part = next(
(p for p in await self.middleware.call('disk.list_partitions', disk) if p['partition_type'] in uuids),
None
)
return part
@private
async def get_partition_uuid_from_name(self, part_type_name):
mapping = {
'freebsd-zfs': '516e7cba-6ecf-11d6-8ff8-00022d09712b',
'freebsd-swap': '516e7cb5-6ecf-11d6-8ff8-00022d09712b',
'freebsd-boot': '83bd6b9d-7f41-11dc-be0b-001560b84f0f',
}
return mapping.get(part_type_name)
| 1,454 | Python | .py | 35 | 33.742857 | 114 | 0.660297 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,914 | smart_attributes.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/smart_attributes.py | import json
import re
from middlewared.schema import Bool, Dict, Int, List, returns, Str
from middlewared.service import accepts, CallError, private, Service
RE_SATA_DOM_LIFETIME = re.compile(r'^164\s+.*\s+([0-9]+)$', re.M)
class DiskService(Service):
@accepts(Str('name'))
@returns(List('smart_attributes', items=[Dict(
'smart_attribute',
Int('id', required=True),
Int('value', required=True),
Int('worst', required=True),
Int('thresh', required=True),
Str('name', required=True),
Str('when_failed', required=True),
Dict(
'flags',
Int('value', required=True),
Str('string', required=True),
Bool('prefailure', required=True),
Bool('updated_online', required=True),
Bool('performance', required=True),
Bool('error_rate', required=True),
Bool('event_count', required=True),
Bool('auto_keep', required=True),
),
Dict(
'raw',
Int('value', required=True),
Str('string', required=True),
)
)]))
async def smart_attributes(self, name):
"""
Returns S.M.A.R.T. attributes values for specified disk name.
"""
output = json.loads(await self.middleware.call('disk.smartctl', name, ['-a', '--json=c']))
if 'ata_smart_attributes' in output:
return output['ata_smart_attributes']['table']
if 'nvme_smart_health_information_log' in output:
return output['nvme_smart_health_information_log']
if 'scsi_error_counter_log' in output and 'scsi_grown_defect_list' in output:
return {'scsi_error_counter_log': output['scsi_error_counter_log'], 'scsi_grown_defect_list': output['scsi_grown_defect_list']}
raise CallError('Only ATA/SCSI/NVMe devices support S.M.A.R.T. attributes')
@private
async def sata_dom_lifetime_left(self, name):
output = await self.middleware.call('disk.smartctl', name, ['-A'], {'silent': True})
if output is None:
return None
m = RE_SATA_DOM_LIFETIME.search(output)
if m:
aec = int(m.group(1))
return max(1.0 - aec / 3000, 0)
| 2,259 | Python | .py | 53 | 33.471698 | 139 | 0.595086 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,915 | resize.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/resize.py | import os
import asyncio
import subprocess
from middlewared.utils import run, UnexpectedFailure
from middlewared.service import Service, CallError, private, accepts, returns, job, ValidationErrors
from middlewared.schema import Dict, Str, Int, List, Bool
class DiskService(Service):
@private
async def resize_impl(self, disk):
cmd = ['disk_resize', disk['name']]
err = f'DISK: {disk["name"]!r}'
if disk['size']:
err += f' SIZE: {disk["size"]} gigabytes'
cmd.append(f'{disk["size"]}G')
try:
cp = await run(cmd, stderr=subprocess.STDOUT, encoding='utf-8')
except Exception as e:
err += f' ERROR: {str(e)}'
raise UnexpectedFailure(err)
else:
if cp.returncode != 0:
err += f' ERROR: {cp.stdout}'
raise OSError(cp.returncode, os.strerror(cp.returncode), err)
@accepts(
List('disks', required=True, items=[
Dict(
Str('name', required=True),
Int('size', required=False, default=None),
)
]),
Bool('sync', default=True),
Bool('raise_error', default=False)
)
@returns()
@job(lock='disk_resize')
async def resize(self, job, data, sync, raise_error):
"""
Takes a list of disks. Each list entry is a dict that requires a key, value pair.
`name`: string (the name of the disk (i.e. sda))
`size`: integer (given in gigabytes)
`sync`: boolean, when true (default) will synchronize the new size of the disk(s)
with the database cache.
`raise_error`: boolean
when true, will raise a `CallError` if any failures occur
when false, will will log the errors if any failures occur
NOTE:
if `size` is given, the disk with `name` will be resized
to `size` (overprovision).
if `size` is not given, the disk with `name` will be resized
to it's original size (unoverprovision).
"""
verrors = ValidationErrors()
disks = []
for disk in data:
if disk['name'] in disks:
verrors.add('disk.resize', f'Disk {disk["name"]!r} specified more than once.')
else:
disks.append(disk['name'])
verrors.check()
exceptions = await asyncio.gather(*[self.resize_impl(disk) for disk in data], return_exceptions=True)
failures = []
success = []
for disk, exc in zip(data, exceptions):
if isinstance(exc, Exception):
failures.append(str(exc))
else:
self.logger.info('Successfully resized %r', disk['name'])
success.append(disk['name'])
if sync and success:
if len(success) > 1:
await (await self.middleware.call('disk.sync_all')).wait()
else:
await self.middleware.call('disk.sync', success[0])
if failures:
err = f'Failure resizing: {", ".join(failures)}'
if raise_error:
raise CallError(err)
else:
self.logger.error(err)
| 3,239 | Python | .py | 79 | 30.291139 | 109 | 0.566847 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,916 | disk_info.py | truenas_middleware/src/middlewared/middlewared/plugins/disk_/disk_info.py | import collections
import contextlib
import glob
import os
import pathlib
import pyudev
from middlewared.service import CallError, private, Service
# The basic unit of a block I/O is a sector. A sector is
# 512 (2 ** 9) bytes. In sysfs, the files (sector_t type)
# `<disk>/<part>/start` and `<disk>/<part>/size` are
# shown as a multiple of 512 bytes. Most user-space
# tools (fdisk, parted, sfdisk, etc) treat the partition
# offsets in sectors.
BYTES_512 = 512
PART_INFO_FIELDS = (
# queue/logical_block_size (reported as a multiple of BYTES_512)
'lbs',
# starting offset of partition in sectors
'start_sector',
# ending offset of partition in sectors
'end_sector',
# total partition size in sectors
'total_sectors',
# starting offset of partition in bytes
'start_byte',
# ending offset of partition in bytes
'end_byte',
# total size of partition in bytes
'total_bytes',
)
PART_INFO = collections.namedtuple('part_info', PART_INFO_FIELDS, defaults=(0,) * len(PART_INFO_FIELDS))
def get_partition_size_info(disk_name, s_offset, s_size):
"""Kernel sysfs reports most disk files related to "size" in 512 bytes.
To properly calculate the starting SECTOR of partitions, you must
look at logical_block_size (again, reported in 512 bytes) and
do some calculations. It is _very_ important to do this properly
since almost all userspace tools that format disks expect partition
positions to be in sectors."""
lbs = 0
with contextlib.suppress(FileNotFoundError, ValueError):
with open(f'/sys/block/{disk_name}/queue/logical_block_size') as f:
lbs = int(f.read().strip())
if not lbs:
# this should never happen
return PART_INFO()
# important when dealing with 4kn drives
divisor = lbs // BYTES_512
# sectors
start_sector = s_offset // divisor
total_sectors = s_size // divisor
end_sector = total_sectors + start_sector - 1
# bytes
start_byte = start_sector * lbs
end_byte = (end_sector * lbs) + lbs - 1
total_bytes = total_sectors * lbs
return PART_INFO(*(
lbs, start_sector, end_sector, total_sectors,
start_byte, end_byte, total_bytes,
))
class DiskService(Service):
@private
def get_dev_size(self, device):
try:
dev = pyudev.Devices.from_name(pyudev.Context(), 'block', device)
except pyudev.DeviceNotFoundByNameError:
return
else:
if dev.get('DEVTYPE') not in ('disk', 'partition'):
return
return dev.attributes.asint('size') * BYTES_512
@private
def list_partitions(self, disk):
parts = []
try:
bd = pyudev.Devices.from_name(pyudev.Context(), 'block', disk)
except pyudev.DeviceNotFoundByNameError:
return parts
if not bd.children:
return parts
req_keys = ('ID_PART_ENTRY_' + i for i in ('TYPE', 'UUID', 'NUMBER', 'SIZE'))
for p in filter(lambda p: all(p.get(k) for k in req_keys), bd.children):
part_name = self.get_partition_for_disk(disk, p['ID_PART_ENTRY_NUMBER'])
pinfo = get_partition_size_info(disk, int(p['ID_PART_ENTRY_OFFSET']), int(p['ID_PART_ENTRY_SIZE']))
part = {
'name': part_name,
'partition_type': p['ID_PART_ENTRY_TYPE'],
'partition_number': int(p['ID_PART_ENTRY_NUMBER']),
'partition_uuid': p['ID_PART_ENTRY_UUID'],
'disk': disk,
'start_sector': pinfo.start_sector,
'start': pinfo.start_byte,
'end_sector': pinfo.end_sector,
'end': pinfo.end_byte,
'size': pinfo.total_bytes,
'id': part_name,
'path': os.path.join('/dev', part_name),
'encrypted_provider': None,
}
encrypted_provider = glob.glob(f'/sys/block/dm-*/slaves/{part["name"]}')
if encrypted_provider:
part['encrypted_provider'] = os.path.join('/dev', encrypted_provider[0].split('/')[3])
parts.append(part)
return parts
@private
def gptid_from_part_type(self, disk, part_type):
try:
block_device = pyudev.Devices.from_name(pyudev.Context(), 'block', disk)
except pyudev.DeviceNotFoundByNameError:
raise CallError(f'{disk} not found')
if not block_device.children:
raise CallError(f'{disk} has no partitions')
part = next(
(p['ID_PART_ENTRY_UUID'] for p in block_device.children if all(
p.get(k) for k in ('ID_PART_ENTRY_UUID', 'ID_PART_ENTRY_TYPE')
) and p['ID_PART_ENTRY_TYPE'] == part_type), None
)
if not part:
raise CallError(f'Partition type {part_type} not found on {disk}')
return f'disk/by-partuuid/{part}'
@private
async def get_efi_part_type(self):
return 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b'
@private
async def get_zfs_part_type(self):
return '6a898cc3-1dd2-11b2-99a6-080020736631'
@private
def label_to_dev(self, label):
label_path = os.path.join('/dev', label)
if not os.path.exists(label_path):
return None
dev = os.path.basename(os.path.realpath(label_path))
if not pathlib.Path(os.path.join('/dev/', dev)).is_block_device():
return None
return dev
@private
def label_to_disk(self, label):
partition_or_disk = self.label_to_dev(label)
if partition_or_disk is None:
return None
if os.path.exists(os.path.join('/sys/class/block', partition_or_disk, 'partition')):
return self.get_disk_from_partition(partition_or_disk)
else:
return partition_or_disk
@private
def get_disk_from_partition(self, part_name):
if not os.path.exists(os.path.join('/dev', part_name)):
return None
try:
with open(os.path.join('/sys/class/block', part_name, 'partition'), 'r') as f:
part_num = f.read().strip()
except FileNotFoundError:
return part_name
else:
if part_name.startswith(('nvme', 'pmem')):
# nvme/pmem partitions would be like nvmen1p1 where disk is nvmen1
part_num = f'p{part_num}'
return part_name.rsplit(part_num, 1)[0].strip()
@private
def get_partition_for_disk(self, disk, partition):
if disk.startswith(('nvme', 'pmem')):
# FIXME: This is a hack for nvme/pmem disks, however let's please come up with a better way
# to link disks with their partitions
return f'{disk}p{partition}'
else:
return f'{disk}{partition}'
| 6,866 | Python | .py | 165 | 32.921212 | 111 | 0.61241 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,917 | gmail.py | truenas_middleware/src/middlewared/middlewared/plugins/mail_/gmail.py | import base64
from threading import Lock
import googleapiclient
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
import google_auth_httplib2
import httplib2
from middlewared.service import private, Service
class GmailService:
def __init__(self, config):
self.config = config
self._lock = Lock()
self._service = None
def __eq__(self, other):
return isinstance(other, GmailService) and self.config["oauth"] == other.config["oauth"]
@property
def service(self):
with self._lock:
if self._service is None:
credentials = Credentials.from_authorized_user_info(self.config["oauth"])
# `google-api-python-client` is not thread-safe which can lead to interpreter segfaults.
# We fix this by providing every thread its own `httplib2.Http()` object.
# See https://googleapis.github.io/google-api-python-client/docs/thread_safety.html
self._service = build(
"gmail", "v1",
credentials=credentials,
requestBuilder=lambda http, *args, **kwargs: googleapiclient.http.HttpRequest(
google_auth_httplib2.AuthorizedHttp(credentials, http=httplib2.Http()),
*args, **kwargs,
),
)
return self._service
def close(self):
with self._lock:
if self._service is not None:
self._service.close()
self._service = None
class MailService(Service):
gmail_service = None
@private
def gmail_initialize(self):
config = self.middleware.call_sync("mail.config")
if self.gmail_service is not None:
self.gmail_service.close()
self.gmail_service = self.middleware.call_sync("mail.gmail_build_service", config)
@private
def gmail_build_service(self, config):
if config["oauth"]:
return GmailService(config)
return None
@private
def gmail_send(self, message, config, _retry_broken_pipe=True):
gmail_service = self.middleware.call_sync("mail.gmail_build_service", config)
if gmail_service == self.gmail_service:
# Use existing gmail service if credentials match to avoid extra access token refresh
gmail_service = self.gmail_service
else:
_retry_broken_pipe = False
if gmail_service is None:
raise RuntimeError("GMail service is not initialized")
try:
gmail_service.service.users().messages().send(userId="me", body={
"raw": base64.urlsafe_b64encode(message.as_string().encode("ascii")).decode("ascii"),
}).execute()
except BrokenPipeError:
if not _retry_broken_pipe:
raise
self.middleware.logger.debug("BrokenPipeError in gmail_send, retrying")
if self.gmail_service is not None:
self.gmail_service.close()
return self.gmail_send(message, config, _retry_broken_pipe=False)
if gmail_service != self.gmail_service:
gmail_service.close()
async def setup(middleware):
await middleware.call("mail.gmail_initialize")
| 3,326 | Python | .py | 75 | 33.826667 | 104 | 0.62651 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,918 | kernel.py | truenas_middleware/src/middlewared/middlewared/plugins/dlm_/kernel.py | import binascii
import contextlib
import ctypes
import glob
import ipaddress
import os
import os.path
import pathlib
import socket
import subprocess
from middlewared.service import Service
class sockaddr_in(ctypes.Structure):
_fields_ = [('sa_family', ctypes.c_ushort), # sin_family
('sin_port', ctypes.c_ushort),
('sin_addr', ctypes.c_byte * 4),
('__pad', ctypes.c_byte * 8)] # struct sockaddr_in is 16 bytes
def to_sockaddr(address, port=None):
addr_obj = ipaddress.ip_address(address)
if addr_obj.version == 4:
addr = sockaddr_in()
addr.sa_family = ctypes.c_ushort(socket.AF_INET)
if port:
addr.sin_port = ctypes.c_ushort(socket.htons(port))
if address:
bytes_ = [int(i) for i in address.split('.')]
addr.sin_addr = (ctypes.c_byte * 4)(*bytes_)
else:
raise NotImplementedError('IPv6 not implemented')
return addr
class KernelDistributedLockManagerService(Service):
    """
    Simple synchronous interface with the kernel dlm.

    All operations are performed by writing to the dlm configfs tree
    (/sys/kernel/config/dlm/cluster) and sysfs tree (/sys/kernel/dlm).
    """

    class Config:
        private = True
        namespace = 'dlm.kernel'

    # Filesystem locations through which the kernel dlm is driven.
    SYSFS_DIR = '/sys/kernel/dlm'
    CLUSTER_DIR = '/sys/kernel/config/dlm/cluster'
    SPACES_DIR = CLUSTER_DIR + '/spaces'
    COMMS_DIR = CLUSTER_DIR + '/comms'
    CLUSTER_NAME = "HA"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-lockspace stopped flag, mirroring what we last wrote to each
        # lockspace's sysfs 'control' attribute.
        self.stopped = {}

    def load_kernel_module(self, name="HA"):
        """Ensure the dlm kernel module is loaded and the configfs cluster skeleton exists."""
        if not os.path.isdir(KernelDistributedLockManagerService.SYSFS_DIR):
            self.logger.info('Loading kernel dlm')
            try:
                # check=True is required here: without it subprocess.run() never raises
                # CalledProcessError, so the handler below was unreachable dead code.
                subprocess.run(["modprobe", "dlm"], check=True)
            except subprocess.CalledProcessError as e:
                self.logger.error('Failed to load dlm kernel module. Error %r', e)
        for d in (KernelDistributedLockManagerService.CLUSTER_DIR,
                  KernelDistributedLockManagerService.SPACES_DIR,
                  KernelDistributedLockManagerService.COMMS_DIR):
            with contextlib.suppress(FileExistsError):
                os.mkdir(d)
                if d == KernelDistributedLockManagerService.CLUSTER_DIR:
                    # Name the cluster when (and only when) we first create it.
                    with open(f'{d}/cluster_name', 'w') as f:
                        f.write(name)

    def comms_add_node(self, nodeid, addr, local, port=0, mark=None):
        """Publish a cluster node (id, address, local flag, optional skb mark) to the dlm."""
        # Create comms directory for this node if necessary
        node_path = os.path.join(KernelDistributedLockManagerService.COMMS_DIR, str(nodeid))
        with contextlib.suppress(FileExistsError):
            os.mkdir(node_path)
        # Set the nodeid
        with open(os.path.join(node_path, 'nodeid'), 'w') as f:
            f.write(str(nodeid))
        # Set the address: a packed sockaddr structure zero-padded to 128 bytes.
        sockbytes = bytes(to_sockaddr(addr, port))
        data = sockbytes + bytes(128 - len(sockbytes))
        with open(os.path.join(node_path, 'addr'), 'wb') as f:
            f.write(data)
        # Set skb mark.
        # Added to kernel 5.9 in a5b7ab6352bf ("fs: dlm: set skb mark for listen socket")
        if mark is not None:
            with open(os.path.join(node_path, 'mark'), 'w') as f:
                f.write(str(mark))
        # Finally set whether local or not
        with open(os.path.join(node_path, 'local'), 'w') as f:
            f.write('1' if local else '0')

    def comms_remove_node(self, nodeid):
        """Remove a node's comms directory (no-op if it is already absent)."""
        node_path = os.path.join(KernelDistributedLockManagerService.COMMS_DIR, str(nodeid))
        with contextlib.suppress(FileNotFoundError):
            os.rmdir(node_path)

    def comms_node_ready(self, nodeid):
        """Return True if the node's comms directory has been created."""
        p = pathlib.Path(KernelDistributedLockManagerService.COMMS_DIR, str(nodeid))
        return p.is_dir()

    def set_sysfs(self, section, attribute, value):
        """Write value to /sys/kernel/dlm/<section>/<attribute>."""
        with open(os.path.join(KernelDistributedLockManagerService.SYSFS_DIR, section, attribute), 'w') as f:
            f.write(str(value))

    def set_sysfs_control(self, lockspace_name, value):
        self.set_sysfs(lockspace_name, 'control', value)

    def set_sysfs_event_done(self, lockspace_name, value):
        self.logger.debug('Event done lockspace %s value %s', lockspace_name, value)
        self.set_sysfs(lockspace_name, 'event_done', value)

    def set_sysfs_id(self, lockspace_name, value):
        self.set_sysfs(lockspace_name, 'id', value)

    def set_sysfs_nodir(self, lockspace_name, value):
        self.set_sysfs(lockspace_name, 'nodir', value)

    def lockspace_set_global_id(self, lockspace_name):
        """Set the lockspace global id: crc32 of 'dlm:ls:<name>' plus a trailing NUL."""
        self.logger.debug('Setting global id for lockspace %s', lockspace_name)
        self.set_sysfs_id(lockspace_name, binascii.crc32(f'dlm:ls:{lockspace_name}\00'.encode('utf-8')))

    def lockspace_present(self, lockspace_name):
        """Return True if the kernel knows the lockspace (its sysfs directory exists)."""
        return os.path.isdir(os.path.join(KernelDistributedLockManagerService.SYSFS_DIR, lockspace_name))

    def lockspace_mark_stopped(self, lockspace_name):
        """Record that the lockspace is stopped without touching sysfs."""
        self.stopped[lockspace_name] = True

    def lockspace_is_stopped(self, lockspace_name):
        return self.stopped.get(lockspace_name, False)

    def lockspace_stop(self, lockspace_name):
        """Stop the lockspace if currently running. Return True if a transition occurred."""
        if not self.stopped.get(lockspace_name, False):
            self.set_sysfs_control(lockspace_name, 0)
            self.stopped[lockspace_name] = True
            self.logger.debug('Stopped lockspace %s', lockspace_name)
            return True
        else:
            return False

    def lockspace_start(self, lockspace_name):
        """Start the lockspace if currently stopped. Return True if a transition occurred."""
        if self.stopped.get(lockspace_name, False):
            self.set_sysfs_control(lockspace_name, 1)
            self.stopped[lockspace_name] = False
            self.logger.debug('Started lockspace %s', lockspace_name)
            return True
        else:
            return False

    def lockspace_add_node(self, lockspace_name, nodeid, weight=None):
        """
        Add the specified node to the lockspace
        """
        self.logger.debug('Adding node %s to lockspace %s', nodeid, lockspace_name)
        spaces_path = os.path.join(KernelDistributedLockManagerService.SPACES_DIR, lockspace_name)
        with contextlib.suppress(FileExistsError):
            os.mkdir(spaces_path)
        # Check to see if we already have the directory, and remove it if so
        # so dlm-kernel can notice they've left and rejoined.
        node_path = os.path.join(spaces_path, 'nodes', '%d' % nodeid)
        with contextlib.suppress(FileNotFoundError):
            os.rmdir(node_path)
        with contextlib.suppress(FileExistsError):
            os.mkdir(node_path)
        with open(os.path.join(node_path, 'nodeid'), 'w') as f:
            f.write(str(nodeid))
        if weight is not None:
            with open(os.path.join(node_path, 'weight'), 'w') as f:
                f.write(str(weight))

    def lockspace_remove_node(self, lockspace_name, nodeid):
        """
        Remove the specified nodeid from the lockspace.
        """
        self.logger.debug('Removing node %s from lockspace %s', nodeid, lockspace_name)
        node_path = os.path.join(KernelDistributedLockManagerService.SPACES_DIR, lockspace_name, 'nodes', '%d' % nodeid)
        with contextlib.suppress(FileNotFoundError):
            # Have verified that this directory under /sys will NOT fail with ENOTEMPTY
            os.rmdir(node_path)

    def lockspace_leave(self, lockspace_name):
        """
        Current node is leaving the lockspace.
        Remove all nodes and delete the lockspace.
        """
        self.logger.debug('Leaving lockspace %s', lockspace_name)
        spaces_path = os.path.join(KernelDistributedLockManagerService.SPACES_DIR, lockspace_name)
        with contextlib.suppress(FileNotFoundError):
            for d in glob.glob(os.path.join(spaces_path, 'nodes', '*')):
                os.rmdir(d)
            os.rmdir(spaces_path)
        if lockspace_name in self.stopped:
            del self.stopped[lockspace_name]

    def destroy(self):
        """Tear down the entire configfs cluster configuration (comms, spaces, cluster dir)."""
        with contextlib.suppress(FileNotFoundError):
            for dirname in glob.glob(os.path.join(KernelDistributedLockManagerService.COMMS_DIR, '*')):
                os.rmdir(dirname)
            for dirname in glob.glob(os.path.join(KernelDistributedLockManagerService.SPACES_DIR, '*')):
                os.rmdir(dirname)
            os.rmdir(KernelDistributedLockManagerService.CLUSTER_DIR)

    def node_lockspaces(self, nodeid):
        """
        Return an iterator that will yield the names of the lockspaces that contain
        the specified nodeid.
        """
        p = pathlib.Path(KernelDistributedLockManagerService.SPACES_DIR)
        for lsnp in p.glob(f'*/nodes/{nodeid}'):
            yield lsnp.parts[-3]
| 8,728 | Python | .py | 181 | 38.320442 | 120 | 0.636866 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,919 | sync.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/sync.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.schema import Bool, returns
from middlewared.service import accepts, CallError, job, periodic, private, Service
from .connection import KMIPServerMixin
class KMIPService(Service, KMIPServerMixin):

    @private
    def connection_config(self, data=None):
        """Assemble KMIP client settings, resolving certificate/CA file paths."""
        config = self.middleware.call_sync('kmip.config')
        if data:
            config.update(data)
        certs = self.middleware.call_sync('certificate.query', [['id', '=', config['certificate']]])
        cas = self.middleware.call_sync('certificateauthority.query', [['id', '=', config['certificate_authority']]])
        if not (certs and cas):
            raise CallError('Certificate/CA not setup correctly')
        cert, ca = certs[0], cas[0]
        result = dict(config)
        result['cert'] = cert['certificate_path']
        result['cert_key'] = cert['privatekey_path']
        result['ca'] = ca['certificate_path']
        return result

    @private
    def test_connection(self, data=None, raise_alert=False):
        """Check connectivity to the KMIP server; optionally raise a one-shot alert on failure."""
        try:
            result = self._test_connection(self.connection_config(data))
        except CallError as e:
            result = {'error': True, 'exception': str(e)}
        if not result['error']:
            return True
        if raise_alert:
            config = self.middleware.call_sync('kmip.config')
            self.middleware.call_sync(
                'alert.oneshot_create', 'KMIPConnectionFailed',
                {'server': config['server'], 'error': result['exception']}
            )
        return False

    @accepts(roles=['KMIP_READ'])
    @returns(Bool('pending_kmip_sync'))
    async def kmip_sync_pending(self):
        """
        Returns true or false based on if there are keys which are to be synced from local database to remote KMIP
        server or vice versa.
        """
        if await self.middleware.call('kmip.zfs_keys_pending_sync'):
            return True
        return await self.middleware.call('kmip.sed_keys_pending_sync')

    @periodic(interval=86400)
    @accepts(roles=['KMIP_WRITE'])
    @returns()
    async def sync_keys(self):
        """
        Sync ZFS/SED keys between KMIP Server and TN database.
        """
        # Nothing to do unless a sync is pending AND we are the single master node.
        if not await self.middleware.call('kmip.kmip_sync_pending'):
            return
        if not await self.middleware.call('failover.is_single_master_node'):
            return
        await self.middleware.call('kmip.sync_zfs_keys')
        await self.middleware.call('kmip.sync_sed_keys')

    @accepts(roles=['KMIP_WRITE'])
    @returns()
    async def clear_sync_pending_keys(self):
        """
        Clear all keys which are pending to be synced between KMIP server and TN database.
        For ZFS/SED keys, we remove the UID from local database with which we are able to retrieve ZFS/SED keys.
        It should be used with caution.
        """
        config = await self.middleware.call('kmip.config')
        for manage_field, clear_method in (
            ('manage_zfs_keys', 'kmip.clear_sync_pending_zfs_keys'),
            ('manage_sed_disks', 'kmip.clear_sync_pending_sed_keys'),
        ):
            if not config['enabled'] or not config[manage_field]:
                await self.middleware.call(clear_method)

    @private
    def delete_kmip_secret_data(self, uid):
        """Revoke and destroy the secret stored under `uid` on the KMIP server."""
        conn_config = self.connection_config()
        with self._connection(conn_config) as conn:
            return self._revoke_and_destroy_key(uid, conn, self.middleware.logger)

    @private
    @job(lock='initialize_kmip_keys')
    async def initialize_keys(self, job):
        """Populate the in-memory ZFS/SED key caches when KMIP is enabled on the master node."""
        kmip_config = await self.middleware.call('kmip.config')
        if not kmip_config['enabled']:
            return
        if not await self.middleware.call('failover.is_single_master_node'):
            return
        raise_alert = kmip_config['manage_zfs_keys'] or kmip_config['manage_sed_disks']
        connection_success = await self.middleware.call('kmip.test_connection', None, raise_alert)
        if kmip_config['manage_zfs_keys']:
            await self.middleware.call('kmip.initialize_zfs_keys', connection_success)
        if kmip_config['manage_sed_disks']:
            await self.middleware.call('kmip.initialize_sed_keys', connection_success)

    @private
    async def kmip_memory_keys(self):
        """Return the in-memory ZFS and SED key caches."""
        zfs_keys = await self.middleware.call('kmip.retrieve_zfs_keys')
        sed_keys = await self.middleware.call('kmip.sed_keys')
        return {'zfs': zfs_keys, 'sed': sed_keys}

    @private
    async def update_memory_keys(self, data):
        """Replace the in-memory key caches with whichever of 'zfs'/'sed' is present in data."""
        for key, method in (('zfs', 'update_zfs_keys'), ('sed', 'update_sed_keys')):
            if key in data:
                await self.middleware.call(f'kmip.{method}', data[key])
| 4,758 | Python | .py | 103 | 36.786408 | 116 | 0.626589 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,920 | zfs_keys.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/zfs_keys.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.service import job, private, Service
from .connection import KMIPServerMixin
class KMIPService(Service, KMIPServerMixin):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # In-memory cache of dataset name -> encryption key; the system relies on
        # this cache rather than querying the KMIP server at key-use time.
        self.zfs_keys = {}

    @private
    async def zfs_keys_pending_sync(self):
        """Return True if any dataset key still needs to be pushed to / pulled from the KMIP server."""
        config = await self.middleware.call('kmip.config')
        for ds in await self.middleware.call('datastore.query', 'storage.encrypteddataset'):
            # Push pending: KMIP management is on but the key is still in the DB
            # (or not yet cached in memory).
            if config['enabled'] and config['manage_zfs_keys'] and (
                ds['encryption_key'] or ds['name'] not in self.zfs_keys
            ):
                return True
            # Pull pending: KMIP management is off but the key still lives on the server.
            elif any(not config[k] for k in ('enabled', 'manage_zfs_keys')) and ds['kmip_uid']:
                return True
        return False

    @private
    def push_zfs_keys(self, ids=None):
        """Push dataset keys from the local database to the KMIP server.

        Keys present in the database are registered with the server and, on success,
        scrubbed from the database (only the KMIP UID is kept). Keys already on the
        server are loaded into the in-memory cache. Returns the names of datasets
        that failed to sync.
        """
        datasets = self.middleware.call_sync(
            'datastore.query', 'storage.encrypteddataset', [['id', 'in', ids]] if ids else []
        )
        existing_datasets = {ds['name']: ds for ds in self.middleware.call_sync('pool.dataset.query')}
        failed = []
        with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
            for ds in filter(lambda d: d['name'] in existing_datasets, datasets):
                if not ds['encryption_key']:
                    # We want to make sure we have the KMIP server's keys and in-memory keys in sync
                    try:
                        if ds['name'] in self.zfs_keys and self.middleware.call_sync(
                            'zfs.dataset.check_key', ds['name'], {'key': self.zfs_keys[ds['name']]}
                        ):
                            continue
                        else:
                            key = self._retrieve_secret_data(ds['kmip_uid'], conn)
                    except Exception as e:
                        self.middleware.logger.debug(f'Failed to retrieve key for {ds["name"]}: {e}')
                        # BUG FIX: previously this fell through and re-registered an EMPTY
                        # key (revoking the old UID), destroying the server-side copy of
                        # the real key. Report the dataset as failed instead.
                        failed.append(ds['name'])
                    else:
                        self.zfs_keys[ds['name']] = key
                    continue
                self.zfs_keys[ds['name']] = ds['encryption_key']
                destroy_successful = False
                if ds['kmip_uid']:
                    # This needs to be revoked and destroyed
                    destroy_successful = self._revoke_and_destroy_key(ds['kmip_uid'], conn, self.middleware.logger)
                    if not destroy_successful:
                        self.middleware.logger.debug(f'Failed to destroy key from KMIP Server for {ds["name"]}')
                try:
                    uid = self._register_secret_data(ds['name'], self.zfs_keys[ds['name']], conn)
                except Exception:
                    failed.append(ds['name'])
                    update_data = {'kmip_uid': None} if destroy_successful else {}
                else:
                    update_data = {'encryption_key': None, 'kmip_uid': uid}
                if update_data:
                    self.middleware.call_sync('datastore.update', 'storage.encrypteddataset', ds['id'], update_data)
        # Drop cached keys for datasets which no longer exist.
        self.zfs_keys = {k: v for k, v in self.zfs_keys.items() if k in existing_datasets}
        return failed

    @private
    def pull_zfs_keys(self):
        """Pull dataset keys back into the local database (KMIP management disabled).

        Database keys take precedence, then the in-memory cache, then the KMIP server.
        Returns the names of datasets that failed to sync.
        """
        datasets = self.middleware.call_sync('datastore.query', 'storage.encrypteddataset', [['kmip_uid', '!=', None]])
        existing_datasets = {ds['name']: ds for ds in self.middleware.call_sync('pool.dataset.query')}
        failed = []
        connection_successful = self.middleware.call_sync('kmip.test_connection')
        for ds in filter(lambda d: d['name'] in existing_datasets, datasets):
            try:
                if ds['encryption_key']:
                    key = ds['encryption_key']
                elif ds['name'] in self.zfs_keys and self.middleware.call_sync(
                    'zfs.dataset.check_key', ds['name'], {'key': self.zfs_keys[ds['name']]}
                ):
                    key = self.zfs_keys[ds['name']]
                elif connection_successful:
                    with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
                        key = self._retrieve_secret_data(ds['kmip_uid'], conn)
                else:
                    raise Exception('Failed to sync dataset')
            except Exception:
                failed.append(ds['name'])
            else:
                update_data = {'encryption_key': key, 'kmip_uid': None}
                self.middleware.call_sync('datastore.update', 'storage.encrypteddataset', ds['id'], update_data)
                self.zfs_keys.pop(ds['name'], None)
                if connection_successful:
                    self.middleware.call_sync('kmip.delete_kmip_secret_data', ds['kmip_uid'])
        self.zfs_keys = {k: v for k, v in self.zfs_keys.items() if k in existing_datasets}
        return failed

    @private
    @job(lock=lambda args: f'kmip_sync_zfs_keys_{args}')
    def sync_zfs_keys(self, job, ids=None):
        """Push or pull ZFS keys depending on configuration; raise an alert on failures."""
        if not self.middleware.call_sync('kmip.zfs_keys_pending_sync'):
            return
        config = self.middleware.call_sync('kmip.config')
        conn_successful = self.middleware.call_sync('kmip.test_connection', None, True)
        if config['enabled'] and config['manage_zfs_keys']:
            if conn_successful:
                failed = self.push_zfs_keys(ids)
            else:
                return
        else:
            failed = self.pull_zfs_keys()
        if failed:
            self.middleware.call_sync(
                'alert.oneshot_create', 'KMIPZFSDatasetsSyncFailure', {'datasets': ','.join(failed)}
            )
        self.middleware.call_hook_sync('kmip.zfs_keys_sync')
        return failed

    @private
    async def clear_sync_pending_zfs_keys(self):
        """Forget pending-sync state: detach KMIP UIDs for rows that still hold a DB
        key and delete rows whose key exists only on the KMIP server."""
        to_remove = []
        for ds in await self.middleware.call(
            'datastore.query', 'storage.encrypteddataset', [['kmip_uid', '!=', None]]
        ):
            if ds['encryption_key']:
                # BUG FIX: the row id was missing, so datastore.update received the data
                # dict in the id position and the UID was never cleared.
                await self.middleware.call(
                    'datastore.update', 'storage.encrypteddataset', ds['id'], {'kmip_uid': None}
                )
            else:
                to_remove.append(ds['id'])
        await self.middleware.call('datastore.delete', 'storage.encrypteddataset', [['id', 'in', to_remove]])
        self.zfs_keys = {}

    @private
    def initialize_zfs_keys(self, connection_success):
        """Build the in-memory key cache at boot and unlock any locked datasets we have keys for."""
        locked_datasets = [ds['id'] for ds in self.middleware.call_sync('zfs.dataset.locked_datasets')]
        for ds in self.middleware.call_sync('datastore.query', 'storage.encrypteddataset'):
            if ds['encryption_key']:
                self.zfs_keys[ds['name']] = ds['encryption_key']
            elif ds['kmip_uid'] and connection_success:
                try:
                    with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
                        key = self._retrieve_secret_data(ds['kmip_uid'], conn)
                except Exception:
                    self.middleware.logger.debug(f'Failed to retrieve key for {ds["name"]}')
                else:
                    self.zfs_keys[ds['name']] = key
            if ds['name'] in self.zfs_keys and ds['name'] in locked_datasets:
                self.middleware.call_sync('pool.dataset.unlock', ds['name'])

    @private
    async def retrieve_zfs_keys(self):
        """Return the in-memory dataset key cache."""
        return self.zfs_keys

    @private
    async def reset_zfs_key(self, dataset, kmip_uid):
        """Drop a dataset's cached key and best-effort remove its KMIP copy."""
        self.zfs_keys.pop(dataset, None)
        if kmip_uid:
            try:
                await self.middleware.call('kmip.delete_kmip_secret_data', kmip_uid)
            except Exception as e:
                self.middleware.logger.debug(
                    f'Failed to remove encryption key from KMIP server for "{dataset}" Dataset: {e}'
                )
        await self.middleware.call_hook('kmip.zfs_keys_sync')

    @private
    async def update_zfs_keys(self, zfs_keys):
        """Replace the in-memory dataset key cache wholesale."""
        self.zfs_keys = zfs_keys
| 8,127 | Python | .py | 156 | 38.589744 | 119 | 0.566114 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,921 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/utils.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
SUPPORTED_SSL_VERSIONS = ['PROTOCOL_TLSv1', 'PROTOCOL_TLSv1_1', 'PROTOCOL_TLSv1_2']
| 251 | Python | .py | 5 | 49 | 83 | 0.77551 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,922 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/__init__.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
async def initialize_kmip_keys(middleware):
    """Kick off KMIP key initialization if the KMIP service is enabled."""
    config = await middleware.call('kmip.config')
    if not config['enabled']:
        return
    await middleware.call('kmip.initialize_keys')
async def __event_system_ready(middleware, event_type, args):
    # 'system.ready' event handler: initialize KMIP keys once the system is up.
    # event_type/args are part of the event-subscriber signature and unused here.
    await initialize_kmip_keys(middleware)
async def setup(middleware):
    """Plugin entry point: register KMIP network activity, hook 'system.ready',
    and initialize keys immediately if the system is already up."""
    await middleware.call('network.general.register_activity', 'kmip', 'KMIP')
    middleware.event_subscribe('system.ready', __event_system_ready)
    already_ready = await middleware.call('system.ready')
    if already_ready:
        await initialize_kmip_keys(middleware)
| 702 | Python | .py | 14 | 46.214286 | 78 | 0.75549 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,923 | attachment.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/attachment.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.common.attachment.certificate import CertificateServiceAttachmentDelegate
from middlewared.common.ports import ServicePortDelegate
class KmipCertificateAttachment(CertificateServiceAttachmentDelegate):
    """Certificate attachment delegate tying the KMIP service to its certificate.

    NOTE(review): SERVICE_VERB 'start' is presumably the action applied when the
    attached certificate changes — confirm against CertificateServiceAttachmentDelegate.
    """
    HUMAN_NAME = 'KMIP Service'
    SERVICE = 'kmip'
    SERVICE_VERB = 'start'
class KMIPServicePortDelegate(ServicePortDelegate):
    """Port delegate advertising the KMIP service's port usage.

    `port_fields` names the kmip config field(s) holding ports in use —
    presumably consumed by the port registry; confirm against ServicePortDelegate.
    """
    name = 'kmip'
    namespace = 'kmip'
    port_fields = ['port']
    title = 'KMIP Service'
async def setup(middleware):
    """Register the KMIP certificate and port delegates with their registries."""
    registrations = (
        ('certificate.register_attachment_delegate', KmipCertificateAttachment),
        ('port.register_attachment_delegate', KMIPServicePortDelegate),
    )
    for method, delegate_factory in registrations:
        await middleware.call(method, delegate_factory(middleware))
| 859 | Python | .py | 18 | 44.222222 | 108 | 0.80649 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,924 | sed_keys.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/sed_keys.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.service import job, private, Service
from .connection import KMIPServerMixin
'''
SED keys are stored in 2 places:
1) system.advanced table
2) storage.disk table
There are 3 possible cases which we need to handle for storage.disk
1) A disk row can have SED key
2) A disk row can have a blank SED key
3) A disk row can be removed
There are 2 possible cases which we need to handle for system.advanced
1) system.advanced.config can have global SED password
2) system.advanced.config cannot have global SED password
'''
class KMIPService(Service, KMIPServerMixin):

    def __init__(self, *args, **kwargs):
        """
        System will never directly query KMIP server to determine the SED keys when it actually uses the SED keys.
        Instead when middleware boots, we will cache keys and maintain a record of them in memory which the system
        will use and rely on for SED related tasks.
        """
        super().__init__(*args, **kwargs)
        # disk identifier -> SED password (in-memory cache)
        self.disks_keys = {}
        # cached global SED password ('' means unset)
        self.global_sed_key = ''

    @private
    async def sed_keys_pending_sync(self):
        """
        We determine if we have SED keys pending sync by verifying following scenarios:
        1) kmip.config.enabled and kmip.config.manage_sed_disks are set - which means we have to push all SED keys
        from storage.disk and system.advanced to KMIP server
        2) kmip.config.enabled or kmip.config.manage_sed_disks is unset ( any one of them ) - which means we have to
        pull SED keys from the KMIP server for the relevant rows
        How the flow is designed to work for storage.disk when a key is added is following:
        1) User adds password for disk
        2) Password is saved in database
        3) If KMIP service is enabled, kmip sync is initiated for SED keys
        4) For the disk in question, value of password in database is given priority and pushed to the KMIP server
        5) If KMIP uid field is already set for the disk in question, system is going to remove that key and push the
        new password to the KMIP server.
        6) Once the key has been pushed successfully, it is removed from the database and added to memory for fast
        retrieval.
        For above case, we determine that a key needs to be synced based on the fact that KMIP sync is enabled for
        SED keys and we have sed key saved in database.
        Flow when key is removed for storage.disk:
        1) User sets empty value for the password
        2) It is saved in database
        3) If the key had been saved already to KMIP server, it is removed and also it
        is removed from the memory.
        4) Difference from above case is that a sync is not initiated in this case and on key
        removal from database, KMIP uid is revoked/removed at the same time.
        For the above case, we don't get to a state where we can have pending sync as database is updated instantly
        removing kmip uid and flushing password.
        When a disk is removed, the same steps as above are carried out.
        Above cases took into account when KMIP sync was enabled, when KMIP sync is disabled for SED keys,
        following steps are performed to determine if we have kmip sync pending for SED keys.
        Flow when KMIP sync is disabled for SED keys:
        1) KMIP server is contacted for disks which have kmip uid field set.
        2) Key is retrieved and updated for the disk in question.
        3) If KMIP server could not be contacted, we have sync pending for the disks in question then.
        4) Meanwhile if the user sets a new password for the disks, that password will be given precedence over
        the key saved in KMIP Server and it will be removed as soon as KMIP server can be contacted.
        For the above case, sync is declared pending if kmip uid field has a uid present.
        The same steps are followed for system.advanced except for the one where we remove disks which is not
        true for system.advanced.
        During this, we also declare sync is pending if we have SED sync enabled and the keys
        are not in the memory as that is what we rely on while actually using the SED keys functionality.
        """
        adv_config = await self.middleware.call('datastore.config', 'system.advanced', {'prefix': 'adv_'})
        disks = await self.middleware.call('datastore.query', 'storage.disk', [], {'prefix': 'disk_'})
        config = await self.middleware.call('kmip.config')
        # True when keys should live on the KMIP server (push direction).
        check_db_key = config['enabled'] and config['manage_sed_disks']
        for disk in disks:
            if check_db_key and (disk['passwd'] or (disk['kmip_uid'] and disk['identifier'] not in self.disks_keys)):
                return True
            elif not check_db_key and disk['kmip_uid']:
                return True
        if check_db_key and (adv_config['sed_passwd'] or (not self.global_sed_key and adv_config['kmip_uid'])):
            return True
        elif not check_db_key and adv_config['kmip_uid']:
            return True
        return False

    @private
    def push_sed_keys(self, ids=None):
        """
        When push SED keys is initiated, we carry out following steps:
        1) For any disk which has it's key stored in KMIP server and not in database, we first update memory
        cache to reflect the key present in the KMIP server.
        2) If the disk in question does not have a SED key and no kmip uid, we don't have a key set for it and we
        dismiss that disk.
        3) For point (1), the key has already been pushed to the KMIP server so we don't need to do that again.
        4) Moving on, we are left with the case where we have SED key stored in database for disk with/without a
        kmip uid present in the disk row.
        5) If kmip uid present for the disk in question, we first revoke/remove it.
        6) Existing SED key present in the database is pushed to the KMIP Server and database is updated
        with new kmip uid.
        The same steps are followed for system.advanced.
        """
        adv_config = self.middleware.call_sync('datastore.config', 'system.advanced', {'prefix': 'adv_'})
        failed = []
        with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
            for disk in self.middleware.call_sync(
                'datastore.query', 'storage.disk', [['identifier', 'in', ids]] if ids else [], {'prefix': 'disk_'}
            ):
                if not disk['passwd'] and disk['kmip_uid']:
                    try:
                        key = self._retrieve_secret_data(disk['kmip_uid'], conn)
                    except Exception as e:
                        self.middleware.logger.debug(f'Failed to retrieve key for {disk["identifier"]}: {e}')
                        # NOTE(review): on retrieval failure control FALLS THROUGH to the
                        # push section below with an empty passwd, revoking the existing
                        # KMIP uid and registering an empty key — confirm this is intended.
                    else:
                        self.disks_keys[disk['identifier']] = key
                        continue
                elif not disk['passwd']:
                    # No DB password and no KMIP uid: nothing to sync for this disk.
                    continue
                self.disks_keys[disk['identifier']] = disk['passwd']
                destroy_successful = False
                if disk['kmip_uid']:
                    # This needs to be revoked and destroyed
                    destroy_successful = self._revoke_and_destroy_key(
                        disk['kmip_uid'], conn, self.middleware.logger, disk['identifier']
                    )
                try:
                    uid = self._register_secret_data(disk['identifier'], self.disks_keys[disk['identifier']], conn)
                except Exception:
                    failed.append(disk['identifier'])
                    update_data = {'kmip_uid': None} if destroy_successful else {}
                else:
                    # Registered on the server: scrub the password from the database.
                    update_data = {'passwd': '', 'kmip_uid': uid}
                if update_data:
                    self.middleware.call_sync(
                        'datastore.update', 'storage.disk', disk['identifier'], update_data, {'prefix': 'disk_'}
                    )
            # Same push/refresh handling for the global SED password in system.advanced.
            if not adv_config['sed_passwd'] and adv_config['kmip_uid']:
                try:
                    key = self._retrieve_secret_data(adv_config['kmip_uid'], conn)
                except Exception:
                    failed.append('Global SED Key')
                else:
                    self.global_sed_key = key
            elif adv_config['sed_passwd']:
                if adv_config['kmip_uid']:
                    self._revoke_and_destroy_key(
                        adv_config['kmip_uid'], conn, self.middleware.logger, 'SED Global Password'
                    )
                    self.middleware.call_sync(
                        'datastore.update', 'system.advanced', adv_config['id'], {'adv_kmip_uid': None}
                    )
                self.global_sed_key = adv_config['sed_passwd']
                try:
                    uid = self._register_secret_data('global_sed_key', self.global_sed_key, conn)
                except Exception:
                    failed.append('Global SED Key')
                else:
                    self.middleware.call_sync(
                        'datastore.update', 'system.advanced',
                        adv_config['id'], {'adv_sed_passwd': '', 'adv_kmip_uid': uid}
                    )
        return failed

    @private
    def pull_sed_keys(self):
        """
        We pull SED keys from the KMIP server when SED sync has been disabled. In this case, following steps
        are executed:
        1) If a disk has a SED key saved in database, that is given preference over the key saved in the KMIP server.
        Which in this case kmip uid is simply removed and database is updated to reflect that.
        2) If a disk does not have a SED key saved in the database, we first check if we have the key saved
        in memory cache and use that to update the database and remove the kmip uid from the database.
        3) If memory cache also does not have the SED key, we finally try to retrieve the key from the KMIP server
        and if we succeed, we update the database to reflect that.
        The same steps are carried out for system.advanced.
        """
        failed = []
        connection_successful = self.middleware.call_sync('kmip.test_connection')
        for disk in self.middleware.call_sync(
            'datastore.query', 'storage.disk', [['kmip_uid', '!=', None]], {'prefix': 'disk_'}
        ):
            try:
                # Precedence: DB password, then memory cache, then KMIP server.
                if disk['passwd']:
                    key = disk['passwd']
                elif self.disks_keys.get(disk['identifier']):
                    key = self.disks_keys[disk['identifier']]
                elif connection_successful:
                    with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
                        key = self._retrieve_secret_data(disk['kmip_uid'], conn)
                else:
                    raise Exception('Failed to sync disk')
            except Exception:
                failed.append(disk['identifier'])
            else:
                update_data = {'passwd': key, 'kmip_uid': None}
                self.middleware.call_sync(
                    'datastore.update', 'storage.disk', disk['identifier'], update_data, {'prefix': 'disk_'}
                )
                self.disks_keys.pop(disk['identifier'], None)
                if connection_successful:
                    self.middleware.call_sync('kmip.delete_kmip_secret_data', disk['kmip_uid'])
        adv_config = self.middleware.call_sync('datastore.config', 'system.advanced', {'prefix': 'adv_'})
        if adv_config['kmip_uid']:
            key = None
            if adv_config['sed_passwd']:
                key = adv_config['sed_passwd']
            elif self.global_sed_key:
                key = self.global_sed_key
            elif connection_successful:
                try:
                    with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
                        key = self._retrieve_secret_data(adv_config['kmip_uid'], conn)
                except Exception:
                    failed.append('Global SED Key')
            if key:
                self.middleware.call_sync(
                    'datastore.update', 'system.advanced',
                    adv_config['id'], {
                        'adv_sed_passwd': key, 'adv_kmip_uid': None
                    }
                )
                self.global_sed_key = ''
                if connection_successful:
                    self.middleware.call_sync('kmip.delete_kmip_secret_data', adv_config['kmip_uid'])
        return failed

    @job(lock=lambda args: f'kmip_sync_sed_keys_{args}')
    @private
    def sync_sed_keys(self, job, ids=None):
        """
        SED keys are synced if we have sync pending for SED keys. If SED sync is enabled with KMIP, we push
        SED keys, else we pull SED keys and update the database in both cases.
        """
        if not self.middleware.call_sync('kmip.sed_keys_pending_sync'):
            return
        config = self.middleware.call_sync('kmip.config')
        conn_successful = self.middleware.call_sync('kmip.test_connection', None, True)
        if config['enabled'] and config['manage_sed_disks']:
            if conn_successful:
                failed = self.push_sed_keys(ids)
            else:
                return
        else:
            failed = self.pull_sed_keys()
        # Preserve the full failure list for the caller; the 'Global SED Key'
        # sentinel is split out so it raises its own dedicated alert.
        ret_failed = failed.copy()
        try:
            failed.remove('Global SED Key')
        except ValueError:
            pass
        else:
            self.middleware.call_sync('alert.oneshot_create', 'KMIPSEDGlobalPasswordSyncFailure')
        finally:
            if failed:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'KMIPSEDDisksSyncFailure', {'disks': ','.join(failed)}
                )
            self.middleware.call_hook_sync('kmip.sed_keys_sync')
        return ret_failed

    @private
    async def clear_sync_pending_sed_keys(self):
        """
        We expose an option to clear keys which are pending kmip sync, this can be done if the user knows for certain
        that the KMIP server can never be reached now and he/she does not want the system trying again to initiate
        a sync with the KMIP server.
        """
        for disk in await self.middleware.call(
            'datastore.query', 'storage.disk', [['kmip_uid', '!=', None]], {'prefix': 'disk_'}
        ):
            await self.middleware.call(
                'datastore.update', 'storage.disk', disk['identifier'], {'disk_kmip_uid': None}
            )
        adv_config = await self.middleware.call('datastore.config', 'system.advanced', {'prefix': 'adv_'})
        if adv_config['kmip_uid']:
            await self.middleware.call(
                'datastore.update', 'system.advanced', adv_config['id'], {'adv_kmip_uid': None}
            )
        self.global_sed_key = ''
        self.disks_keys = {}

    @private
    def initialize_sed_keys(self, connection_success):
        """
        On middleware boot, we initialize memory cache to contain all the SED keys which we can later use
        for SED related functionality.
        """
        for disk in self.middleware.call_sync(
            'datastore.query', 'storage.disk', [], {'prefix': 'disk_'}
        ):
            if disk['passwd']:
                self.disks_keys[disk['identifier']] = disk['passwd']
            elif disk['kmip_uid'] and connection_success:
                try:
                    with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
                        key = self._retrieve_secret_data(disk['kmip_uid'], conn)
                except Exception:
                    self.middleware.logger.debug(f'Failed to retrieve SED disk key for {disk["identifier"]}')
                else:
                    self.disks_keys[disk['identifier']] = key
        adv_config = self.middleware.call_sync('datastore.config', 'system.advanced', {'prefix': 'adv_'})
        if adv_config['sed_passwd']:
            self.global_sed_key = adv_config['sed_passwd']
        elif connection_success and adv_config['kmip_uid']:
            try:
                with self._connection(self.middleware.call_sync('kmip.connection_config')) as conn:
                    key = self._retrieve_secret_data(adv_config['kmip_uid'], conn)
            except Exception:
                self.middleware.logger.debug('Failed to retrieve global SED key')
            else:
                self.global_sed_key = key

    @private
    async def update_sed_keys(self, data):
        """Replace whichever in-memory SED caches are present in `data`."""
        if 'global_password' in data:
            self.global_sed_key = data['global_password']
        if 'sed_disks_keys' in data:
            self.disks_keys = data['sed_disks_keys']

    @private
    async def sed_keys(self):
        """Return both in-memory SED caches (global password and per-disk keys)."""
        return {
            'global_password': await self.sed_global_password(),
            'sed_disks_keys': await self.retrieve_sed_disks_keys(),
        }

    @private
    async def sed_global_password(self):
        """Return the cached global SED password ('' when unset)."""
        return self.global_sed_key

    @private
    async def reset_sed_global_password(self, kmip_uid):
        """Drop the cached global SED password and best-effort remove its KMIP copy."""
        self.global_sed_key = ''
        if kmip_uid:
            try:
                await self.middleware.call('kmip.delete_kmip_secret_data', kmip_uid)
            except Exception as e:
                self.middleware.logger.debug(
                    f'Failed to remove password from KMIP server for SED Global key: {e}'
                )

    @private
    async def reset_sed_disk_password(self, disk_id, kmip_uid):
        """Drop a disk's cached SED password and best-effort remove its KMIP copy."""
        self.disks_keys.pop(disk_id, None)
        if kmip_uid:
            try:
                await self.middleware.call('kmip.delete_kmip_secret_data', kmip_uid)
            except Exception as e:
                self.middleware.logger.debug(
                    f'Failed to remove password from KMIP server for {disk_id}: {e}'
                )
        await self.middleware.call_hook('kmip.sed_keys_sync')

    @private
    async def retrieve_sed_disks_keys(self):
        """Return the in-memory per-disk SED key cache."""
        return self.disks_keys
| 18,274 | Python | .py | 344 | 40.648256 | 117 | 0.597999 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,925 | connection.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/connection.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import contextlib
import socket
import uuid
from kmip.core import enums
from kmip.pie.client import ProxyKmipClient
from kmip.pie.exceptions import ClientConnectionFailure, ClientConnectionNotOpen, KmipOperationFailure
from kmip.pie.objects import SecretData
from middlewared.service import CallError
class KMIPServerMixin:
    """Low-level helpers shared by services that talk to a KMIP server."""

    @contextlib.contextmanager
    def _connection(self, data=None):
        """Yield an open `ProxyKmipClient` built from a kmip config dict.

        Connection-level failures are normalized to CallError; KMIP operation
        failures raised inside the `with` body propagate unchanged.
        """
        self.middleware.call_sync('network.general.will_perform_activity', 'kmip')
        config = data or {}
        # Translate our config keys into ProxyKmipClient keyword arguments,
        # skipping anything unset/empty.
        kwargs = {}
        for client_arg, config_key in (
            ('hostname', 'server'),
            ('port', 'port'),
            ('cert', 'cert'),
            ('key', 'cert_key'),
            ('ca', 'ca'),
            ('ssl_version', 'ssl_version'),
        ):
            if config.get(config_key):
                kwargs[client_arg] = config[config_key]
        try:
            with ProxyKmipClient(**kwargs) as conn:
                yield conn
        except (ClientConnectionFailure, ClientConnectionNotOpen, socket.timeout) as e:
            raise CallError(f'Failed to connect to KMIP Server: {e}')

    def _test_connection(self, data=None):
        """Probe connectivity to the KMIP server; returns a status dict, never raises."""
        try:
            with self._connection(data):
                pass
        except Exception as e:
            return {'error': True, 'exception': str(e)}
        return {'error': False, 'exception': None}

    def _revoke_key(self, uid, conn):
        """Revoke the managed object `uid` on the KMIP server."""
        try:
            conn.revoke(enums.RevocationReasonCode.CESSATION_OF_OPERATION, uid)
        except KmipOperationFailure as e:
            raise CallError(f'Failed to revoke key: {e}')

    def _revoke_and_destroy_key(self, uid, conn, logger=None, key_id=None):
        """Best-effort revoke followed by destroy.

        Returns True only when the destroy succeeds; failures are optionally
        logged through `logger`.
        """
        try:
            self._revoke_key(uid, conn)
        except Exception as e:
            if logger:
                logger.debug(f'Failed to revoke key for {key_id or uid}: {e}')
        try:
            self._destroy_key(uid, conn)
        except Exception as e:
            if logger:
                logger.debug(f'Failed to destroy key for {key_id or uid}: {e}')
            return False
        return True

    def _destroy_key(self, uid, conn):
        """Destroy the managed object `uid` on the KMIP server."""
        try:
            conn.destroy(uid)
        except KmipOperationFailure as e:
            raise CallError(f'Failed to destroy key: {e}')

    def _retrieve_secret_data(self, uid, conn):
        """Fetch managed object `uid` and return its decoded SecretData value."""
        try:
            obj = conn.get(uid)
        except KmipOperationFailure as e:
            raise CallError(f'Failed to retrieve secret data: {e}')
        if not isinstance(obj, SecretData):
            raise CallError('Retrieved managed object is not secret data')
        return obj.value.decode()

    def _register_secret_data(self, name, key, conn):
        """Register `key` on the server as an activated PASSWORD secret; return its uid.

        A short random suffix keeps repeated registrations under the same
        `name` distinguishable. If activation fails, the freshly created key
        is destroyed before raising.
        """
        secret_data = SecretData(key.encode(), enums.SecretDataType.PASSWORD, name=f'{name}-{str(uuid.uuid4())[:7]}')
        try:
            uid = conn.register(secret_data)
        except KmipOperationFailure as e:
            raise CallError(f'Failed to register key with KMIP server: {e}')
        try:
            conn.activate(uid)
        except KmipOperationFailure as e:
            error = f'Failed to activate key: {e}'
            try:
                self._destroy_key(uid, conn)
            except CallError as ce:
                error += f'\nFailed to destroy created key: {ce}'
            raise CallError(error)
        return uid
| 3,763 | Python | .py | 93 | 30.376344 | 117 | 0.601313 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,926 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/update.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import middlewared.sqlalchemy as sa
from middlewared.async_validators import validate_port
from middlewared.schema import Bool, Dict, Int, Patch, Str
from middlewared.service import accepts, CallError, ConfigService, job, private, returns, ValidationErrors
from middlewared.validators import Port
from .utils import SUPPORTED_SSL_VERSIONS
class KMIPModel(sa.Model):
    """ORM model for the single-row KMIP client configuration (`system_kmip`)."""
    __tablename__ = 'system_kmip'
    id = sa.Column(sa.Integer(), primary_key=True)
    # Remote KMIP server host; NULL until one is configured.
    server = sa.Column(sa.String(128), default=None, nullable=True)
    # Name of the TLS protocol constant used for the handshake (see SUPPORTED_SSL_VERSIONS).
    ssl_version = sa.Column(sa.String(128), default='PROTOCOL_TLSv1_2')
    port = sa.Column(sa.SmallInteger(), default=5696)
    # Client certificate / CA used for the TLS handshake with the server.
    certificate_id = sa.Column(sa.ForeignKey('system_certificate.id'), index=True, nullable=True)
    certificate_authority_id = sa.Column(sa.ForeignKey('system_certificateauthority.id'), index=True, nullable=True)
    # When set, the respective key types are synced to the remote KMIP server.
    manage_sed_disks = sa.Column(sa.Boolean(), default=False)
    manage_zfs_keys = sa.Column(sa.Boolean(), default=False)
    enabled = sa.Column(sa.Boolean(), default=False)
class KMIPService(ConfigService):

    class Config:
        datastore = 'system_kmip'
        datastore_extend = 'kmip.kmip_extend'
        cli_namespace = 'system.kmip'
        role_prefix = 'KMIP'

    # Schema returned by `kmip.config` and (patched) accepted by `kmip.update`.
    ENTRY = Dict(
        'kmip_entry',
        Int('id', required=True),
        Bool('enabled', required=True),
        Bool('manage_sed_disks', required=True),
        Bool('manage_zfs_keys', required=True),
        Int('certificate', null=True, required=True),
        Int('certificate_authority', null=True, required=True),
        Int('port', validators=[Port()], required=True),
        Str('server', required=True, null=True),
        Str('ssl_version', required=True, enum=SUPPORTED_SSL_VERSIONS),
    )

    @private
    async def kmip_extend(self, data):
        """Collapse foreign-key objects to plain ids for the API representation."""
        for k in filter(lambda v: data[v], ('certificate', 'certificate_authority')):
            data[k] = data[k]['id']
        return data

    @accepts(
        Patch(
            'kmip_entry', 'kmip_update',
            ('rm', {'name': 'id'}),
            ('add', Bool('enabled')),
            ('add', Bool('force_clear')),
            ('add', Bool('change_server')),
            ('add', Bool('validate')),
            ('attr', {'update': True}),
        )
    )
    @job(lock='kmip_update')
    async def do_update(self, job, data):
        """
        Update KMIP Server Configuration.

        System currently authenticates connection with remote KMIP Server with a TLS handshake. `certificate` and
        `certificate_authority` determine the certs which will be used to initiate the TLS handshake with `server`.

        `validate` is enabled by default. When enabled, system will test connection to `server` making sure
        it's reachable.

        `manage_zfs_keys`/`manage_sed_disks` when enabled will sync keys from local database to remote KMIP server.
        When disabled, if there are any keys left to be retrieved from the KMIP server,
        it will sync them back to local database.

        `enabled` if true, cannot be set to disabled if there are existing keys pending to be synced. However users
        can still perform this action by enabling `force_clear`.

        `ssl_version` can be specified to match the ssl configuration being used by KMIP server.

        `change_server` is a boolean field which allows users to migrate data between two KMIP servers. System
        will first migrate keys from old KMIP server to local database and then migrate the keys from local database
        to new KMIP server. If it is unable to retrieve all the keys from old server, this will fail. Users can bypass
        this by enabling `force_clear`.

        `force_clear` is a boolean option which when enabled will in this case remove all
        pending keys to be synced from database. It should be used with extreme caution as users may end up with
        not having ZFS dataset or SED disks keys leaving them locked forever. It is disabled by default.
        """
        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        if not new['server'] and new['enabled']:
            verrors.add('kmip_update.server', 'Please specify a valid hostname or an IPv4 address')
        if new['enabled']:
            verrors.extend((await self.middleware.call(
                'certificate.cert_services_validation', new['certificate'], 'kmip_update.certificate', False
            )))
        verrors.extend(await validate_port(self.middleware, 'kmip_update.port', new['port'], 'kmip'))
        ca = await self.middleware.call('certificateauthority.query', [['id', '=', new['certificate_authority']]])
        if ca and not verrors:
            ca = ca[0]
            # Ensure the selected certificate actually chains to the selected CA.
            if not await self.middleware.call(
                'cryptokey.validate_cert_with_chain',
                (await self.middleware.call('certificate.get_instance', new['certificate']))['certificate'],
                [ca['certificate']]
            ):
                verrors.add(
                    'kmip_update.certificate_authority',
                    'Certificate chain could not be verified with specified certificate authority.'
                )
        elif not ca and new['enabled']:
            verrors.add('kmip_update.certificate_authority', 'Please specify a valid id.')
        # Connection test is skipped when earlier validation already failed or `validate` is off.
        if new.pop('validate', True) and new['enabled'] and not verrors:
            if not await self.middleware.call('kmip.test_connection', new):
                verrors.add('kmip_update.server', f'Unable to connect to {new["server"]}:{new["port"]} KMIP server.')
        change_server = new.pop('change_server', False)
        if change_server and new['server'] == old['server']:
            verrors.add('kmip_update.change_server', 'Please update server field to reflect the new server.')
        if change_server and not new['enabled']:
            verrors.add('kmip_update.enabled', 'Must be enabled when change server is enabled.')
        force_clear = new.pop('force_clear', False)
        # Pending keys are only cleared when migrating servers AND the user forced it.
        clear_keys = force_clear if change_server else False
        sync_error = 'KMIP sync is pending, please make sure database and KMIP server ' \
            'are in sync before proceeding with this operation.'
        if old['enabled'] != new['enabled'] and await self.middleware.call('kmip.kmip_sync_pending'):
            if force_clear:
                clear_keys = True
            else:
                verrors.add('kmip_update.enabled', sync_error)
        verrors.check()
        job.set_progress(30, 'Initial Validation complete')
        if clear_keys:
            await self.middleware.call('kmip.clear_sync_pending_keys')
            job.set_progress(50, 'Cleared keys pending sync')
        if change_server:
            # We will first migrate all the keys to local database - once done with that,
            # we will proceed with pushing it to the new server - we should have the old server
            # old server -> db
            # db -> new server
            # First can be skipped if old server is not reachable and we want to clear keys
            job.set_progress(55, 'Starting migration from existing server to new server')
            # Temporarily stop managing keys remotely so the sync jobs pull them back locally.
            await self.middleware.call(
                'datastore.update', self._config.datastore, old['id'], {
                    'manage_zfs_keys': False, 'manage_sed_disks': False
                }
            )
            job.set_progress(60, 'Syncing keys from existing server to local database')
            sync_jobs = [
                (await self.middleware.call(f'kmip.{i}')) for i in ('sync_zfs_keys', 'sync_sed_keys')
            ]
            errors = []
            for sync_job in sync_jobs:
                await sync_job.wait()
                if sync_job.error:
                    errors.append(sync_job.error)
                elif sync_job.result:
                    # A non-empty result is the list of datasets/disks that failed to sync.
                    errors.append(f'Failed to sync {",".join(sync_job.result)}')
            if errors:
                # Roll back to the original config before reporting failure.
                await self.middleware.call('datastore.update', self._config.datastore, old['id'], old)
                # We do this because it's possible a few datasets/disks got synced to db and few didn't - this is
                # to push all the data of interest back to the KMIP server from db
                await self.middleware.call('kmip.sync_keys')
                errors = '\n'.join(errors)
                raise CallError(f'Failed to sync keys from {old["server"]} to host: {errors}')
            if await self.middleware.call('kmip.kmip_sync_pending'):
                raise CallError(sync_error)
            job.set_progress(80, 'Successfully synced keys from existing server to local database')
        await self.middleware.call(
            'datastore.update', self._config.datastore, old['id'], new,
        )
        await self.middleware.call('service.start', 'kmip')
        if new['enabled'] and old['enabled'] != new['enabled']:
            await self.middleware.call('kmip.initialize_keys')
        # Any change to the management flags (or a server migration) requires a fresh sync.
        if any(old[k] != new[k] for k in ('enabled', 'manage_zfs_keys', 'manage_sed_disks')) or change_server:
            job.set_progress(90, 'Starting sync between local database and configured KMIP server')
            await self.middleware.call('kmip.sync_keys')
        return await self.config()
| 9,492 | Python | .py | 167 | 46.161677 | 118 | 0.634984 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,927 | client.py | truenas_middleware/src/middlewared/middlewared/plugins/ntp_/client.py | import dataclasses
import socket
import struct
import time
def format_ntp_packet(data):
    """Decode a raw 48-byte NTP response packet into a dict of header fields.

    Timestamps are NTP-era seconds (epoch 1900-01-01); the receive timestamp
    is additionally rendered as a UTC string for convenience.
    """
    fields = struct.unpack('!B B B b 11I', data)
    flags, stratum, poll, precision = fields[:4]
    recv_timestamp = fields[11] + fields[12] / 2 ** 32
    # NTP counts seconds from 1900-01-01; Unix from 1970-01-01, 2208988800s later.
    unix_seconds = recv_timestamp - 2208988800
    recv_utc_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(unix_seconds))
    return {
        'leap': flags >> 6 & 0x7,
        'version': flags >> 3 & 0x7,
        'mode': flags & 0x7,
        'stratum': stratum,
        'poll': poll,
        'precision': precision,
        # root delay/dispersion are 16.16 fixed point; timestamps are 32.32.
        'root_delay': fields[4] / 2 ** 16,
        'root_dispersion': fields[5] / 2 ** 16,
        'ref_id': fields[6],
        'ref_timestamp': fields[7] + fields[8] / 2 ** 32,
        'orig_timestamp': fields[9] + fields[10] / 2 ** 32,
        'recv_timestamp': recv_timestamp,
        'recv_timestamp_formatted': recv_utc_time,
        'tx_timestamp': fields[13] + fields[14] / 2 ** 32,
    }
@dataclasses.dataclass(slots=True)
class NTPClient:
    """Minimal SNTP client that queries a single host on UDP port 123."""

    host: str
    timeout: int = 5  # seconds

    def make_request(self):
        """Send one client-mode NTP request and return the parsed response dict."""
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.settimeout(self.timeout)
            # 0x1b = LI 0, version 3, mode 3 (client); remainder of the 48-byte packet zeroed.
            sock.sendto(b'\x1b' + 47 * b'\0', (self.host, 123))
            response, _addr = sock.recvfrom(1024)
        return format_ntp_packet(response)
24,928 | enums.py | truenas_middleware/src/middlewared/middlewared/plugins/ntp_/enums.py | import enum
class Mode(enum.Enum):
    """Association mode of an NTP source."""
    SERVER = 'SERVER'
    PEER = 'PEER'
    LOCAL = 'LOCAL'

    @staticmethod
    def from_str(strval):
        """Map either the symbolic name or its single-character marker to a Mode."""
        for mode, tokens in (
            (Mode.SERVER, ('SERVER', '^')),
            (Mode.PEER, ('PEER', '=')),
            (Mode.LOCAL, ('LOCAL', '#')),
        ):
            if strval in tokens:
                return mode
        raise NotImplementedError(f'Invalid mode: {strval}')

    def __str__(self):
        return str(self.value)
class State(enum.Enum):
    """Selection state of an NTP source.

    `from_str` accepts either the symbolic name or the daemon's
    single-character state marker.
    """
    BEST = 'BEST'
    SELECTED = 'SELECTED'
    SELECTABLE = 'SELECTABLE'
    FALSE_TICKER = 'FALSE_TICKER'
    TOO_VARIABLE = 'TOO_VARIABLE'
    NOT_SELECTABLE = 'NOT_SELECTABLE'

    @staticmethod
    def from_str(strval):
        """Return the State for `strval` (name or marker); raise NotImplementedError otherwise."""
        for state, tokens in (
            (State.BEST, ('BEST', '*')),
            (State.SELECTED, ('SELECTED', '+')),
            (State.SELECTABLE, ('SELECTABLE', '-')),
            (State.FALSE_TICKER, ('FALSE_TICKER', 'x')),
            (State.TOO_VARIABLE, ('TOO_VARIABLE', '~')),
            (State.NOT_SELECTABLE, ('NOT_SELECTABLE', '?')),
        ):
            if strval in tokens:
                return state
        raise NotImplementedError(f'Invalid state: {strval}')

    def is_active(self):
        """True for BEST or SELECTED."""
        return self in (State.BEST, State.SELECTED)

    @staticmethod
    def is_active_qq(val):
        """Like `is_active`, but also accepts the string form of a state."""
        # Fix: was `type(val) == str`; isinstance is the idiomatic check and
        # also covers str subclasses.
        if isinstance(val, str):
            val = State.from_str(val)
        return val in (State.BEST, State.SELECTED)

    def __str__(self):
        return str(self.value)
| 1,587 | Python | .py | 49 | 24.163265 | 65 | 0.575262 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,929 | util.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/util.py | from middlewared.schema import accepts
from middlewared.service import CallError, private, Service
from middlewared.sqlalchemy import Model
from .schema import SchemaMixin
class DatastoreService(Service, SchemaMixin):

    class Config:
        private = True

    @private
    async def get_backrefs(self, name):
        """
        Returns list of (datastore_name, column_name) for all tables that reference this table
        without being ON DELETE CASCADE / ON DELETE SET NULL.
        """
        table = self._get_table(name)
        backrefs = []
        for candidate in Model.metadata.tables.values():
            for column in candidate.c:
                if not column.foreign_keys:
                    continue
                foreign_key = list(column.foreign_keys)[0]
                if foreign_key.column.table != table or foreign_key.ondelete is not None:
                    continue
                datastore_name = candidate.name.replace('_', '.', 1)
                field = column.name[:-3] if column.name.endswith('_id') else column.name
                backrefs.append((datastore_name, field))
        return backrefs

    @private
    async def sql(self, query, *args):
        """Execute raw SQL; SELECTs return rows as dicts, other statements return None."""
        try:
            first_word = query.strip().split()[0].upper()
            if first_word == 'SELECT':
                rows = await self.middleware.call('datastore.fetchall', query, *args)
                return [dict(row) for row in rows]
            await self.middleware.call('datastore.execute', query, *args)
        except Exception as e:
            raise CallError(e)

    @accepts()
    async def dump_json(self):
        """Dump every readable table (name, column info, rows) as a list of dicts."""
        models = []
        tables = await self.middleware.call(
            "datastore.fetchall",
            "SELECT name FROM sqlite_master WHERE type = 'table'"
        )
        for (table,) in tables:
            try:
                entries = await self.middleware.call("datastore.sql", f"SELECT * FROM {table}")
            except CallError as e:
                # Skip tables that cannot be queried, leaving a trace in the logs.
                self.logger.debug("%r", e)
                continue
            columns = await self.middleware.call("datastore.fetchall", "PRAGMA table_info('%s');" % table)
            models.append({
                "table_name": table,
                "verbose_name": table,
                "fields": [
                    {"name": col[1], "verbose_name": col[1], "database_type": col[2]}
                    for col in columns
                ],
                "entries": entries,
            })
        return models
| 2,511 | Python | .py | 61 | 27.393443 | 115 | 0.520295 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,930 | event.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/event.py | from collections import defaultdict
from middlewared.schema import accepts, Dict, Str
from middlewared.service import Service
class DatastoreService(Service):

    class Config:
        private = True

    # datastore name -> list of registered event option dicts
    events = defaultdict(list)

    @accepts(Dict(
        "options",
        Str("description", required=True),
        Str("datastore", required=True),
        Str("plugin", required=True),
        Str("prefix", default=""),
        Dict("extra", additional_attrs=True),
        Str("id", default="id"),
        Str("process_event", null=True, default=None),
        strict=True,
    ))
    async def register_event(self, options):
        """Register `<plugin>.query` as an event source for changes to `datastore`."""
        self.events[options["datastore"]].append(options)
        self.middleware.event_register(f"{options['plugin']}.query", options["description"], roles=["READONLY_ADMIN"])

    async def send_insert_events(self, datastore, row):
        """Emit an ADDED event (with the row's queried fields) for every subscriber."""
        for opts in self.events[datastore]:
            event_id = row[opts["prefix"] + opts["id"]]
            fields = await self._fields(opts, row)
            await self._send_event(opts, "ADDED", id=event_id, fields=fields)

    async def send_update_events(self, datastore, id_):
        """Emit a CHANGED event for every subscriber, skipping rows that no longer exist."""
        for opts in self.events[datastore]:
            rows = await self._fields(opts, {opts["prefix"] + opts["id"]: id_}, False)
            if not rows:
                # The row may have been deleted with this update event still
                # pending; in that case there is nothing to report.
                continue
            await self._send_event(opts, "CHANGED", id=id_, fields=rows[0])

    async def send_delete_events(self, datastore, id_):
        """Emit a REMOVED event for every subscriber of `datastore`."""
        for opts in self.events[datastore]:
            await self._send_event(opts, "REMOVED", id=id_)

    async def _fields(self, options, row, get=True):
        """Run the plugin's query for the row's id; `get=True` returns a single dict."""
        query_options = {"get": get}
        extra = options.get("extra")
        if extra:
            query_options["extra"] = extra
        filters = [[options["id"], "=", row[options["prefix"] + options["id"]]]]
        return await self.middleware.call(f"{options['plugin']}.query", filters, query_options)

    async def _send_event(self, options, type_, **kwargs):
        """Send the event, optionally letting `process_event` transform or veto it."""
        processor = options["process_event"]
        if processor:
            processed = await self.middleware.call(processor, type_, kwargs)
            if processed is None:
                # The processor vetoed the event.
                return
            type_, kwargs = processed
        self.middleware.send_event(f"{options['plugin']}.query", type_, **kwargs)
| 2,673 | Python | .py | 65 | 30.461538 | 118 | 0.575617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,931 | filter.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/filter.py | import operator
from .schema import SchemaMixin
def in_(col, value):
    """Build ``col IN (...)``; a None element additionally matches NULL rows."""
    wants_null = None in value
    expr = col.in_([v for v in value if v is not None])
    if wants_null:
        expr = expr | (col == None)  # noqa
    return expr
def nin(col, value):
    """Build ``col NOT IN (...)``; a None element additionally excludes NULL rows."""
    excludes_null = None in value
    expr = ~col.in_([v for v in value if v is not None])
    if excludes_null:
        expr = expr & (col != None)  # noqa
    return expr
class FilterMixin(SchemaMixin):
    def _filters_to_queryset(self, filters, table, prefix, aliases):
        """Translate middleware query-filters into SQLAlchemy filter expressions.

        `filters` is a list of ``[name, op, value]`` triples and/or
        ``['OR', [...]]`` pairs. A name of the form ``fk__column`` filters on
        a column of the table joined via foreign key ``fk`` (resolved through
        `aliases`). Returns a list of expressions intended to be AND-ed
        together; raises ValueError for malformed filters.
        """
        # Map middleware filter operators onto SQLAlchemy column operations.
        opmap = {
            '=': operator.eq,
            '!=': operator.ne,
            '>': operator.gt,
            '>=': operator.ge,
            '<': operator.lt,
            '<=': operator.le,
            # '~' relies on a REGEXP function being available to the database backend.
            '~': lambda col, value: col.op('regexp')(value),
            'in': in_,
            'nin': nin,
            '^': lambda col, value: col.startswith(value),
            '$': lambda col, value: col.endswith(value),
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list or tuple: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f

                if '__' in name:
                    # Filter on a joined table: resolve the FK's alias, then the column on it.
                    fk, name = name.split('__', 1)
                    col = self._get_col(aliases[list(self._get_col(table, fk, prefix).foreign_keys)[0]], name, '')
                else:
                    col = self._get_col(table, name, prefix)
                if op not in opmap:
                    raise ValueError('Invalid operation: {0}'.format(op))
                q = opmap[op](col, value)
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    # Fold the sub-filter expressions into a single OR-ed expression.
                    or_value = None
                    for value in self._filters_to_queryset(value, table, prefix, aliases):
                        if or_value is None:
                            or_value = value
                        else:
                            or_value |= value
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise ValueError('Invalid filter {0}'.format(f))
        return rv
| 2,323 | Python | .py | 61 | 25.098361 | 114 | 0.468681 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,932 | write.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/write.py | from sqlalchemy import and_, types
from sqlalchemy.sql import sqltypes
from middlewared.schema import accepts, Any, Bool, Dict, Str
from middlewared.service import Service
from .filter import FilterMixin
from .schema import SchemaMixin
"""
By default, when an update/insert/delete operation occurs we will
emit an event via our event plugin to be processed for the webui.
This is important, for example, when a new disk is inserted/removed.
In either of the above scenarios, the webUI will process this event
and update the front-end accordingly. It negates the front-end having
to poll the backend (which is expensive). However, on very large
systems (i.e. systems with 100+ disks) emitting an event can become absurdly
expensive. The reason this becomes expensive is that for every
db operation, we run the plugin's associated "query" method. So if we
update 1000 table entries, then we run "disk.query" 1000 times. In
real world testing, this has shown to take roughly 82 seconds to update
100 entries on the `storage_disk` table when there are 641 entries total.
The database was on a NVMe disk. The solution to this is adding the
`send_events` key. If this is set to False, then an event will not be
sent for the db operation. It is the callers responsibility to emit an event
after all the db operations are complete.
"""
class DatastoreService(Service, FilterMixin, SchemaMixin):

    class Config:
        private = True

    @accepts(
        Str('name'),
        Dict('data', additional_attrs=True),
        Dict(
            'options',
            Bool('ha_sync', default=True),
            Str('prefix', default=''),
            Bool('send_events', default=True),
        ),
    )
    async def insert(self, name, data, options):
        """
        Insert a new entry to `name`.

        Returns the primary key of the inserted row.
        """
        table = self._get_table(name)
        insert, relationships = self._extract_relationships(table, options['prefix'], data)

        # Fill in schema defaults, and coerce non-nullable string columns to ''
        # so the INSERT does not fail when the caller omitted them.
        for column in table.c:
            if column.default is not None:
                insert.setdefault(column.name, column.default.arg)
            if not column.nullable:
                if isinstance(column.type, (types.String, types.Text)):
                    insert.setdefault(column.name, '')

        # Integer primary keys are auto-generated, so we ask for the rowid the
        # database assigned; otherwise the caller supplied the pk value itself.
        pk_column = self._get_pk(table)
        return_last_insert_rowid = type(pk_column.type) == sqltypes.Integer
        result = await self.middleware.call(
            'datastore.execute_write',
            table.insert().values(**insert),
            {
                'ha_sync': options['ha_sync'],
                'return_last_insert_rowid': return_last_insert_rowid,
            },
        )
        if return_last_insert_rowid:
            pk = result
        else:
            pk = insert[pk_column.name]

        # Many-to-many rows can only be created once the parent row (and its pk) exists.
        await self._handle_relationships(pk, relationships)

        if options['send_events']:
            await self.middleware.call('datastore.send_insert_events', name, insert)

        return pk

    @accepts(
        Str('name'),
        Any('id_or_filters'),
        Dict('data', additional_attrs=True),
        Dict(
            'options',
            Bool('ha_sync', default=True),
            Str('prefix', default=''),
            Bool('send_events', default=True),
        ),
    )
    async def update(self, name, id_or_filters, data, options):
        """
        Update an entry `id` in `name`.

        `id_or_filters` may be a primary key value or a filter list matching
        exactly one row. Returns the primary key of the updated row.
        """
        table = self._get_table(name)
        data = data.copy()

        if isinstance(id_or_filters, list):
            # Filters must resolve to exactly one row; its pk is used below.
            rows = await self.middleware.call('datastore.query', name, id_or_filters, {'prefix': options['prefix']})
            if len(rows) != 1:
                raise RuntimeError(f'{len(rows)} found, expecting one')

            id_ = rows[0][self._get_pk(table).name]
        else:
            id_ = id_or_filters

        # Callers pass foreign keys without the '_id' suffix; map them back to
        # the real column names.
        for column in table.c:
            if column.foreign_keys:
                if column.name[:-3] in data:
                    data[column.name] = data.pop(column.name[:-3])

        update, relationships = self._extract_relationships(table, options['prefix'], data)

        if update:
            result = await self.middleware.call(
                'datastore.execute_write',
                table.update().values(**update).where(self._where_clause(table, id_, {'prefix': options['prefix']})),
                {
                    'ha_sync': options['ha_sync'],
                },
            )
            if result.rowcount != 1:
                raise RuntimeError('No rows were updated')

            if options['send_events']:
                await self.middleware.call('datastore.send_update_events', name, id_)

        await self._handle_relationships(id_, relationships)

        return id_

    def _extract_relationships(self, table, prefix, data):
        """Split `data` into plain column values and many-to-many relationship values.

        Returns ``(insert, relationships)`` where `insert` maps real column
        names to values and `relationships` is a list of
        ``(relationship, values)`` pairs applied after the row exists.
        """
        relationships = self._get_relationships(table)

        insert = {}
        insert_relationships = []
        for k, v in data.items():
            relationship = relationships.get(prefix + k)
            if relationship:
                insert_relationships.append((relationship, v))
            else:
                insert[self._get_col(table, k, prefix).name] = v

        return insert, insert_relationships

    async def _handle_relationships(self, pk, relationships):
        """Replace each many-to-many relationship's association rows for `pk`."""
        for relationship, values in relationships:
            # Only single-column join pairs are supported.
            assert len(relationship.synchronize_pairs) == 1
            assert len(relationship.secondary_synchronize_pairs) == 1
            local_pk, relationship_local_pk = relationship.synchronize_pairs[0]
            remote_pk, relationship_remote_pk = relationship.secondary_synchronize_pairs[0]

            # Delete-then-insert: the new `values` fully replace the old set.
            await self.middleware.call(
                'datastore.execute_write',
                relationship_local_pk.table.delete().where(relationship_local_pk == pk)
            )

            for value in values:
                await self.middleware.call(
                    'datastore.execute_write',
                    relationship_local_pk.table.insert().values({
                        relationship_local_pk.name: pk,
                        relationship_remote_pk.name: value,
                    })
                )

    def _where_clause(self, table, id_or_filters, options):
        """Build a WHERE clause from either a primary-key value or a filter list."""
        if isinstance(id_or_filters, list):
            return and_(*self._filters_to_queryset(id_or_filters, table, options['prefix'], {}))
        else:
            return self._get_pk(table) == id_or_filters

    @accepts(
        Str('name'),
        Any('id_or_filters'),
        Dict(
            'options',
            Bool('ha_sync', default=True),
            Str('prefix', default=''),
            Bool('send_events', default=True),
        ),
    )
    async def delete(self, name, id_or_filters, options):
        """
        Delete an entry `id` in `name`.

        `id_or_filters` may be a primary key value or a filter list; filter
        deletes may remove multiple rows.
        """
        table = self._get_table(name)

        await self.middleware.call(
            'datastore.execute_write',
            table.delete().where(self._where_clause(table, id_or_filters, {'prefix': options['prefix']})),
            {
                'ha_sync': options['ha_sync'],
            },
        )

        # Events carry a single row id, so filter-based deletes emit no events.
        if not isinstance(id_or_filters, list) and options['send_events']:
            await self.middleware.call('datastore.send_delete_events', name, id_or_filters)

        return True
| 7,301 | Python | .py | 169 | 32.863905 | 117 | 0.59876 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,933 | read.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/read.py | from collections import defaultdict
import re
from sqlalchemy import and_, func, select
from sqlalchemy.sql import Alias
from sqlalchemy.sql.elements import UnaryExpression
from sqlalchemy.sql.expression import nullsfirst, nullslast
from sqlalchemy.sql.operators import desc_op, nullsfirst_op, nullslast_op
from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, Str
from middlewared.service import Service
from middlewared.service_exception import MatchNotFound
from middlewared.utils import filters
from middlewared.validators import QueryFilters, QueryOptions
from .filter import FilterMixin
from .schema import SchemaMixin
do_select = filters().do_select
def regexp(expr, item):
    """Case-insensitive REGEXP: True when `expr` matches anywhere in `item`."""
    return re.compile(expr, re.I).search(item) is not None
class DatastoreService(Service, FilterMixin, SchemaMixin):
class Config:
private = True
@accepts(
Str('name'),
List('query-filters', items=[List('query-filter')], validators=[QueryFilters()], register=True),
Dict(
'query-options',
Bool('relationships', default=True),
Str('extend', default=None, null=True),
Str('extend_context', default=None, null=True),
Str('prefix', default=None, null=True),
Dict('extra', additional_attrs=True),
List('order_by'),
List('select'),
Bool('count', default=False),
Bool('get', default=False),
Int('offset', default=0),
Int('limit', default=0),
Bool('force_sql_filters', default=False),
register=True,
validators=[QueryOptions()]
),
)
async def query(self, name, filters, options):
"""
Query for items in a given collection `name`.
`filters` is a list which each entry can be in one of the following formats:
entry: simple_filter | conjuntion
simple_filter: '[' attribute_name, OPERATOR, value ']'
conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'
OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' | 'in' | 'nin')
CONJUNCTION: 'OR'
e.g.
`['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`
`[ ['username', '=', 'root' ] ]`
.. examples(websocket)::
Querying for username "root" and returning a single item:
:::javascript
{
"id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
"msg": "method",
"method": "datastore.query",
"params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
}
"""
table = self._get_table(name)
# We do not want to make changes to original options
# which might happen with "prefix"
options = options.copy()
aliases = {}
if options['count']:
qs = select([func.count(self._get_pk(table))])
else:
columns = list(table.c)
from_ = table
if options['relationships']:
aliases = self._get_queryset_joins(table)
for foreign_key, alias in aliases.items():
columns.extend(list(alias.c))
from_ = from_.outerjoin(alias, alias.c[foreign_key.column.name] == foreign_key.parent)
qs = select(columns).select_from(from_)
prefix = options['prefix']
if filters:
qs = qs.where(and_(*self._filters_to_queryset(filters, table, prefix, aliases)))
if options['count']:
return (await self.middleware.call("datastore.fetchall", qs))[0][0]
order_by = options['order_by']
if order_by:
# Do not change original order_by
order_by = order_by[:]
for i, order in enumerate(order_by):
if order.startswith('nulls_first:'):
wrapper = nullsfirst
order = order[len('nulls_first:'):]
elif order.startswith('nulls_last:'):
wrapper = nullslast
order = order[len('nulls_last:'):]
else:
wrapper = lambda x: x # noqa
if order.startswith('-'):
order_by[i] = self._get_col(table, order[1:], prefix).desc()
else:
order_by[i] = self._get_col(table, order, prefix)
order_by[i] = wrapper(order_by[i])
qs = qs.order_by(*order_by)
if options['offset']:
qs = qs.offset(options['offset'])
if options['limit']:
qs = qs.limit(options['limit'])
result = await self.middleware.call("datastore.fetchall", qs)
relationships = [{} for row in result]
if options['relationships']:
# This will only fetch many-to-many relationships for primary table, not for joins, but that's enough
relationships = await self._fetch_many_to_many(table, result)
result = await self._queryset_serialize(
result,
table, aliases, relationships, options['extend'], options['extend_context'], options['prefix'],
options['select'], options['extra'],
)
if options['get']:
try:
return result[0]
except IndexError:
raise MatchNotFound() from None
return result
@accepts(Str('name'), Ref('query-options'))
async def config(self, name, options):
"""
Get configuration settings object for a given `name`.
This is a shortcut for `query(name, {"get": true})`.
"""
options['get'] = True
return await self.query(name, [], options)
def _get_queryset_joins(self, table):
result = {}
for column in table.c:
if column.foreign_keys:
if len(column.foreign_keys) > 1:
raise RuntimeError('Multiple foreign keys are not supported')
foreign_key = list(column.foreign_keys)[0]
alias = foreign_key.column.table.alias(foreign_key.name)
result[foreign_key] = alias
if foreign_key.column.table != (table.original if isinstance(table, Alias) else table):
result.update(self._get_queryset_joins(alias))
return result
async def _queryset_serialize(
self, qs, table, aliases, relationships, extend, extend_context, field_prefix, select, extra_options,
):
rows = []
for i, row in enumerate(qs):
rows.append(self._serialize(row, table, aliases, relationships[i], field_prefix))
if extend_context:
extend_context_value = await self.middleware.call(extend_context, rows, extra_options)
else:
extend_context_value = None
return [
await self._extend(data, extend, extend_context, extend_context_value, select)
for data in rows
]
def _serialize(self, obj, table, aliases, relationships, field_prefix):
data = self._serialize_row(obj, table, aliases)
data.update(relationships)
return {self._strip_prefix(k, field_prefix): v for k, v in data.items()}
async def _extend(self, data, extend, extend_context, extend_context_value, select):
if extend:
if extend_context:
data = await self.middleware.call(extend, data, extend_context_value)
else:
data = await self.middleware.call(extend, data)
if not select:
return data
else:
return do_select([data], select)[0]
def _strip_prefix(self, k, field_prefix):
return k[len(field_prefix):] if field_prefix and k.startswith(field_prefix) else k
    def _serialize_row(self, obj, table, aliases):
        """
        Convert one SQLAlchemy result row into a plain dict, recursively
        inlining rows joined via foreign keys (using the alias map built by
        `_get_queryset_joins`).
        """
        data = {}
        for column in table.c:
            # aliases == {} when we are loading without relationships, let's leave fk values in that case
            if not column.foreign_keys or not aliases:
                data[str(column.name)] = obj[column]
        for foreign_key, alias in aliases.items():
            column = foreign_key.parent
            if column.table != table:
                continue
            if not column.name.endswith('_id'):
                raise RuntimeError('Foreign key column must end with _id')
            # Replace the raw `*_id` value with the nested related row; None
            # when the FK is NULL or the joined row did not match.
            data[column.name[:-3]] = (
                self._serialize_row(obj, alias, aliases)
                if obj[column] is not None and obj[self._get_pk(alias)] is not None
                else None
            )
        return data
    async def _fetch_many_to_many(self, table, rows):
        """
        Bulk-load many-to-many related rows for every row in `rows`.
        Returns a list parallel to `rows`; each item maps relationship name to
        the list of related row dicts for the corresponding row.
        """
        pk = self._get_pk(table)
        pk_values = [row[pk] for row in rows]
        relationships = [{} for row in rows]
        if pk_values:
            for relationship_name, relationship in self._get_relationships(table).items():
                # We can only join by single primary key
                assert len(relationship.synchronize_pairs) == 1
                assert len(relationship.secondary_synchronize_pairs) == 1
                local_pk, relationship_local_pk = relationship.synchronize_pairs[0]
                remote_pk, relationship_remote_pk = relationship.secondary_synchronize_pairs[0]
                assert local_pk == pk
                all_children_ids = set()
                pk_to_children_ids = defaultdict(set)
                # Pass 1: read the association table to learn which child ids
                # each parent row references.
                for connection in await self.query(
                    relationship.secondary.name.replace('_', '.', 1),
                    [[relationship_local_pk.name, 'in', pk_values]],
                    {'relationships': False},
                ):
                    child_id = connection[relationship_remote_pk.name]
                    all_children_ids.add(child_id)
                    pk_to_children_ids[connection[relationship_local_pk.name]].add(child_id)
                all_children = {}
                # Pass 2: fetch all referenced child rows with one query.
                if all_children_ids:
                    for child in await self.query(
                        relationship.target.name.replace('_', '.', 1),
                        [[remote_pk.name, 'in', all_children_ids]],
                        {'relationships': False},
                    ):
                        all_children[child[remote_pk.name]] = child
                for i, row in enumerate(rows):
                    relationships[i][relationship_name] = [
                        all_children[child_id]
                        for child_id in pk_to_children_ids[row[pk]]
                        if child_id in all_children
                    ]
        return relationships
| 10,722 | Python | .py | 229 | 34.375546 | 113 | 0.565509 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,934 | connection.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/connection.py | from concurrent.futures import ThreadPoolExecutor
import re
import shutil
import time
from sqlalchemy import create_engine
from middlewared.service import private, Service
from middlewared.plugins.config import FREENAS_DATABASE
thread_pool = ThreadPoolExecutor(1)
def regexp(expr, item):
    """SQLite REGEXP implementation: case-insensitive regex search of `expr` within `item`."""
    if item is None:
        return False
    return re.search(expr, item, re.I) is not None
class DatastoreService(Service):
    class Config:
        private = True
        # Single worker thread: all datastore calls are serialized onto it, so
        # the shared sqlite connection is never used concurrently.
        thread_pool = thread_pool
    # Both are (re)created by `setup()`
    engine = None
    connection = None
    @private
    def handle_constraint_violation(self, row, journal):
        """
        Repair one row reported by `PRAGMA foreign_key_check`, appending the
        executed SQL to the `journal` file. The default idmap row is fixed in
        place; every other violating row is deleted.
        """
        self.logger.warning("Row %d in table %s violates foreign key constraint on table %s.",
                            row.rowid, row.table, row.parent)
        if row.table == "directoryservice_idmap_domain" and row.rowid <= 5 and row.parent == "system_certificate":
            """
            In commit 5265c8c49f8 a migration was written to use AUTOINCREMENT to ensure id uniqueness.
            In commit 85f5b97ec9a the aforementioned migration was modified to also fix potential constraint
            violation in this field.
            Since there was a gap between these two commits, it is impossible that the original
            migration without the subsequent revision and therefore the user's DB still contains the original
            constraint violation. This table entry is critical to the proper function of the AD
            plugin and since it is user-configurable, deletion cannot be repaired without manual
            intervention.
            """
            self.logger.warning("Removing certificate id for default idmap table entry.")
            self.connection.execute(f"UPDATE {row.table} SET idmap_domain_certificate_id = NULL WHERE rowid = {row.rowid}")
            return
        self.logger.warning("Deleting row %d from table %s.", row.rowid, row.table)
        op = f"DELETE FROM {row.table} WHERE rowid = {row.rowid}"
        self.connection.execute(op)
        journal.write(f'{op}\n')
    @private
    def setup(self):
        """
        (Re)open the configuration database, register the REGEXP function,
        enable foreign keys and repair any existing foreign key violations
        (backing the database up first).
        """
        if self.engine is not None:
            self.engine.dispose()
        if self.connection is not None:
            self.connection.close()
        self.engine = create_engine(f'sqlite:///{FREENAS_DATABASE}')
        self.connection = self.engine.connect()
        self.connection.connection.create_function("REGEXP", 2, regexp)
        self.connection.connection.execute("PRAGMA foreign_keys=ON")
        if (constraint_violations := self.connection.execute("PRAGMA foreign_key_check").fetchall()):
            # Keep a backup plus a journal of every repair statement we run
            ts = int(time.time())
            shutil.copy(FREENAS_DATABASE, f'{FREENAS_DATABASE}_{ts}.bak')
            with open(f'{FREENAS_DATABASE}_{ts}_journal.txt', 'w') as f:
                for row in constraint_violations:
                    self.handle_constraint_violation(row, f)
            self.connection.connection.execute("VACUUM")
    @private
    def execute(self, *args):
        """Execute raw SQL on the shared connection and return the cursor result."""
        return self.connection.execute(*args)
    @private
    def execute_write(self, stmt, options=None):
        """
        Execute a SQLAlchemy write statement.
        `options['return_last_insert_rowid']` (default False) makes the call
        return the new row id instead of the cursor result.
        `options['ha_sync']` (default True) is forwarded to the
        `datastore.post_execute_write` hook -- presumably gating HA
        synchronization; see the hook implementation.
        """
        options = options or {}
        options.setdefault('ha_sync', True)
        options.setdefault('return_last_insert_rowid', False)
        # Render the statement to a plain SQL string with positional binds so
        # the exact statement can be handed to the post-execute hook.
        compiled = stmt.compile(self.engine, compile_kwargs={"render_postcompile": True})
        sql = compiled.string
        binds = []
        for param in compiled.positiontup:
            bind = compiled.binds[param]
            value = bind.value
            bind_processor = compiled.binds[param].type.bind_processor(self.engine.dialect)
            if bind_processor:
                binds.append(bind_processor(value))
            else:
                binds.append(value)
        result = self.connection.execute(sql, binds)
        self.middleware.call_hook_inline("datastore.post_execute_write", sql, binds, options)
        if options['return_last_insert_rowid']:
            return self.fetchall("SELECT last_insert_rowid()")[0][0]
        return result
    @private
    def fetchall(self, query, params=None):
        """Execute `query` and return all rows, always closing the cursor."""
        cursor = self.connection.execute(query, params or [])
        try:
            return cursor.fetchall()
        finally:
            cursor.close()
| 4,240 | Python | .py | 89 | 38.101124 | 123 | 0.657441 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,935 | schema.py | truenas_middleware/src/middlewared/middlewared/plugins/datastore/schema.py | from sqlalchemy import inspect
from middlewared.sqlalchemy import Model
class SchemaMixin:
    """Helpers for resolving middleware datastore names to SQLAlchemy schema objects."""
    def _get_table(self, name):
        """Resolve a datastore name like `system.settings` to its table object."""
        key = name.replace('.', '_').lower()
        return Model.metadata.tables[key]
    def _get_pk(self, table):
        """Return the (single) primary key column of `table`."""
        pk_columns = [column for column in table.c if column.primary_key]
        return pk_columns[0]
    def _get_col(self, table, name, prefix=None):
        """
        Find column `name` in `table`, also trying `prefix + name` when a
        prefix is given; raises KeyError when neither resolves.
        """
        candidates = [name, prefix + name] if prefix else [name]
        for candidate in candidates:
            col = self._get_col_by_django_name(table, candidate)
            if col is not None:
                return col
        raise KeyError(name)
    def _get_col_by_django_name(self, table, name):
        """Return column `name`, or its `name_id` foreign-key variant, else None."""
        for candidate in (name, f'{name}_id'):
            if candidate in table.c:
                return table.c[candidate]
        return None
    def _get_relationships(self, table):
        """Find the ORM model declared for `table` and return its relationships."""
        for model in Model.registry._class_registry.values():
            if hasattr(model, "__tablename__") and model.__tablename__ == table.name:
                return inspect(model).relationships
        raise RuntimeError("Could not find model for table %s" % table.name)
| 1,168 | Python | .py | 28 | 32.321429 | 85 | 0.60496 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,936 | enums.py | truenas_middleware/src/middlewared/middlewared/plugins/truecommand/enums.py | import enum
class Status(enum.Enum):
    """TrueCommand connection states; see the comments below for the
    database-persisted vs user-reported semantics."""
    CONNECTED = 'CONNECTED'
    CONNECTING = 'CONNECTING'
    DISABLED = 'DISABLED'
    FAILED = 'FAILED'
    # In the database we save 3 states, CONNECTED/DISABLED/FAILED
    # Connected is saved when portal has approved an api key
    # Disabled is saved when TC service is disabled
    # Failed is saved when portal revokes an api key
    #
    # We report CONNECTED to the user when we have an active wireguard
    # connection with TC which is not failing a health check.
    # If portal has not approved the api key yet but has registered it
    # we report CONNECTING to the user.
    # Connecting is also reported when wireguard connection fails health
    # check
class StatusReason(enum.Enum):
    """Human-readable explanation for each `Status` member (looked up by matching name)."""
    CONNECTED = 'Truecommand service is connected.'
    CONNECTING = 'Pending Confirmation From iX Portal for Truecommand API Key.'
    DISABLED = 'Truecommand service is disabled.'
    FAILED = 'Truecommand API Key Disabled by iX Portal.'
class PortalResponseState(enum.Enum):
    """API key state as reported by the iX portal (plus our internal FAILED)."""
    ACTIVE = 'ACTIVE'
    FAILED = 'FAILED'  # This is not given by the API but is our internal check
    PENDING = 'PENDING'
    UNKNOWN = 'UNKNOWN'
| 1,132 | Python | .py | 27 | 38.888889 | 79 | 0.758652 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,937 | wireguard.py | truenas_middleware/src/middlewared/middlewared/plugins/truecommand/wireguard.py | import asyncio
import re
import time
from middlewared.schema import Bool, Dict, IPAddr, returns, Str
from middlewared.service import accepts, CallError, periodic, private, Service
from middlewared.utils import run
from .enums import Status
from .utils import WIREGUARD_INTERFACE_NAME
# A wireguard handshake older than this many seconds marks the connection
# unhealthy; also used as the periodic health-check interval.
HEALTH_CHECK_SECONDS = 1800
# Pulls the handshake timestamp (text after '=') out of
# `wg show <iface> latest-handshakes` output.
WIREGUARD_HEALTH_RE = re.compile(r'=\s*(.*)')
class TruecommandService(Service):
    @private
    async def generate_wg_keys(self):
        """
        Generate a fresh wireguard keypair via the `wg` CLI.
        Returns {'wg_public_key': str, 'wg_private_key': str}; raises
        CallError if either `wg genkey` or `wg pubkey` fails.
        """
        cp = await run(['wg', 'genkey'], check=False)
        private_key = cp.stdout
        if cp.returncode:
            raise CallError(
                f'Failed to generate key for wireguard with exit code ({cp.returncode}): {cp.stderr.decode()}'
            )
        # Derive the public key from the private key we just generated
        cp = await run(['wg', 'pubkey'], input=private_key, check=False)
        public_key = cp.stdout
        if cp.returncode:
            raise CallError(
                f'Failed to generate public key for wireguard with exit code ({cp.returncode}): {cp.stderr.decode()}'
            )
        return {'wg_public_key': public_key.decode().strip(), 'wg_private_key': private_key.decode().strip()}
    @private
    @periodic(HEALTH_CHECK_SECONDS, run_on_start=False)
    async def health_check(self):
        """
        Periodic task: verify the wireguard connection to TrueCommand is
        healthy and transition status/alerts accordingly.
        """
        # The purpose of this method is to ensure that the wireguard connection
        # is active. If wireguard service is running, we want to make sure that the last
        # handshake we have had was under 30 minutes.
        if not await self.middleware.call('failover.is_single_master_node') or Status(
            (await self.middleware.call('datastore.config', 'system.truecommand'))['api_key_state']
        ) != Status.CONNECTED:
            # Standby node or unapproved key: nothing to monitor here
            await self.middleware.call('alert.oneshot_delete', 'TruecommandConnectionHealth', None)
            return
        if not await self.wireguard_connection_health():
            # Stop wireguard if it's running and start polling the api to see what's up
            await self.middleware.call('truecommand.set_status', Status.CONNECTING.value)
            await self.stop_truecommand_service()
            await self.middleware.call('alert.oneshot_create', 'TruecommandConnectionHealth', None)
            await self.middleware.call('truecommand.poll_api_for_status')
        else:
            # Mark the connection as connected - we do this for just in case user never called
            # truecommand.config and is in WAITING state right now assuming that an event will be
            # raised when TC finally connects
            await self.middleware.call('truecommand.set_status', Status.CONNECTED.value)
            await self.middleware.call('truecommand.dismiss_alerts', False, True)
    @private
    async def wireguard_connection_health(self):
        """
        Returns true if we are connected and the wireguard connection has had a handshake within the last
        HEALTH_CHECK_SECONDS
        """
        health_error = not (await self.middleware.call('service.started', 'truecommand'))
        if not health_error:
            cp = await run(['wg', 'show', WIREGUARD_INTERFACE_NAME, 'latest-handshakes'], encoding='utf8', check=False)
            if cp.returncode:
                health_error = True
            else:
                timestamp = WIREGUARD_HEALTH_RE.findall(cp.stdout)
                if not timestamp:
                    health_error = True
                else:
                    timestamp = timestamp[0].strip()
                    if timestamp == '0' or not timestamp.isdigit() or (
                        int(time.time()) - int(timestamp)
                    ) > HEALTH_CHECK_SECONDS:
                        # We never established handshake with TC if timestamp is 0, otherwise it's been more
                        # then 30 minutes, error out please
                        health_error = True
                    else:
                        # It's possible that IP of TC changed and we just need to get up to speed with the
                        # new IP. So if we have a correct handshake, we should ping the TC IP to see if it's
                        # still reachable
                        config = await self.middleware.call('datastore.config', 'system.truecommand')
                        cp = await run([
                            'ping', '-w', '5', '-q', str(config['remote_address'].split('/', 1)[0])
                        ], check=False)
                        if cp.returncode:
                            # We have return code of 0 if we heard at least one response from the host
                            health_error = True
        return not health_error
    @accepts(roles=['TRUECOMMAND_READ'])
    @returns(Dict(
        'truecommand_connected',
        Bool('connected', required=True),
        IPAddr('truecommand_ip', null=True, required=True),
        Str('truecommand_url', null=True, required=True),
        Str('status', required=True),
        Str('status_reason', required=True),
    ))
    async def info(self):
        """
        Returns information which shows if system has an authenticated api key
        and has initiated a VPN connection with TrueCommand.
        """
        tc_config = await self.middleware.call('truecommand.config')
        connected = Status(tc_config['status']) == Status.CONNECTED
        # IP/URL are only meaningful while actually connected
        return {
            'connected': connected,
            'truecommand_ip': tc_config['remote_ip_address'] if connected else None,
            'truecommand_url': tc_config['remote_url'] if connected else None,
            'status': tc_config['status'],
            'status_reason': tc_config['status_reason'],
        }
    @private
    async def start_truecommand_service(self):
        """
        Start the truecommand (wireguard) service when enabled, approved and
        fully configured; otherwise resume polling the iX portal.
        """
        config = await self.middleware.call('datastore.config', 'system.truecommand')
        if config['enabled'] and await self.middleware.call('failover.is_single_master_node'):
            if Status(config['api_key_state']) == Status.CONNECTED and all(
                config[k] for k in ('wg_private_key', 'remote_address', 'endpoint', 'tc_public_key', 'wg_address')
            ):
                await self.middleware.call('service.start', 'truecommand', {'ha_propagate': False})
                await self.middleware.call('service.reload', 'http', {'ha_propagate': False})
                asyncio.get_event_loop().call_later(
                    30,  # 30 seconds is enough time to initiate a health check to see if the connection is alive
                    lambda: self.middleware.create_task(self.middleware.call('truecommand.health_check')),
                )
            else:
                # start polling iX Portal to see what's up and why we don't have these values set
                # This can happen in instances where system was polling and then was rebooted,
                # So we should continue polling in this case
                await self.middleware.call('truecommand.poll_api_for_status')
    @private
    async def stop_truecommand_service(self):
        """Reload http and stop the truecommand service if it is running."""
        await self.middleware.call('service.reload', 'http')
        if await self.middleware.call('service.started', 'truecommand'):
            await self.middleware.call('service.stop', 'truecommand')
| 7,084 | Python | .py | 130 | 42.769231 | 119 | 0.620317 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,938 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/truecommand/__init__.py | from .enums import Status
async def _event_system_ready(middleware, event_type, args):
    """`system.ready` handler: start the TrueCommand service, except on HA-licensed systems."""
    licensed = await middleware.call('failover.licensed')
    if not licensed:
        await middleware.call('truecommand.start_truecommand_service')
async def setup(middleware):
    """
    Plugin setup: register the `truecommand.config` event, normalize the
    stored connection status for this boot and, if the system is already
    ready (and not HA licensed), kick off the TrueCommand service.
    """
    await middleware.call('truecommand.config')
    middleware.event_register(
        'truecommand.config',
        'Sent on TrueCommand configuration changes.',
        roles=['READONLY_ADMIN']
    )
    status = Status((await middleware.call('datastore.config', 'system.truecommand'))['api_key_state'])
    if status == Status.CONNECTED:
        # A persisted CONNECTED only means the portal approved the key; the
        # wireguard tunnel still has to be (re)established after boot.
        status = Status.CONNECTING
    await middleware.call('truecommand.set_status', status.value)
    middleware.event_subscribe('system.ready', _event_system_ready)
    if await middleware.call('system.ready'):
        # system.ready already fired before we subscribed - start manually
        if not await middleware.call('failover.licensed'):
            middleware.create_task(middleware.call('truecommand.start_truecommand_service'))
| 965 | Python | .py | 20 | 41.85 | 103 | 0.723586 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,939 | portal.py | truenas_middleware/src/middlewared/middlewared/plugins/truecommand/portal.py | import asyncio
from middlewared.service import CallError, job, private, Service
from .connection import TruecommandAPIMixin
from .enums import PortalResponseState, Status
class TruecommandService(Service, TruecommandAPIMixin):
    # Gap between successive portal polls, in minutes
    POLLING_GAP_MINUTES = 5
    @private
    @job(lock='poll_ix_portal_api_truecommand', lock_queue_size=1)
    async def poll_api_for_status(self, job):
        """
        Background job: poll the iX portal every POLLING_GAP_MINUTES until the
        API key is either approved (ACTIVE) or permanently revoked (UNKNOWN),
        updating the database, alerts and the truecommand service accordingly.
        """
        await self.middleware.call('truecommand.set_status', Status.CONNECTING.value)
        config = await self.middleware.call('datastore.config', 'system.truecommand')
        while config['enabled']:
            try:
                status = await self.poll_once(config)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                # Treat any polling failure as transient and retry later
                status = {
                    'error': f'Failed to poll for status of API Key: {e}',
                    'state': PortalResponseState.FAILED,
                }
            if status['state'] == PortalResponseState.ACTIVE:
                # Persist the wireguard connection details handed out by the portal
                await self.middleware.call(
                    'datastore.update',
                    'system.truecommand',
                    config['id'], {
                        'tc_public_key': status['tc_pubkey'],
                        'wg_address': status['wg_netaddr'],
                        'remote_address': status['tc_wg_netaddr'],
                        'endpoint': status['wg_accesspoint'],
                        'api_key_state': Status.CONNECTED.value,
                    }
                )
                self.middleware.send_event(
                    'truecommand.config', 'CHANGED', fields=(await self.middleware.call('truecommand.event_config'))
                )
                if status.get('tc_state') == 'running':
                    await self.middleware.call('truecommand.dismiss_alerts')
                    await self.middleware.call('truecommand.start_truecommand_service')
                else:
                    # TrueCommand container is not up yet: alert and retry the
                    # service start after the polling gap
                    await self.middleware.call('truecommand.dismiss_alerts', True)
                    await self.middleware.call('alert.oneshot_create', 'TruecommandContainerHealth', None)
                    asyncio.get_event_loop().call_later(
                        self.POLLING_GAP_MINUTES * 60,
                        lambda: self.middleware.create_task(
                            self.middleware.call('truecommand.start_truecommand_service')
                        ),
                    )
                break
            elif status['state'] == PortalResponseState.UNKNOWN:
                # We are not going to poll anymore as this definitely means
                # that iX Portal has deactivated this key and is not going to work with this
                # api key again
                # Clear TC pending alerts if any, what only matters now is that key has been disabled by portal
                await self.middleware.call('truecommand.dismiss_alerts', True)
                await self.middleware.call(
                    'alert.oneshot_create', 'TruecommandConnectionDisabled', {
                        'error': status['error'],
                    }
                )
                self.middleware.logger.debug('iX Portal has disabled API Key: %s', status['error'])
                await self.middleware.call('truecommand.set_status', Status.FAILED.value)
                # Let's remove TC's address if they are there and if the api key state was enabled
                # Also let's make sure truecommand service is not running, it shouldn't be but still enforce it
                await self.middleware.call(
                    'datastore.update',
                    'system.truecommand',
                    config['id'], {
                        **{k: None for k in ('tc_public_key', 'remote_address', 'endpoint', 'wg_address')},
                        'api_key_state': Status.FAILED.value,
                    }
                )
                self.middleware.send_event(
                    'truecommand.config', 'CHANGED', fields=(await self.middleware.call('truecommand.event_config'))
                )
                await self.middleware.call('truecommand.stop_truecommand_service')
                break
            else:
                # PENDING/FAILED: inform the user and poll again after the gap
                await self.middleware.call(
                    'alert.oneshot_create', 'TruecommandConnectionPending', {
                        'error': status['error']
                    }
                )
                self.middleware.logger.debug(
                    'Pending Confirmation From iX Portal for Truecommand API Key: %s', status['error']
                )
            await asyncio.sleep(self.POLLING_GAP_MINUTES * 60)
            config = await self.middleware.call('datastore.config', 'system.truecommand')
    @private
    async def poll_once(self, config):
        """
        Query the portal once for our API key / wireguard pubkey status.
        Returns a dict with `state` (PortalResponseState) and `error` (str or
        None); on ACTIVE, the portal-supplied connection details are merged in.
        """
        response = await self._post_call(payload={
            'action': 'status-wireguard-key',
            'apikey': config['api_key'],
            'nas_pubkey': config['wg_public_key'],
        })
        if response['error']:
            response.update({
                'state': PortalResponseState.FAILED.value,
                'error': f'Failed to poll for status of API Key: {response["error"]}'
            })
        else:
            response = response['response']
            if 'state' not in response or response['state'].upper() not in PortalResponseState.__members__:
                response.update({
                    'state': PortalResponseState.FAILED.value,
                    'error': 'Malformed response returned by iX Portal'
                })
            else:
                response['error'] = None
        status_dict = {'error': response.pop('error'), 'state': PortalResponseState(response.pop('state').upper())}
        # There are 3 states here which the api can give us - active, pending, unknown
        if status_dict['state'] == PortalResponseState.ACTIVE:
            if any(
                k not in response for k in
                ('tc_pubkey', 'wg_netaddr', 'wg_accesspoint', 'nas_pubkey', 'tc_wg_netaddr')
            ):
                status_dict.update({
                    'state': PortalResponseState.FAILED,
                    'error': f'Malformed ACTIVE response received by iX Portal with {", ".join(response)} keys'
                })
            elif response['nas_pubkey'] != config['wg_public_key']:
                # Sanity check: the portal must be talking about *our* keypair
                status_dict.update({
                    'state': PortalResponseState.FAILED,
                    'error': f'Public key "{response["nas_pubkey"]}" of TrueNAS from iX Portal does not '
                             f'match TrueNAS Config public key "{config["wg_public_key"]}".'
                })
            else:
                status_dict.update(response)
        elif status_dict['state'] == PortalResponseState.UNKNOWN:
            status_dict['error'] = response.get('details') or 'API Key has been disabled by the iX Portal'
        elif status_dict['state'] == PortalResponseState.PENDING:
            # This is pending now
            status_dict['error'] = 'Waiting for iX Portal to confirm API Key'
        return status_dict
    @private
    async def register_with_portal(self, config):
        """
        Register our wireguard public key with the iX portal under
        `config['api_key']`; raises CallError on any failure. This method
        persists nothing itself.
        """
        # We are going to register the api key with the portal and if it fails,
        # We are going to fail hard and fast without saving any information in the database if we fail to
        # register for whatever reason.
        response = await self._post_call(payload={
            'action': 'add-truecommand-wg-key',
            'apikey': config['api_key'],
            'nas_pubkey': config['wg_public_key'],
            'hostname': await self.middleware.call('system.hostname'),
            'sysversion': await self.middleware.call('system.version'),
        })
        if response['error']:
            raise CallError(f'Failed to register API Key with portal: {response["error"]}')
        else:
            response = response['response']
        if 'state' not in response or str(response['state']).lower() not in ('pending', 'duplicate', 'denied'):
            raise CallError(f'Unknown response got from iX portal API: {response}')
        elif response['state'].lower() == 'denied':
            # Discussed with Ken and he said it's safe to assume that if we get denied
            # we should assume the API Key is invalid
            raise CallError('The provided API Key is invalid.')
| 8,391 | Python | .py | 156 | 38.269231 | 116 | 0.558369 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,940 | connection.py | truenas_middleware/src/middlewared/middlewared/plugins/truecommand/connection.py | import aiohttp
import async_timeout
import asyncio
import json
class TruecommandAPIMixin:
    # All portal interactions go through this single endpoint
    PORTAL_URI = 'https://portal.ixsystems.com/api'
    async def _post_call(self, options=None, payload=None):
        """
        POST `payload` as JSON to the iX portal.
        Returns {'error': str | None, 'response': dict}; timeouts and HTTP
        errors are reported via 'error' instead of raising.
        `options['timeout']` (default 15 seconds) bounds the whole request.
        """
        if not await self.middleware.call('network.general.can_perform_activity', 'truecommand'):
            # Include 'response' so the return shape is uniform for callers
            return {'error': 'Network activity denied for TrueCommand service', 'response': {}}
        options = options or {}
        timeout = options.get('timeout', 15)
        response = {'error': None, 'response': {}}
        try:
            async with async_timeout.timeout(timeout):
                async with aiohttp.ClientSession(
                    raise_for_status=True, trust_env=True,
                ) as session:
                    req = await session.post(
                        self.PORTAL_URI,
                        data=json.dumps(payload or {}),
                        headers={'Content-type': 'application/json'},
                    )
        except asyncio.TimeoutError:
            response['error'] = f'Unable to connect with iX portal in {timeout} seconds.'
        except aiohttp.ClientResponseError as e:
            # BUGFIX: `req` is unbound when `session.post` itself raises (which
            # is exactly when raise_for_status fires), so referencing
            # `req.status` here raised NameError. Use the status carried by the
            # exception instead.
            response['error'] = f'Error Code ({e.status}): {e}'
        else:
            response['response'] = await req.json()
        return response
async def setup(middleware):
    # Register the portal HTTP traffic as a named network activity so it can
    # be allowed/denied via network.general configuration (checked in
    # `_post_call` above).
    await middleware.call('network.general.register_activity', 'truecommand', 'TrueCommand iX portal')
| 1,421 | Python | .py | 31 | 34.677419 | 102 | 0.605206 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,941 | update.py | truenas_middleware/src/middlewared/middlewared/plugins/truecommand/update.py | import asyncio
import middlewared.sqlalchemy as sa
from middlewared.schema import Bool, Dict, Int, IPAddr, Password, Str
from middlewared.service import accepts, ConfigService, private, ValidationErrors
from middlewared.validators import Range
from .enums import Status, StatusReason
# Status reason shown while the wireguard tunnel is not yet up even though the
# key is approved; intentionally not a StatusReason member.
CONNECTING_STATUS_REASON = 'Waiting for connection from Truecommand.'
# Serializes truecommand.update calls so concurrent updates cannot interleave.
TRUECOMMAND_UPDATE_LOCK = asyncio.Lock()
class TrueCommandModel(sa.Model):
    """DB table holding TrueCommand pairing state and wireguard credentials."""
    __tablename__ = 'system_truecommand'
    id = sa.Column(sa.Integer(), primary_key=True)
    # API key issued by the iX portal (encrypted at rest)
    api_key = sa.Column(sa.EncryptedText(), default=None, nullable=True)
    # Persisted Status value: CONNECTED/DISABLED/FAILED
    api_key_state = sa.Column(sa.String(128), default='DISABLED', nullable=True)
    # Our wireguard keypair (private key encrypted at rest)
    wg_public_key = sa.Column(sa.String(255), default=None, nullable=True)
    wg_private_key = sa.Column(sa.EncryptedText(), default=None, nullable=True)
    # Connection details handed out by the portal once the key is approved
    wg_address = sa.Column(sa.String(255), default=None, nullable=True)
    tc_public_key = sa.Column(sa.String(255), default=None, nullable=True)
    endpoint = sa.Column(sa.String(255), default=None, nullable=True)
    remote_address = sa.Column(sa.String(255), default=None, nullable=True)
    enabled = sa.Column(sa.Boolean(), default=False)
class TruecommandService(ConfigService):
    # In-memory (per-process) connection status; persisted state lives in
    # the `api_key_state` column.
    STATUS = Status.DISABLED
    class Config:
        datastore = 'system.truecommand'
        datastore_extend = 'truecommand.tc_extend'
        cli_namespace = 'system.truecommand'
        role_prefix = 'TRUECOMMAND'
    ENTRY = Dict(
        'truecommand_entry',
        Int('id', required=True),
        Password('api_key', required=True, null=True),
        Str('status', required=True, enum=[s.value for s in Status]),
        Str('status_reason', required=True, enum=[s.value for s in StatusReason] + [CONNECTING_STATUS_REASON]),
        Str('remote_url', required=True, null=True),
        IPAddr('remote_ip_address', required=True, null=True),
        Bool('enabled', required=True),
    )
    @private
    async def tc_extend(self, config):
        """
        `datastore_extend` hook: strip wireguard secrets from the raw DB row
        and add derived `status`, `status_reason`, `remote_ip_address` and
        `remote_url` fields.
        """
        for key in ('wg_public_key', 'wg_private_key', 'tc_public_key', 'endpoint', 'wg_address'):
            config.pop(key)
        # In database we will have CONNECTED when the portal has approved the key
        # Connecting basically represents 2 phases - where we wait for TC to connect to
        # NAS and where we are waiting to hear back from the portal after registration
        status_reason = None
        if await self.middleware.call('failover.is_single_master_node'):
            # NOTE(review): `self.STATUS.CONNECTED` accesses a class attribute
            # through an enum member; deprecated on newer Python - consider
            # `Status.CONNECTED` instead.
            if Status(config.pop('api_key_state')) == self.STATUS.CONNECTED and self.STATUS == Status.CONNECTING:
                if await self.middleware.call('truecommand.wireguard_connection_health'):
                    await self.set_status(Status.CONNECTED.value)
                else:
                    status_reason = CONNECTING_STATUS_REASON
        else:
            if self.STATUS != Status.DISABLED:
                await self.set_status(Status.DISABLED.value)
            status_reason = 'Truecommand service is disabled on standby controller'
        config['remote_ip_address'] = config['remote_url'] = config.pop('remote_address')
        if config['remote_ip_address']:
            # remote_address is stored in CIDR form; split off the netmask
            config['remote_ip_address'] = config.pop('remote_ip_address').split('/', 1)[0]
            config['remote_url'] = f'http://{config["remote_ip_address"]}/'
        config.update({
            'status': self.STATUS.value,
            'status_reason': status_reason or StatusReason.__members__[self.STATUS.value].value
        })
        return config
    @accepts(
        Dict(
            'truecommand_update',
            Bool('enabled'),
            Password('api_key', null=True, validators=[Range(min_=16, max_=16)]),
        )
    )
    async def do_update(self, data):
        """
        Update Truecommand service settings.
        `api_key` is a valid API key generated by iX Portal.
        """
        # We have following cases worth mentioning wrt updating TC credentials
        # 1) User enters API Key and enables the service
        # 2) User disables the service
        # 3) User changes API Key and service is enabled
        #
        # Another point to document is how we intend to poll, we are going to send a request to iX Portal
        # and if it returns active state with the data we require for wireguard connection, we mark the
        # API Key as connected. As long as we keep polling iX portal, we are going to be in a connecting state,
        # no matter what errors we are getting from the polling bits. The failure case is when iX Portal sends
        # us the state "unknown", which after confirming with Ken means that the portal has revoked the api key
        # in question and we no longer use it. In this case we are going to stop polling and mark the connection
        # as failed.
        #
        # For case (1), when user enters API key and enables the service, we are first going to generate wg keys
        # if they haven't been generated already. Then we are going to register the new api key with ix portal.
        # Once done, we are going to start polling. If polling gets us in success state, we are going to start
        # wireguard connection, for the other case, we are going to emit an event with truecommand failure status.
        #
        # For case (2), if the service was running previously, we do nothing except for stopping wireguard and
        # ensuring it is not started at boot as well. The connection details remain secure in the database.
        #
        # For case (3), everything is similar to how we handle case (1), however we are going to stop wireguard
        # if it was running with previous api key credentials.
        async with TRUECOMMAND_UPDATE_LOCK:
            old = await self.middleware.call('datastore.config', self._config.datastore)
            new = old.copy()
            new.update(data)
            verrors = ValidationErrors()
            if new['enabled'] and not new['api_key']:
                verrors.add(
                    'truecommand_update.api_key',
                    'API Key must be provided when Truecommand service is enabled.'
                )
            verrors.check()
            if all(old[k] == new[k] for k in ('enabled', 'api_key')):
                # Nothing changed
                return await self.config()
            # Abort any in-flight portal polling before reconfiguring
            polling_jobs = await self.middleware.call(
                'core.get_jobs', [
                    ['method', '=', 'truecommand.poll_api_for_status'], ['state', 'in', ['WAITING', 'RUNNING']]
                ]
            )
            for polling_job in polling_jobs:
                await self.middleware.call('core.job_abort', polling_job['id'])
            await self.set_status(Status.DISABLED.value)
            new['api_key_state'] = Status.DISABLED.value
            if old['api_key'] != new['api_key']:
                # A new key invalidates all previously negotiated credentials
                new.update({
                    'remote_address': None,
                    'endpoint': None,
                    'tc_public_key': None,
                    'wg_address': None,
                    'wg_public_key': None,
                    'wg_private_key': None,
                    'api_key_state': Status.DISABLED.value,
                })
            if new['enabled']:
                if not new['wg_public_key'] or not new['wg_private_key']:
                    new.update(**(await self.middleware.call('truecommand.generate_wg_keys')))
                if old['api_key'] != new['api_key']:
                    await self.middleware.call('truecommand.register_with_portal', new)
                    # Registration succeeded, we are good to poll now
                elif all(
                    new[k] for k in ('wg_address', 'wg_private_key', 'remote_address', 'endpoint', 'tc_public_key')
                ):
                    # Api key hasn't changed and we have wireguard details, let's please start wireguard in this case
                    await self.set_status(Status.CONNECTING.value)
                    new['api_key_state'] = Status.CONNECTED.value
            await self.dismiss_alerts(True)
            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                old['id'],
                new
            )
            self.middleware.send_event('truecommand.config', 'CHANGED', fields=(await self.event_config()))
            # We are going to stop truecommand service with this update anyways as only 2 possible actions
            # can happen on update
            # 1) Service enabled/disabled
            # 2) Api Key changed
            await self.middleware.call('truecommand.stop_truecommand_service')
            if new['enabled']:
                if new['api_key'] != old['api_key'] or any(
                    not new[k] for k in ('wg_address', 'wg_private_key', 'remote_address', 'endpoint', 'tc_public_key')
                ):
                    # We are going to start polling here
                    await self.middleware.call('truecommand.poll_api_for_status')
                else:
                    # User just enabled the service after disabling it - we have wireguard details and
                    # we can initiate the connection. If it is not good, health check will fail and we will
                    # poll iX Portal to see what's up. Let's just start wireguard now
                    await self.middleware.call('truecommand.start_truecommand_service')
            return await self.config()
    @private
    async def set_status(self, new_status):
        """Update the in-memory status and broadcast a `truecommand.config` event."""
        assert new_status in Status.__members__
        self.STATUS = Status(new_status)
        self.middleware.send_event('truecommand.config', 'CHANGED', fields=(await self.event_config()))
    @private
    async def dismiss_alerts(self, dismiss_health=False, dismiss_health_only=False):
        """Delete TrueCommand one-shot alerts; health alerts only on request."""
        # We do not dismiss health by default because it's possible that the key has not been revoked
        # and it's just that TC has not connected to TN in 30 minutes, so we only should dismiss it when
        # we update TC service or the health is okay now with the service running or when service is not running
        health_alerts = {'TruecommandConnectionHealth', 'TruecommandContainerHealth'}
        non_health_alerts = {'TruecommandConnectionDisabled', 'TruecommandConnectionPending'}
        if dismiss_health_only:
            to_dismiss_alerts = health_alerts
        else:
            to_dismiss_alerts = health_alerts | non_health_alerts if dismiss_health else non_health_alerts
        for klass in to_dismiss_alerts:
            await self.middleware.call('alert.oneshot_delete', klass, None)
    @private
    async def event_config(self):
        """Config payload used for `truecommand.config` events (api_key redacted)."""
        config = await self.config()
        config.pop('api_key', None)
        return config
| 10,785 | Python | .py | 192 | 44.546875 | 119 | 0.623851 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,942 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/replication_/crud.py | from middlewared.schema import accepts, Dataset, Dict, Int, Str
from middlewared.service import item_method, pass_app, Service
class ReplicationService(Service):

    @item_method
    @accepts(
        Int("id"),
        Dict(
            "replication_restore",
            Str("name", required=True),
            Dataset("target_dataset", required=True),
            strict=True,
        ),
        roles=["REPLICATION_TASK_WRITE"],
    )
    @pass_app(require=True, rest=True)
    async def restore(self, app, id_, data):
        """
        Create the opposite of replication task `id` (PULL if it was PUSH and vice versa).
        """
        task = await self.middleware.call("replication.query", [["id", "=", id_]], {"get": True})

        # Invert the direction and carry over the snapshot-naming configuration
        # appropriate for the new direction.
        if task["direction"] == "PUSH":
            data["direction"] = "PULL"
            if task["name_regex"]:
                data["name_regex"] = task["name_regex"]
            else:
                schemas = {pst["naming_schema"] for pst in task["periodic_snapshot_tasks"]}
                schemas |= set(task["also_include_naming_schema"])
                data["naming_schema"] = list(schemas)
        else:
            data["direction"] = "PUSH"
            if task["name_regex"]:
                data["name_regex"] = task["name_regex"]
            else:
                data["also_include_naming_schema"] = task["naming_schema"]

        # Swap source datasets with the target dataset for the reversed task.
        reversed_sources, _ = await self.middleware.call(
            "zettarepl.reverse_source_target_datasets",
            task["source_datasets"],
            task["target_dataset"],
        )
        data["source_datasets"] = reversed_sources

        # Transport and tuning settings are copied verbatim from the original.
        copied_keys = (
            "transport", "ssh_credentials", "netcat_active_side", "netcat_active_side_listen_address",
            "netcat_active_side_port_min", "netcat_active_side_port_max", "netcat_passive_side_connect_address",
            "recursive", "properties", "replicate", "compression", "large_block", "embed", "compressed",
            "retries",
        )
        for key in copied_keys:
            data[key] = task[key]
        if data["ssh_credentials"] is not None:
            data["ssh_credentials"] = data["ssh_credentials"]["id"]

        data["retention_policy"] = "NONE"
        data["auto"] = False
        data["enabled"] = False  # Do not run it automatically

        return await self.middleware.call("replication.create", data, app=app)
| 2,513 | Python | .py | 51 | 36.882353 | 118 | 0.569099 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,943 | config.py | truenas_middleware/src/middlewared/middlewared/plugins/replication_/config.py | from middlewared.schema import accepts, Dict, Int
from middlewared.service import ConfigService
import middlewared.sqlalchemy as sa
from middlewared.validators import Range
class ReplicationConfigModel(sa.Model):
    # Singleton datastore row backing the replication.config service.
    __tablename__ = "storage_replication_config"
    id = sa.Column(sa.Integer(), primary_key=True)
    # Nullable concurrency cap (presumably NULL means "no limit" -- confirm
    # against zettarepl); defaults to 5 parallel replication tasks.
    max_parallel_replication_tasks = sa.Column(sa.Integer(), nullable=True, default=5)
class ReplicationConfigService(ConfigService):

    class Config:
        namespace = "replication.config"
        datastore = "storage.replication_config"
        cli_namespace = "task.replication.config"
        role_prefix = "REPLICATION_TASK_CONFIG"

    @accepts(
        Dict(
            "replication_config_update",
            Int("max_parallel_replication_tasks", validators=[Range(min_=1)], null=True),
            update=True,
        )
    )
    async def do_update(self, data):
        """
        `max_parallel_replication_tasks` represents a maximum number of parallel replication tasks running.
        """
        old_config = await self.config()
        new_config = {**old_config, **data}
        await self.middleware.call(
            "datastore.update",
            self._config.datastore,
            new_config['id'],
            new_config,
        )
        # Only push the fields that actually changed to the zettarepl daemon.
        changed = {k: v for k, v in new_config.items() if v != old_config[k]}
        if changed:
            await self.middleware.call("zettarepl.update_config", changed)
        return await self.config()
| 1,550 | Python | .py | 41 | 29.317073 | 107 | 0.639519 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,944 | cert_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/ldap_/cert_attachments.py | from middlewared.common.attachment.certificate import CertificateServiceAttachmentDelegate
class LdapCertificateAttachmentDelegate(CertificateServiceAttachmentDelegate):
    """Certificate attachment delegate for the LDAP service.

    Concrete behavior lives in the base class; this subclass only supplies
    the service identity and the verb (presumably used to redeploy the
    service when an attached certificate changes).
    """
    HUMAN_NAME = 'LDAP Service'
    SERVICE = 'ldap'
    SERVICE_VERB = 'start'
async def setup(middleware):
    # Plugin entry point: register this delegate with the certificate service.
    await middleware.call('certificate.register_attachment_delegate', LdapCertificateAttachmentDelegate(middleware))
| 401 | Python | .py | 7 | 53.285714 | 116 | 0.840617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,945 | ldap_client.py | truenas_middleware/src/middlewared/middlewared/plugins/ldap_/ldap_client.py | import copy
import threading
import ldap as pyldap
from ldap.controls import SimplePagedResultsControl
from middlewared.plugins.directoryservices import SSL
# Module-wide re-entrant lock serializing access to the shared LDAP client.
client_lock = threading.RLock()


def ldap_client_lock(fn):
    """Decorator: run `fn` while holding the module-wide client lock.

    The lock is re-entrant, so decorated methods may call each other.
    """
    # Function-scope import so the module's dependency list stays unchanged.
    from functools import wraps

    # wraps() preserves the decorated function's __name__/__doc__, which the
    # bare wrapper previously clobbered (everything reported as "inner").
    @wraps(fn)
    def inner(*args, **kwargs):
        with client_lock:
            return fn(*args, **kwargs)
    return inner
class LDAPClient:
    """Serialized wrapper around a single python-ldap connection handle.

    Only one bound handle is kept at a time. `open()` walks the configured
    URI list until a bind succeeds and caches the parameters used, so a
    subsequent call with identical parameters reuses the live handle. All
    public entry points run under the module-wide client lock.
    """
    pagesize = 1024
    _handle = None
    ldap_parameters = None

    def __init__(self):
        # Process-wide python-ldap defaults.
        pyldap.protocol_version = pyldap.VERSION3
        pyldap.set_option(pyldap.OPT_REFERRALS, 0)

    def __setup_ssl(self, data):
        # Apply process-wide TLS options before connecting; no-op without SSL.
        if SSL(data['security']['ssl']) == SSL.NOSSL:
            return
        cert = data['security']['client_certificate']
        if cert:
            pyldap.set_option(
                pyldap.OPT_X_TLS_CERTFILE,
                f"/etc/certificates/{cert}.crt"
            )
            pyldap.set_option(
                pyldap.OPT_X_TLS_KEYFILE,
                f"/etc/certificates/{cert}.key"
            )
        pyldap.set_option(
            pyldap.OPT_X_TLS_CACERTFILE,
            '/etc/ssl/certs/ca-certificates.crt'
        )
        if data['security']['validate_certificates']:
            pyldap.set_option(
                pyldap.OPT_X_TLS_REQUIRE_CERT,
                pyldap.OPT_X_TLS_DEMAND
            )
        else:
            pyldap.set_option(
                pyldap.OPT_X_TLS_REQUIRE_CERT,
                pyldap.OPT_X_TLS_ALLOW
            )
        # Request a fresh TLS context so the options above take effect.
        pyldap.set_option(pyldap.OPT_X_TLS_NEWCTX, 0)

    def __perform_bind(self, data, uri, raise_error=True):
        """Initialize a handle for `uri` and bind per `data['bind_type']`.

        Returns the bind result, or False on failure when `raise_error` is
        unset. On any failure the cached handle is cleared.
        """
        try:
            self._handle = pyldap.initialize(uri)
        except Exception:
            self._handle = None
            if not raise_error:
                return False
            raise
        pyldap.set_option(pyldap.OPT_NETWORK_TIMEOUT, data['options']['dns_timeout'])
        self.__setup_ssl(data)
        if SSL(data['security']['ssl']) == SSL.USESTARTTLS:
            try:
                self._handle.start_tls_s()
            except Exception:
                self._handle = None
                if not raise_error:
                    return False
                raise
        try:
            if data['bind_type'] == 'ANONYMOUS':
                bound = self._handle.simple_bind_s()
            elif data['bind_type'] == 'EXTERNAL':
                bound = self._handle.sasl_non_interactive_bind_s('EXTERNAL')
            elif data['bind_type'] == 'GSSAPI':
                self._handle.set_option(pyldap.OPT_X_SASL_NOCANON, 1)
                self._handle.sasl_gssapi_bind_s()
                bound = True
            else:
                bound = self._handle.simple_bind_s(
                    data['credentials']['binddn'],
                    data['credentials']['bindpw']
                )
        except Exception:
            self._handle = None
            if not raise_error:
                return False
            raise
        return bound

    @ldap_client_lock
    def open(self, data, force_new=False):
        """
        We can only intialize a single host. In this case,
        we iterate through a list of hosts until we get one that
        works and then use that to set our LDAP handle.
        SASL GSSAPI bind only succeeds when DNS reverse lookup zone
        is correctly populated. Fall through to simple bind if this
        fails.
        """
        bound = False
        if self._handle and self.ldap_parameters == data and not force_new:
            return
        elif self._handle:
            self.close()
            self._handle = None
        if not data['uri_list']:
            raise ValueError("No URIs specified")
        saved_error = None
        for server in data['uri_list']:
            try:
                bound = self.__perform_bind(data, server)
            except Exception as e:
                saved_error = e
                bound = False
                continue
            if bound:
                break
        if not bound:
            # BUGFIX: this previously assigned `self.handle` (a typo), which
            # created a new attribute and left the stale `_handle` cached.
            self._handle = None
            if saved_error:
                raise saved_error
            else:
                raise RuntimeError(f"Failed to bind to URIs: {data['uri_list']}")
        # Remember the parameters that produced this bind so identical callers
        # can reuse the handle; deepcopy guards against caller-side mutation.
        self.ldap_parameters = copy.deepcopy(data)
        return

    @ldap_client_lock
    def close(self):
        """Unbind and drop the cached handle / parameters, if any."""
        if self._handle:
            self._handle.unbind()
            self._handle = None
            self.ldap_parameters = None

    @ldap_client_lock
    def search(self, ldap_config, basedn='', scope=pyldap.SCOPE_SUBTREE, filterstr='', sizelimit=0):
        """Perform a paged search, transparently re-opening a dead session once.

        Returns the accumulated result entries from all pages.
        """
        self.open(ldap_config)
        result = []
        clientctrls = None
        paged = SimplePagedResultsControl(
            criticality=False,
            size=self.pagesize,
            cookie=''
        )
        paged_ctrls = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
        retry = True
        page = 0
        while True:
            serverctrls = [paged]
            try:
                id_ = self._handle.search_ext(
                    basedn,
                    scope,
                    filterstr=filterstr,
                    attrlist=None,
                    attrsonly=0,
                    serverctrls=serverctrls,
                    clientctrls=clientctrls,
                    timeout=ldap_config['options']['timeout'],
                    sizelimit=sizelimit
                )
                (rtype, rdata, rmsgid, serverctrls) = self._handle.result3(
                    id_, resp_ctrl_classes=paged_ctrls
                )
            except Exception:
                # our session may have died, try to re-open one time before failing.
                if not retry:
                    raise
                self.open(ldap_config, True)
                retry = False
                continue
            result.extend(rdata)
            # Pull the server's paging cookie; an empty cookie means last page.
            paged.size = 0
            paged.cookie = cookie = None
            for sc in serverctrls:
                if sc.controlType == SimplePagedResultsControl.controlType:
                    cookie = sc.cookie
                    if cookie:
                        paged.cookie = cookie
                        paged.size = self.pagesize
                    break
            if not cookie:
                break
            page += 1
        return result
# Module-level singleton; all consumers share this one serialized client.
LdapClient = LDAPClient()
| 6,355 | Python | .py | 178 | 22.949438 | 100 | 0.521604 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,946 | constants.py | truenas_middleware/src/middlewared/middlewared/plugins/ldap_/constants.py | from middlewared.schema import Dict, Str, LDAP_DN
# Different server_types that we can auto-detect
SERVER_TYPE_ACTIVE_DIRECTORY = 'ACTIVE_DIRECTORY'
SERVER_TYPE_FREEIPA = 'FREEIPA'
SERVER_TYPE_GENERIC = 'GENERIC'
SERVER_TYPE_OPENLDAP = 'OPENLDAP'
# keys for search_bases in our LDAP plugin schema
SEARCH_BASE_USER = 'base_user'
SEARCH_BASE_GROUP = 'base_group'
SEARCH_BASE_NETGROUP = 'base_netgroup'
# keys for `passwd` attribute map
ATTR_USER_OBJ = 'user_object_class'
ATTR_USER_NAME = 'user_name'
ATTR_USER_UID = 'user_uid'
ATTR_USER_GID = 'user_gid'
ATTR_USER_GECOS = 'user_gecos'
ATTR_USER_HOMEDIR = 'user_home_directory'
ATTR_USER_SHELL = 'user_shell'
# keys for `shadow` attribute map
ATTR_SHADOW_OBJ = 'shadow_object_class'
ATTR_SHADOW_LAST_CHANGE = 'shadow_last_change'
ATTR_SHADOW_MIN = 'shadow_min'
ATTR_SHADOW_MAX = 'shadow_max'
ATTR_SHADOW_WARNING = 'shadow_warning'
ATTR_SHADOW_INACTIVE = 'shadow_inactive'
ATTR_SHADOW_EXPIRE = 'shadow_expire'
# keys for `group` attribute map
ATTR_GROUP_OBJ = 'group_object_class'
ATTR_GROUP_GID = 'group_gid'
ATTR_GROUP_MEMBER = 'group_member'
# keys for `netgroup` attribute map
ATTR_NETGROUP_OBJ = 'netgroup_object_class'
ATTR_NETGROUP_MEMBER = 'netgroup_member'
ATTR_NETGROUP_TRIPLE = 'netgroup_triple'
# Aggregate key collections used for iterating / validating the advanced
# LDAP options (see utils.py in this plugin).
LDAP_SEARCH_BASE_KEYS = (
    SEARCH_BASE_USER,
    SEARCH_BASE_GROUP,
    SEARCH_BASE_NETGROUP,
)
LDAP_PASSWD_MAP_KEYS = (
    ATTR_USER_OBJ,
    ATTR_USER_NAME,
    ATTR_USER_UID,
    ATTR_USER_GID,
    ATTR_USER_GECOS,
    ATTR_USER_HOMEDIR,
    ATTR_USER_SHELL,
)
LDAP_SHADOW_MAP_KEYS = (
    ATTR_SHADOW_OBJ,
    ATTR_SHADOW_LAST_CHANGE,
    ATTR_SHADOW_MIN,
    ATTR_SHADOW_MAX,
    ATTR_SHADOW_WARNING,
    ATTR_SHADOW_INACTIVE,
    ATTR_SHADOW_EXPIRE,
)
LDAP_GROUP_MAP_KEYS = (
    ATTR_GROUP_OBJ,
    ATTR_GROUP_GID,
    ATTR_GROUP_MEMBER
)
LDAP_NETGROUP_MAP_KEYS = (
    ATTR_NETGROUP_OBJ,
    ATTR_NETGROUP_MEMBER,
    ATTR_NETGROUP_TRIPLE
)
# Union of every attribute-map key across the four NSS maps.
LDAP_MAP_KEYS = set.union(
    set(LDAP_PASSWD_MAP_KEYS),
    set(LDAP_SHADOW_MAP_KEYS),
    set(LDAP_GROUP_MAP_KEYS),
    set(LDAP_NETGROUP_MAP_KEYS),
)
LDAP_ADVANCED_KEYS = set(LDAP_SEARCH_BASE_KEYS) | LDAP_MAP_KEYS
# Below are middleware schema configurations for advanced LDAP parameters
LDAP_SEARCH_BASES_SCHEMA_NAME = 'search_bases'
LDAP_SEARCH_BASES_SCHEMA = Dict(
    LDAP_SEARCH_BASES_SCHEMA_NAME,
    LDAP_DN(SEARCH_BASE_USER, null=True),
    LDAP_DN(SEARCH_BASE_GROUP, null=True),
    LDAP_DN(SEARCH_BASE_NETGROUP, null=True),
)
LDAP_PASSWD_MAP_SCHEMA_NAME = 'passwd'
LDAP_PASSWD_MAP_SCHEMA = Dict(
    LDAP_PASSWD_MAP_SCHEMA_NAME,
    Str(ATTR_USER_OBJ, null=True),
    Str(ATTR_USER_NAME, null=True),
    Str(ATTR_USER_UID, null=True),
    Str(ATTR_USER_GID, null=True),
    Str(ATTR_USER_GECOS, null=True),
    Str(ATTR_USER_HOMEDIR, null=True),
    Str(ATTR_USER_SHELL, null=True)
)
LDAP_SHADOW_MAP_SCHEMA_NAME = 'shadow'
LDAP_SHADOW_MAP_SCHEMA = Dict(
    LDAP_SHADOW_MAP_SCHEMA_NAME,
    Str(ATTR_SHADOW_OBJ, null=True),
    Str(ATTR_SHADOW_LAST_CHANGE, null=True),
    Str(ATTR_SHADOW_MIN, null=True),
    Str(ATTR_SHADOW_MAX, null=True),
    Str(ATTR_SHADOW_WARNING, null=True),
    Str(ATTR_SHADOW_INACTIVE, null=True),
    Str(ATTR_SHADOW_EXPIRE, null=True)
)
LDAP_GROUP_MAP_SCHEMA_NAME = 'group'
LDAP_GROUP_MAP_SCHEMA = Dict(
    LDAP_GROUP_MAP_SCHEMA_NAME,
    Str(ATTR_GROUP_OBJ, null=True),
    Str(ATTR_GROUP_GID, null=True),
    Str(ATTR_GROUP_MEMBER, null=True)
)
LDAP_NETGROUP_MAP_SCHEMA_NAME = 'netgroup'
LDAP_NETGROUP_MAP_SCHEMA = Dict(
    LDAP_NETGROUP_MAP_SCHEMA_NAME,
    Str(ATTR_NETGROUP_OBJ, null=True),
    Str(ATTR_NETGROUP_MEMBER, null=True),
    Str(ATTR_NETGROUP_TRIPLE, null=True)
)
LDAP_ATTRIBUTE_MAP_SCHEMA_NAME = 'attribute_maps'
LDAP_ATTRIBUTE_MAP_SCHEMA = Dict(
    LDAP_ATTRIBUTE_MAP_SCHEMA_NAME,
    LDAP_PASSWD_MAP_SCHEMA,
    LDAP_SHADOW_MAP_SCHEMA,
    LDAP_GROUP_MAP_SCHEMA,
    LDAP_NETGROUP_MAP_SCHEMA
)
# Maps each attribute-map schema name to the keys it contains; iterated by
# utils.attribute_maps_data_to_params().
LDAP_ATTRIBUTE_MAPS = {
    LDAP_PASSWD_MAP_SCHEMA_NAME: LDAP_PASSWD_MAP_KEYS,
    LDAP_SHADOW_MAP_SCHEMA_NAME: LDAP_SHADOW_MAP_KEYS,
    LDAP_GROUP_MAP_SCHEMA_NAME: LDAP_GROUP_MAP_KEYS,
    LDAP_NETGROUP_MAP_SCHEMA_NAME: LDAP_NETGROUP_MAP_KEYS
}
| 4,136 | Python | .py | 132 | 28.234848 | 73 | 0.728597 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,947 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/ldap_/utils.py | from . import constants
def search_base_data_to_params(data):
    """
    This method converts the data from our schema key `search_bases` into
    nslcd configuration information
    """
    # Config parameter name for each supported search-base key.
    param_by_base = {
        constants.SEARCH_BASE_USER: 'ldap_user_search_base',
        constants.SEARCH_BASE_GROUP: 'ldap_group_search_base',
        constants.SEARCH_BASE_NETGROUP: 'ldap_netgroup_search_base',
    }
    search_params = []
    for base in constants.LDAP_SEARCH_BASE_KEYS:
        value = data.get(base)
        if not value:
            continue
        param = param_by_base.get(base)
        if param is None:
            raise ValueError(f'{base}: unexpected LDAP search base type')
        search_params.append(f'{param} = {value}')
    return search_params
def attribute_maps_data_to_params(data):
    """
    This method converts the data from our schema key `attribute_maps` into
    nslcd configuration information
    """
    # Attributes rendered as "<param> = (objectClass=<value>)".
    object_class_params = {
        constants.ATTR_USER_OBJ: 'ldap_user_object_class',
        constants.ATTR_GROUP_OBJ: 'ldap_group_object_class',
        constants.ATTR_NETGROUP_OBJ: 'ldap_netgroup_object_class',
    }
    # Attributes rendered as "<param> = <value>".
    plain_params = {
        constants.ATTR_USER_NAME: 'ldap_user_name',
        constants.ATTR_USER_UID: 'ldap_user_uid_number',
        constants.ATTR_USER_GID: 'ldap_user_gid_number',
        constants.ATTR_USER_GECOS: 'ldap_user_gecos',
        constants.ATTR_USER_HOMEDIR: 'ldap_user_home_directory',
        constants.ATTR_USER_SHELL: 'ldap_user_shell',
        constants.ATTR_SHADOW_LAST_CHANGE: 'ldap_user_shadow_last_change',
        constants.ATTR_SHADOW_MIN: 'ldap_user_shadow_min',
        constants.ATTR_SHADOW_MAX: 'ldap_user_shadow_max',
        constants.ATTR_SHADOW_WARNING: 'ldap_user_shadow_warning',
        constants.ATTR_SHADOW_INACTIVE: 'ldap_user_shadow_inactive',
        constants.ATTR_SHADOW_EXPIRE: 'ldap_user_shadow_expire',
        constants.ATTR_GROUP_GID: 'ldap_group_gid_number',
        constants.ATTR_GROUP_MEMBER: 'ldap_group_member',
        constants.ATTR_NETGROUP_MEMBER: 'ldap_netgroup_member',
        constants.ATTR_NETGROUP_TRIPLE: 'ldap_netgroup_triple',
    }
    map_params = []
    for nss_type, keys in constants.LDAP_ATTRIBUTE_MAPS.items():
        nss_data = data.get(nss_type, {})
        for key in keys:
            value = nss_data.get(key)
            if not value:
                continue
            if key == constants.ATTR_SHADOW_OBJ:
                # SSSD does not support overriding object class for shadow
                map_params.append('')
            elif key in object_class_params:
                map_params.append(f'{object_class_params[key]} = (objectClass={value})')
            elif key in plain_params:
                map_params.append(f'{plain_params[key]} = {value}')
            else:
                raise ValueError(f'{key}: unexpected attribute map parameter for {nss_type}')
    return map_params
| 4,030 | Python | .py | 79 | 36.151899 | 97 | 0.565736 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,948 | cert_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/idmap_/cert_attachments.py | from middlewared.common.attachment.certificate import CertificateCRUDServiceAttachmentDelegate
class IdmapCertificateAttachmentDelegate(CertificateCRUDServiceAttachmentDelegate):
    """Certificate attachment delegate for idmap entries.

    `redeploy` is overridden to do nothing: no service restart is performed
    here when a certificate referenced by idmap changes.
    """
    # Datastore filter used to find idmap entries referencing a certificate.
    CERT_FILTER_KEY = 'certificate.id'
    HUMAN_NAME = 'IDMAP Service'
    NAMESPACE = 'idmap'
    async def redeploy(self, cert_id):
        # Deliberately a no-op, overriding whatever the base class would do.
        pass
async def setup(middleware):
    # Plugin entry point: register this delegate with the certificate service.
    await middleware.call('certificate.register_attachment_delegate', IdmapCertificateAttachmentDelegate(middleware))
| 480 | Python | .py | 9 | 48.555556 | 117 | 0.819355 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,949 | idmap_constants.py | truenas_middleware/src/middlewared/middlewared/plugins/idmap_/idmap_constants.py | import enum
import wbclient
# Base datastore `id` value for entries that are returned as user.query and
# group.query results. We add the posix uid / gid to this number to ensure
# that it is unique at a given point in time.
BASE_SYNTHETIC_DATASTORE_ID = 100000000
TRUENAS_IDMAP_MAX = 2147000000  # Maximum ID that we allow winbind / sssd to provide
TRUENAS_IDMAP_DEFAULT_LOW = 90000001
# Well-known SID prefixes: Unix users (S-1-22-1), Unix groups (S-1-22-2),
# and the NT BUILTIN domain (S-1-5-32).
SID_LOCAL_USER_PREFIX = "S-1-22-1-"
SID_LOCAL_GROUP_PREFIX = "S-1-22-2-"
SID_BUILTIN_PREFIX = "S-1-5-32-"
# Upper bound on entries per bulk idmap lookup (see WBClient._batch_request).
MAX_REQUEST_LENGTH = 100
class IDType(enum.IntEnum):
    """
    SSSD and libwbclient use identical values for id types
    """
    USER = wbclient.ID_TYPE_UID
    GROUP = wbclient.ID_TYPE_GID
    BOTH = wbclient.ID_TYPE_BOTH

    def wbc_str(self):
        # py-libwbclient uses the string representation of the id type
        if self is IDType.USER:
            return "UID"
        if self is IDType.GROUP:
            return "GID"
        return "BOTH"
| 983 | Python | .py | 28 | 30.178571 | 84 | 0.678609 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,950 | gencache.py | truenas_middleware/src/middlewared/middlewared/plugins/idmap_/gencache.py | import enum
import wbclient
from middlewared.service import Service
from middlewared.service_exception import MatchNotFound
from middlewared.utils import filter_list
from middlewared.utils.tdb import (
get_tdb_handle,
TDBDataType,
TDBError,
TDBOptions,
TDBPathType,
)
# Path of Samba's gencache TDB and the handle options used to open it.
GENCACHE_FILE = '/var/run/samba-lock/gencache.tdb'
GENCACHE_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
class IDMAPCacheType(enum.Enum):
    """gencache key prefixes for the idmap-related cache entry types."""
    UID2SID = 'IDMAP/UID2SID'
    GID2SID = 'IDMAP/GID2SID'
    SID2XID = 'IDMAP/SID2XID'
    SID2NAME = 'SID2NAME'
    NAME2SID = 'NAME2SID'
def fetch_gencache_entry(key: str) -> str:
    """Return the raw gencache value stored under `key`."""
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
        return hdl.get(key)
def store_gencache_entry(key: str, val: str) -> None:
    """Write `val` under `key` in gencache."""
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
        return hdl.store(key, val)
def remove_gencache_entry(key: str) -> None:
    """Delete the gencache entry stored under `key`."""
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
        return hdl.delete(key)
def wipe_gencache_entries() -> None:
    """ wrapper around tdb_wipe_all for file """
    # Clears the whole file in one operation; used as a fallback when the
    # TDB is corrupt (see Gencache.flush / del_idmap_cache_entry).
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
        return hdl.clear()
def flush_gencache_entries() -> None:
    """
    delete all keys in gencache
    This matches behavior of "net cache flush" which iterates and
    deletes entries. If we fail due to corrupt TDB file then it will
    be wiped.
    """
    # NOTE: the corrupt-file wipe fallback lives in the caller
    # (Gencache.flush); this function only iterates and deletes keys.
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
        for entry in hdl.entries():
            hdl.delete(entry['key'])
def query_gencache_entries(filters: list, options: dict) -> list | dict:
    """Apply middleware query-filters / query-options to all gencache entries."""
    with get_tdb_handle(GENCACHE_FILE, GENCACHE_TDB_OPTIONS) as hdl:
        return filter_list(hdl.entries(), filters, options)
class Gencache(Service):
    """Private accessors for idmap-related entries in Samba's gencache TDB."""

    class Config:
        namespace = 'idmap.gencache'
        cli_private = True
        private = True

    def __construct_gencache_key(self, data):
        # Validate `data['entry']` for its cache type, then build the
        # "<prefix>/<entry>" key used by Samba.
        cache_type = IDMAPCacheType[data['entry_type']]
        if cache_type in (IDMAPCacheType.UID2SID, IDMAPCacheType.GID2SID):
            entry = data['entry']
            if not isinstance(entry, int):
                raise ValueError(f'{entry}: UID/GID must be integer')
        elif cache_type in (IDMAPCacheType.SID2XID, IDMAPCacheType.SID2NAME):
            entry = data['entry'].upper()
            if not wbclient.sid_is_valid(entry):
                raise ValueError(f'{entry}: not a valid SID')
        elif cache_type is IDMAPCacheType.NAME2SID:
            entry = data['entry'].upper()
        else:
            raise NotImplementedError(data["entry_type"])
        return f'{cache_type.value}/{entry}'

    def get_idmap_cache_entry(self, data):
        return fetch_gencache_entry(self.__construct_gencache_key(data))

    def del_idmap_cache_entry(self, data):
        key = self.__construct_gencache_key(data)
        try:
            return remove_gencache_entry(key)
        except RuntimeError as e:
            if not e.args:
                raise e from None
            err = e.args[0]
            if err == TDBError.CORRUPT:
                # Corrupt cache file: blow it away, then surface the error.
                wipe_gencache_entries()
                raise e from None
            if err == TDBError.NOEXIST:
                raise MatchNotFound(key) from None
            raise e from None

    def flush(self):
        """
        Perform equivalent of `net cache flush`.
        """
        try:
            flush_gencache_entries()
        except RuntimeError as e:
            if not e.args or e.args[0] != TDBError.CORRUPT:
                raise e from None
            # Corrupt file: fall back to wiping it entirely.
            wipe_gencache_entries()
| 3,811 | Python | .py | 95 | 31.294737 | 80 | 0.632692 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,951 | idmap_winbind.py | truenas_middleware/src/middlewared/middlewared/plugins/idmap_/idmap_winbind.py | import errno
import wbclient
from .idmap_constants import IDType, MAX_REQUEST_LENGTH
from middlewared.utils.itertools import batched
from middlewared.service_exception import MatchNotFound
# Map libwbclient WBC_ERR_* codes to the closest errno value (None = success).
WBCErr = {
    wbclient.WBC_ERR_SUCCESS: None,
    wbclient.WBC_ERR_NOT_IMPLEMENTED: errno.ENOSYS,
    wbclient.WBC_ERR_UNKNOWN_FAILURE: errno.EFAULT,
    wbclient.WBC_ERR_NO_MEMORY: errno.ENOMEM,
    wbclient.WBC_ERR_INVALID_SID: errno.EINVAL,
    wbclient.WBC_ERR_WINBIND_NOT_AVAILABLE: errno.ENOTCONN,
    wbclient.WBC_ERR_DOMAIN_NOT_FOUND: errno.ENOENT,
    wbclient.WBC_ERR_INVALID_RESPONSE: errno.EBADMSG,
    wbclient.WBC_ERR_NSS_ERROR: errno.EFAULT,
    wbclient.WBC_ERR_AUTH_ERROR: errno.EPERM,
    wbclient.WBC_ERR_UNKNOWN_USER: errno.ENOENT,
    wbclient.WBC_ERR_UNKNOWN_GROUP: errno.ENOENT,
    wbclient.WBC_ERR_PWD_CHANGE_FAILED: errno.EFAULT
}
class WBClient:
    """Wrapper around libwbclient (winbind) for SID / uid / gid mapping."""

    def __init__(self, **kwargs):
        self.ctx = wbclient.Ctx()
        self.dom = {}  # cache of initialized wbclient domain objects by name
        self.separator = self.ctx.separator.decode()

    def _pyuidgid_to_dict(self, entry):
        """ convert python wbclient uidgid object to dictionary """
        return {
            'id_type': IDType(entry.id_type).name,
            'id': entry.id,
            'name': f'{entry.domain}{self.separator}{entry.name}' if entry.name else None,
            'sid': entry.sid
        }

    def _as_dict(self, results, convert_unmapped=False):
        # Replace uidgid objects with plain dicts in-place.
        for entry in list(results['mapped'].keys()):
            new = self._pyuidgid_to_dict(results['mapped'][entry])
            results['mapped'][entry] = new
        # The unmapped entry value may be uidgid type or simply SID string
        # in latter case we shouldn't try to convert
        if convert_unmapped:
            for entry in list(results['unmapped'].keys()):
                new = self._pyuidgid_to_dict(results['unmapped'][entry])
                results['unmapped'][entry] = new
        return results

    def init_domain(self, name='$thisdom'):
        """Return (and cache) the wbclient domain object for `name`."""
        domain = self.dom.get(name)
        if domain:
            return domain
        if name == '$thisdom':
            domain = self.ctx.domain()
        else:
            domain = self.ctx.domain(name)
        self.dom[name] = domain
        return domain

    def ping_dc(self, name='$thisdom'):
        """ perform wbinfo --ping-dc """
        dom = self.init_domain(name)
        return dom.ping_dc()

    def check_trust(self, name='$thisdom'):
        """ perform wbinfo -t """
        dom = self.init_domain(name)
        return dom.check_secret()

    def domain_info(self, name='$thisdom'):
        """ perform wbinfo --domain-info """
        dom = self.init_domain(name)
        return dom.domain_info()

    def _batch_request(self, request_fn, list_in):
        # Issue lookups in chunks of MAX_REQUEST_LENGTH, merging the results.
        output = {'mapped': {}, 'unmapped': {}}
        for chunk in batched(list_in, MAX_REQUEST_LENGTH):
            results = request_fn(list(chunk))
            output['mapped'] |= results['mapped']
            output['unmapped'] |= results['unmapped']
        return output

    def sids_to_idmap_entries(self, sidlist):
        """
        Bulk conversion of SIDs to idmap entries
        Returns dictionary:
        {"mapped": {}, "unmapped": {}
        `mapped` contains entries keyed by SID
        sid: {
            'id': uid or gid,
            'id_type': string ("USER", "GROUP", "BOTH"),
            'name': string,
            'sid': sid string
        }
        `unmapped` contains enries keyed by SID as well
        but they only map to the sid itself. This is simply
        to facilitate faster lookups of failures.
        """
        data = self._batch_request(
            self.ctx.uid_gid_objects_from_sids,
            sidlist
        )
        return self._as_dict(data)

    def users_and_groups_to_idmap_entries(self, uidgids):
        """
        Bulk conversion of list of dictionaries containing the
        following keys:
        id_type : string. possible values "USER", "GROUP"
        id : integer
        Returns dictionary:
        {"mapped": {}, "unmapped": {}
        `mapped` contains entries keyed by string with either
        UID:<xid>, or GID:<xid>
        'UID:1000': {
            'id': 1000,
            'id_type': 'USER',
            'name': 'bob',
            'sid': sid string
        }
        """
        payload = [{
            'id_type': IDType[entry["id_type"]].wbc_str(),
            'id': entry['id']
        } for entry in uidgids]
        data = self._batch_request(
            self.ctx.uid_gid_objects_from_unix_ids,
            payload
        )
        return self._as_dict(data, True)

    def sid_to_idmap_entry(self, sid):
        """Convert a single SID to an idmap entry dict.

        Raises:
            MatchNotFound
        """
        # BUGFIX: previously called the non-existent method
        # `sids_to_users_and_groups`, raising AttributeError on every call.
        mapped = self.sids_to_idmap_entries([sid])['mapped']
        if not mapped:
            raise MatchNotFound(sid)
        return mapped[sid]

    def name_to_idmap_entry(self, name):
        """Convert a user or group name to an idmap entry dict.

        Raises:
            MatchNotFound
        """
        try:
            entry = self.ctx.uid_gid_object_from_name(name)
        except wbclient.WBCError as e:
            if e.error_code == wbclient.WBC_ERR_DOMAIN_NOT_FOUND:
                raise MatchNotFound
            raise
        return self._pyuidgid_to_dict(entry)

    def uidgid_to_idmap_entry(self, data):
        """
        Convert payload specified in `data` to a idmap entry. Wraps around
        users_and_groups_to_idmap_entries. See above.
        Raises:
            MatchNotFound
        """
        mapped = self.users_and_groups_to_idmap_entries([data])['mapped']
        if not mapped:
            raise MatchNotFound(str(data))
        return mapped[f'{IDType[data["id_type"]].wbc_str()}:{data["id"]}']

    def all_domains(self):
        return self.ctx.all_domains()
| 5,645 | Python | .py | 146 | 29.520548 | 90 | 0.594471 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,952 | idmap_sss.py | truenas_middleware/src/middlewared/middlewared/plugins/idmap_/idmap_sss.py | import pysss_nss_idmap as sssclient
from .idmap_constants import IDType
from middlewared.service_exception import MatchNotFound
class SSSClient:
def _username_to_entry(self, username):
"""
Sample entry returned by pysss_nss_idmap
`getsidbyusername`
{'smbuser': {'sid': 'S-1-5-21-3696504179-2855309571-923743039-1020', 'type': 1}}
`getidbysid`
{'S-1-5-21-3696504179-2855309571-923743039-1020': {'id': 565200020, 'type': 1}}
Sample of what we return:
{
'id_type': 'USER',
'id': 565200020,
'name': 'smbuser',
'sid': 'S-1-5-21-3696504179-2855309571-923743039-1020'
}
"""
if not (sid_entry := sssclient.getsidbyusername(username)):
return None
sid = sid_entry[username]['sid']
id_type = sid_entry[username]['type']
if not (id_entry := sssclient.getidbysid(sid)):
return None
return {
'id_type': IDType(id_type).name,
'id': id_entry[sid]['id'],
'name': username,
'sid': sid
}
def _groupname_to_entry(self, groupname):
"""
Sample entry returned by pysss_nss_idmap
`getsidbygroupname`
{'smbuser': {'sid': 'S-1-5-21-3696504179-2855309571-923743039-1020', 'type': 1}}
`getidbysid`
{'S-1-5-21-3696504179-2855309571-923743039-1020': {'id': 565200020, 'type': 1}}
Sample of what we return:
{
'id_type': 'GROUP',
'id': 565200020,
'name': 'smbuser',
'sid': 'S-1-5-21-3696504179-2855309571-923743039-1020'
}
"""
if not (sid_entry := sssclient.getsidbygroupname(groupname)):
return None
sid = sid_entry[groupname]['sid']
id_type = sid_entry[groupname]['type']
if not (id_entry := sssclient.getidbysid(sid)):
return None
return {
'id_type': IDType(id_type).name,
'id': id_entry[sid]['id'],
'name': groupname,
'sid': sid
}
def _gid_to_entry(self, gid):
""" convert gid to idmap entry dict -- see above _groupname_to_entry()"""
if not (sid_entry := sssclient.getsidbygid(gid)):
return None
sid = sid_entry[gid]['sid']
id_type = sid_entry[gid]['type']
if not (name_entry := sssclient.getnamebysid(sid)):
return None
return {
'id_type': IDType(id_type).name,
'id': gid,
'name': name_entry[sid]['name'],
'sid': sid
}
def _uid_to_entry(self, uid):
""" convert gid to idmap entry dict -- see above _username_to_entry()"""
if not (sid_entry := sssclient.getsidbyuid(uid)):
return None
sid = sid_entry[uid]['sid']
id_type = sid_entry[uid]['type']
if not (name_entry := sssclient.getnamebysid(sid)):
return None
return {
'id_type': IDType(id_type).name,
'id': uid,
'name': name_entry[sid]['name'],
'sid': sid
}
def _sid_to_entry(self, sid):
""" convert sid to idmap entry dict -- see above _username_to_entry()"""
if not (id_entry := sssclient.getidbysid(sid)):
return None
if not (name_entry := sssclient.getnamebysid(sid)):
return None
return {
'id_type': IDType(id_entry[sid]['type']).name,
'id': id_entry[sid]['id'],
'name': name_entry[sid]['name'],
'sid': sid
}
def sids_to_idmap_entries(self, sidlist):
"""
Bulk conversion of list of sids to idmap entries
sample output:
{
"mapped": {
"S-1-5-21-3696504179-2855309571-923743039-1020": {
"id_type": "USER",
"id": 565200020,
"name": "smbuser",
"sid": "S-1-5-21-3696504179-2855309571-923743039-1020"
}
}
"unmapped": {
"S-1-5-21-3696504179-2855309571-923743039-1022": "S-1-5-21-3696504179-2855309571-923743039-1022"
}
}
"""
out = {'mapped': {}, 'unmapped': {}}
for sid in sidlist:
if not (entry := self._sid_to_entry(sid)):
out['unmapped'][sid] = sid
continue
out['mapped'][sid] = entry
return out
def users_and_groups_to_idmap_entries(self, uidgids):
"""
Bulk conversion of list of sids to idmap entries
sample output:
{
"mapped": {
"UID:565200020": {
"id_type": "USER",
"id": 565200020,
"name": "smbuser",
"sid": "S-1-5-21-3696504179-2855309571-923743039-1020"
}
}
"unmapped": {
"UID:565200020": None
}
}
"""
out = {'mapped': {}, 'unmapped': {}}
for uidgid in uidgids:
match uidgid['id_type']:
case 'GROUP':
entry = self._gid_to_entry(uidgid['id'])
case 'USER':
entry = self._uid_to_entry(uidgid['id'])
case 'BOTH':
if not (entry := self._gid_to_entry(uidgid['id'])):
entry = self._uid_to_entry(uidgid['id'])
case _:
raise ValueError(f'{uidgid["id_type"]}: Unknown id_type')
key = f'{IDType[uidgid["id_type"]].wbc_str()}:{uidgid["id"]}'
if not entry:
out['unmapped'][key] = entry
continue
out['mapped'][key] = entry
return out
def sid_to_idmap_entry(self, sid):
""" convert a single sid to an idmap entry dict """
if not (entry := self._sid_to_entry(sid)):
raise MatchNotFound(sid)
return entry
def name_to_idmap_entry(self, name):
""" convert a single name (user or group) to an idmap entry dict """
if entry := self._groupname_to_entry(name):
return entry
if entry := self._username_to_entryd(name):
return entry
raise MatchNotFound(name)
def uidgid_to_idmap_entry(self, data):
""" convert a single name (user or group) to an idmap entry dict """
mapped = self.users_and_groups_to_idmap_entries([data])['mapped']
if not mapped:
raise MatchNotFound(str(data))
key = f'{IDType[data["id_type"]].wbc_str()}:{data["id"]}'
return mapped[key]
| 6,668 | Python | .py | 178 | 26.320225 | 108 | 0.516835 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,953 | backend.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/backend.py | import os
import threading
import time
from sqlalchemy import create_engine, inspect
from sqlalchemy import and_, func, select
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import nullsfirst, nullslast
from middlewared.schema import accepts, Ref, Str
from middlewared.service import periodic, private, Service
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.plugins.audit.utils import AUDITED_SERVICES, audit_file_path, AUDIT_TABLES
from middlewared.plugins.datastore.filter import FilterMixin
from middlewared.plugins.datastore.schema import SchemaMixin
class SQLConn:
    """
    Wrapper around a SQLAlchemy connection to a single per-service audit
    database (a sqlite3 file populated by syslog-ng). All access is
    serialized through an RLock, and an O_PATH file descriptor is kept open
    on the database file so that deletion / replacement / renaming of the
    file can be detected before queries are executed.
    """
    def __init__(self, svc, vers):
        svcs = [svc[0] for svc in AUDITED_SERVICES]
        if svc not in svcs:
            raise ValueError(f'{svc}: unknown service')
        self.table = AUDIT_TABLES[svc]
        # e.g. "audit_SMB_0_1" -- dots in the version are not usable in names
        self.table_name = f'audit_{svc}_{str(vers).replace(".", "_")}'
        self.path = audit_file_path(svc)
        self.engine = None  # created by setup()
        self.connection = None  # created by setup()
        self.lock = threading.RLock()
        self.dbfd = -1  # O_PATH fd on the db file; -1 while unopened
    def audit_table_exists(self):
        """
        syslog-ng creates the audit table on first message insertion, and
        so it's reasonable to expect a freshly installed or upgraded system
        to have empty sqlite3 databases.
        """
        with self.lock:
            return inspect(self.engine).has_table(self.table_name)
    def setup(self):
        """(Re)initialize engine, connection, and O_PATH fd, disposing of any previous ones."""
        with self.lock:
            if self.engine is not None:
                self.engine.dispose()
            if self.connection is not None:
                self.connection.close()
            if self.dbfd != -1:
                os.close(self.dbfd)
                self.dbfd = -1
            self.engine = create_engine(
                f'sqlite:///{self.path}',
                connect_args={'check_same_thread': False}
            )
            self.connection = self.engine.connect()
            # Reclaim free pages, then switch to WAL journaling for better
            # concurrency between the syslog-ng writer and our reads.
            self.connection.connection.execute('VACUUM')
            self.connection.execute('PRAGMA journal_mode=WAL')
            self.dbfd = os.open(self.path, os.O_PATH)
    def fetchall(self, query, params=None):
        """
        Execute `query` and return all rows.

        Raises RuntimeError when the backing file was deleted, replaced, or
        renamed since setup() (detected via the O_PATH fd); callers are
        expected to call setup() again in that case. A query that fails only
        because the audit table does not exist yet returns [].
        """
        with self.lock:
            # st_nlink == 0 means the file we hold open was unlinked
            if (st := os.fstat(self.dbfd)).st_nlink == 0:
                raise RuntimeError(
                    f'{self.path}: audit database was unexpectedly deleted.'
                )
            try:
                # A different inode at the same path means it was replaced
                if os.lstat(self.path).st_ino != st.st_ino:
                    raise RuntimeError(
                        f'{self.path}: audit database was unexpectedly replaced.'
                    )
            except FileNotFoundError:
                raise RuntimeError(f'{self.path}: audit database was renamed.')
            try:
                cursor = self.connection.execute(query, params or [])
            except DBAPIError as e:
                # We want to squash errors that are due to presence of missing
                # table. See note for audit_table_exists() method.
                if not str(e.orig).startswith('no such table'):
                    raise
                return []
            try:
                return cursor.fetchall()
            finally:
                cursor.close()
    def enforce_retention(self, days):
        """Delete entries whose message_timestamp is older than `days` days, then VACUUM."""
        if not days or days < 0:
            raise ValueError("Days must be positive value greater than zero.")
        if not self.audit_table_exists():
            return
        secs = days * 86400
        cutoff_ts = int(time.time()) - secs
        with self.lock:
            with Session(self.engine) as s:
                expired = s.query(self.table).filter(self.table.c.message_timestamp < cutoff_ts)
                expired.delete(synchronize_session=False)
                s.commit()
            self.connection.connection.execute('VACUUM')
class AuditBackendService(Service, FilterMixin, SchemaMixin):
class Config:
private = True
connections = {svc[0]: SQLConn(*svc) for svc in AUDITED_SERVICES}
@private
    def setup(self):
        """
        This method reinitializes the database connections for audit databases.
        Examples of when this is necessary are:
        - initial middlewared startup
        - after audit database deletion or rename
        """
        for svc, conn in self.connections.items():
            # Dismiss any existing AuditSetup one-shot alerts
            self.middleware.call_sync('alert.oneshot_delete', 'AuditBackendSetup', {"service": svc})
            try:
                conn.setup()
            except Exception:
                # Raise a per-service alert so the failure is visible; the
                # remaining services' connections are still attempted.
                self.middleware.call_sync('alert.oneshot_create', 'AuditBackendSetup', {"service": svc})
                self.logger.error(
                    '%s: failed to set up auditing database connection.',
                    svc, exc_info=True
                )
@private
def serialize_results(self, results, table, select):
out = []
for row in results:
entry = {}
for column in table.c:
column_name = str(column.name)
if select and column_name not in select:
continue
entry[column_name] = row[column]
out.append(entry)
return out
def __fetchall(self, conn, qs):
try:
data = conn.fetchall(qs)
except RuntimeError:
self.logger.critical('Failed to fetch information from audit database', exc_info=True)
conn.setup()
data = conn.fetchall(qs)
return data
@private
@accepts(
Str('db_name', enum=[svc[0] for svc in AUDITED_SERVICES], required=True),
Ref('query-filters'),
Ref('query-options')
)
    def query(self, db_name, filters, options):
        """
        Query the specified auditable service's database based on the specified
        `query-filters` and `query-options`. This is the private endpoint for the
        audit backend and so it should generally not be used by websocket API
        consumers except in special circumstances.
        """
        conn = self.connections[db_name]
        # Copy so the prefix rewriting below does not mutate caller's options
        order_by = options.get('order_by', []).copy()
        from_ = conn.table
        if conn.connection is None:
            raise CallError(
                f'{db_name}: connection to audit database is not initialized.'
            )
        if options['count']:
            qs = select([func.count('ROW_ID')]).select_from(from_)
        else:
            columns = list(conn.table.c)
            qs = select(columns).select_from(from_)
        if filters:
            qs = qs.where(and_(*self._filters_to_queryset(filters, conn.table, None, {})))
        if options['count']:
            if not (results := self.__fetchall(conn, qs)):
                return 0
            return results[0][0]
        if order_by:
            for i, order in enumerate(order_by):
                wrapper = None
                # 'nulls_first:'/'nulls_last:' prefixes select NULL placement;
                # a leading '-' on the column name requests descending order.
                if order.startswith('nulls_first:'):
                    wrapper = nullsfirst
                    order = order[len('nulls_first:'):]
                elif order.startswith('nulls_last:'):
                    wrapper = nullslast
                    order = order[len('nulls_last:'):]
                if order.startswith('-'):
                    order_by[i] = self._get_col(conn.table, order[1:], None).desc()
                else:
                    order_by[i] = self._get_col(conn.table, order, None)
                if wrapper is not None:
                    order_by[i] = wrapper(order_by[i])
            qs = qs.order_by(*order_by)
        if options['offset']:
            qs = qs.offset(options['offset'])
        if options['limit']:
            qs = qs.limit(options['limit'])
        result = self.__fetchall(conn, qs)
        if options['get']:
            try:
                return result[0]
            except IndexError:
                raise MatchNotFound() from None
        return self.serialize_results(result, conn.table, options.get('select'))
@private
@periodic(interval=86400)
    def __lifecycle_cleanup(self):
        """
        This is a private method that should only be called as a periodic task.
        It deletes database entries that are older than the specified lifetime.
        """
        retention_period = self.middleware.call_sync('audit.config')['retention']
        for svc, conn in self.connections.items():
            try:
                conn.enforce_retention(retention_period)
            except Exception:
                # Keep going: one service's database failing to prune should
                # not prevent retention enforcement on the others.
                self.logger.error(
                    "%s: failed to enforce retention on audit DB.",
                    svc, exc_info=True
                )
        try:
            # Also prune exported report files past the retention period
            self.middleware.call_sync('audit.cleanup_reports')
        except Exception:
            self.logger.warning(
                'Cleanup of auditing report directory failed',
                exc_info=True
            )
| 8,931 | Python | .py | 213 | 30.131455 | 104 | 0.574856 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,954 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/utils.py | import middlewared.sqlalchemy as sa
import os
from sqlalchemy import Table
from sqlalchemy.orm import declarative_base
from .schema.common import AuditEventParam
AUDIT_DATASET_PATH = '/audit'
# (service name, audit schema version) pairs for every audited service
AUDITED_SERVICES = [('MIDDLEWARE', 0.1), ('SMB', 0.1), ('SUDO', 0.1)]
AUDIT_TABLE_PREFIX = 'audit_'
# Defaults for the audit configuration: retention in days, reservation and
# quota in GiB (0 = disabled), fill thresholds in percent (see AuditService).
AUDIT_LIFETIME = 7
AUDIT_DEFAULT_RESERVATION = 0
AUDIT_DEFAULT_QUOTA = 0
AUDIT_DEFAULT_FILL_CRITICAL = 95
AUDIT_DEFAULT_FILL_WARNING = 75
AUDIT_REPORTS_DIR = os.path.join(AUDIT_DATASET_PATH, 'reports')
# Audit table columns that may be used verbatim in SQL filters / selects.
# JSON-valued columns (service_data / event_data) are deliberately excluded.
SQL_SAFE_FIELDS = (
    AuditEventParam.AUDIT_ID.value,
    AuditEventParam.MESSAGE_TIMESTAMP.value,
    AuditEventParam.ADDRESS.value,
    AuditEventParam.USERNAME.value,
    AuditEventParam.SESSION.value,
    AuditEventParam.SERVICE.value,
    AuditEventParam.EVENT.value,
    AuditEventParam.SUCCESS.value,
)
# Declarative base whose metadata collects the generated audit tables below
AuditBase = declarative_base()
def audit_program(svc):
    """Return the syslog program name whose messages carry `svc` audit records."""
    return 'sudo' if svc == 'SUDO' else f'TNAUDIT_{svc}'
def audit_custom_section(svc, section):
    """
    Can be used to control whether generic SVC mako rendering applies for
    this section/service. Only SUDO's "log" section is custom.
    """
    return svc == 'SUDO' and section == 'log'
def audit_file_path(svc):
    """Return the absolute path of the sqlite3 audit database for `svc`."""
    return AUDIT_DATASET_PATH + '/' + svc + '.db'
def audit_table_name(svc, vers):
    """Return the audit table name for `svc` at schema version `vers` (dots become underscores)."""
    version_part = str(vers).replace('.', '_')
    return AUDIT_TABLE_PREFIX + svc + '_' + version_part
def generate_audit_table(svc, vers):
    """
    Return a sqlalchemy Table describing the audit database for service
    `svc` at schema version `vers`, registered on AuditBase.metadata.

    NOTE: any changes to audit table schemas should be typically be
    accompanied by a version bump for the audited service and update
    to the guiding design document for structured auditing NEP-041
    and related documents. This will potentially entail changes to
    audit-related code in the above AUDIT_SERVICES independent of the
    middleware auditing backend.
    Currently the sa.DateTime() does not give us fractional second
    precision, but for the purpose of our query interfaces, this
    should be sufficient to figure out when events happened.
    """
    return Table(
        audit_table_name(svc, vers),
        AuditBase.metadata,
        sa.Column('audit_id', sa.String(36)),
        sa.Column('message_timestamp', sa.Integer()),
        sa.Column('timestamp', sa.DateTime()),
        sa.Column('address', sa.String()),
        sa.Column('username', sa.String()),
        sa.Column('session', sa.String()),
        # per-service payload; schema varies by service (nullable JSON)
        sa.Column('service_data', sa.JSON(dict), nullable=True),
        sa.Column('event', sa.String()),
        # per-event payload; schema varies by event type (nullable JSON)
        sa.Column('event_data', sa.JSON(dict), nullable=True),
        sa.Column('success', sa.Boolean())
    )
def parse_query_filters(
    services: list,
    filters: list,
    skip_sql_filters: bool
) -> tuple:
    """
    NOTE: this method should only be called by audit.query
    This method tries to optimize audit query based on provided filter.
    Optimizations are:
    1. limit databases queried
    2. generate sql filters where appropriate
    returns a tuple of services that should be queried on backend and validated
    SQL-safe filters.
    We err on side of caution here since we're dealing with audit results.
    This means that we skip optimized filters if the field is a JSON one, and
    do not try to pass disjunctions to sqlalchemy. In future if needed we
    can loosen these restrictions with appropriate levels of testing and
    validation in auditbackend plugin.
    """
    # `services_in` keeps the originally requested set; comparing against it
    # below detects whether a restricting 'service' filter was seen already.
    services_to_check = services_in = set(services)
    filters_out = []
    for f in filters:
        if len(f) != 3:
            # Not a simple [field, op, value] triple (e.g. a conjunction);
            # leave it for filter_list in the caller.
            continue
        if f[0] == 'service':
            # we are potentially limiting which services may be audited
            if isinstance(f[2], str):
                svcs = set([f[2]])
            else:
                svcs = set(f[2])
            match f[1]:
                case '=' | 'in':
                    if services_in == services_to_check:
                        # NOTE(review): the first '='/'in' filter replaces the
                        # set outright, so it can select a service that is not
                        # in `services` (e.g. SMB with the default service
                        # list) -- looks intentional, but confirm.
                        services_to_check = svcs
                    else:
                        services_to_check = services_to_check & svcs
                case '!=' | 'nin':
                    services_to_check = services_to_check - svcs
                case _:
                    # Other filters quite unlikely to be used
                    # by end-users so we'll just skip optimization
                    # and rely on filter_list later on
                    pass
            if not services_to_check:
                # These filters are guaranteed to have no results. Bail
                # early and let caller handle it.
                break
        if skip_sql_filters:
            # User has manually specified to pass all these filters to datastore
            continue
        if f[0] not in SQL_SAFE_FIELDS:
            # Keys that contain JSON data are not currently supported
            continue
        filters_out.append(f)
    return (services_to_check, filters_out)
def requires_python_filtering(
    services: list,
    filters_in: list,
    filters_for_sql: list,
    options: dict
) -> bool:
    """
    There are situations where we have to perform additional audit filtering
    in python via `filter_list`.
    1. Not all user specified filters could be converted directly into an SQL
    statement for auditbackend.query.
    2. We are selecting a subkey within a JSON object.
    3. Multiple services are being queried and pagination options are being used
    or a specific ordering is specified.
    """
    if filters_in != filters_for_sql:
        # We will need to do additional filtering after retrieval
        return True
    # NOTE(review): if a select entry is a [source, rename] *list* (a form
    # filter_list accepts), set() raises TypeError on the unhashable entry;
    # the isinstance(entry, tuple) check below suggests tuples are expected
    # here -- confirm with callers.
    if (to_investigate := set(options.get('select', [])) - set(SQL_SAFE_FIELDS)):
        # Field is being selected that may not be safe for SQL select
        for entry in to_investigate:
            # Selecting subkey in entry is not currently supported
            if '.' in entry or isinstance(entry, tuple):
                return True
    if len(services) > 1:
        # When we have more than one database being queried we
        # often need to pass the aggregated results to filter_list
        if options.get('offset') or options.get('limit'):
            # We need to do pagination on total results.
            return True
        if options.get('order_by'):
            return True
    return False
# Map of service name -> sqlalchemy Table for that service's audit database
AUDIT_TABLES = {svc[0]: generate_audit_table(*svc) for svc in AUDITED_SERVICES}
| 6,370 | Python | .py | 157 | 32.808917 | 95 | 0.651061 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,955 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/__init__.py | async def setup(middleware):
try:
# Set up connections to the auditing databases
await middleware.call("auditbackend.setup")
except Exception:
middleware.logger.error("Failed to set up auditing backend.", exc_info=True)
if await middleware.call("keyvalue.get", "run_migration", False):
# If this is an upgrade then free up space used by refreservation on
# deactivated boot environments
try:
await middleware.call("audit.setup")
except Exception:
middleware.logger.error("Failed to perform setup tasks for auditing.", exc_info=True)
| 625 | Python | .py | 13 | 40 | 97 | 0.686275 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,956 | audit.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/audit.py | import asyncio
import csv
import errno
import json
import middlewared.sqlalchemy as sa
import os
import shutil
import time
import uuid
import yaml
from truenas_api_client import json as ejson
from .utils import (
AUDIT_DATASET_PATH,
AUDIT_LIFETIME,
AUDIT_DEFAULT_RESERVATION,
AUDIT_DEFAULT_QUOTA,
AUDIT_DEFAULT_FILL_CRITICAL,
AUDIT_DEFAULT_FILL_WARNING,
AUDIT_REPORTS_DIR,
AUDITED_SERVICES,
parse_query_filters,
requires_python_filtering,
)
from .schema.middleware import AUDIT_EVENT_MIDDLEWARE_JSON_SCHEMAS, AUDIT_EVENT_MIDDLEWARE_PARAM_SET
from .schema.smb import AUDIT_EVENT_SMB_JSON_SCHEMAS, AUDIT_EVENT_SMB_PARAM_SET
from .schema.sudo import AUDIT_EVENT_SUDO_JSON_SCHEMAS, AUDIT_EVENT_SUDO_PARAM_SET
from middlewared.plugins.zfs_.utils import TNUserProp
from middlewared.schema import (
accepts, Bool, Datetime, Dict, Int, List, Patch, Ref, returns, Str, UUID
)
from middlewared.service import filterable, filterable_returns, job, private, ConfigService
from middlewared.service_exception import CallError, ValidationErrors, ValidationError
from middlewared.utils import filter_list
from middlewared.utils.mount import getmntinfo
from middlewared.utils.functools_ import cache
from middlewared.validators import Range
# All audited service names, e.g. ['MIDDLEWARE', 'SMB', 'SUDO']
ALL_AUDITED = [svc[0] for svc in AUDITED_SERVICES]
# Services excluded from the default audit.query service list (presumably
# because they can generate very large numbers of entries -- see the
# `services` default of NON_BULK_AUDIT in audit.query).
BULK_AUDIT = ['SMB']
NON_BULK_AUDIT = [svc for svc in ALL_AUDITED if svc not in BULK_AUDIT]
# ZFS user property names holding the refquota warning/critical thresholds
QUOTA_WARN = TNUserProp.REFQUOTA_WARN.value
QUOTA_CRIT = TNUserProp.REFQUOTA_CRIT.value
_GIB = 1024 ** 3
class AuditModel(sa.Model):
    # Datastore row backing the audit configuration (see AuditService.update
    # for the meaning and units of each field).
    __tablename__ = 'system_audit'
    id = sa.Column(sa.Integer(), primary_key=True)
    # days to retain local audit messages and reports
    retention = sa.Column(sa.Integer(), default=AUDIT_LIFETIME)
    # GiB refreservation on the audit dataset (0 = disabled)
    reservation = sa.Column(sa.Integer(), default=AUDIT_DEFAULT_RESERVATION)
    # GiB refquota on the audit dataset (0 = disabled)
    quota = sa.Column(sa.Integer(), default=AUDIT_DEFAULT_QUOTA)
    # percent-full thresholds at which warning/critical alerts are raised
    quota_fill_warning = sa.Column(sa.Integer(), default=AUDIT_DEFAULT_FILL_WARNING)
    quota_fill_critical = sa.Column(sa.Integer(), default=AUDIT_DEFAULT_FILL_CRITICAL)
class AuditService(ConfigService):
class Config:
datastore = 'system.audit'
cli_namespace = 'system.audit'
datastore_extend = 'audit.extend'
role_prefix = 'SYSTEM_AUDIT'
ENTRY = Patch(
'system_audit_update', 'system_audit_config',
('add', Int('available')),
('add', Dict(
'space',
Int('used'),
Int('used_by_snapshots'),
Int('available'),
)),
('add', Bool('remote_logging_enabled')),
('add', List('enabled_services'))
)
@private
@cache
    def audit_dataset_name(self):
        """Return (and cache) the name of the ZFS dataset mounted at AUDIT_DATASET_PATH."""
        audit_dev = os.stat(AUDIT_DATASET_PATH).st_dev
        return getmntinfo(audit_dev)[audit_dev]['mount_source']
@private
    def get_audit_dataset(self):
        """
        Return the zfs.dataset.query entry for the audit dataset, with the
        TrueNAS quota user properties promoted to top-level integer keys
        (falling back to their defaults when unset or unparseable).
        """
        ds_name = self.audit_dataset_name()
        ds = self.middleware.call_sync(
            'zfs.dataset.query',
            [['id', '=', ds_name]],
            {'extra': {'retrieve_children': False}, 'get': True}
        )
        for k, default in TNUserProp.quotas():
            try:
                ds[k] = int(ds['properties'][k]["rawvalue"])
            except (KeyError, ValueError):
                ds[k] = default
        return ds
@private
    def extend(self, data):
        """Add the computed read-only fields (space usage, remote-logging state, audited services) to the config row."""
        sys_adv = self.middleware.call_sync('system.advanced.config')
        data['remote_logging_enabled'] = bool(sys_adv['syslogserver']) and sys_adv['syslog_audit']
        ds_info = self.get_audit_dataset()
        # NOTE(review): used_by_dataset / used_by_reservation are added below
        # but are not declared in the ENTRY 'space' schema -- confirm.
        data['space'] = {'used': None, 'used_by_snapshots': None, 'available': None}
        data['space']['used'] = ds_info['properties']['used']['parsed']
        data['space']['used_by_dataset'] = ds_info['properties']['usedbydataset']['parsed']
        data['space']['used_by_reservation'] = ds_info['properties']['usedbyrefreservation']['parsed']
        data['space']['used_by_snapshots'] = ds_info['properties']['usedbysnapshots']['parsed']
        data['space']['available'] = ds_info['properties']['available']['parsed']
        # Only the SMB list is populated here (per-share audit enablement);
        # MIDDLEWARE and SUDO lists are currently left empty.
        data['enabled_services'] = {'MIDDLEWARE': [], 'SMB': [], 'SUDO': []}
        audited_smb_shares = self.middleware.call_sync(
            'sharing.smb.query', [['audit.enable', '=', True]], {'select': ['name', 'audit']}
        )
        for share in audited_smb_shares:
            data['enabled_services']['SMB'].append(share['name'])
        return data
@private
async def compress(self, data):
for key in ['space', 'enabled_services', 'remote_logging_enabled']:
data.pop(key, None)
return data
@accepts(Dict(
'audit_query',
List('services', items=[Str('db_name', enum=ALL_AUDITED)], default=NON_BULK_AUDIT),
Ref('query-filters'),
Ref('query-options'),
Bool('remote_controller', default=False),
register=True
))
@filterable_returns(Dict(
'audit_entry',
UUID('audit_id'),
Int('message_timestamp'),
Datetime('timestamp'),
Str('address'),
Str('username'),
UUID('session'),
Str('service', enum=ALL_AUDITED),
Dict('service_data', additional_attrs=True, null=True),
Str('event'),
Dict('event_data', additional_attrs=True, null=True),
Bool('success')
))
    async def query(self, data):
        """
        Query contents of audit databases specified by `services`.
        If the query-option `force_sql_filters` is true, then the query will be
        converted into a more efficient form for better performance. This will
        not be possible if filters use keys within `svc_data` and `event_data`.
        HA systems may direct the query to the 'remote' controller by
        including 'remote_controller=True'. The default is the 'current' controller.
        Each audit entry contains the following keys:
        `audit_id` - GUID uniquely identifying this specific audit event.
        `message_timestamp` - Unix timestamp for when the audit event was
        written to the auditing database.
        `timestamp` - converted ISO-8601 timestamp from application recording
        when event occurred.
        `address` - IP address of client performing action that generated the
        audit message.
        `username` - Username used by client performing action.
        `session` - GUID uniquely identifying the client session.
        `services` - Name of the service that generated the message. This will
        be one of the names specified in `services`.
        `service_data` - JSON object containing variable data depending on the
        particular service. See TrueNAS auditing documentation for the service
        in question.
        `event` - Name of the event type that generated the audit record. Each
        service has its own unique event identifiers.
        `event_data` - JSON object containing variable data depending on the
        particular event type. See TrueNAS auditing documentation for the
        service in question.
        `success` - boolean value indicating whether the action generating the
        event message succeeded.
        """
        verrors = ValidationErrors()
        # If HA, handle the possibility of remote controller requests
        if await self.middleware.call('failover.licensed') and data['remote_controller']:
            data.pop('remote_controller')
            try:
                audit_query = await self.middleware.call(
                    'failover.call_remote',
                    'audit.query',
                    [data],
                    {'timeout': 2, 'connect_timeout': 2}
                )
                return audit_query
            except CallError as e:
                # Distinguish transient connectivity problems from other errors
                if e.errno in [errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET, errno.EHOSTDOWN,
                               errno.ETIMEDOUT, CallError.EALERTCHECKERUNAVAILABLE]:
                    raise ValidationError(
                        'audit.query.remote_controller',
                        'Temporarily failed to communicate to remote controller'
                    )
                raise ValidationError(
                    'audit.query.remote_controller',
                    'Failed to query audit logs of remote controller'
                )
            except Exception:
                self.logger.exception('Unexpected failure querying remote node for audit entries')
                raise
        sql_filters = data['query-options']['force_sql_filters']
        # Validate requested select columns against the union of known
        # audit event parameters across all services.
        if (select := data['query-options'].get('select')):
            for idx, entry in enumerate(select):
                if isinstance(entry, list):
                    entry = entry[0]
                if entry not in (AUDIT_EVENT_MIDDLEWARE_PARAM_SET | AUDIT_EVENT_SMB_PARAM_SET | AUDIT_EVENT_SUDO_PARAM_SET):
                    verrors.add(
                        f'audit.query.query-options.select.{idx}',
                        f'{entry}: column does not exist'
                    )
        services_to_check, filters = parse_query_filters(
            data['services'], data['query-filters'], sql_filters
        )
        if not services_to_check:
            verrors.add(
                'audit.query.query-filters',
                'The combination of filters and specified services would result '
                'in no databases being queried.'
            )
        verrors.check()
        if sql_filters:
            filters = data['query-filters']
            options = data['query-options']
        else:
            # Check whether we can pass to SQL backend directly
            if requires_python_filtering(services_to_check, data['query-filters'], filters, data['query-options']):
                # Fetch everything from the backend; filter_list applies
                # filters/pagination to the aggregated results below.
                options = {}
            else:
                options = data['query-options']
                # set sql_filters so that we don't pass through filter_list
                sql_filters = True
        # `results` is an int for count queries, a list of entries otherwise
        if options.get('count'):
            results = 0
        else:
            results = []
        # `services_to_check` is a set and so ordering isn't guaranteed;
        # however, strict ordering when multiple databases are queried is
        # a requirement for pagination and consistent results.
        for op in await asyncio.gather(*[
            self.middleware.call('auditbackend.query', svc, filters, options)
            for svc in ALL_AUDITED if svc in services_to_check
        ]):
            results += op
        if sql_filters:
            return results
        return filter_list(results, data['query-filters'], data['query-options'])
@accepts(
Patch(
'audit_query', 'audit_export',
('add', Str('export_format', enum=['CSV', 'JSON', 'YAML'], default='JSON')),
),
roles=['SYSTEM_AUDIT_READ'],
audit='Export Audit Data'
)
@returns(Str('audit_file_path'))
@job()
def export(self, job, data):
"""
Generate an audit report based on the specified `query-filters` and
`query-options` for the specified `services` in the specified `export_format`.
Supported export_formats are CSV, JSON, and YAML. The endpoint returns a
local filesystem path where the resulting audit report is located.
"""
if data['query-options'].get('count') is True:
raise CallError('Raw row count may not be exported', errno.EINVAL)
if data['query-options'].get('get') is True:
raise CallError(
'Use of "get" query-option is not supported for export',
errno.EINVAL
)
export_format = data.pop('export_format')
job.set_progress(0, f'Quering data for {export_format} audit report')
if not (res := self.middleware.call_sync('audit.query', data)):
raise CallError('No entries were returned by query.', errno.ENOENT)
if job.credentials:
username = job.credentials.user['username']
else:
username = 'root'
target_dir = os.path.join(AUDIT_REPORTS_DIR, username)
os.makedirs(target_dir, mode=0o700, exist_ok=True)
filename = f'{uuid.uuid4()}.{export_format.lower()}'
destination = os.path.join(target_dir, filename)
with open(destination, 'w') as f:
job.set_progress(50, f'Writing audit report to {destination}.')
match export_format:
case 'CSV':
fieldnames = res[0].keys()
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for entry in res:
if entry.get('service_data'):
entry['service_data'] = ejson.dumps(entry['service_data'])
if entry.get('event_data'):
entry['event_data'] = ejson.dumps(entry['event_data'])
writer.writerow(entry)
case 'JSON':
ejson.dump(res, f, indent=4)
case 'YAML':
yaml.dump(res, f)
job.set_progress(100, f'Audit report completed and available at {destination}')
return os.path.join(target_dir, destination)
@accepts(
Dict(
'audit_download',
Str('report_name', required=True),
),
roles=['SYSTEM_AUDIT_READ'],
audit='Download Audit Data',
)
@returns()
@job(pipes=["output"])
def download_report(self, job, data):
"""
Download the audit report with the specified name from the server.
Note that users will only be able to download reports that they personally
generated.
"""
if job.credentials:
username = job.credentials.user['username']
else:
username = 'root'
target = os.path.join(AUDIT_REPORTS_DIR, username, data['report_name'])
if not os.path.exists(target):
raise CallError(
f'{target}: audit report does not exist in the report directory of '
f'user ({username}).'
)
if not os.path.isfile(target):
raise CallError(f'{target}: unexpected file type.')
with open(target, 'rb') as f:
shutil.copyfileobj(f, job.pipes.output.w)
@private
    def __process_reports_entry(self, entry, cutoff):
        """Unlink report file `entry` (an os.scandir entry) if its mtime is at or before `cutoff`; log and skip anything unexpected."""
        if not entry.is_file():
            self.logger.warning(
                '%s: unexpected item in audit reports directory',
                entry.name
            )
            return
        # Reports are only ever written with these extensions (see export())
        if not entry.name.endswith(('.csv', '.json', '.yaml')):
            self.logger.warning(
                '%s: unexpected file type in audit reports directory',
                entry.name
            )
            return
        if entry.stat().st_mtime > cutoff:
            # Still within the retention window
            return
        try:
            os.unlink(entry.path)
        except Exception:
            self.logger.error(
                '%s: failed to remove file for audit reports directory.',
                entry.name, exc_info=True
            )
@private
    def cleanup_reports(self):
        """
        Remove old audit reports. Precision is not of high priority. In most
        circumstances users will download the report within a few minutes.
        """
        retention = self.middleware.call_sync('audit.config')['retention']
        cutoff = int(time.time()) - (retention * 86400)
        try:
            with os.scandir(AUDIT_REPORTS_DIR) as it:
                for entry in it:
                    # Top level contains one subdirectory per user
                    if not entry.is_dir():
                        continue
                    with os.scandir(entry.path) as subdir:
                        for subentry in subdir:
                            self.__process_reports_entry(subentry, cutoff)
        except FileNotFoundError:
            # Reports directory is missing entirely; recreate for future exports
            os.mkdir(AUDIT_REPORTS_DIR, 0o700)
@private
    async def validate_local_storage(self, new, old, verrors):
        """Validate the proposed quota against current dataset usage and the configured reservation, adding errors to `verrors`."""
        # A quota of `0` == `disable`
        if new['quota'] and (old['quota'] != new['quota']):
            new_volsize = new['quota'] * _GIB
            used = new['space']['used_by_dataset'] + new['space']['used_by_snapshots']
            # Reject a quota that would immediately trip the warning threshold
            if used / new_volsize > new['quota_fill_warning'] / 100:
                verrors.add(
                    'audit_update.quota',
                    'Specified quota would result in the percentage used of the '
                    'audit dataset to exceed the maximum permitted by the configured '
                    'quota_fill_warning.'
                )
            # NOTE(review): these checks only run when the quota itself changes;
            # raising the reservation alone bypasses the quota >= reservation
            # check -- confirm this is intended.
            if new['quota'] < new['reservation']:
                verrors.add(
                    'audit_update.quota',
                    'Quota on auditing dataset must be greater than or equal to '
                    'the space reservation for the dataset.'
                )
@private
    async def update_audit_dataset(self, new):
        """Apply quota / reservation / fill-threshold settings from `new` to the ZFS audit dataset, writing only properties that changed."""
        ds = await self.middleware.call('audit.get_audit_dataset')
        ds_props = ds['properties']
        # unset ZFS properties parse as None -> treat as 0 (disabled)
        old_reservation = ds_props['refreservation']['parsed'] or 0
        old_quota = ds_props['refquota']['parsed'] or 0
        old_warn = int(ds_props.get(QUOTA_WARN, {}).get('rawvalue', '0'))
        old_crit = int(ds_props.get(QUOTA_CRIT, {}).get('rawvalue', '0'))
        payload = {}
        # Using floor division for conversion from bytes to GiB
        if new['quota'] != old_quota // _GIB:
            quota_val = "none" if new['quota'] == 0 else f'{new["quota"]}G'
            # Using refquota gives better fidelity with dataset settings
            payload['refquota'] = {'parsed': quota_val}
        if new['reservation'] != old_reservation // _GIB:
            reservation_val = "none" if new['reservation'] == 0 else f'{new["reservation"]}G'
            payload['refreservation'] = {'parsed': reservation_val}
        if new["quota_fill_warning"] != old_warn:
            payload[QUOTA_WARN] = {'parsed': str(new['quota_fill_warning'])}
        if new["quota_fill_critical"] != old_crit:
            payload[QUOTA_CRIT] = {'parsed': str(new['quota_fill_critical'])}
        if not payload:
            # Nothing changed -- avoid a no-op zfs.dataset.update call
            return
        await self.middleware.call(
            'zfs.dataset.update', ds['id'], {'properties': payload}
        )
@accepts(
Dict(
'system_audit_update',
Int('retention', validators=[Range(1, 30)]),
Int('reservation', validators=[Range(0, 100)]),
Int('quota', validators=[Range(0, 100)]),
Int('quota_fill_warning', validators=[Range(5, 80)]),
Int('quota_fill_critical', validators=[Range(50, 95)]),
register=True
),
audit='Update Audit Configuration',
)
    async def update(self, data):
        """
        Update default audit settings.
        The following fields may be modified:
        `retention` - number of days to retain local audit messages.
        `reservation` - size in GiB of refreservation to set on ZFS dataset
        where the audit databases are stored. The refreservation specifies the
        minimum amount of space guaranteed to the dataset, and counts against
        the space available for other datasets in the zpool where the audit
        dataset is located.
        `quota` - size in GiB of the maximum amount of space that may be
        consumed by the dataset where the audit dabases are stored.
        `quota_fill_warning` - percentage used of dataset quota at which to
        generate a warning alert.
        `quota_fill_critical` - percentage used of dataset quota at which to
        generate a critical alert.
        The following fields contain read-only data and are returned in calls
        to `audit.config` and `audit.update`:
        `space` - ZFS dataset properties relating space used and available for
        the dataset where the audit databases are written.
        `remote_logging_enabled` - Boolean indicating whether logging to a
        remote syslog server is enabled on TrueNAS and if audit logs are
        included in what is sent remotely.
        `enabled_services` - JSON object with key denoting service, and value
        containing a JSON array of what aspects of this service are being
        audited. In the case of the SMB audit, the list contains share names
        of shares for which auditing is enabled.
        """
        old = await self.config()
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        await self.validate_local_storage(new, old, verrors)
        verrors.check()
        # Apply the ZFS-side changes before persisting the datastore row
        await self.update_audit_dataset(new)
        # compress() strips the computed read-only keys added by extend()
        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore, old['id'], new)
        return await self.config()
@private
    async def setup(self):
        """
        This method should only be called once per upgrade to clean up any stale
        refreservations from old boot environments and to apply the audit dataset
        configuration to the current boot environment.
        """
        try:
            os.mkdir(AUDIT_REPORTS_DIR, 0o700)
        except FileExistsError:
            # Directory already present -- just ensure permissions are correct
            os.chmod(AUDIT_REPORTS_DIR, 0o700)
        cur = await self.middleware.call('audit.get_audit_dataset')
        parent = os.path.dirname(cur['id'])
        # Explicitly look up pool name. If somehow audit dataset ends up being
        # on a pool that isn't the boot-pool, we don't want to recursively
        # remove refreservations on it.
        boot_pool = await self.middleware.call('boot.pool_name')
        # Get dataset names of any dataset on boot pool that isn't on the current
        # activated boot environment.
        to_remove = await self.middleware.call('zfs.dataset.query', [
            ['id', '!=', cur['id']],
            ['id', '!^', f'{parent}/'],
            ['pool', '=', boot_pool],
            ['properties.refreservation.parsed', '!=', None]
        ], {'select': ['id']})
        if to_remove:
            self.logger.debug(
                'Removing refreservations from the following datasets: %s',
                ', '.join([ds['id'] for ds in to_remove])
            )
            payload = {'refreservation': {'parsed': None}}
            for ds in to_remove:
                try:
                    await self.middleware.call(
                        'zfs.dataset.update', ds['id'], {'properties': payload}
                    )
                except Exception:
                    # Best-effort: log and continue with remaining datasets
                    self.logger.error(
                        '%s: failed to remove refreservation from dataset. Manual '
                        'cleanup may be required', ds['id'], exc_info=True
                    )
        # Dismiss any existing AuditSetup one-shot alerts
        await self.middleware.call('alert.oneshot_delete', 'AuditSetup', None)
        audit_config = await self.middleware.call('audit.config')
        try:
            await self.middleware.call('audit.update_audit_dataset', audit_config)
        except Exception:
            # Surface the failure via a one-shot alert rather than raising
            await self.middleware.call('alert.oneshot_create', 'AuditSetup', None)
            self.logger.error('Failed to apply auditing dataset configuration.', exc_info=True)
@private
@filterable
async def json_schemas(self, filters, options):
return filter_list(AUDIT_EVENT_MIDDLEWARE_JSON_SCHEMAS + AUDIT_EVENT_SMB_JSON_SCHEMAS + AUDIT_EVENT_SUDO_JSON_SCHEMAS, filters, options)
| 23,400 | Python | .py | 510 | 35.217647 | 144 | 0.603571 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,957 | sudo.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/schema/sudo.py | from middlewared.schema import (
Bool,
Dict,
Int,
IPAddr,
Str,
UUID,
)
from .common import (
AuditEnum,
AuditEventParam,
AuditSchema,
AUDIT_VERS,
audit_schema_from_base,
convert_schema_to_set,
)
class AuditSudoEventType(AuditEnum):
    """Possible `event` values for SUDO service audit entries."""
    ACCEPT = 'ACCEPT'
    REJECT = 'REJECT'
# `event_data` payload schemas for the sudo audit events. Both currently
# carry only the audit message version.
AUDIT_EVENT_DATA_SUDO_ACCEPT = Dict(
    str(AuditEventParam.EVENT_DATA),
    AUDIT_VERS,
)
AUDIT_EVENT_DATA_SUDO_REJECT = Dict(
    str(AuditEventParam.EVENT_DATA),
    AUDIT_VERS,
)
# Full event schemas: a shared base with the common audit entry fields,
# extended per event type with its `event` discriminator and `event_data`.
AUDIT_EVENT_SUDO_SCHEMAS = []
AUDIT_EVENT_SUDO_BASE_SCHEMA = AuditSchema(
    'audit_entry_sudo',
    UUID(AuditEventParam.AUDIT_ID.value),
    Int(AuditEventParam.MESSAGE_TIMESTAMP.value),
    Dict(AuditEventParam.TIMESTAMP.value),
    IPAddr(AuditEventParam.ADDRESS.value),
    Str(AuditEventParam.USERNAME.value),
    UUID(AuditEventParam.SESSION.value),
    Str(AuditEventParam.SERVICE.value, enum=['SUDO']),
    Bool('success'),
)
AUDIT_EVENT_SUDO_SCHEMAS.append(audit_schema_from_base(
    AUDIT_EVENT_SUDO_BASE_SCHEMA,
    'audit_entry_sudo_accept',
    Str(AuditEventParam.EVENT.value, enum=[AuditSudoEventType.ACCEPT.name]),
    AUDIT_EVENT_DATA_SUDO_ACCEPT,
))
AUDIT_EVENT_SUDO_SCHEMAS.append(audit_schema_from_base(
    AUDIT_EVENT_SUDO_BASE_SCHEMA,
    'audit_entry_sudo_reject',
    Str(AuditEventParam.EVENT.value, enum=[AuditSudoEventType.REJECT.name]),
    AUDIT_EVENT_DATA_SUDO_REJECT,
))
# JSON-schema representations of the above plus the flat set of dotted
# parameter names derived from them.
AUDIT_EVENT_SUDO_JSON_SCHEMAS = [
    schema.to_json_schema() for schema in AUDIT_EVENT_SUDO_SCHEMAS
]
AUDIT_EVENT_SUDO_PARAM_SET = convert_schema_to_set(AUDIT_EVENT_SUDO_JSON_SCHEMAS)
| 1,617 | Python | .py | 55 | 25.454545 | 81 | 0.736399 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,958 | smb.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/schema/smb.py | from .common import (
AuditEnum,
AuditEventParam,
AuditSchema,
AUDIT_VERS,
AUDIT_RESULT_NTSTATUS,
AUDIT_RESULT_UNIX,
AuditFileType,
AUDIT_FILE,
AUDIT_FILE_HANDLE,
audit_schema_from_base,
AUDIT_UNIX_TOKEN,
convert_schema_to_set
)
from middlewared.schema import (
Bool,
Dict,
Int,
IPAddr,
Str,
UUID
)
class AuditSmbCreateDisp(AuditEnum):
    """
    This enum contains all possible values of the SMB2 CREATE CreateDisposition.
    """
    SUPERSEDE = 'SUPERSEDE'
    OVERWRITE_IF = 'OVERWRITE_IF'
    OPEN = 'OPEN'
    CREATE = 'CREATE'
    OPEN_IF = 'OPEN_IF'
    UNKNOWN = 'UNKNOWN'  # NOTE(review): presumably a fallback when the disposition cannot be parsed — confirm
class AuditSmbEventType(AuditEnum):
    """
    This enum contains all possible SMB audit events. Values correspond with
    `event` written to auditing SQLite database. Each member has a matching
    AUDIT_EVENT_DATA_SMB_* schema defined in this module.
    """
    AUTHENTICATION = 'AUTHENTICATION'
    CONNECT = 'CONNECT'
    DISCONNECT = 'DISCONNECT'
    CREATE = 'CREATE'
    CLOSE = 'CLOSE'
    READ = 'READ'
    WRITE = 'WRITE'
    OFFLOAD_READ = 'OFFLOAD_READ'
    OFFLOAD_WRITE = 'OFFLOAD_WRITE'
    RENAME = 'RENAME'
    UNLINK = 'UNLINK'
    SET_ACL = 'SET_ACL'
    SET_ATTR = 'SET_ATTR'
    SET_QUOTA = 'SET_QUOTA'
    FSCTL = 'FSCTL'
class AuditSetattrType(AuditEnum):
    """Sub-type recorded in the `attr_type` field of SET_ATTR `event_data`."""
    DOSMODE = 'DOSMODE'
    TIMESTAMP = 'TIMESTAMP'
"""
Below are schema class instances for `event_data` for SMB audit events.
"""
AUDIT_EVENT_DATA_SMB_AUTHENTICATION = Dict(
str(AuditEventParam.EVENT_DATA),
Str('logonId'),
Int('logonType'),
Str('localAddress'),
Str('remoteAddress'),
Str('serviceDescription'),
Str('authDescription'),
Str('clientDomain'),
Str('clientAccount'),
Str('workstation'),
Str('becameAccount'),
Str('becameDomain'),
Str('becameSid'),
Str('mappedAccount'),
Str('mappedDomain'),
Str('netlogonComputer'),
Str('netlogonTrustAccount'),
Str('netlogonNegotiateFlags'),
Str('netlogonSecureChannelType'),
Str('netlogonTrustAccountSid'),
Str('passwordType'),
AUDIT_RESULT_NTSTATUS,
AUDIT_VERS,
)
AUDIT_EVENT_DATA_SMB_CONNECT = Dict(
str(AuditEventParam.EVENT_DATA),
Str('host'),
AUDIT_UNIX_TOKEN,
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_DISCONNECT = Dict(
str(AuditEventParam.EVENT_DATA),
Str('host'),
AUDIT_UNIX_TOKEN,
Dict(
'operations',
Str('create'),
Str('close'),
Str('read'),
Str('write')
),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_CREATE = Dict(
str(AuditEventParam.EVENT_DATA),
Dict(
'parameters',
Str('DesiredAccess'),
Str('FileAttributes'),
Str('ShareAccess'),
Str('CreateDisposition', enum=[x.name for x in AuditSmbCreateDisp]),
Str('CreateOptions')
),
Str('file_type', enum=[x.name for x in AuditFileType]),
AUDIT_FILE,
AUDIT_RESULT_NTSTATUS,
AUDIT_VERS,
)
AUDIT_EVENT_DATA_SMB_CLOSE = Dict(
str(AuditEventParam.EVENT_DATA),
Dict('file', AUDIT_FILE_HANDLE),
Dict(
'operations',
Str('read_cnt'),
Str('read_bytes'),
Str('write_cnt'),
Str('write_bytes')
),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_SET_ATTR = Dict(
str(AuditEventParam.EVENT_DATA),
Str('attr_type', enum=[x.name for x in AuditSetattrType]),
Str('dosmode'),
Dict('ts'),
Dict('file', AUDIT_FILE_HANDLE),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_RENAME = Dict(
str(AuditEventParam.EVENT_DATA),
Dict(
'src_file',
Str('file_type', enum=[x.name for x in AuditFileType]),
Str('path'),
Str('stream'),
Str('snap')
),
Dict(
'dst_file',
Str('path'),
Str('stream'),
Str('snap'),
),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_UNLINK = Dict(
str(AuditEventParam.EVENT_DATA),
AUDIT_FILE,
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_READ = Dict(
str(AuditEventParam.EVENT_DATA),
Dict('file', AUDIT_FILE_HANDLE),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_WRITE = Dict(
str(AuditEventParam.EVENT_DATA),
Dict('file', AUDIT_FILE_HANDLE),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_OFFLOAD_READ = Dict(
str(AuditEventParam.EVENT_DATA),
Dict('file', AUDIT_FILE_HANDLE),
AUDIT_RESULT_NTSTATUS,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_OFFLOAD_WRITE = Dict(
str(AuditEventParam.EVENT_DATA),
Dict('file', AUDIT_FILE_HANDLE),
AUDIT_RESULT_NTSTATUS,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_SET_ACL = Dict(
str(AuditEventParam.EVENT_DATA),
AUDIT_FILE,
Str('secinfo'),
Str('sd'),
AUDIT_RESULT_NTSTATUS,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_FSCTL = Dict(
str(AuditEventParam.EVENT_DATA),
Dict(
'function',
Str('raw'),
Str('parsed')
),
Dict('file', AUDIT_FILE_HANDLE),
AUDIT_RESULT_NTSTATUS,
AUDIT_VERS
)
AUDIT_EVENT_DATA_SMB_SET_QUOTA = Dict(
str(AuditEventParam.EVENT_DATA),
Dict(
'qt',
Str('type', enum=['USER', 'GROUP']),
Str('bsize'),
Str('soflimit'),
Str('hardlimit'),
Str('isoftlimit'),
Str('ihardlimit')
),
AUDIT_RESULT_UNIX,
AUDIT_VERS
)
"""
Below are schema classes for the full SMB audit events that are written to the
auditing database and returned in `audit.query` requests. We start with a generic
base instance and then extend a copy of the generalized event with event-specific
`event_data` defined above.
"""
AUDIT_EVENT_SMB_SCHEMAS = []
AUDIT_EVENT_SMB_BASE_SCHEMA = AuditSchema(
'audit_entry_smb',
UUID(AuditEventParam.AUDIT_ID.value),
Int(AuditEventParam.MESSAGE_TIMESTAMP.value),
Dict(AuditEventParam.TIMESTAMP.value),
IPAddr(AuditEventParam.ADDRESS.value),
Str(AuditEventParam.USERNAME.value),
UUID(AuditEventParam.SESSION.value),
Str(AuditEventParam.SERVICE.value, enum=['SMB']),
Dict(
AuditEventParam.SERVICE_DATA.value,
AUDIT_VERS,
Str('service'),
Str('session_id'),
Str('tcon_id')
),
Bool('success')
)
# Extend the base schema once per SMB event type, pairing each `event`
# discriminator with the `event_data` schema defined above. The schema name
# is derived from the event name (e.g. 'audit_entry_smb_set_attr'), matching
# the names the previous hand-written appends produced; order is preserved.
for _smb_event, _smb_event_data in (
    (AuditSmbEventType.AUTHENTICATION, AUDIT_EVENT_DATA_SMB_AUTHENTICATION),
    (AuditSmbEventType.CONNECT, AUDIT_EVENT_DATA_SMB_CONNECT),
    (AuditSmbEventType.DISCONNECT, AUDIT_EVENT_DATA_SMB_DISCONNECT),
    (AuditSmbEventType.CREATE, AUDIT_EVENT_DATA_SMB_CREATE),
    (AuditSmbEventType.CLOSE, AUDIT_EVENT_DATA_SMB_CLOSE),
    (AuditSmbEventType.SET_ATTR, AUDIT_EVENT_DATA_SMB_SET_ATTR),
    (AuditSmbEventType.RENAME, AUDIT_EVENT_DATA_SMB_RENAME),
    (AuditSmbEventType.UNLINK, AUDIT_EVENT_DATA_SMB_UNLINK),
    (AuditSmbEventType.READ, AUDIT_EVENT_DATA_SMB_READ),
    (AuditSmbEventType.WRITE, AUDIT_EVENT_DATA_SMB_WRITE),
    (AuditSmbEventType.OFFLOAD_READ, AUDIT_EVENT_DATA_SMB_OFFLOAD_READ),
    (AuditSmbEventType.OFFLOAD_WRITE, AUDIT_EVENT_DATA_SMB_OFFLOAD_WRITE),
    (AuditSmbEventType.SET_ACL, AUDIT_EVENT_DATA_SMB_SET_ACL),
    (AuditSmbEventType.FSCTL, AUDIT_EVENT_DATA_SMB_FSCTL),
    (AuditSmbEventType.SET_QUOTA, AUDIT_EVENT_DATA_SMB_SET_QUOTA),
):
    AUDIT_EVENT_SMB_SCHEMAS.append(audit_schema_from_base(
        AUDIT_EVENT_SMB_BASE_SCHEMA,
        f'audit_entry_smb_{_smb_event.name.lower()}',
        Str(AuditEventParam.EVENT.value, enum=[_smb_event.name]),
        _smb_event_data,
    ))
# JSON-schema representations of the SMB event schemas plus the flat set of
# dotted parameter names derived from them.
AUDIT_EVENT_SMB_JSON_SCHEMAS = [
    schema.to_json_schema() for schema in AUDIT_EVENT_SMB_SCHEMAS
]
AUDIT_EVENT_SMB_PARAM_SET = convert_schema_to_set(AUDIT_EVENT_SMB_JSON_SCHEMAS)
| 9,950 | Python | .py | 341 | 24.442815 | 83 | 0.691323 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,959 | middleware.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/schema/middleware.py | from middlewared.schema import (
Bool,
Dict,
Int,
IPAddr,
List,
Str,
UUID,
)
from .common import (
AuditEnum,
AuditEventParam,
AuditSchema,
AUDIT_VERS,
audit_schema_from_base,
convert_schema_to_set,
)
class AuditMiddlewareEventType(AuditEnum):
    """Possible `event` values for MIDDLEWARE service audit entries."""
    AUTHENTICATION = 'AUTHENTICATION'
    METHOD_CALL = 'METHOD_CALL'
# `event_data` payload schemas for the middleware audit events.
AUDIT_EVENT_DATA_MIDDLEWARE_AUTHENTICATION = Dict(
    str(AuditEventParam.EVENT_DATA),
    Dict('credentials',
         Str('type'),
         Dict('data', additional_attrs=True),
         null=True),
    Str('error', null=True),
    AUDIT_VERS,
)
AUDIT_EVENT_DATA_MIDDLEWARE_METHOD_CALL = Dict(
    str(AuditEventParam.EVENT_DATA),
    Str('method'),
    List('params'),
    Str('description', null=True),
    Bool('authenticated'),
    Bool('authorized'),
    AUDIT_VERS,
)
# Full event schemas: a shared base with the common audit entry fields,
# extended per event type with its `event` discriminator and `event_data`.
AUDIT_EVENT_MIDDLEWARE_SCHEMAS = []
AUDIT_EVENT_MIDDLEWARE_BASE_SCHEMA = AuditSchema(
    'audit_entry_middleware',
    UUID(AuditEventParam.AUDIT_ID.value),
    Int(AuditEventParam.MESSAGE_TIMESTAMP.value),
    Dict(AuditEventParam.TIMESTAMP.value),
    IPAddr(AuditEventParam.ADDRESS.value),
    Str(AuditEventParam.USERNAME.value),
    UUID(AuditEventParam.SESSION.value),
    Str(AuditEventParam.SERVICE.value, enum=['MIDDLEWARE']),
    Dict(
        AuditEventParam.SERVICE_DATA.value,
        AUDIT_VERS,
        Str('origin', null=True),
        Str('protocol', enum=['REST', 'WEBSOCKET']),
        Dict('credentials', null=True, additional_attrs=True),
    ),
    Bool('success'),
)
AUDIT_EVENT_MIDDLEWARE_SCHEMAS.append(audit_schema_from_base(
    AUDIT_EVENT_MIDDLEWARE_BASE_SCHEMA,
    'audit_entry_middleware_authentication',
    Str(AuditEventParam.EVENT.value, enum=[AuditMiddlewareEventType.AUTHENTICATION.name]),
    AUDIT_EVENT_DATA_MIDDLEWARE_AUTHENTICATION,
))
AUDIT_EVENT_MIDDLEWARE_SCHEMAS.append(audit_schema_from_base(
    AUDIT_EVENT_MIDDLEWARE_BASE_SCHEMA,
    'audit_entry_middleware_method_call',
    Str(AuditEventParam.EVENT.value, enum=[AuditMiddlewareEventType.METHOD_CALL.name]),
    AUDIT_EVENT_DATA_MIDDLEWARE_METHOD_CALL,
))
# JSON-schema representations of the above plus the flat set of dotted
# parameter names derived from them.
AUDIT_EVENT_MIDDLEWARE_JSON_SCHEMAS = [
    schema.to_json_schema() for schema in AUDIT_EVENT_MIDDLEWARE_SCHEMAS
]
AUDIT_EVENT_MIDDLEWARE_PARAM_SET = convert_schema_to_set(AUDIT_EVENT_MIDDLEWARE_JSON_SCHEMAS)
| 2,329 | Python | .py | 73 | 27.219178 | 93 | 0.721626 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,960 | common.py | truenas_middleware/src/middlewared/middlewared/plugins/audit/schema/common.py | import enum
from copy import deepcopy
from middlewared.schema import (
Dict,
Int,
List,
Str,
)
class AuditEnum(enum.Enum):
    """Base enum whose str() form is the member's value rather than its name."""
    def __str__(self):
        return str(self.value)
class AuditSchema(Dict):
    """
    Dict schema variant whose `extend` allows attributes to be added after
    construction, re-validating conditional defaults and strict-mode
    invariants against the merged attribute set.
    """
    def extend(self, *attrs, **kwargs):
        # Register the new attributes by name.
        for i in attrs:
            self.attrs[i.name] = i
        # Every conditional-default entry must reference existing attributes
        # and carry both 'filters' and 'attrs'.
        for k, v in self.conditional_defaults.items():
            if k not in self.attrs:
                raise ValueError(f'Specified attribute {k!r} not found.')
            for k_v in ('filters', 'attrs'):
                if k_v not in v:
                    raise ValueError(f'Conditional defaults must have {k_v} specified.')
            for attr in v['attrs']:
                if attr not in self.attrs:
                    raise ValueError(f'Specified attribute {attr} not found.')
        # In strict mode an attribute is either required without a default,
        # or optional with a default — never any other combination.
        if self.strict:
            for attr in self.attrs.values():
                if attr.required:
                    if attr.has_default:
                        raise ValueError(
                            f'Attribute {attr.name} is required and has default value at the same time, '
                            'this is forbidden in strict mode'
                        )
                else:
                    if not attr.has_default:
                        raise ValueError(
                            f'Attribute {attr.name} is not required and does not have default value, '
                            'this is forbidden in strict mode'
                        )
class AuditEventParam(AuditEnum):
    """Top-level field names shared by all audit entry schemas."""
    AUDIT_ID = 'audit_id'
    TIMESTAMP = 'timestamp'
    MESSAGE_TIMESTAMP = 'message_timestamp'
    ADDRESS = 'address'
    USERNAME = 'username'
    SESSION = 'session'
    SERVICE = 'service'
    SERVICE_DATA = 'service_data'
    EVENT = 'event'
    EVENT_DATA = 'event_data'
    SUCCESS = 'success'
class AuditFileHandleType(AuditEnum):
    """Identifier forms used for the `type` field of AUDIT_FILE_HANDLE."""
    DEV_INO = 'DEV_INO'
    UUID = 'UUID'
class AuditResultType(AuditEnum):
    """Encodings used for the `result` field of audit events."""
    UNIX = 'UNIX'
    NTSTATUS = 'NTSTATUS'
class AuditFileType(AuditEnum):
    """File types recorded in audit `file` entries."""
    BLOCK = 'BLOCK'
    CHARACTER = 'CHARACTER'
    FIFO = 'FIFO'
    REGULAR = 'REGULAR'
    DIRECTORY = 'DIRECTORY'
    SYMLINK = 'SYMLINK'
# Shared sub-schemas reused across service-specific audit event schemas.
# Audit message format version.
AUDIT_VERS = Dict(
    'vers',
    Int('major', required=True),
    Int('minor', required=True)
)
# Operation result expressed as an NTSTATUS code.
AUDIT_RESULT_NTSTATUS = Dict(
    'result',
    Str('type', enum=[AuditResultType.NTSTATUS.name]),
    Int('value_raw'),
    Str('value_parsed')
)
# Operation result expressed as a UNIX-style code.
AUDIT_RESULT_UNIX = Dict(
    'result',
    Str('type', enum=[AuditResultType.UNIX.name]),
    Int('value_raw'),
    Str('value_parsed')
)
# Opaque file handle identifier (see AuditFileHandleType for `type`).
AUDIT_FILE_HANDLE = Dict(
    'handle',
    Str('type', enum=[x.name for x in AuditFileHandleType]),
    Str('value')
)
# Full file description: type, name/stream and path/snapshot location.
AUDIT_FILE = Dict(
    'file',
    Str('type', enum=[x.name for x in AuditFileType]),
    Str('name'),
    Str('stream'),
    Str('path'),
    Str('snap')
)
# UNIX credential token: uid, gid and supplementary groups.
AUDIT_UNIX_TOKEN = Dict(
    'unix_token',
    Int('uid'),
    Int('gid'),
    List('groups', items=[Int('group_id')]),
)
def audit_schema_from_base(schema, new_name, *args):
    """Return a renamed deep copy of `schema` extended with the given attributes."""
    derived = deepcopy(schema)
    derived.extend(*args)
    derived.name = new_name
    return derived
def convert_schema_to_set(schema_list):
    """
    Flatten a list of JSON schemas into the set of dotted property names.

    Every property contributes its `title`; nested object properties are
    joined with '.' (e.g. 'event_data.result.type').

    Fix/cleanup vs previous version: the recursive helper took unused
    `key`/`subkey` parameters and iterated `.items()` while only using the
    values — it now iterates `.values()` directly.
    """
    schema_set = set()

    def add_to_set(val, prefix):
        # Extend the dotted path with this property's title and record it.
        name = f'{prefix}.{val["title"]}' if prefix else val['title']
        schema_set.add(name)
        if val['type'] == 'object':
            for subval in val['properties'].values():
                add_to_set(subval, name)

    for entry in schema_list:
        for val in entry['properties'].values():
            add_to_set(val, '')
    return schema_set
24,961 | challenge.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/challenge.py | import josepy as jose
import json
from acme import messages
from middlewared.schema import accepts, Dict, Int, Str
from middlewared.service import private, Service
from .authenticators.factory import auth_factory
class DNSAuthenticatorService(Service):
    """
    ACME dns-01 challenge helpers: look up the authenticator configured for a
    domain and dispatch perform/cleanup of the validation TXT record to it.
    """
    class Config:
        namespace = 'acme.dns.authenticator'
    @accepts(
        Dict(
            'perform_challenge',
            Int('authenticator', required=True),
            Str('key', required=True, max_length=None),
            Str('domain', required=True),
            Str('challenge', required=True, max_length=None),
        )
    )
    @private
    def perform_challenge(self, data):
        # Ask the configured authenticator to publish the validation record.
        authenticator = self.get_authenticator(data['authenticator'])
        authenticator.perform(*self.get_validation_parameters(data['challenge'], data['domain'], data['key']))
    @private
    def cleanup_challenge(self, data):
        # Remove the validation record created by perform_challenge.
        authenticator = self.get_authenticator(data['authenticator'])
        authenticator.cleanup(*self.get_validation_parameters(data['challenge'], data['domain'], data['key']))
    @private
    def get_authenticator(self, authenticator):
        # Instantiate the registered authenticator class for this id with its
        # stored attributes.
        auth_details = self.middleware.call_sync('acme.dns.authenticator.get_instance', authenticator)
        return self.get_authenticator_internal(auth_details)(self.middleware, auth_details['attributes'])
    @private
    def get_authenticator_internal(self, auth_details):
        return auth_factory.authenticator(auth_details['authenticator'])
    @private
    def get_validation_parameters(self, challenge, domain, key):
        # Returns (domain, validation record name, validation record content)
        # from the serialized ACME challenge body and account JWK key.
        challenge = messages.ChallengeBody.from_json(json.loads(challenge))
        return (
            domain,
            challenge.validation_domain_name(domain),
            challenge.validation(jose.JWKRSA.fields_from_json(json.loads(key))),
        )
| 1,827 | Python | .py | 41 | 37.146341 | 110 | 0.695775 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,962 | acme_svc.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/acme_svc.py | import josepy as jose
import json
from acme import client, messages
from middlewared.service import Service
class ACMEService(Service):
    class Config:
        namespace = 'acme'
        private = True
    def get_acme_client_and_key(self, acme_directory_uri, tos=False):
        """
        Return an (acme ClientV2, account key) pair for the given directory,
        creating a new ACME registration first when none exists yet.
        """
        data = self.middleware.call_sync('acme.registration.query', [['directory', '=', acme_directory_uri]])
        if not data:
            data = self.middleware.call_sync(
                'acme.registration.create',
                {'tos': tos, 'acme_directory_uri': acme_directory_uri}
            )
        else:
            data = data[0]
        # Making key now
        key = jose.JWKRSA.fields_from_json(json.loads(data['body']['key']))
        key_dict = key.fields_to_partial_json()
        # Making registration resource now
        registration = messages.RegistrationResource.from_json({
            'uri': data['uri'],
            'terms_of_service': data['tos'],
            'body': {
                'contact': [data['body']['contact']],
                'status': data['body']['status'],
                'key': {
                    'e': key_dict['e'],
                    'kty': 'RSA',
                    'n': key_dict['n']
                }
            }
        })
        # Directory endpoints were stored at registration time.
        return client.ClientV2(
            messages.Directory({
                'newAccount': data['new_account_uri'],
                'newNonce': data['new_nonce_uri'],
                'newOrder': data['new_order_uri'],
                'revokeCert': data['revoke_cert_uri']
            }),
            client.ClientNetwork(key, account=registration)
        ), key
| 1,636 | Python | .py | 43 | 26.372093 | 109 | 0.522699 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,963 | issue_cert.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/issue_cert.py | import copy
import datetime
import errno
from acme import errors, messages
from middlewared.service import Service, ValidationErrors
from middlewared.service_exception import CallError
class ACMEService(Service):
    class Config:
        namespace = 'acme'
        private = True
    def issue_certificate(self, job, progress, data, csr_data):
        """
        How we would like to proceed with issuing an ACME cert is as follows:
        1) Decide domains which are involved
        2) Validate we have valid authenticators for domains involved
        3) Place Order
        4) Handle Authorizations
        5) Clean up challenge ( we should do this even if 3/4 fail to ensure there are no leftovers )
        """
        self.middleware.call_sync('network.general.will_perform_activity', 'acme')
        verrors = ValidationErrors()
        # TODO: Add ability to complete DNS validation challenge manually
        # Validate domain dns mapping for handling DNS challenges
        # Ensure that there is an authenticator for each domain in the CSR
        domains = self.middleware.call_sync('certificate.get_domain_names', csr_data['id'])
        dns_authenticator_ids = [o['id'] for o in self.middleware.call_sync('acme.dns.authenticator.query')]
        dns_mapping_copy = copy.deepcopy(data['dns_mapping'])
        # We will normalise domain authenticators to ensure consistency between SAN "DNS:*" prefixes
        for domain in data['dns_mapping']:
            if ':' in domain and domain.split(':', 1)[-1] not in dns_mapping_copy:
                dns_mapping_copy[domain.split(':', 1)[-1]] = dns_mapping_copy[domain]
            elif ':' not in domain:
                normalised_san = ':'.join(self.middleware.call_sync('cryptokey.normalize_san', [domain])[0])
                if normalised_san not in dns_mapping_copy:
                    dns_mapping_copy[normalised_san] = dns_mapping_copy[domain]
        # Every CSR domain needs a valid authenticator; reject malformed names.
        for domain in domains:
            if domain not in dns_mapping_copy:
                verrors.add(
                    'acme_create.dns_mapping',
                    f'Please provide DNS authenticator id for {domain}'
                )
            elif dns_mapping_copy[domain] not in dns_authenticator_ids:
                verrors.add(
                    'acme_create.dns_mapping',
                    f'Provided DNS Authenticator id for {domain} does not exist'
                )
            if domain.endswith('.'):
                verrors.add(
                    'acme_create.dns_mapping',
                    f'Domain {domain} name cannot end with a period'
                )
            if '*' in domain and not domain.split(':', 1)[-1].startswith('*.'):
                verrors.add(
                    'acme_create.dns_mapping',
                    'Wildcards must be at the start of domain name followed by a period'
                )
        # Conversely, reject mappings for domains that are not in the CSR.
        for domain in data['dns_mapping']:
            if domain not in domains:
                verrors.add(
                    'acme_create.dns_mapping',
                    f'{domain} not specified in the CSR'
                )
        verrors.check()
        acme_client, key = self.middleware.call_sync(
            'acme.get_acme_client_and_key', data['acme_directory_uri'], data['tos']
        )
        try:
            # perform operations and have a cert issued
            order = acme_client.new_order(csr_data['CSR'])
        except messages.Error as e:
            raise CallError(f'Failed to issue a new order for Certificate : {e}')
        else:
            job.set_progress(progress, 'New order for certificate issuance placed')
            # Strip SAN prefixes ("DNS:") from mapping keys for lookup by
            # the identifier values the ACME server reports back.
            dns_mapping = {}
            for d, v in map(lambda v: (v[0].split(':', 1)[-1], v[1]), dns_mapping_copy.items()):
                dns_mapping[d] = v
                if '*' in d:
                    # Boulder returns us domain name stripped of wildcard character,
                    # hence we account for that in the mapping we keep
                    dns_mapping[d.replace('*.', '')] = v
            try:
                self.handle_authorizations(job, progress, order, dns_mapping, acme_client, key)
                try:
                    # Polling for a maximum of 10 minutes while trying to finalize order
                    # Should we try .poll() instead first ? research please
                    return acme_client.poll_and_finalize(
                        order, datetime.datetime.now() + datetime.timedelta(minutes=10)
                    )
                except errors.TimeoutError:
                    raise CallError('Certificate request for final order timed out')
                except errors.ValidationError as e:
                    # Collect a readable summary of every unfulfilled challenge.
                    msg = ''
                    for authzr in e.failed_authzrs:
                        msg += f'\nAuthorization for identifier {authzr.body.identifier} failed.'
                        msg += '\nHere are the challenges that were not fulfilled:'
                        for challenge in authzr.body.challenges:
                            msg += \
                                f'\nChallenge Type: {challenge.chall.typ}' \
                                f'\n\nError information:' \
                                f'\n- Type: {challenge.error.typ if challenge.error else "No error type found"}' \
                                f'\n- Details: {challenge.error.detail if challenge.error else "No error details were found"}\n\n'
                    raise CallError(f'Certificate request for final order failed: {msg}')
            finally:
                # Always remove validation records, even when the order failed.
                self.cleanup_authorizations(order, dns_mapping, key)
    def handle_authorizations(self, job, progress, order, dns_mapping, acme_client, key):
        # When this is called, it should be ensured by the function calling this function that for all authorization
        # resource, a domain name dns mapping is available
        # For multiple domain providers in domain names, I think we should ask the end user to specify which domain
        # provider is used for which domain so authorizations can be handled gracefully
        max_progress = (progress * 4) - progress - (progress * 4 / 5)
        for authorization_resource in order.authorizations:
            status = False
            domain = authorization_resource.body.identifier.value
            try:
                progress += (max_progress / len(order.authorizations))
                challenge = self.get_challenge(authorization_resource.body.challenges)
                if not challenge:
                    raise CallError(f'DNS Challenge not found for domain {domain}', errno=errno.ENOENT)
                # Publish the validation TXT record via the mapped authenticator.
                self.middleware.call_sync(
                    'acme.dns.authenticator.perform_challenge',
                    self.get_acme_payload(dns_mapping, challenge, domain, key)
                )
                try:
                    status = acme_client.answer_challenge(challenge, challenge.response(key))
                except errors.UnexpectedUpdate as e:
                    raise CallError(f'Error answering challenge for {domain} : {e}')
            finally:
                job.set_progress(progress, f'DNS challenge {"completed" if status else "failed"} for {domain}')
    def get_challenge(self, challenges):
        # Return the dns-01 challenge from the list, or None when absent.
        challenge = None
        for chg in challenges:
            if chg.typ == 'dns-01':
                challenge = chg
        return challenge
    def get_acme_payload(self, dns_mapping, challenge, domain, key):
        # Serialized payload for acme.dns.authenticator.perform/cleanup_challenge.
        return {
            'authenticator': dns_mapping[domain],
            'challenge': challenge.json_dumps(),
            'domain': domain,
            'key': key.json_dumps()
        }
    def cleanup_authorizations(self, order, dns_mapping, key):
        # Best-effort removal of validation records; failures are logged only.
        for authorization_resource in order.authorizations:
            domain = authorization_resource.body.identifier.value
            challenge = self.get_challenge(authorization_resource.body.challenges)
            if not challenge:
                continue
            try:
                self.middleware.call_sync(
                    'acme.dns.authenticator.cleanup_challenge',
                    self.get_acme_payload(dns_mapping, challenge, domain, key)
                )
            except Exception:
                self.logger.error('Failed to cleanup challenge for %r domain', domain, exc_info=True)
| 8,347 | Python | .py | 154 | 39.642857 | 130 | 0.582446 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,964 | schema.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/schema.py | from middlewared.api import api_method
from middlewared.api.current import DNSAuthenticatorSchemasArgs, DNSAuthenticatorSchemasResult
from middlewared.service import private, Service
from .authenticators.factory import auth_factory
class DNSAuthenticatorService(Service):
    class Config:
        namespace = 'acme.dns.authenticator'
    def __init__(self, *args, **kwargs):
        super(DNSAuthenticatorService, self).__init__(*args, **kwargs)
        # Cache per-authenticator attribute schemas once at service creation.
        self.schemas = self.get_authenticator_schemas()
    @api_method(DNSAuthenticatorSchemasArgs, DNSAuthenticatorSchemasResult, roles=['READONLY_ADMIN'])
    def authenticator_schemas(self):
        """
        Get the schemas for all DNS providers we support for ACME DNS Challenge and the respective attributes
        required for connecting to them while validating a DNS Challenge
        """
        return [
            {'schema': [v.to_json_schema() for v in value.attrs.values()], 'key': key}
            for key, value in self.schemas.items()
        ]
    @private
    def get_authenticator_schemas(self):
        # Map authenticator name -> its SCHEMA of connection attributes.
        return {k: klass.SCHEMA for k, klass in auth_factory.get_authenticators().items()}
| 1,166 | Python | .py | 23 | 43.826087 | 109 | 0.71743 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,965 | factory.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/authenticators/factory.py | import errno
from middlewared.service_exception import CallError
from .cloudflare import CloudFlareAuthenticator
from .ovh import OVHAuthenticator
from .route53 import Route53Authenticator
from .shell import ShellAuthenticator
class AuthenticatorFactory:
    """Registry mapping authenticator NAME strings to their classes."""

    def __init__(self):
        self._creators = {}

    def register(self, authenticator):
        """Register `authenticator` under its NAME attribute."""
        self._creators[authenticator.NAME] = authenticator

    def authenticator(self, name):
        """Return the class registered as `name`, raising CallError if unknown."""
        creator = self._creators.get(name)
        if creator is None:
            raise CallError(f'Unable to locate {name!r} authenticator.', errno=errno.ENOENT)
        return creator

    def get_authenticators(self):
        """Return the full name -> class registry mapping."""
        return self._creators
# Singleton registry of the supported ACME DNS-challenge authenticators.
auth_factory = AuthenticatorFactory()
for authenticator in [
    CloudFlareAuthenticator,
    Route53Authenticator,
    OVHAuthenticator,
    ShellAuthenticator,
]:
    auth_factory.register(authenticator)
24,966 | cloudflare.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/authenticators/cloudflare.py | import logging
from certbot_dns_cloudflare._internal.dns_cloudflare import _CloudflareClient
from middlewared.schema import accepts, Dict, Password, Str, ValidationErrors
from middlewared.service import skip_arg
from .base import Authenticator
logger = logging.getLogger(__name__)
class CloudFlareAuthenticator(Authenticator):
    """
    ACME dns-01 authenticator backed by Cloudflare DNS. Accepts either an
    API token, or a Cloudflare account email plus Global API Key.
    """
    NAME = 'cloudflare'
    PROPAGATION_DELAY = 60
    SCHEMA = Dict(
        'cloudflare',
        Str('cloudflare_email', empty=False, null=True, title='Cloudflare Email'),
        Password('api_key', empty=False, null=True, title='API Key'),
        Password('api_token', empty=False, null=True, title='API Token'),
    )
    def initialize_credentials(self):
        self.cloudflare_email = self.attributes.get('cloudflare_email')
        self.api_key = self.attributes.get('api_key')
        self.api_token = self.attributes.get('api_token')
    @staticmethod
    @accepts(SCHEMA)
    @skip_arg(count=1)
    async def validate_credentials(middleware, data):
        # Exactly one auth mode: an API token alone, or email + Global API Key.
        verrors = ValidationErrors()
        if data.get('api_token'):
            if data.get('cloudflare_email'):
                verrors.add('cloudflare_email', 'Should not be specified when using "api_token".')
            if data.get('api_key'):
                verrors.add('api_key', 'Should not be specified when using "api_token".')
        elif data.get('cloudflare_email') or data.get('api_key'):
            if not data.get('cloudflare_email'):
                verrors.add(
                    'cloudflare_email',
                    'Attribute is required when using a Global API Key (should be associated with Cloudflare account).'
                )
            if not data.get('api_key'):
                verrors.add('api_key', 'Attribute is required when using a Global API Key.')
        else:
            verrors.add('api_token', 'Attribute must be specified when Global API Key is not specified.')
        verrors.check()
        return data
    def _perform(self, domain, validation_name, validation_content):
        # Publish the validation TXT record with a 600 second TTL.
        self.get_cloudflare_object().add_txt_record(domain, validation_name, validation_content, 600)
    def get_cloudflare_object(self):
        # Build a client for whichever credential mode is configured.
        if self.api_token:
            return _CloudflareClient(api_token=self.api_token)
        else:
            return _CloudflareClient(email=self.cloudflare_email, api_key=self.api_key)
    def _cleanup(self, domain, validation_name, validation_content):
        self.get_cloudflare_object().del_txt_record(domain, validation_name, validation_content)
24,967 | route53.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/authenticators/route53.py | import errno
import boto3
import time
from botocore import exceptions as boto_exceptions
from middlewared.schema import accepts, Dict, Password, Str
from middlewared.service import CallError, skip_arg
from .base import Authenticator
class Route53Authenticator(Authenticator):
    """
    ACME dns-01 authenticator backed by AWS Route53. Publishes and removes
    the validation TXT record via the Route53 API using an access key pair.
    """
    NAME = 'route53'
    SCHEMA = Dict(
        'route53',
        Str('access_key_id', required=True, empty=False, title='Access Key Id'),
        Password('secret_access_key', required=True, empty=False, title='Secret Access Key'),
    )

    def initialize_credentials(self):
        self.access_key_id = self.attributes['access_key_id']
        self.secret_access_key = self.attributes['secret_access_key']
        self.client = boto3.Session(
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.secret_access_key,
        ).client('route53')

    @staticmethod
    @accepts(SCHEMA)
    @skip_arg(count=1)
    async def validate_credentials(middleware, data):
        # No upfront verification; credentials are exercised on first use.
        return data

    def _perform(self, domain, validation_name, validation_content):
        return self._change_txt_record('UPSERT', validation_name, validation_content)

    def wait_for_records_to_propagate(self, resp_change_info):
        """
        Wait for a change to be propagated to all Route53 DNS servers.
        https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetChange.html

        Polls GetChange every 5 seconds (up to 120 attempts) until the change
        status is INSYNC; raises CallError on timeout.
        """
        status = resp_change_info['Status']
        for _ in range(120):
            r = self.client.get_change(Id=resp_change_info['Id'])
            # GetChange nests the status under 'ChangeInfo'.
            status = r['ChangeInfo']['Status']
            if status == 'INSYNC':
                return resp_change_info['Id']
            time.sleep(5)
        # BUGFIX: previously read r["Status"] off the full GetChange response,
        # which raised KeyError on timeout and masked this CallError.
        raise CallError(f'Timed out waiting for Route53 change. Current status: {status}')

    def _find_zone_id_for_domain(self, domain):
        """Return the id of the most specific public hosted zone matching `domain`."""
        # Finding zone id for the given domain
        paginator = self.client.get_paginator('list_hosted_zones')
        target_labels = domain.rstrip('.').split('.')
        zones = []
        try:
            for page in paginator.paginate():
                for zone in page['HostedZones']:
                    if zone['Config']['PrivateZone']:
                        continue
                    candidate_labels = zone['Name'].rstrip('.').split('.')
                    if candidate_labels == target_labels[-len(candidate_labels):]:
                        zones.append((zone['Name'], zone['Id']))
            if not zones:
                raise CallError(f'Unable to find a Route53 hosted zone for {domain}', errno=errno.ENOENT)
        except boto_exceptions.ClientError as e:
            raise CallError(f'Failed to get Hosted zones with provided credentials :{e}')
        # Order the zones that are suffixes for our desired to domain by
        # length, this puts them in an order like:
        # ["foo.bar.baz.com", "bar.baz.com", "baz.com", "com"]
        # And then we choose the first one, which will be the most specific.
        zones.sort(key=lambda z: len(z[0]), reverse=True)
        return zones[0][1]

    def _change_txt_record(self, action, validation_domain_name, validation):
        """UPSERT or DELETE the validation TXT record; return the AWS ChangeInfo."""
        if action not in ('UPSERT', 'DELETE'):
            raise CallError('Please specify a valid action for changing TXT record for Route53')
        zone_id = self._find_zone_id_for_domain(validation_domain_name)
        try:
            response = self.client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch={
                    'Comment': 'TrueNAS-dns-route53 certificate validation ' + action,
                    'Changes': [
                        {
                            'Action': action,
                            'ResourceRecordSet': {
                                'Name': validation_domain_name,
                                'Type': 'TXT',
                                'TTL': 10,
                                'ResourceRecords': [{'Value': f'"{validation}"'}],
                            }
                        }
                    ]
                }
            )
            return response['ChangeInfo']
        except Exception as e:
            raise CallError(f'Failed to {action} Route53 record sets: {e}')

    def _cleanup(self, domain, validation_name, validation_content):
        self._change_txt_record('DELETE', validation_name, validation_content)
24,968 | ovh.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/authenticators/ovh.py | import logging
from lexicon.providers.ovh import ENDPOINTS
from certbot_dns_ovh._internal.dns_ovh import _OVHLexiconClient
from middlewared.schema import accepts, Dict, Password, Str
from middlewared.service import skip_arg
from .base import Authenticator
logger = logging.getLogger(__name__)
OVH_ENDPOINTS = tuple(ENDPOINTS.keys())
class OVHAuthenticator(Authenticator):
NAME = 'OVH'
PROPAGATION_DELAY = 60
SCHEMA = Dict(
'OVH',
Str('application_key', empty=False, null=False, title='OVH Application Key', required=True),
Password('application_secret', empty=False, null=False, title='OVH Application Secret', required=True),
Str('consumer_key', empty=False, null=False, title='OVH Consumer Key', required=True),
Str('endpoint', empty=False, default='ovh-eu', title='OVH Endpoint', enum=OVH_ENDPOINTS, required=True),
)
def initialize_credentials(self):
self.application_key = self.attributes.get('application_key')
self.application_secret = self.attributes.get('application_secret')
self.consumer_key = self.attributes.get('consumer_key')
self.endpoint = self.attributes.get('endpoint')
@staticmethod
@accepts(SCHEMA)
@skip_arg(count=1)
async def validate_credentials(middleware, data):
return data
def _perform(self, domain, validation_name, validation_content):
self.get_client().add_txt_record(domain, validation_name, validation_content)
def get_client(self):
return _OVHLexiconClient(
self.endpoint, self.application_key, self.application_secret,
self.consumer_key, 600,
)
def _cleanup(self, domain, validation_name, validation_content):
self.get_client().del_txt_record(domain, validation_name, validation_content)
| 1,820 | Python | .py | 37 | 42.864865 | 112 | 0.716949 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,969 | base.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/authenticators/base.py | import time
from middlewared.service import CallError
class Authenticator:
NAME = NotImplementedError
PROPAGATION_DELAY = NotImplementedError
SCHEMA = NotImplementedError
def __init__(self, middleware, attributes):
self.middleware = middleware
self.attributes = attributes
self.initialize_credentials()
def initialize_credentials(self):
pass
@staticmethod
async def validate_credentials(middleware, data):
raise NotImplementedError
def perform(self, domain, validation_name, validation_content):
try:
perform_ret = self._perform(domain, validation_name, validation_content)
except Exception as e:
raise CallError(f'Failed to perform {self.NAME} challenge for {domain!r} domain: {e}')
else:
self.wait_for_records_to_propagate(perform_ret)
def _perform(self, domain, validation_name, validation_content):
raise NotImplementedError
def wait_for_records_to_propagate(self, perform_ret):
time.sleep(self.PROPAGATION_DELAY)
def cleanup(self, domain, validation_name, validation_content):
try:
self._cleanup(domain, validation_name, validation_content)
except Exception as e:
raise CallError(f'Failed to cleanup {self.NAME} challenge for {domain!r} domain: {e}')
def _cleanup(self, domain, validation_name, validation_content):
raise NotImplementedError
| 1,471 | Python | .py | 33 | 36.787879 | 98 | 0.703366 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,970 | shell.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol_/authenticators/shell.py | """
The authenticator script is called two times during the certificate generation:
1. The validation record creation which is called in the following way:
script set domain validation_name validaton_context timeout
2. The validation record deletion which is called in following way:
script unset domain validation_name validation_context
It is up to script implementation to handle both calls and perform the record creation.
"""
import logging
from middlewared.async_validators import check_path_resides_within_volume
from middlewared.schema import accepts, Dict, Str, File, Int
from middlewared.service import CallError, skip_arg, ValidationErrors
from middlewared.utils.user_context import run_command_with_user_context
from .base import Authenticator
logger = logging.getLogger(__name__)
class ShellAuthenticator(Authenticator):
NAME = 'shell'
PROPAGATION_DELAY = 60
SCHEMA = Dict(
'shell',
File('script', required=True, empty=False, title='Authenticator script'),
Str('user', default='nobody', title='Running user', empty=False),
Int('timeout', default=60, title='Timeout'),
Int('delay', default=60, title='Propagation delay'),
)
def initialize_credentials(self):
self.script = self.attributes['script']
self.user = self.attributes['user']
self.timeout = self.attributes['timeout']
self.PROPAGATION_DELAY = self.attributes['delay']
@staticmethod
@accepts(SCHEMA)
@skip_arg(count=1)
async def validate_credentials(middleware, data):
# We would like to validate the following bits:
# 1) script exists and is executable
# 2) user exists
# 3) User can access the script in question
verrors = ValidationErrors()
try:
await middleware.call('user.get_user_obj', {'username': data['user']})
except KeyError:
verrors.add('user', f'Unable to locate {data["user"]!r} user')
await check_path_resides_within_volume(verrors, middleware, 'script', data['script'])
try:
can_access = await middleware.call(
'filesystem.can_access_as_user', data['user'], data['script'], {'execute': True}
)
except CallError as e:
verrors.add('script', f'Unable to validate script: {e}')
else:
if not can_access:
verrors.add('user', f'{data["user"]!r} user does not has permission to execute the script')
verrors.check()
return data
def _perform(self, domain, validation_name, validation_content):
run_command_with_user_context(
f'{self.script} set {domain} {validation_name} {validation_content}', self.user,
output=False, timeout=self.timeout
)
def _cleanup(self, domain, validation_name, validation_content):
run_command_with_user_context(
f'{self.script} unset {domain} {validation_name} {validation_content}', self.user,
output=False, timeout=self.timeout
)
| 3,061 | Python | .py | 65 | 39.523077 | 107 | 0.678416 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,971 | event_source.py | truenas_middleware/src/middlewared/middlewared/plugins/smart_/event_source.py | import asyncio
import time
from middlewared.event import EventSource
from middlewared.service import private, Service
from middlewared.service_exception import MatchNotFound
class SMARTTestService(Service):
class Config:
namespace = 'smart.test'
cli_namespace = 'task.smart_test'
tests = {}
@private
async def set_test_data(self, disk, data):
self.tests[disk] = data
@private
async def get_test_data(self, disk):
return self.tests.get(disk)
@private
async def pop_test_data(self, disk):
return self.tests.pop(disk, None)
class SMARTTestEventSource(EventSource):
"""
Reports current S.M.A.R.T. test progress for the specified disk.
"""
async def run(self):
disk = self.arg
while not self._cancel.is_set():
data = await self.middleware.call('smart.test.get_test_data', disk)
try:
current_test = (await self.middleware.call(
'smart.test.results',
[['disk', '=', disk]],
{'get': True}
))['current_test']
except MatchNotFound:
current_test = None
if current_test is None:
await self.middleware.call('smart.test.pop_test_data', disk)
self.send_event('ADDED', fields={'progress': None})
return
self.send_event('ADDED', fields={'progress': current_test['progress']})
if data:
# Check every percent
interval = (data['end_monotime'] - data['start_monotime']) // 100
if time.monotonic() < data['end_monotime']:
# but not more often than every ten seconds
interval = max(interval, 10)
else:
# the test is taking longer than expected, do not poll more often than every minute
interval = max(interval, 60)
else:
# Test was started at an unknown time
interval = 60
await asyncio.sleep(interval)
def setup(middleware):
middleware.register_event_source('smart.test.progress', SMARTTestEventSource)
| 2,237 | Python | .py | 55 | 29.563636 | 103 | 0.583719 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,972 | schedule.py | truenas_middleware/src/middlewared/middlewared/plugins/smart_/schedule.py | # -*- coding=utf-8 -*-
from collections import namedtuple
import re
__all__ = ["smartd_schedule_piece", "smartd_schedule_piece_values"]
ALL_VALUES = object()
RE_RANGE_WITH_DIVISOR = re.compile(r"((?P<min>[0-9]+)-(?P<max>[0-9]+)|\*)/(?P<divisor>[0-9]+)")
RE_RANGE = re.compile(r"((?P<min>[0-9]+)-(?P<max>[0-9]+)|\*)")
SchedulePiece = namedtuple("SchedulePiece", ["key", "min", "max", "enum", "map"])
SMARTD_SCHEDULE_PIECES = [
SchedulePiece("month", 1, 12, dict(zip([
"jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"
], range(1, 13))), None),
SchedulePiece("dom", 1, 31, None, None),
SchedulePiece("dow", 1, 7, dict(zip([
"mon", "tue", "wed", "thu", "fri", "sat", "sun"
], range(1, 8))), {0: 7}),
SchedulePiece("hour", 0, 23, None, None),
]
def smartd_schedule_piece(value, min_, max_, enum=None, map_=None):
width = len(str(max_))
values = smartd_schedule_piece_values_template(value, min_, max_, enum, map_)
if values == ALL_VALUES:
return "." * width
else:
return "(" + "|".join([f"%0{width}d" % v for v in values]) + ")"
def smartd_schedule_piece_values_template(value, min_, max_, enum=None, map_=None):
enum = enum or {}
map_ = map_ or {}
if value == "*":
return ALL_VALUES
elif m := RE_RANGE_WITH_DIVISOR.match(value):
d = int(m.group("divisor"))
if m.group("min") is None:
if d == 1:
return ALL_VALUES
else:
min_ = int(m.group("min"))
max_ = int(m.group("max"))
values = [v for v in range(min_, max_ + 1) if v % d == 0]
elif m := RE_RANGE.match(value):
start = int(m.group("min"))
end = int(m.group("max"))
if end <= start:
values = [start]
else:
values = [i for i in range(start, end + 1)]
else:
values = list(filter(lambda v: v is not None,
map(lambda s: enum.get(s.lower(), int(s) if s.isdigit() else None),
value.split(","))))
values = [map_.get(v, v) for v in values]
if values == list(range(min_, max_ + 1)):
return ALL_VALUES
return values
def smartd_schedule_piece_values(value, min_, max_, enum=None, map_=None):
values = smartd_schedule_piece_values_template(value, min_, max_, enum, map_)
if values == ALL_VALUES:
return list(range(min_, max_ + 1))
else:
return values
| 2,509 | Python | .py | 60 | 34.35 | 96 | 0.543326 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,973 | ports.py | truenas_middleware/src/middlewared/middlewared/plugins/ports/ports.py | import ipaddress
import itertools
from collections import defaultdict
from middlewared.service import Service, ValidationErrors
from .utils import WILDCARD_IPS
SYSTEM_PORTS = [(wildcard, port) for wildcard in WILDCARD_IPS for port in [67, 123, 3702, 5353, 6000]]
def get_ip_version(ip: str) -> int:
return ipaddress.ip_interface(ip).version
class PortService(Service):
DELEGATES = {}
SYSTEM_USED_PORTS = [
{
'title': 'System',
'ports': SYSTEM_PORTS,
'port_details': [{'description': None, 'ports': SYSTEM_PORTS}],
'namespace': 'system',
},
]
class Config:
private = True
async def register_attachment_delegate(self, delegate):
if delegate.namespace in self.DELEGATES:
raise ValueError(f'{delegate.namespace!r} delegate is already registered with Port Service')
self.DELEGATES[delegate.namespace] = delegate
async def get_all_used_ports(self):
used_ports = await self.get_in_use()
return [
port_entry[1] for entry in used_ports for port_entry in entry['ports']
]
async def get_unused_ports(self, lower_port_limit=1025):
used_ports = set(await self.get_all_used_ports())
return [i for i in range(lower_port_limit, 65535) if i not in used_ports]
async def get_in_use(self):
ports = []
for delegate in self.DELEGATES.values():
used_ports = await delegate.get_ports()
if used_ports:
for entry in used_ports:
entry['ports'] = [list(i) for i in entry['ports']]
ports.append({
'namespace': delegate.namespace,
'title': delegate.title,
'ports': list(itertools.chain(*[entry['ports'] for entry in used_ports])),
'port_details': used_ports,
})
return ports + self.SYSTEM_USED_PORTS
async def validate_port(self, schema, port, bindip='0.0.0.0', whitelist_namespace=None, raise_error=False):
verrors = ValidationErrors()
bindip_version = get_ip_version(bindip)
wildcard_ip = '0.0.0.0' if bindip_version == 4 else '::'
port_mapping = await self.ports_mapping(whitelist_namespace)
port_attachment = port_mapping[port]
if not any(
get_ip_version(ip) == bindip_version for ip in port_attachment
) or (
bindip not in port_attachment and wildcard_ip not in port_attachment and bindip != wildcard_ip
):
return verrors
ip_errors = []
for index, port_detail in enumerate(port_attachment.items()):
ip, port_entry = port_detail
if get_ip_version(ip) != bindip_version:
continue
if bindip == wildcard_ip or ip == wildcard_ip or (bindip != wildcard_ip and ip == bindip):
try:
entry = next(
detail for detail in port_entry['port_details']
if [ip, port] in detail['ports'] or [bindip, port] in detail['ports']
)
description = entry['description']
except StopIteration:
description = None
ip_errors.append(
f'{index + 1}) "{ip}:{port}" used by {port_entry["title"]}'
f'{f" ({description})" if description else ""}'
)
err = '\n'.join(ip_errors)
verrors.add(
schema,
f'The port is being used by following services:\n{err}'
)
if raise_error:
verrors.check()
return verrors
async def ports_mapping(self, whitelist_namespace=None):
ports = defaultdict(dict)
for attachment in filter(lambda entry: entry['namespace'] != whitelist_namespace, await self.get_in_use()):
for bindip, port in attachment['ports']:
ports[port][bindip] = attachment
return ports
| 4,065 | Python | .py | 90 | 33.655556 | 115 | 0.58116 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,974 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/rate_limit/__init__.py | from middlewared.service import periodic, Service
from middlewared.utils.rate_limit.cache import RateLimitCache
CLEAR_CACHE_INTERVAL = 600
class RateLimitService(Service):
class Config:
namespace = 'rate.limit'
private = True
cli_private = True
@periodic(interval=CLEAR_CACHE_INTERVAL, run_on_start=False)
async def cache_clear(self):
"""Clear the entirety of the rate limit global cache."""
# This is useful, mostly, for the edge-case scenario where
# we have bad actor(s) that spam the API endpoints that
# require no authentication and they are using random IP
# addresses for each request. In that scenario, we will
# store a maximum of amount of entries in the cache and
# then refuse to honor any more requests for all consumers.
# This is required for STIG purposes.
await RateLimitCache.cache_clear()
async def cache_get(self):
"""Return the global rate limit cache."""
return await RateLimitCache.cache_get()
async def cache_pop(self, method_name: str, ip: str) -> None:
"""Pop an entry from the global cache."""
return await RateLimitCache.cache_pop(method_name, ip)
| 1,232 | Python | .py | 25 | 42.08 | 67 | 0.694167 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,975 | 2fa.py | truenas_middleware/src/middlewared/middlewared/plugins/auth_/2fa.py | import base64
import contextlib
import pyotp
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Dict, Int, Patch
from middlewared.service import CallError, ConfigService, periodic, private
from middlewared.utils.directoryservices.constants import DSStatus, DSType
from middlewared.validators import Range
class TwoFactoryUserAuthModel(sa.Model):
__tablename__ = 'account_twofactor_user_auth'
id = sa.Column(sa.Integer(), primary_key=True)
secret = sa.Column(sa.EncryptedText(), nullable=True, default=None)
user_id = sa.Column(sa.ForeignKey('account_bsdusers.id', ondelete='CASCADE'), index=True, nullable=True)
user_sid = sa.Column(sa.String(length=255), nullable=True, index=True, unique=True)
otp_digits = sa.Column(sa.Integer(), default=6)
interval = sa.Column(sa.Integer(), default=30)
class TwoFactorAuthModel(sa.Model):
__tablename__ = 'system_twofactorauthentication'
id = sa.Column(sa.Integer(), primary_key=True)
services = sa.Column(sa.JSON(), default={})
enabled = sa.Column(sa.Boolean(), default=False)
window = sa.Column(sa.Integer(), default=0)
class TwoFactorAuthService(ConfigService):
class Config:
datastore = 'system.twofactorauthentication'
datastore_extend = 'auth.twofactor.two_factor_extend'
namespace = 'auth.twofactor'
cli_namespace = 'auth.two_factor'
ENTRY = Dict(
'auth_twofactor_entry',
Bool('enabled', required=True),
Dict(
'services',
Bool('ssh', default=False),
required=True
),
Int('window', validators=[Range(min_=0)], required=True),
Int('id', required=True),
)
@private
async def two_factor_extend(self, data):
for srv in ['ssh']:
data['services'].setdefault(srv, False)
return data
@accepts(
Patch(
'auth_twofactor_entry', 'auth_twofactor_update',
('rm', {'name': 'id'}),
('attr', {'update': True}),
),
audit='Update two-factor authentication service configuration'
)
async def do_update(self, data):
"""
`window` extends the validity to `window` many counter ticks before and after the current one.
Update Two-Factor Authentication Service Configuration.
"""
old_config = await self.config()
config = old_config.copy()
config.update(data)
if config == old_config:
return config
await self.middleware.call(
'datastore.update',
self._config.datastore,
config['id'],
config
)
await self.middleware.call('service.reload', 'ssh')
return await self.config()
@private
async def get_user_config(self, user_id, local_user):
filters = [
['user_id', '=', user_id], ['user_sid', '=', None]
] if local_user else [['user_sid', '=', user_id], ['user_id', '=', None]]
if config := await self.middleware.call('datastore.query', 'account.twofactor_user_auth', filters):
return {
**config[0],
'exists': True,
}
else:
return {
'secret': None,
filters[0][0]: user_id,
'exists': False,
'interval': 30,
'otp_digits': 6,
}
@private
def generate_base32_secret(self):
return pyotp.random_base32()
@private
def get_users_config(self):
users = []
mapping = {
user['sid']: user for user in self.middleware.call_sync(
'user.query', [['local', '=', False], ['sid', '!=', None]]
)
}
for config in self.middleware.call_sync(
'datastore.query', 'account.twofactor_user_auth', [['secret', '!=', None]]
):
username = None
ad_user = False
if config['user']:
username = config['user']['bsdusr_username']
elif user := mapping.get(config['user_sid']):
username = user['username']
ad_user = True
if username:
users.append({
'username': username,
'secret_hex': base64.b16encode(base64.b32decode(config['secret'])).decode(),
'row_id': config['id'],
'ad_user': ad_user,
'otp_digits': config['otp_digits'],
'interval': config['interval']
})
return users
@private
async def get_ad_users(self):
return {
entry['user_sid']: entry for entry in await self.middleware.call(
'datastore.query', 'account.twofactor_user_auth', [['user_sid', '!=', None]]
)
}
@periodic(interval=86400, run_on_start=False)
@private
async def remove_expired_secrets(self):
ds = await self.middleware.call('directoryservices.status')
if ds['type'] != DSType.AD.value or ds['status'] != DSStatus.HEALTHY.name:
return
ad_users = await self.get_ad_users()
ad_users_sid_mapping = {user['sid']: user for user in ad_users}
with contextlib.suppress(CallError):
for unmapped_user_sid in (await self.middleware.call('idmap.convert_sids', list(ad_users)))['unmapped']:
await self.middleware.call(
'datastore.delete', 'account.twofactor_user_auth', ad_users_sid_mapping[unmapped_user_sid]['id']
)
| 5,639 | Python | .py | 139 | 30.517986 | 116 | 0.580073 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,976 | authenticate.py | truenas_middleware/src/middlewared/middlewared/plugins/auth_/authenticate.py | import os
import pam
from middlewared.auth import AuthenticationContext
from middlewared.plugins.account import unixhash_is_valid
from middlewared.plugins.account_.constants import (
ADMIN_UID, MIDDLEWARE_PAM_SERVICE, MIDDLEWARE_PAM_API_KEY_SERVICE
)
from middlewared.service import Service, pass_app, private
from middlewared.service_exception import CallError
from middlewared.utils.crypto import check_unixhash
PAM_SERVICES = {MIDDLEWARE_PAM_SERVICE, MIDDLEWARE_PAM_API_KEY_SERVICE}
class AuthService(Service):
class Config:
cli_namespace = 'auth'
@private
@pass_app()
async def authenticate_plain(self, app, username, password, is_api_key=False):
pam_svc = MIDDLEWARE_PAM_API_KEY_SERVICE if is_api_key else MIDDLEWARE_PAM_SERVICE
if user_info := (await self.middleware.call(
'datastore.query', 'account.bsdusers',
[('username', '=', username)],
{'prefix': 'bsdusr_', 'select': ['id', 'unixhash', 'uid']},
)):
user_info = user_info[0] | {'local': True}
unixhash = user_info.pop('unixhash')
else:
user_info = {'id': None, 'uid': None, 'local': False}
unixhash = None
pam_resp = {'code': pam.PAM_AUTH_ERR, 'reason': 'Authentication failure'}
user_token = None
# The following provides way for root user to avoid getting locked out
# of webui via due to PAM enforcing password policies on the root
# account. Specifically, some legacy users have configured the root
# account so its password has password_disabled = true. We have to
# maintain the old middleware authentication code (bypassing PAM) to
# prevent this.
#
# In all failure cases libpam_authenticate is called so that timing
# is consistent with pam_fail_delay
if not is_api_key and username == 'root' and await self.middleware.call('privilege.always_has_root_password_enabled'):
if not unixhash_is_valid(unixhash):
await self.middleware.call('auth.libpam_authenticate', username, password)
elif await self.middleware.run_in_thread(check_unixhash, password, unixhash):
pam_resp = {'code': pam.PAM_SUCCESS, 'reason': ''}
else:
await self.middleware.call('auth.libpam_authenticate', username, password, app=app)
else:
pam_resp = await self.middleware.call('auth.libpam_authenticate', username, password, pam_svc, app=app)
if pam_resp['code'] == pam.PAM_SUCCESS:
user_token = await self.authenticate_user({'username': username}, user_info, is_api_key)
if user_token is None:
# Some error occurred when trying to generate our user token
pam_resp['code'] = pam.PAM_AUTH_ERR
pam_resp['reason'] = 'Failed to generate user token'
return {'pam_response': pam_resp, 'user_data': user_token}
@private
@pass_app()
def libpam_authenticate(self, app, username, password, pam_service=MIDDLEWARE_PAM_SERVICE):
"""
Following PAM codes are returned:
PAM_SUCCESS = 0
PAM_SYSTEM_ERR = 4 // pam_tdb.so response if used in unexpected pam service file
PAM_AUTH_ERR = 7 // Bad username or password
PAM_AUTHINFO_UNAVAIL = 9 // API key - pam_tdb file must be regenerated
PAM_USER_UNKNOWN = 10 // API key - user has no keys defined
PAM_NEW_AUTHTOK_REQD = 12 // User must change password
Potentially other may be returned as well depending on the particulars
of the PAM modules.
"""
if app and app.authentication_context:
auth_ctx = app.authentication_context
else:
# If this is coming through REST API then we may not have app, but
# this is not an issue since we will not implement PAM converstations
# over REST.
auth_ctx = AuthenticationContext()
if pam_service not in PAM_SERVICES:
self.logger.error('%s: invalid pam service file used for username: %s',
pam_service, username)
raise CallError(f'{pam_service}: invalid pam service file')
if not os.path.exists(pam_service):
self.logger.error('PAM service file is missing. Attempting to regenerate')
self.middleware.call_sync('etc.generate', 'pam_middleware')
if not os.path.exists(pam_service):
self.logger.error(
'%s: Unable to generate PAM service file for middleware. Denying '
'access to user.', username
)
return {'code': pam.PAM_ABORT, 'reason': 'Failed to generate PAM service file'}
with auth_ctx.pam_lock:
if not auth_ctx.pam_hdl:
auth_ctx.pam_hdl = pam.pam()
p = auth_ctx.pam_hdl
p.authenticate(username, password, service=os.path.basename(pam_service))
pam_resp = {'code': p.code, 'reason': p.reason}
return pam_resp
@private
async def authenticate_user(self, query, user_info, is_api_key):
try:
user = await self.middleware.call('user.get_user_obj', {
**query, 'get_groups': True,
'sid_info': not user_info['local'],
})
except KeyError:
return None
if user_info['uid'] is not None and user_info['uid'] != user['pw_uid']:
# For some reason there's a mismatch between the passwd file
# and what is stored in the TrueNAS configuration.
self.logger.error(
'%s: rejecting access for local user due to uid [%d] not '
'matching expected value [%d]',
user['pw_name'], user['pw_uid'], user_info['uid']
)
return None
match user['source']:
case 'LOCAL':
# Local user
twofactor_id = user_info['id']
groups_key = 'local_groups'
account_flags = ['LOCAL']
case 'ACTIVEDIRECTORY':
# Active directory user
twofactor_id = user['sid']
groups_key = 'ds_groups'
account_flags = ['DIRECTORY_SERVICE', 'ACTIVE_DIRECTORY']
case 'LDAP':
# This includes both OpenLDAP and IPA domains
# Since IPA domains may have cross-realm trusts with separate
# idmap configuration we will preferentially use the SID if it is
# available (since it should be static and universally unique)
twofactor_id = user['sid'] or user_info['id']
groups_key = 'ds_groups'
account_flags = ['DIRECTORY_SERVICE', 'LDAP']
case _:
self.logger.error('[%s]: unknown user source. Rejecting access.', user['source'])
return None
if user['local'] != user_info['local']:
# There is a disagreement between our expectation of user account source
# based on our database and what NSS _actually_ returned.
self.logger.error(
'%d: Rejecting access by user id due to potential collision between '
'local and directory service user account. TrueNAS configuration '
'expected a %s user account but received an account provided by %s.',
user['pw_uid'], 'local' if user_info['local'] else 'non-local', user['source']
)
return None
# Two-factor authentication token is keyed by SID for activedirectory
# users.
twofactor_enabled = bool((await self.middleware.call(
'auth.twofactor.get_user_config',
twofactor_id, user_info['local']
))['secret'])
groups = set(user['grouplist'])
if twofactor_enabled:
account_flags.append('2FA')
if is_api_key:
account_flags.append('API_KEY')
if user['pw_uid'] in (0, ADMIN_UID):
if not user['local']:
# Although this should be covered in above check for mismatch in
# value of `local`, perform an extra explicit check for the case
# of root / root-equivalent accounts.
self.logger.error(
'Rejecting admin account access due to collision with acccount provided '
'by a directory service.'
)
return None
account_flags.append('SYS_ADMIN')
privileges = await self.middleware.call('privilege.privileges_for_groups', groups_key, groups)
if not privileges:
return None
return {
'username': user['pw_name'],
'account_attributes': account_flags,
'privilege': await self.middleware.call('privilege.compose_privilege', privileges),
}
@private
async def authenticate_root(self):
return {
'username': 'root',
'account_attributes': ['LOCAL', 'SYS_ADMIN'],
'privilege': await self.middleware.call('privilege.full_privilege'),
}
| 9,268 | Python | .py | 183 | 38.743169 | 126 | 0.599978 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,977 | cert_info.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cert_info.py | from middlewared.schema import accepts, Dict, Ref, returns, Str
from middlewared.service import private, Service
from .utils import EC_CURVES, EKU_OIDS
class CertificateService(Service):
class Config:
cli_namespace = 'system.certificate'
@private
async def get_domain_names(self, cert_id):
data = await self.middleware.call('certificate.get_instance', int(cert_id))
names = [data['common']] if data['common'] else []
names.extend(data['san'])
return names
@accepts()
@returns(Ref('country_choices'))
async def country_choices(self):
"""
Returns country choices for creating a certificate/csr.
"""
return await self.middleware.call('system.general.country_choices')
@accepts()
@returns(Dict('acme_server_choices', additional_attrs=True))
async def acme_server_choices(self):
"""
Dictionary of popular ACME Servers with their directory URI endpoints which we display automatically
in UI
"""
return {
'https://acme-staging-v02.api.letsencrypt.org/directory': 'Let\'s Encrypt Staging Directory',
'https://acme-v02.api.letsencrypt.org/directory': 'Let\'s Encrypt Production Directory'
}
@accepts()
@returns(Dict(
'ec_curve_choices',
*[Str(k, enum=[k]) for k in EC_CURVES]
))
async def ec_curve_choices(self):
"""
Dictionary of supported EC curves.
"""
return {k: k for k in EC_CURVES}
@accepts()
@returns(Dict(
'extended_key_usage_choices',
*[Str(k, enum=[k]) for k in EKU_OIDS]
))
async def extended_key_usage_choices(self):
"""
Dictionary of choices for `ExtendedKeyUsage` extension which can be passed over to `usages` attribute.
"""
return {k: k for k in EKU_OIDS}
| 1,881 | Python | .py | 50 | 30.28 | 110 | 0.639407 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,978 | dependencies.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/dependencies.py | from middlewared.service import CallError, private, Service
class CertificateService(Service):
@private
async def check_cert_deps(self, cert_id):
if deps := await self.middleware.call('certificate.get_attachments', cert_id):
deps_str = ''
for i, svc in enumerate(deps):
deps_str += f'{i+1}) {svc}\n'
raise CallError(f'Certificate is being used by following service(s):\n{deps_str}')
| 454 | Python | .py | 9 | 41.555556 | 94 | 0.647059 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,979 | ca_profiles.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/ca_profiles.py | from middlewared.schema import accepts, Dict, returns
from middlewared.service import Service
from .utils import DEFAULT_LIFETIME_DAYS
class CertificateAuthorityService(Service):
    class Config:
        cli_namespace = 'system.certificate.authority'
    # Predefined CA creation profile(s); served verbatim by `profiles()` below.
    PROFILES = {
        'CA': {
            'key_length': 2048,
            'key_type': 'RSA',
            'lifetime': DEFAULT_LIFETIME_DAYS,
            'digest_algorithm': 'SHA256',
            'cert_extensions': {
                # A CA must be able to sign certificates and CRLs.
                'KeyUsage': {
                    'enabled': True,
                    'key_cert_sign': True,
                    'crl_sign': True,
                    'extension_critical': True
                },
                # Marks the certificate as a CA certificate.
                'BasicConstraints': {
                    'enabled': True,
                    'ca': True,
                    'extension_critical': True
                },
                'ExtendedKeyUsage': {
                    'enabled': True,
                    'extension_critical': False,
                    'usages': ['SERVER_AUTH']
                }
            }
        }
    }
    @accepts(roles=['CERTIFICATE_AUTHORITY_READ'])
    @returns(Dict(
        'certificate_authority_profiles',
        *[Dict(profile, additional_attrs=True) for profile in PROFILES]
    ))
    async def profiles(self):
        """
        Returns a dictionary of predefined options for specific use cases i.e OpenVPN certificate authority
        configurations which can be used for creating certificate authorities.
        """
        return self.PROFILES
| 1,542 | Python | .py | 43 | 23.651163 | 107 | 0.529806 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,980 | csr.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/csr.py | import typing
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from .extensions_utils import add_extensions
from .generate_utils import generate_builder, normalize_san
from .key_utils import export_private_key_object, generate_private_key, retrieve_signing_algorithm
from .utils import CERT_BACKEND_MAPPINGS, EC_CURVE_DEFAULT
def generate_certificate_signing_request(data: dict) -> typing.Tuple[str, str]:
    """
    Generate a private key and a CSR from `data` (key type/size, subject fields,
    SAN list, extensions). Returns a (csr_pem, private_key_pem) tuple.
    """
    private_key = generate_private_key({
        'type': data.get('key_type') or 'RSA',
        'curve': data.get('ec_curve') or EC_CURVE_DEFAULT,
        'key_length': data.get('key_length') or 2048,
    })
    builder = generate_builder({
        # Map schema field names onto the x509 subject attribute names.
        'crypto_subject_name': {
            subject_attr: data.get(schema_field)
            for subject_attr, schema_field in CERT_BACKEND_MAPPINGS.items()
        },
        'san': normalize_san(data.get('san') or []),
        'serial': data.get('serial'),
        'lifetime': data.get('lifetime'),
        'csr': True,
    })
    builder = add_extensions(builder, data.get('cert_extensions', {}), private_key, None)
    signed_csr = builder.sign(private_key, retrieve_signing_algorithm(data, private_key), default_backend())
    return signed_csr.public_bytes(serialization.Encoding.PEM).decode(), export_private_key_object(private_key)
| 1,232 | Python | .py | 25 | 43.52 | 98 | 0.694167 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,981 | revoke_ca.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/revoke_ca.py | from middlewared.service import periodic, private, Service
from middlewared.utils.time_utils import utc_now
from .query_utils import get_ca_chain
class CertificateAuthorityService(Service):

    class Config:
        cli_namespace = 'system.certificate.authority'

    @periodic(86400, run_on_start=True)
    @private
    async def crl_generation(self):
        """Regenerate CRLs once a day by (re)starting the ssl service."""
        await self.middleware.call('service.start', 'ssl')

    @private
    async def revoke_ca_chain(self, ca_id):
        """Mark the CA `ca_id` and every cert/CA in its signing chain as revoked."""
        for entry in await self.get_ca_chain(ca_id):
            # CA entries live in a different datastore than leaf certificates.
            if entry['cert_type'] == 'CA':
                datastore = 'system.certificateauthority'
            else:
                datastore = 'system.certificate'
            await self.middleware.call(
                'datastore.update',
                datastore,
                entry['id'],
                {'revoked_date': utc_now()},
                {'prefix': 'cert_'},
            )

    @private
    async def get_ca_chain(self, ca_id):
        """Return all certificates and CAs belonging to the chain rooted at `ca_id`."""
        all_certs = await self.middleware.call('datastore.query', 'system.certificate', [], {'prefix': 'cert_'})
        all_cas = await self.middleware.call('datastore.query', 'system.certificateauthority', [], {'prefix': 'cert_'})
        return get_ca_chain(ca_id, all_certs, all_cas)
| 1,219 | Python | .py | 28 | 34.285714 | 115 | 0.613176 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,982 | default_cert.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/default_cert.py | from middlewared.service import private, Service
from .utils import CERT_TYPE_EXISTING, DEFAULT_CERT_NAME
class CertificateService(Service):
    @private
    async def setup_self_signed_cert_for_ui(self, cert_name=DEFAULT_CERT_NAME):
        """
        Ensure a usable certificate for the web UI exists, point the system
        settings at it and restart the ssl service to apply it.
        """
        cert_id = None
        index = 1
        # Reuse an existing healthy cert with this name; if the name is taken by an
        # unusable cert, try name_1, name_2, ... until a free/usable name is found.
        while not cert_id:
            cert = await self.middleware.call('certificate.query', [['name', '=', cert_name]])
            if cert:
                cert = cert[0]
                # A truthy (non-empty) ValidationErrors result means the cert is unusable.
                if await self.middleware.call('certificate.cert_services_validation', cert['id'], 'certificate', False):
                    cert_name = f'{cert_name}_{index}'
                    index += 1
                else:
                    cert_id = cert['id']
                    self.logger.debug('Using %r certificate for System UI', cert_name)
            else:
                cert_id = await self.setup_self_signed_cert_for_ui_impl(cert_name)
                self.logger.debug('Default certificate for System created')
        # Point the UI at the selected/created certificate.
        await self.middleware.call(
            'datastore.update',
            'system.settings',
            (await self.middleware.call('system.general.config'))['id'],
            {'stg_guicertificate': cert_id}
        )
        await self.middleware.call('service.start', 'ssl')
    @private
    async def setup_self_signed_cert_for_ui_impl(self, cert_name):
        """Create and persist a self-signed certificate; returns the new row id."""
        cert, key = await self.middleware.call('cryptokey.generate_self_signed_certificate')
        cert_dict = {
            'certificate': cert,
            'privatekey': key,
            'name': cert_name,
            'type': CERT_TYPE_EXISTING,
        }
        # We use datastore.insert to directly insert in db as this is a self-signed cert
        # and we don't allow that via regular api
        return await self.middleware.call(
            'datastore.insert',
            'system.certificate',
            cert_dict,
            {'prefix': 'cert_'}
        )
| 1,930 | Python | .py | 44 | 32.204545 | 120 | 0.577517 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,983 | load_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/load_utils.py | import datetime
import dateutil
import dateutil.parser
import logging
import re
from contextlib import suppress
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed25519, ed448, rsa
from OpenSSL import crypto
from typing import Optional, Union
from .utils import RE_CERTIFICATE
logger = logging.getLogger(__name__)
def parse_cert_date_string(date_value: str) -> str:
    """Parse a certificate timestamp and return it as a local-timezone ctime() string."""
    parsed = dateutil.parser.parse(date_value)
    return parsed.astimezone(dateutil.tz.tzlocal()).ctime()
def load_certificate(certificate: str, get_issuer: bool = False) -> dict:
    """
    Parse a PEM certificate into a dict of subject/validity/signature details.
    Returns an empty dict when the PEM cannot be parsed (or its validity dates
    overflow). When `get_issuer` is set, an `issuer_dn` key is added.
    """
    try:
        # digest_algorithm, lifetime, country, state, city, organization, organizational_unit,
        # email, common, san, serial, chain, fingerprint
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
        from_date = parse_cert_date_string(cert.get_notBefore())
        until_date = parse_cert_date_string(cert.get_notAfter())
        # Compare in local time, matching the format parse_cert_date_string emits.
        expired = datetime.datetime.now() > datetime.datetime.strptime(
            parse_cert_date_string(cert.get_notAfter()), '%a %b %d %H:%M:%S %Y'
        )
    except (crypto.Error, OverflowError):
        # Overflow error is raised when the certificate has a lifetime which will never expire
        # and we don't support such certificates
        return {}
    else:
        cert_info = get_x509_subject(cert)
        if get_issuer:
            cert_info['issuer_dn'] = parse_name_components(cert.get_issuer()) if cert.get_issuer() else None
        valid_algos = ('SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'ED25519')
        signature_algorithm = cert.get_signature_algorithm().decode()
        # Certs signed with RSA keys will have something like
        # sha256WithRSAEncryption
        # Certs signed with EC keys will have something like
        # ecdsa-with-SHA256
        m = re.match('^(.+)[Ww]ith', signature_algorithm)
        if m:
            cert_info['digest_algorithm'] = m.group(1).upper()
        # Fall back to the last dash-separated token (the EC style) if the first
        # attempt did not yield a recognised digest name.
        if cert_info.get('digest_algorithm') not in valid_algos:
            cert_info['digest_algorithm'] = (signature_algorithm or '').split('-')[-1].strip()
        if cert_info['digest_algorithm'] not in valid_algos:
            # Let's log this please
            logger.debug(f'Failed to parse signature algorithm {signature_algorithm} for {certificate}')
        cert_info.update({
            'lifetime': (
                dateutil.parser.parse(cert.get_notAfter()) - dateutil.parser.parse(cert.get_notBefore())
            ).days,
            'from': from_date,
            'until': until_date,
            'serial': cert.get_serial_number(),
            # More than one PEM block in the input means a certificate chain.
            'chain': len(RE_CERTIFICATE.findall(certificate)) > 1,
            'fingerprint': cert.digest('sha1').decode(),
            'expired': expired,
        })
        return cert_info
def get_x509_subject(obj: Union[crypto.X509, crypto.X509Req]) -> dict:
    """
    Extract subject fields, SAN entries, extensions and the DN string from a
    pyOpenSSL certificate or certificate-request object.
    """
    cert_info = {
        'country': obj.get_subject().C,
        'state': obj.get_subject().ST,
        'city': obj.get_subject().L,
        'organization': obj.get_subject().O,
        'organizational_unit': obj.get_subject().OU,
        'common': obj.get_subject().CN,
        'san': [],
        'email': obj.get_subject().emailAddress,
        'DN': '',
        # subject_name_hash() exists on certificates only, not on CSR objects.
        'subject_name_hash': obj.subject_name_hash() if not isinstance(obj, crypto.X509Req) else None,
        'extensions': {},
    }
    # X509 exposes extensions by index; X509Req returns them as a list.
    for ext in filter(
        lambda e: e.get_short_name().decode() != 'UNDEF',
        map(
            lambda i: obj.get_extension(i),
            range(obj.get_extension_count())
        ) if isinstance(obj, crypto.X509) else obj.get_extensions()
    ):
        if 'subjectAltName' == ext.get_short_name().decode():
            cert_info['san'] = [s.strip() for s in ext.__str__().split(',') if s]
        try:
            # Capitalise the first character of the extension's short name.
            ext_name = re.sub(r"^(\S)", lambda m: m.group(1).upper(), ext.get_short_name().decode())
            # Placeholder value first, in case the __str__() call below raises.
            cert_info['extensions'][ext_name] = 'Unable to parse extension'
            cert_info['extensions'][ext_name] = ext.__str__()
        except crypto.Error as e:
            # some certificates can have extensions with binary data which we can't parse without
            # explicit mapping for each extension. The current case covers the most of extensions nicely
            # and if it's required to map certain extensions which can't be handled by above we can do
            # so as users request.
            logger.error('Unable to parse extension: %s', e)
    cert_info['DN'] = parse_name_components(obj.get_subject())
    if cert_info['san']:
        # We should always trust the extension instead of the subject for SAN
        cert_info['DN'] += f'/subjectAltName={", ".join(cert_info["san"])}'
    return cert_info
def parse_name_components(obj: crypto.X509Name) -> str:
    """Render an X509 name as a slash-separated DN string, skipping subjectAltName."""
    parts = []
    for raw_key, _ in obj.get_components():
        key = raw_key.decode()
        if key == 'subjectAltName' or not hasattr(obj, key):
            continue
        parts.append(f'{key}={getattr(obj, key)}')
    return f'/{"/".join(parts)}'
def load_certificate_request(csr: str) -> dict:
    """Parse a PEM CSR into its subject details; returns {} if it cannot be parsed."""
    try:
        parsed = crypto.load_certificate_request(crypto.FILETYPE_PEM, csr)
    except crypto.Error:
        return {}
    return get_x509_subject(parsed)
def load_private_key(key_string: str, passphrase: Optional[str] = None) -> Union[
    ed25519.Ed25519PrivateKey,
    ed448.Ed448PrivateKey,
    rsa.RSAPrivateKey,
    dsa.DSAPrivateKey,
    ec.EllipticCurvePrivateKey,
]:
    """Deserialize a PEM private key (optionally passphrase protected); None on failure."""
    try:
        return serialization.load_pem_private_key(
            key_string.encode(),
            password=passphrase.encode() if passphrase else None,
            backend=default_backend()
        )
    except (ValueError, TypeError, AttributeError):
        # Malformed PEM, wrong passphrase type, or non-string input — treat all as unparsable.
        return None
def get_serial_from_certificate_safe(certificate: Union[str, None]) -> Optional[int]:
    """Return the serial number of a PEM certificate, or None if it cannot be parsed."""
    try:
        parsed = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
    except crypto.Error:
        return None
    return parsed.get_serial_number()
| 6,099 | Python | .py | 133 | 37.879699 | 111 | 0.640451 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,984 | cert_entry.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cert_entry.py | import copy
from middlewared.schema import Bool, Datetime, Dict, Int, List, OROperator, Str
# Shared schema describing a certificate entry as returned by certificate.query;
# also reused (renamed) for certificate authority results via get_ca_result_entry().
CERT_ENTRY = Dict(
    'certificate_entry',
    Int('id'),
    Int('type'),
    Str('name'),
    Str('certificate', null=True, max_length=None),
    Str('privatekey', null=True, max_length=None, private=True),
    Str('CSR', null=True, max_length=None),
    Str('acme_uri', null=True),
    Dict('domains_authenticators', additional_attrs=True, null=True),
    Int('renew_days'),
    Datetime('revoked_date', null=True),
    Dict('signedby', additional_attrs=True, null=True),
    Str('root_path'),
    Dict('acme', additional_attrs=True, null=True),
    Str('certificate_path', null=True),
    Str('privatekey_path', null=True),
    Str('csr_path', null=True),
    Str('cert_type'),
    Bool('revoked'),
    Bool('expired', null=True),
    # issuer may be a plain string or a full nested cert dict depending on context.
    OROperator(
        Str('issuer', null=True, private=True),
        Dict('issuer', additional_attrs=True, null=True, private=True),
        name='issuer'
    ),
    List('chain_list', items=[Str('certificate', max_length=None)]),
    Str('country', null=True),
    Str('state', null=True),
    Str('city', null=True),
    Str('organization', null=True),
    Str('organizational_unit', null=True),
    List('san', items=[Str('san_entry')], null=True),
    Str('email', null=True),
    Str('DN', null=True),
    Str('subject_name_hash', null=True),
    Str('digest_algorithm', null=True),
    Str('from', null=True),
    Str('common', null=True, max_length=None),
    Str('until', null=True),
    Str('fingerprint', null=True),
    Str('key_type', null=True),
    Str('internal', null=True),
    Int('lifetime', null=True),
    Int('serial', null=True),
    Int('key_length', null=True),
    Bool('add_to_trusted_store', default=False),
    Bool('chain', null=True),
    Bool('CA_type_existing'),
    Bool('CA_type_internal'),
    Bool('CA_type_intermediate'),
    Bool('cert_type_existing'),
    Bool('cert_type_internal'),
    Bool('cert_type_CSR'),
    Bool('parsed'),
    Bool('can_be_revoked'),
    Dict('extensions', additional_attrs=True),
    List('revoked_certs'),
    Str('crl_path'),
    Int('signed_certificates'),
)
def get_ca_result_entry():
    """Return a deep copy of CERT_ENTRY renamed for certificate-authority results."""
    ca_entry = copy.deepcopy(CERT_ENTRY)
    ca_entry.name = 'certificateauthority_entry'
    return ca_entry
| 2,292 | Python | .py | 67 | 29.253731 | 79 | 0.64009 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,985 | certificates.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/certificates.py | import datetime
import josepy as jose
from acme import errors, messages
from OpenSSL import crypto
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, Ref, Str
from middlewared.service import CallError, CRUDService, job, private, skip_arg, ValidationErrors
from middlewared.utils.time_utils import utc_now
from middlewared.validators import Email, Range
from .common_validation import _validate_common_attributes, validate_cert_name
from .cert_entry import CERT_ENTRY
from .csr import generate_certificate_signing_request
from .key_utils import export_private_key
from .load_utils import load_certificate
from .query_utils import normalize_cert_attrs
from .utils import (
CERT_TYPE_EXISTING, CERT_TYPE_INTERNAL, CERT_TYPE_CSR, EC_CURVES, EC_CURVE_DEFAULT,
get_cert_info_from_data, _set_required,
)
class CertificateModel(sa.Model):
    # SQLAlchemy model backing the system.certificate datastore (column prefix: cert_).
    __tablename__ = 'system_certificate'
    id = sa.Column(sa.Integer(), primary_key=True)
    cert_type = sa.Column(sa.Integer())
    cert_name = sa.Column(sa.String(120), unique=True)
    cert_certificate = sa.Column(sa.Text(), nullable=True)
    # Private key material is stored encrypted at rest.
    cert_privatekey = sa.Column(sa.EncryptedText(), nullable=True)
    cert_CSR = sa.Column(sa.Text(), nullable=True)
    cert_signedby_id = sa.Column(sa.ForeignKey('system_certificateauthority.id'), index=True, nullable=True)
    cert_acme_uri = sa.Column(sa.String(200), nullable=True)
    # Per-domain ACME DNS authenticator mapping; encrypted JSON blob.
    cert_domains_authenticators = sa.Column(sa.JSON(encrypted=True), nullable=True)
    cert_renew_days = sa.Column(sa.Integer(), nullable=True, default=10)
    cert_acme_id = sa.Column(sa.ForeignKey('system_acmeregistration.id'), index=True, nullable=True)
    cert_revoked_date = sa.Column(sa.DateTime(), nullable=True)
    cert_add_to_trusted_store = sa.Column(sa.Boolean(), default=False, nullable=False)
class CertificateService(CRUDService):
    # CRUD service managing certificates stored in the system.certificate table.
    class Config:
        datastore = 'system.certificate'
        datastore_extend = 'certificate.cert_extend'
        datastore_extend_context = 'certificate.cert_extend_context'
        datastore_prefix = 'cert_'
        cli_namespace = 'system.certificate'
        role_prefix = 'CERTIFICATE'
    # Schema describing the entries this service returns.
    ENTRY = CERT_ENTRY
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.map_functions = {
'CERTIFICATE_CREATE_INTERNAL': 'create_internal',
'CERTIFICATE_CREATE_IMPORTED': 'create_imported_certificate',
'CERTIFICATE_CREATE_IMPORTED_CSR': 'create_imported_csr',
'CERTIFICATE_CREATE_CSR': 'create_csr',
'CERTIFICATE_CREATE_ACME': 'create_acme_certificate',
}
@private
def cert_extend_context(self, rows, extra):
context = {
'cas': {c['id']: c for c in self.middleware.call_sync('certificateauthority.query')},
}
return context
@private
def cert_extend(self, cert, context):
if cert['signedby']:
cert['signedby'] = context['cas'][cert['signedby']['id']]
normalize_cert_attrs(cert)
return cert
    @private
    async def cert_services_validation(self, id_, schema_name, raise_verrors=True):
        """
        Validate that certificate `id_` is healthy enough to be used by a service.
        Raises the collected ValidationErrors when `raise_verrors` is True;
        otherwise returns them (truthy when the cert is unusable).
        """
        # General method to check certificate health wrt usage in services
        cert = await self.middleware.call('certificate.query', [['id', '=', id_]])
        verrors = ValidationErrors()
        if cert:
            cert = cert[0]
            # Only real certificates qualify — CAs and CSRs are rejected outright.
            if cert['cert_type'] != 'CERTIFICATE' or cert['cert_type_CSR']:
                verrors.add(
                    schema_name,
                    'Selected certificate id is not a valid certificate'
                )
            else:
                await self.cert_checks(cert, verrors, schema_name)
        else:
            verrors.add(
                schema_name,
                f'No Certificate found with the provided id: {id_}'
            )
        if raise_verrors:
            verrors.check()
        else:
            return verrors
    @private
    async def cert_checks(self, cert, verrors, schema_name):
        """
        Accumulate health problems for `cert` into `verrors`: malformed cert,
        missing/weak private key, expiry, weak digest, or revocation.
        """
        # Minimum acceptable key sizes/bits per key type.
        # NOTE(review): a key_type other than 'EC'/'RSA' would raise KeyError in the
        # comparison below — confirm upstream parsing only ever yields these two.
        valid_key_size = {'EC': 28, 'RSA': 2048}
        if not cert.get('fingerprint'):
            verrors.add(
                schema_name,
                f'{cert["name"]} certificate is malformed'
            )
        if not cert['privatekey']:
            verrors.add(
                schema_name,
                'Selected certificate does not have a private key'
            )
        elif not cert['key_length']:
            verrors.add(
                schema_name,
                'Failed to parse certificate\'s private key'
            )
        elif cert['key_length'] < valid_key_size[cert['key_type']]:
            verrors.add(
                schema_name,
                f'{cert["name"]}\'s private key size is less then {valid_key_size[cert["key_type"]]} bits'
            )
        # `until` is a local-time ctime() string produced when the cert was parsed.
        if cert['until'] and datetime.datetime.strptime(
            cert['until'], '%a %b %d %H:%M:%S %Y'
        ) < datetime.datetime.now():
            verrors.add(
                schema_name,
                f'{cert["name"]!r} has expired (it was valid until {cert["until"]!r})'
            )
        if cert['digest_algorithm'] in ['MD5', 'SHA1']:
            verrors.add(
                schema_name,
                'Please use a certificate whose digest algorithm has at least 112 security bits'
            )
        if cert['revoked']:
            verrors.add(
                schema_name,
                'This certificate is revoked'
            )
@private
async def validate_common_attributes(self, data, schema_name):
verrors = ValidationErrors()
await _validate_common_attributes(self.middleware, data, verrors, schema_name)
return verrors
# CREATE METHODS FOR CREATING CERTIFICATES
# "do_create" IS CALLED FIRST AND THEN BASED ON THE TYPE OF THE CERTIFICATE WHICH IS TO BE CREATED THE
# APPROPRIATE METHOD IS CALLED
# FOLLOWING TYPES ARE SUPPORTED
# CREATE_TYPE ( STRING ) - METHOD CALLED
# CERTIFICATE_CREATE_INTERNAL - create_internal
# CERTIFICATE_CREATE_IMPORTED - create_imported_certificate
# CERTIFICATE_CREATE_IMPORTED_CSR - create_imported_csr
# CERTIFICATE_CREATE_CSR - create_csr
# CERTIFICATE_CREATE_ACME - create_acme_certificate
    @accepts(
        Dict(
            'certificate_create',
            Bool('tos'),
            Dict('dns_mapping', additional_attrs=True),
            Int('csr_id'),
            Int('signedby'),
            Int('key_length', enum=[2048, 4096]),
            Int('renew_days', validators=[Range(min_=1, max_=30)]),
            Int('type'),
            Int('lifetime'),
            Int('serial', validators=[Range(min_=1)]),
            Str('acme_directory_uri'),
            Str('certificate', max_length=None),
            Str('city'),
            Str('common', max_length=None, null=True),
            Str('country'),
            Str('CSR', max_length=None),
            Str('ec_curve', enum=EC_CURVES, default=EC_CURVE_DEFAULT),
            Str('email', validators=[Email()]),
            Str('key_type', enum=['RSA', 'EC'], default='RSA'),
            Str('name', required=True),
            Str('organization'),
            Str('organizational_unit'),
            Str('passphrase'),
            Str('privatekey', max_length=None),
            Str('state'),
            Str('create_type', enum=[
                'CERTIFICATE_CREATE_INTERNAL', 'CERTIFICATE_CREATE_IMPORTED',
                'CERTIFICATE_CREATE_CSR', 'CERTIFICATE_CREATE_IMPORTED_CSR',
                'CERTIFICATE_CREATE_ACME'], required=True),
            Str('digest_algorithm', enum=['SHA224', 'SHA256', 'SHA384', 'SHA512']),
            List('san', items=[Str('san')]),
            Ref('cert_extensions'),
            Bool('add_to_trusted_store', default=False),
            register=True
        ),
    )
    @job(lock='cert_create')
    async def do_create(self, job, data):
        """
        Create a new Certificate
        Certificates are classified under following types and the necessary keywords to be passed
        for `create_type` attribute to create the respective type of certificate
        1) Internal Certificate                 -  CERTIFICATE_CREATE_INTERNAL
        2) Imported Certificate                 -  CERTIFICATE_CREATE_IMPORTED
        3) Certificate Signing Request          -  CERTIFICATE_CREATE_CSR
        4) Imported Certificate Signing Request -  CERTIFICATE_CREATE_IMPORTED_CSR
        5) ACME Certificate                     -  CERTIFICATE_CREATE_ACME
        By default, created certs use RSA keys. If an Elliptic Curve Key is desired, it can be specified with the
        `key_type` attribute. If the `ec_curve` attribute is not specified for the Elliptic Curve Key, then default to
        using "SECP384R1" curve.
        A type is selected by the Certificate Service based on `create_type`. The rest of the values in `data` are
        validated accordingly and finally a certificate is made based on the selected type.
        `cert_extensions` can be specified to set X509v3 extensions.
        .. examples(websocket)::
          Create an ACME based certificate
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificate.create",
                "params": [{
                    "tos": true,
                    "csr_id": 1,
                    "acme_directory_uri": "https://acme-staging-v02.api.letsencrypt.org/directory",
                    "name": "acme_certificate",
                    "dns_mapping": {
                        "domain1.com": "1"
                    },
                    "create_type": "CERTIFICATE_CREATE_ACME"
                }]
            }
          Create an Imported Certificate Signing Request
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificate.create",
                "params": [{
                    "name": "csr",
                    "CSR": "CSR string",
                    "privatekey": "Private key string",
                    "create_type": "CERTIFICATE_CREATE_IMPORTED_CSR"
                }]
            }
          Create an Internal Certificate
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificate.create",
                "params": [{
                    "name": "internal_cert",
                    "key_length": 2048,
                    "lifetime": 3600,
                    "city": "Nashville",
                    "common": "domain1.com",
                    "country": "US",
                    "email": "dev@ixsystems.com",
                    "organization": "iXsystems",
                    "state": "Tennessee",
                    "digest_algorithm": "SHA256",
                    "signedby": 4,
                    "create_type": "CERTIFICATE_CREATE_INTERNAL"
                }]
            }
        """
        if not data.get('dns_mapping'):
            data.pop('dns_mapping')  # Default dict added
        create_type = data.pop('create_type')
        # Key generation parameters are irrelevant for imported/ACME certs.
        if create_type in (
            'CERTIFICATE_CREATE_IMPORTED_CSR', 'CERTIFICATE_CREATE_ACME', 'CERTIFICATE_CREATE_IMPORTED'
        ):
            for key in ('key_length', 'key_type', 'ec_curve'):
                data.pop(key, None)
        add_to_trusted_store = data.pop('add_to_trusted_store', False)
        verrors = await self.validate_common_attributes(data, 'certificate_create')
        if add_to_trusted_store and create_type in ('CERTIFICATE_CREATE_IMPORTED_CSR', 'CERTIFICATE_CREATE_CSR'):
            verrors.add('certificate_create.add_to_trusted_store', 'Cannot add CSR to trusted store')
        if create_type == 'CERTIFICATE_CREATE_IMPORTED' and not load_certificate(data['certificate']):
            verrors.add('certificate_create.certificate', 'Unable to parse certificate')
        await validate_cert_name(
            self.middleware, data['name'], self._config.datastore,
            verrors, 'certificate_create.name'
        )
        verrors.check()
        job.set_progress(10, 'Initial validation complete')
        if create_type in (
            'CERTIFICATE_CREATE_IMPORTED_CSR',
            'CERTIFICATE_CREATE_ACME',
            'CERTIFICATE_CREATE_IMPORTED',
        ):
            # We add dictionaries/lists by default, so we need to explicitly remove them
            data.pop('cert_extensions')
            data.pop('san')
        # Dispatch to the type-specific creator and keep only datastore columns.
        data = {
            k: v for k, v in (
                await self.middleware.call(f'certificate.{self.map_functions[create_type]}', job, data)
            ).items()
            if k in [
                'name', 'certificate', 'CSR', 'privatekey', 'type', 'signedby', 'acme', 'acme_uri',
                'domains_authenticators', 'renew_days', 'add_to_trusted_store',
            ]
        }
        data['add_to_trusted_store'] = add_to_trusted_store
        pk = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )
        await self.middleware.call('service.start', 'ssl')
        job.set_progress(100, 'Certificate created successfully')
        return await self.get_instance(pk)
    @accepts(
        Dict(
            'acme_create',
            Bool('tos', default=False),
            Int('csr_id', required=True),
            Int('renew_days', default=10, validators=[Range(min_=1)]),
            Str('acme_directory_uri', required=True),
            Str('name', required=True),
            Dict('dns_mapping', additional_attrs=True, required=True)
        )
    )
    @private
    @skip_arg(count=1)
    def create_acme_certificate(self, job, data):
        # Issue a certificate through an ACME server using an existing CSR.
        csr_data = self.middleware.call_sync(
            'certificate.get_instance', data['csr_id']
        )
        verrors = ValidationErrors()
        email = self.middleware.call_sync('mail.local_administrator_email')
        if not email:
            verrors.add(
                'name', ('Please configure an email address for any local administrator user which will be used with '
                'the ACME server'),
            )
        verrors.check()
        # Normalize the directory URI to always end with a slash.
        data['acme_directory_uri'] += '/' if data['acme_directory_uri'][-1] != '/' else ''
        final_order = self.middleware.call_sync('acme.issue_certificate', job, 25, data, csr_data)
        job.set_progress(95, 'Final order received from ACME server')
        # Assemble the datastore row from the finalized ACME order.
        cert_dict = {
            'acme': self.middleware.call_sync(
                'acme.registration.query',
                [['directory', '=', data['acme_directory_uri']]]
            )[0]['id'],
            'acme_uri': final_order.uri,
            'certificate': final_order.fullchain_pem,
            'CSR': csr_data['CSR'],
            'privatekey': csr_data['privatekey'],
            'name': data['name'],
            'type': CERT_TYPE_EXISTING,
            'domains_authenticators': data['dns_mapping'],
            'renew_days': data['renew_days']
        }
        return cert_dict
    @accepts(
        Patch(
            'certificate_create_internal', 'certificate_create_csr',
            ('rm', {'name': 'signedby'}),
            ('rm', {'name': 'lifetime'})
        )
    )
    @private
    @skip_arg(count=1)
    def create_csr(self, job, data):
        # Generate a fresh private key and certificate signing request from `data`.
        # no signedby, lifetime attributes required
        verrors = ValidationErrors()
        cert_info = get_cert_info_from_data(data)
        cert_info['cert_extensions'] = data['cert_extensions']
        if cert_info['cert_extensions']['AuthorityKeyIdentifier']['enabled']:
            verrors.add('cert_extensions.AuthorityKeyIdentifier.enabled', 'This extension is not valid for CSR')
        verrors.check()
        data['type'] = CERT_TYPE_CSR
        req, key = generate_certificate_signing_request(cert_info)
        job.set_progress(80)
        data['CSR'] = req
        data['privatekey'] = key
        job.set_progress(90, 'Finalizing changes')
        return data
    @accepts(
        Dict(
            'create_imported_csr',
            Str('CSR', required=True, max_length=None, empty=False),
            Str('name'),
            Str('privatekey', required=True, max_length=None, empty=False),
            Str('passphrase')
        )
    )
    @private
    @skip_arg(count=1)
    def create_imported_csr(self, job, data):
        # Import an externally generated CSR together with its private key.
        # TODO: We should validate csr with private key ?
        data['type'] = CERT_TYPE_CSR
        job.set_progress(80)
        if 'passphrase' in data:
            # Re-export the key without its passphrase so it is stored decrypted.
            data['privatekey'] = export_private_key(data['privatekey'], data['passphrase'])
        job.set_progress(90, 'Finalizing changes')
        return data
    @accepts(
        Dict(
            'certificate_create_imported',
            Int('csr_id'),
            Str('certificate', required=True, max_length=None),
            Str('name'),
            Str('passphrase'),
            Str('privatekey', max_length=None)
        )
    )
    @private
    @skip_arg(count=1)
    def create_imported_certificate(self, job, data):
        # Import an externally issued certificate; the key may come from an existing CSR.
        verrors = ValidationErrors()
        csr_id = data.pop('csr_id', None)
        if csr_id:
            csr_obj = self.middleware.call_sync(
                'certificate.query', [
                    ['id', '=', csr_id],
                    ['type', '=', CERT_TYPE_CSR]
                ],
                {'get': True}
            )
            # Reuse the CSR's key; any supplied passphrase does not apply to it.
            data['privatekey'] = csr_obj['privatekey']
            data.pop('passphrase', None)
        elif not data.get('privatekey'):
            verrors.add(
                'certificate_create.privatekey',
                'Private key is required when importing a certificate'
            )
        verrors.check()
        job.set_progress(50, 'Validation complete')
        data['type'] = CERT_TYPE_EXISTING
        if 'passphrase' in data:
            # Re-export the key without its passphrase so it is stored decrypted.
            data['privatekey'] = export_private_key(data['privatekey'], data['passphrase'])
        return data
    @accepts(
        Patch(
            'certificate_create', 'certificate_create_internal',
            ('edit', _set_required('lifetime')),
            ('edit', _set_required('country')),
            ('edit', _set_required('state')),
            ('edit', _set_required('city')),
            ('edit', _set_required('organization')),
            ('edit', _set_required('email')),
            ('edit', _set_required('san')),
            ('edit', _set_required('signedby')),
            ('rm', {'name': 'create_type'}),
            register=True
        )
    )
    @private
    @skip_arg(count=1)
    async def create_internal(self, job, data):
        # Create a certificate signed by one of this system's own CAs.
        cert_info = get_cert_info_from_data(data)
        data['type'] = CERT_TYPE_INTERNAL
        signing_cert = await self.middleware.call(
            'certificateauthority.query',
            [('id', '=', data['signedby'])],
            {'get': True}
        )
        # The CA service tracks the next serial to use for certificates it signs.
        cert_serial = await self.middleware.call(
            'certificateauthority.get_serial_for_certificate',
            data['signedby']
        )
        cert_info.update({
            'ca_privatekey': signing_cert['privatekey'],
            'ca_certificate': signing_cert['certificate'],
            'serial': cert_serial,
            'cert_extensions': data['cert_extensions']
        })
        cert, key = await self.middleware.call(
            'cryptokey.generate_certificate',
            cert_info
        )
        data['certificate'] = cert
        data['privatekey'] = key
        job.set_progress(90, 'Finalizing changes')
        return data
    @accepts(
        Int('id', required=True),
        Dict(
            'certificate_update',
            Bool('revoked'),
            Int('renew_days', validators=[Range(min_=1, max_=30)]),
            Bool('add_to_trusted_store'),
            Str('name'),
        ),
    )
    @job(lock='cert_update')
    async def do_update(self, job, id_, data):
        """
        Update certificate of `id`
        Only name and revoked attribute can be updated.
        When `revoked` is enabled, the specified cert `id` is revoked and if it belongs to a CA chain which
        exists on this system, its serial number is added to the CA's certificate revocation list.
        .. examples(websocket)::
          Update a certificate of `id`
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificate.update",
                "params": [
                    1,
                    {
                        "name": "updated_name"
                    }
                ]
            }
        """
        old = await self.get_instance(id_)
        # signedby is changed back to integer from a dict
        old['signedby'] = old['signedby']['id'] if old.get('signedby') else None
        if old.get('acme'):
            old['acme'] = old['acme']['id']
        new = old.copy()
        new.update(data)
        # Only validate/persist when one of the mutable attributes actually changed.
        if any(new.get(k) != old.get(k) for k in ('name', 'revoked', 'renew_days', 'add_to_trusted_store')):
            verrors = ValidationErrors()
            if new['name'] != old['name']:
                await validate_cert_name(
                    self.middleware, new['name'], self._config.datastore,
                    verrors, 'certificate_update.name'
                )
            if not new.get('acme') and data.get('renew_days'):
                verrors.add(
                    'certificate_update.renew_days',
                    'Certificate renewal days is only supported for ACME certificates'
                )
            # Revocation rules: not for CSRs, requires a local CA, and is one-way.
            if new['revoked'] and new['cert_type_CSR']:
                verrors.add(
                    'certificate_update.revoked',
                    'A CSR cannot be marked as revoked.'
                )
            elif new['revoked'] and not old['revoked'] and not new['can_be_revoked']:
                verrors.add(
                    'certificate_update.revoked',
                    'Only certificate(s) can be revoked which have a CA present on the system'
                )
            elif old['revoked'] and not new['revoked']:
                verrors.add(
                    'certificate_update.revoked',
                    'Certificate has already been revoked and this cannot be reversed'
                )
            if not verrors and new['revoked'] and new['add_to_trusted_store']:
                verrors.add(
                    'certificate_update.add_to_trusted_store',
                    'Revoked certificates cannot be added to system\'s trusted store'
                )
            verrors.check()
            to_update = {'renew_days': new['renew_days']} if data.get('renew_days') else {}
            # Record the revocation timestamp on the transition to revoked.
            if old['revoked'] != new['revoked'] and new['revoked']:
                to_update['revoked_date'] = utc_now()
            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                id_,
                {'name': new['name'], 'add_to_trusted_store': new['add_to_trusted_store'], **to_update},
                {'prefix': self._config.datastore_prefix}
            )
            await self.middleware.call('service.start', 'ssl')
        job.set_progress(90, 'Finalizing changes')
        return await self.get_instance(id_)
@private
async def delete_domains_authenticator(self, auth_id):
# Delete provided auth_id from all ACME based certs domains_authenticators
for cert in await self.query([['acme', '!=', None]]):
if auth_id in cert['domains_authenticators'].values():
await self.middleware.call(
'datastore.update',
self._config.datastore,
cert['id'],
{
'domains_authenticators': {
k: v for k, v in cert['domains_authenticators'].items()
if v != auth_id
}
},
{'prefix': self._config.datastore_prefix}
)
    @accepts(
        Int('id'),
        Bool('force', default=False),
    )
    @job(lock='cert_delete')
    def do_delete(self, job, id_, force):
        """
        Delete certificate of `id`.
        If the certificate is an ACME based certificate, certificate service will try to
        revoke the certificate by updating it's status with the ACME server, if it fails an exception is raised
        and the certificate is not deleted from the system. However, if `force` is set to True, certificate is deleted
        from the system even if some error occurred while revoking the certificate with the ACME Server
        .. examples(websocket)::
          Delete certificate of `id`
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificate.delete",
                "params": [
                    1,
                    true
                ]
            }
        """
        certificate = self.middleware.call_sync('certificate.get_instance', id_)
        # Aborts with CallError if any service still references this certificate.
        self.middleware.call_sync('certificate.check_cert_deps', id_)
        if certificate.get('acme') and not certificate['expired']:
            # We won't try revoking a certificate which has expired already
            client, key = self.middleware.call_sync(
                'acme.get_acme_client_and_key', certificate['acme']['directory'], True
            )
            try:
                client.revoke(
                    jose.ComparableX509(crypto.load_certificate(crypto.FILETYPE_PEM, certificate['certificate'])), 0
                )
            except (errors.ClientError, messages.Error) as e:
                # With force=True, revocation failures are ignored and deletion proceeds.
                if not force:
                    raise CallError(f'Failed to revoke certificate: {e}')
        response = self.middleware.call_sync(
            'datastore.delete',
            self._config.datastore,
            id_
        )
        self.middleware.call_sync('service.start', 'ssl')
        self.middleware.call_sync('alert.alert_source_clear_run', 'CertificateChecks')
        job.set_progress(100)
        return response
| 26,539 | Python | .py | 612 | 31.362745 | 118 | 0.556684 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,986 | generate_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/generate_utils.py | import datetime
import ipaddress
import random
import typing
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.x509.oid import NameOID
from middlewared.utils.time_utils import utc_now
from middlewared.validators import IpAddress
from .extensions_utils import add_extensions
from .key_utils import retrieve_signing_algorithm
from .load_utils import load_certificate, load_certificate_request, load_private_key
from .utils import DEFAULT_LIFETIME_DAYS, RDN_MAPPINGS
def generate_builder(options: dict) -> typing.Union[x509.CertificateBuilder, x509.CertificateSigningRequestBuilder]:
    """
    Build an (unsigned) x509 certificate or CSR builder from `options`.

    `options` may carry `crypto_subject_name` / `crypto_issuer_name` mappings
    (NameOID attribute name -> value), `lifetime` (days), `serial`, `san`
    (list of [type, value] pairs) and a `csr` flag selecting CSR mode.
    """
    def to_x509_name(mapping):
        # Skip empty values so only populated RDNs end up in the name
        return x509.Name([
            x509.NameAttribute(getattr(NameOID, attr.upper()), value)
            for attr, value in (mapping or {}).items() if value
        ])

    subject_name = to_x509_name(options.get('crypto_subject_name'))
    issuer_name = to_x509_name(options.get('crypto_issuer_name'))
    if not issuer_name:
        # Self-signed: issuer defaults to the subject
        issuer_name = subject_name

    # Lifetime represents number of days of validity
    not_valid_before = utc_now()
    not_valid_after = utc_now() + datetime.timedelta(
        days=options.get('lifetime') or DEFAULT_LIFETIME_DAYS
    )

    # Normalize `san` entries into the appropriate x509 general names
    san = x509.SubjectAlternativeName([
        x509.IPAddress(ipaddress.ip_address(value)) if kind == 'IP' else x509.DNSName(value)
        for kind, value in options.get('san') or []
    ])

    if options.get('csr'):
        builder = x509.CertificateSigningRequestBuilder(subject_name=subject_name)
    else:
        builder = x509.CertificateBuilder(
            subject_name=subject_name,
        ).issuer_name(
            issuer_name
        ).not_valid_before(
            not_valid_before
        ).not_valid_after(
            not_valid_after
        ).serial_number(options.get('serial') or random.randint(1000, pow(2, 30)))

    if san:
        builder = builder.add_extension(san, False)

    return builder
def normalize_san(san_list: list) -> list:
    """
    Normalize a list of subjectAltName entries into `[type, value]` pairs.

    Entries already carrying a type prefix (e.g. "DNS:example.com" or
    "IP:1.2.3.4") keep their given type; bare entries are classified as "IP"
    when they validate as an IP address and as "DNS" otherwise.
    """
    # TODO: ADD MORE TYPES WRT RFC'S
    normalized = []
    ip_validator = IpAddress()
    # NOTE: the previous enumerate() index was never used; iterate values directly
    for san in san_list or []:
        # If we already have SAN normalized, let's use the normalized version and don't
        # try to add a type ourselves
        if ':' in san:
            san_type, san = san.split(':', 1)
        else:
            try:
                ip_validator(san)
            except ValueError:
                san_type = 'DNS'
            else:
                san_type = 'IP'
        normalized.append([san_type, san])

    return normalized
def sign_csr_with_ca(data):
    """
    Sign the CSR in `data` with the supplied CA and return the new certificate
    as a PEM-encoded string.

    Expected keys in `data`: `csr`, `csr_privatekey`, `ca_certificate`,
    `ca_privatekey`, `serial`, `digest_algorithm` and optionally
    `cert_extensions`.
    """
    csr_data = load_certificate_request(data['csr'])
    ca_data = load_certificate(data['ca_certificate'])
    ca_key = load_private_key(data['ca_privatekey'])
    csr_key = load_private_key(data['csr_privatekey'])
    # Subject comes from the CSR's DN, issuer from the CA's DN; the parsed
    # "/k=v/k=v" DN strings are mapped back to NameOID attribute names
    new_cert = generate_builder({
        'crypto_subject_name': {
            RDN_MAPPINGS[k]: v
            for k, v in (item.split('=') for item in csr_data['DN'].split('/') if item)
            if k in RDN_MAPPINGS
        },
        'crypto_issuer_name': {
            RDN_MAPPINGS[k]: v
            for k, v in (item.split('=') for item in ca_data['DN'].split('/') if item)
            if k in RDN_MAPPINGS
        },
        'serial': data['serial'],
        'san': normalize_san(csr_data.get('san'))
    })

    new_cert = add_extensions(
        new_cert, data.get('cert_extensions'), csr_key,
        x509.load_pem_x509_certificate(data['ca_certificate'].encode(), default_backend())
    )

    new_cert = new_cert.sign(
        ca_key, retrieve_signing_algorithm(data, ca_key), default_backend()
    )

    return new_cert.public_bytes(serialization.Encoding.PEM).decode()
| 3,898 | Python | .py | 95 | 33.8 | 116 | 0.648692 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,987 | sign_csr.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/sign_csr.py | from middlewared.schema import accepts, Dict, Int, Ref, returns, Str
from middlewared.service import private, Service, ValidationErrors
from .utils import CERT_TYPE_INTERNAL
class CertificateAuthorityService(Service):
    """Operations for signing stored CSRs with a stored Certificate Authority."""

    class Config:
        cli_namespace = 'system.certificate.authority'

    @accepts(
        Dict(
            'ca_sign_csr',
            Int('ca_id', required=True),
            Int('csr_cert_id', required=True),
            Str('name', required=True),
            Ref('cert_extensions'),
            register=True
        ),
        roles=['CERTIFICATE_AUTHORITY_WRITE'],
    )
    @returns(Ref('certificate_entry'))
    async def ca_sign_csr(self, data):
        """
        Sign CSR by Certificate Authority of `ca_id`

        Sign CSR's and generate a certificate from it. `ca_id` provides which CA is to be used for signing
        a CSR of `csr_cert_id` which exists in the system

        `cert_extensions` can be specified if specific extensions are to be set in the newly signed certificate.

        .. examples(websocket)::

          Sign CSR of `csr_cert_id` by Certificate Authority of `ca_id`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificateauthority.ca_sign_csr",
                "params": [{
                    "ca_id": 1,
                    "csr_cert_id": 1,
                    "name": "signed_cert"
                }]
            }
        """
        return await self.ca_sign_csr_impl(data)

    @accepts(
        Ref('ca_sign_csr'),
        Str('schema_name', default='certificate_authority_update')
    )
    @private
    async def ca_sign_csr_impl(self, data, schema_name):
        """
        Validate `data`, sign the CSR and persist the resulting certificate.

        Raises an aggregated ValidationErrors when the CA or CSR is missing,
        lacks a private key, the CSR does not parse, or `name` is taken.
        """
        verrors = ValidationErrors()

        ca_data = await self.middleware.call('certificateauthority.query', [('id', '=', data['ca_id'])])
        csr_cert_data = await self.middleware.call('certificate.query', [('id', '=', data['csr_cert_id'])])

        if not ca_data:
            verrors.add(
                f'{schema_name}.ca_id',
                f'No Certificate Authority found for id {data["ca_id"]}'
            )
        else:
            ca_data = ca_data[0]
            if not ca_data.get('privatekey'):
                verrors.add(
                    f'{schema_name}.ca_id',
                    'Please use a CA which has a private key assigned'
                )

        if not csr_cert_data:
            verrors.add(
                f'{schema_name}.csr_cert_id',
                f'No Certificate found for id {data["csr_cert_id"]}'
            )
        else:
            csr_cert_data = csr_cert_data[0]
            if not csr_cert_data.get('CSR'):
                verrors.add(
                    f'{schema_name}.csr_cert_id',
                    'No CSR has been filed by this certificate'
                )
            else:
                if not await self.middleware.call('cryptokey.load_certificate_request', csr_cert_data['CSR']):
                    verrors.add(
                        f'{schema_name}.csr_cert_id',
                        'CSR not valid'
                    )
                if not csr_cert_data['privatekey']:
                    verrors.add(
                        f'{schema_name}.csr_cert_id',
                        'Private key not found for specified CSR.'
                    )

        if await self.middleware.call('certificate.query', [['name', '=', data['name']]]):
            verrors.add(f'{schema_name}.name', 'A certificate with this name already exists')

        verrors.check()

        # Serial must be unique across the signing CA's whole chain
        serial = await self.middleware.call('certificateauthority.get_serial_for_certificate', ca_data['id'])

        new_cert = await self.middleware.call(
            'cryptokey.sign_csr_with_ca',
            {
                'ca_certificate': ca_data['certificate'],
                'ca_privatekey': ca_data['privatekey'],
                'csr': csr_cert_data['CSR'],
                'csr_privatekey': csr_cert_data['privatekey'],
                'serial': serial,
                'digest_algorithm': ca_data['digest_algorithm'],
                'cert_extensions': data['cert_extensions']
            }
        )

        new_csr = {
            'type': CERT_TYPE_INTERNAL,
            'name': data['name'],
            'certificate': new_cert,
            'privatekey': csr_cert_data['privatekey'],
            'signedby': ca_data['id']
        }

        new_csr_id = await self.middleware.call(
            'datastore.insert',
            'system.certificate',
            new_csr,
            {'prefix': 'cert_'}
        )

        # Regenerate certificate files on disk
        await self.middleware.call('service.start', 'ssl')

        return await self.middleware.call(
            'certificate.query',
            [['id', '=', new_csr_id]],
            {'get': True}
        )
| 4,849 | Python | .py | 118 | 28.161017 | 112 | 0.520705 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,988 | query_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/query_utils.py | import copy
import logging
import os
from collections import defaultdict
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from typing import Union
from .load_utils import load_certificate, load_certificate_request, load_private_key
from .utils import (
CA_TYPE_EXISTING, CA_TYPE_INTERNAL, CA_TYPE_INTERMEDIATE, CERT_TYPE_EXISTING, CERT_TYPE_INTERNAL,
CERT_TYPE_CSR, CERT_ROOT_PATH, CERT_CA_ROOT_PATH, RE_CERTIFICATE
)
logger = logging.getLogger(__name__)
# Remembers (title, cert name) pairs already reported so each parse failure is
# only logged once per process lifetime
CERT_REPORT_ERRORS = set()


def cert_extend_report_error(title: str, cert: dict) -> None:
    """
    Log a parse failure of `title` (e.g. 'certificate', 'private key') for
    `cert`, at most once per (title, name) pair.
    """
    item = (title, cert['name'])
    if item not in CERT_REPORT_ERRORS:
        logger.debug('Failed to load %s of %s', title, cert['name'])
        # Bug fix: record the pair — previously the set was never updated, so
        # the same failure was logged on every extend
        CERT_REPORT_ERRORS.add(item)
def cert_issuer(cert: dict) -> Union[str, dict]:
    """
    Map a certificate row's integer `type` to its issuer descriptor.

    Returns 'external' for imported certs/CAs, 'self-signed' for internal root
    CAs, the `signedby` row for internally signed certs and intermediate CAs,
    'external - signature pending' for CSRs, and None for anything else.
    """
    cert_type = cert['type']
    if cert_type in (CA_TYPE_EXISTING, CERT_TYPE_EXISTING):
        return 'external'
    if cert_type == CA_TYPE_INTERNAL:
        return 'self-signed'
    if cert_type in (CERT_TYPE_INTERNAL, CA_TYPE_INTERMEDIATE):
        return cert['signedby']
    if cert_type == CERT_TYPE_CSR:
        return 'external - signature pending'
    return None
def normalize_cert_attrs(cert: dict) -> None:
    """
    Mutate a certificate/CA datastore row in place, adding derived attributes.

    Adds filesystem paths, boolean type flags, the issuer descriptor, the PEM
    chain (`chain_list`), private key type/length and the parsed x509/CSR
    fields. When any PEM blob fails to parse, the parsed fields are nulled and
    `parsed` is set to False.
    """
    # Remove ACME related keys if cert is not an ACME based cert
    if not cert.get('acme'):
        for key in ['acme', 'acme_uri', 'domains_authenticators', 'renew_days']:
            cert.pop(key, None)

    if cert['type'] in (CA_TYPE_EXISTING, CA_TYPE_INTERNAL, CA_TYPE_INTERMEDIATE):
        root_path = CERT_CA_ROOT_PATH
        is_ca = True
    else:
        root_path = CERT_ROOT_PATH
        is_ca = False

    cert.update({
        'root_path': root_path,
        'certificate_path': os.path.join(root_path, f'{cert["name"]}.crt'),
        'privatekey_path': os.path.join(root_path, f'{cert["name"]}.key'),
        'csr_path': os.path.join(root_path, f'{cert["name"]}.csr'),
        'cert_type': 'CA' if is_ca else 'CERTIFICATE',
        'revoked': bool(cert['revoked_date']),
        # A CA needs its private key to issue a CRL; a plain cert needs a local signing CA
        'can_be_revoked': bool(cert['privatekey']) and not bool(cert['revoked_date']) if is_ca else (
            bool(cert['signedby']) and not bool(cert['revoked_date'])
        ),
        'internal': 'NO' if cert['type'] in (CA_TYPE_EXISTING, CERT_TYPE_EXISTING) else 'YES',
        # `type` is a bitmask; expose one boolean per known flag
        'CA_type_existing': bool(cert['type'] & CA_TYPE_EXISTING),
        'CA_type_internal': bool(cert['type'] & CA_TYPE_INTERNAL),
        'CA_type_intermediate': bool(cert['type'] & CA_TYPE_INTERMEDIATE),
        'cert_type_existing': bool(cert['type'] & CERT_TYPE_EXISTING),
        'cert_type_internal': bool(cert['type'] & CERT_TYPE_INTERNAL),
        'cert_type_CSR': bool(cert['type'] & CERT_TYPE_CSR),
        'issuer': cert_issuer(cert),
        'chain_list': [],
        'key_length': None,
        'key_type': None,
    })

    if is_ca:
        cert['crl_path'] = os.path.join(root_path, f'{cert["name"]}.crl')

    # The stored `certificate` field may itself already contain a full chain
    certs = []
    if len(RE_CERTIFICATE.findall(cert['certificate'] or '')) > 1:
        certs = RE_CERTIFICATE.findall(cert['certificate'])
    elif cert['type'] != CERT_TYPE_CSR:
        certs = [cert['certificate']]

    signing_CA = cert['issuer']
    # Recursively get all internal/intermediate certificates
    # FIXME: NONE HAS BEEN ADDED IN THE FOLLOWING CHECK FOR CSR'S WHICH HAVE BEEN SIGNED BY A CA
    while signing_CA not in ['external', 'self-signed', 'external - signature pending', None]:
        certs.append(signing_CA['certificate'])
        signing_CA['issuer'] = cert_issuer(signing_CA)
        signing_CA = signing_CA['issuer']

    failed_parsing = False
    for c in certs:
        if c and load_certificate(c):
            cert['chain_list'].append(c)
        else:
            cert_extend_report_error('certificate chain', cert)
            break

    if certs:
        # This indicates cert is not CSR and a cert
        cert_data = load_certificate(cert['certificate'])
        cert.update(cert_data)
        if not cert_data:
            failed_parsing = True
            cert_extend_report_error('certificate', cert)

    if cert['privatekey']:
        key_obj = load_private_key(cert['privatekey'])
        if key_obj:
            if isinstance(key_obj, Ed25519PrivateKey):
                # Ed25519 keys expose no key_size attribute; they are always 32 bytes
                cert['key_length'] = 32
            else:
                cert['key_length'] = key_obj.key_size
            if isinstance(key_obj, (ec.EllipticCurvePrivateKey, Ed25519PrivateKey)):
                cert['key_type'] = 'EC'
            elif isinstance(key_obj, rsa.RSAPrivateKey):
                cert['key_type'] = 'RSA'
            elif isinstance(key_obj, dsa.DSAPrivateKey):
                cert['key_type'] = 'DSA'
            else:
                cert['key_type'] = 'OTHER'
        else:
            cert_extend_report_error('private key', cert)

    if cert['cert_type_CSR']:
        csr_data = load_certificate_request(cert['CSR'])
        if csr_data:
            cert.update({
                **csr_data,
                'from': None,
                'until': None,  # CSR's don't have from, until - normalizing keys
            })
        else:
            cert_extend_report_error('csr', cert)
            failed_parsing = True

    if failed_parsing:
        # Normalizing cert/csr
        # Should we perhaps set the value to something like "MALFORMED_CERTIFICATE" for this list off attrs ?
        cert.update({
            key: None for key in [
                'digest_algorithm', 'lifetime', 'country', 'state', 'city', 'from', 'until',
                'organization', 'organizational_unit', 'email', 'common', 'san', 'serial',
                'fingerprint', 'extensions', 'expired',
            ]
        })

    cert['parsed'] = not failed_parsing
def get_ca_chain(ca_id: int, certs: list, cas: list) -> list:
    """
    Return the chain rooted at CA `ca_id`: every certificate it (or any
    descendant CA) signed, plus the descendant CAs and the CA itself, each
    entry tagged with a 'cert_type' of 'CERTIFICATE' or 'CA'.
    """
    certs_by_signer = defaultdict(list)
    children_by_parent = defaultdict(list)
    cas_by_id = {}

    for cert in certs:
        if cert['signedby']:
            certs_by_signer[cert['signedby']['id']].append({**cert, 'cert_type': 'CERTIFICATE'})

    for ca in cas:
        cas_by_id[ca['id']] = ca
        if ca['signedby']:
            children_by_parent[ca['signedby']['id']].append(ca)

    return get_ca_chain_impl(ca_id, cas_by_id, certs_by_signer, children_by_parent)


def get_ca_chain_impl(ca_id: int, cas: dict, certs_mapping: dict, cas_mapping: dict) -> list:
    """Depth-first recursive worker for `get_ca_chain`; returns deep copies of cert rows."""
    chain = copy.deepcopy(certs_mapping[ca_id])
    for child_ca in cas_mapping[ca_id]:
        chain.extend(get_ca_chain_impl(child_ca['id'], cas, certs_mapping, cas_mapping))
    chain.append({**cas[ca_id], 'cert_type': 'CA'})
    return chain
| 6,655 | Python | .py | 146 | 37.034247 | 109 | 0.609903 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,989 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/utils.py | import re
from cryptography import x509
CERT_BACKEND_MAPPINGS = {
'common_name': 'common',
'country_name': 'country',
'state_or_province_name': 'state',
'locality_name': 'city',
'organization_name': 'organization',
'organizational_unit_name': 'organizational_unit',
'email_address': 'email'
}
RDN_MAPPINGS = {
'C': 'country_name',
'country': 'country_name',
'ST': 'state_or_province_name',
'state': 'state_or_province_name',
'L': 'locality_name',
'city': 'locality_name',
'O': 'organization_name',
'organization': 'organization_name',
'OU': 'organizational_unit_name',
'organizational_unit': 'organizational_unit_name',
'CN': 'common_name',
'common': 'common_name',
'emailAddress': 'email_address',
'email': 'email_address'
}
# Cert locations
CERT_ROOT_PATH = '/etc/certificates'
CERT_CA_ROOT_PATH = '/etc/certificates/CA'
DEFAULT_CERT_NAME = 'truenas_default'
# This constant defines the default lifetime of certificate ( https://support.apple.com/en-us/HT211025 )
DEFAULT_LIFETIME_DAYS = 397
EC_CURVES = [
'SECP256R1',
'SECP384R1',
'SECP521R1',
'ed25519',
]
EC_CURVE_DEFAULT = 'SECP384R1'
EKU_OIDS = [i for i in dir(x509.oid.ExtendedKeyUsageOID) if not i.startswith('__')]
RE_CERTIFICATE = re.compile(r"(-{5}BEGIN[\s\w]+-{5}[^-]+-{5}END[\s\w]+-{5})+", re.M | re.S)
# Defining cert constants being used
CA_TYPE_EXISTING = 0x01
CA_TYPE_INTERNAL = 0x02
CA_TYPE_INTERMEDIATE = 0x04
CERT_TYPE_EXISTING = 0x08
CERT_TYPE_INTERNAL = 0x10
CERT_TYPE_CSR = 0x20
def get_cert_info_from_data(data):
    """Extract the certificate-generation fields from `data`, dropping absent/falsy values."""
    wanted = (
        'key_length', 'country', 'state', 'city', 'organization', 'common', 'key_type', 'ec_curve',
        'san', 'serial', 'email', 'lifetime', 'digest_algorithm', 'organizational_unit'
    )
    info = {}
    for field in wanted:
        value = data.get(field)
        if value:
            info[field] = value
    return info
def _set_required(name):
def set_r(attr):
attr.required = True
return {'name': name, 'method': set_r}
| 2,006 | Python | .py | 59 | 30.423729 | 104 | 0.665807 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,990 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/__init__.py | from .utils import CERT_TYPE_EXISTING
async def setup(middleware):
    """
    Plugin setup hook: make sure the web UI has a usable certificate.

    If reading the config or certificate list fails, setup is aborted with a
    logged error; otherwise, when no valid `ui_certificate` is configured, a
    self-signed certificate is generated and assigned.
    """
    failure = False
    try:
        system_general_config = await middleware.call('system.general.config')
        system_cert = system_general_config['ui_certificate']
        certs = await middleware.call('datastore.query', 'system.certificate', [], {'prefix': 'cert_'})
    except Exception as e:
        failure = True
        middleware.logger.error(f'Failed to retrieve certificates: {e}', exc_info=True)

    if not failure and (not system_cert or system_cert['id'] not in [c['id'] for c in certs]):
        # create a self signed cert if it doesn't exist and set ui_certificate to it's value
        try:
            await middleware.call('certificate.setup_self_signed_cert_for_ui')
        except Exception as e:
            failure = True
            middleware.logger.debug(
                'Failed to set certificate for system.general plugin: %s', e, exc_info=True
            )

    if not failure:
        middleware.logger.debug('Certificate setup for System complete')
| 1,057 | Python | .py | 21 | 41.714286 | 103 | 0.658915 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,991 | cert_profiles.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cert_profiles.py | import copy
from middlewared.schema import accepts, Dict, returns
from middlewared.service import Service
from .utils import DEFAULT_LIFETIME_DAYS
CERTIFICATE_PROFILES = {
# Options / EKUs reference rfc5246
'HTTPS RSA Certificate': {
'cert_extensions': {
'BasicConstraints': {
'enabled': True,
'ca': False,
'extension_critical': True
},
'AuthorityKeyIdentifier': {
'enabled': True,
'authority_cert_issuer': True,
'extension_critical': False
},
# These days, most TLS certs want "ClientAuth".
# LetsEncrypt appears to want this extension to issue.
# https://community.letsencrypt.org/t/extendedkeyusage-tls-client-
# authentication-in-tls-server-certificates/59140/7
'ExtendedKeyUsage': {
'enabled': True,
'extension_critical': True,
'usages': [
'SERVER_AUTH',
'CLIENT_AUTH',
]
},
# RSA certs need "digitalSignature" for DHE,
# and "keyEncipherment" for nonDHE
# Include "keyAgreement" for compatibility (DH_DSS / DH_RSA)
# See rfc5246
'KeyUsage': {
'enabled': True,
'extension_critical': True,
'digital_signature': True,
'key_encipherment': True,
'key_agreement': True,
}
},
'key_length': 2048,
'key_type': 'RSA',
'lifetime': DEFAULT_LIFETIME_DAYS,
'digest_algorithm': 'SHA256'
},
'HTTPS ECC Certificate': {
'cert_extensions': {
'BasicConstraints': {
'enabled': True,
'ca': False,
'extension_critical': True
},
'AuthorityKeyIdentifier': {
'enabled': True,
'authority_cert_issuer': True,
'extension_critical': False
},
# These days, most TLS certs want "ClientAuth".
# LetsEncrypt appears to want this extension to issue.
# https://community.letsencrypt.org/t/extendedkeyusage-tls-client-
# authentication-in-tls-server-certificates/59140/7
'ExtendedKeyUsage': {
'enabled': True,
'extension_critical': True,
'usages': [
'SERVER_AUTH',
'CLIENT_AUTH',
]
},
# keyAgreement is not generally required for EC certs.
# See Google, cloudflare certs
'KeyUsage': {
'enabled': True,
'extension_critical': True,
'digital_signature': True,
}
},
'ec_curve': 'SECP384R1',
'key_type': 'EC',
'lifetime': DEFAULT_LIFETIME_DAYS,
'digest_algorithm': 'SHA256'
},
}
# CSR profiles mirror the certificate profiles, minus AuthorityKeyIdentifier:
# that extension is added by the signing CA, not requested in the CSR.
CSR_PROFILES = copy.deepcopy(CERTIFICATE_PROFILES)
for _profile in CSR_PROFILES.values():
    if 'cert_extensions' in _profile:
        _profile['cert_extensions'].pop('AuthorityKeyIdentifier', None)
class CertificateService(Service):
    """Read-only endpoints exposing the predefined certificate/CSR creation profiles."""

    @accepts(roles=['CERTIFICATE_READ'])
    @returns(Dict(
        'certificate_profiles',
        *[Dict(profile, additional_attrs=True) for profile in CERTIFICATE_PROFILES]
    ))
    async def profiles(self):
        """
        Returns a dictionary of predefined options for specific use cases i.e openvpn client/server
        configurations which can be used for creating certificates.
        """
        return CERTIFICATE_PROFILES

    @accepts(roles=['CERTIFICATE_READ'])
    @returns(Dict(
        *[Dict(profile, additional_attrs=True) for profile in CSR_PROFILES],
        example=CSR_PROFILES,
    ))
    async def certificate_signing_requests_profiles(self):
        """
        Returns a dictionary of predefined options for specific use cases i.e openvpn client/server
        configurations which can be used for creating certificate signing requests.
        """
        return CSR_PROFILES
| 4,203 | Python | .py | 111 | 26.414414 | 99 | 0.556317 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,992 | certificate_authorities.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/certificate_authorities.py | import random
from middlewared.schema import accepts, Bool, Dict, Int, Patch, Str
from middlewared.service import CRUDService, private, ValidationErrors
import middlewared.sqlalchemy as sa
from .cert_entry import get_ca_result_entry
from .common_validation import _validate_common_attributes, validate_cert_name
from .key_utils import export_private_key
from .load_utils import get_serial_from_certificate_safe, load_certificate
from .query_utils import get_ca_chain, normalize_cert_attrs
from .utils import (
get_cert_info_from_data, _set_required, CA_TYPE_EXISTING, CA_TYPE_INTERNAL, CA_TYPE_INTERMEDIATE
)
class CertificateAuthorityModel(sa.Model):
    """
    Datastore model backing `system.certificateauthority`.

    `cert_signedby_id` is a self-referential foreign key forming the CA chain;
    the private key column is stored encrypted at rest.
    """
    __tablename__ = 'system_certificateauthority'

    id = sa.Column(sa.Integer(), primary_key=True)
    cert_type = sa.Column(sa.Integer())
    cert_name = sa.Column(sa.String(120), unique=True)
    cert_certificate = sa.Column(sa.Text(), nullable=True)
    cert_privatekey = sa.Column(sa.EncryptedText(), nullable=True)
    cert_CSR = sa.Column(sa.Text(), nullable=True)
    cert_revoked_date = sa.Column(sa.DateTime(), nullable=True)
    cert_signedby_id = sa.Column(sa.ForeignKey('system_certificateauthority.id'), index=True, nullable=True)
    cert_add_to_trusted_store = sa.Column(sa.Boolean(), default=False, nullable=False)
class CertificateAuthorityService(CRUDService):
class Config:
datastore = 'system.certificateauthority'
datastore_extend = 'certificateauthority.cert_extend'
datastore_extend_context = 'certificateauthority.cert_extend_context'
datastore_prefix = 'cert_'
cli_namespace = 'system.certificate.authority'
role_prefix = 'CERTIFICATE_AUTHORITY'
ENTRY = get_ca_result_entry()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps the `create_type` values accepted by do_create to the private
        # method implementing each creation flavour
        self.map_create_functions = {
            'CA_CREATE_INTERNAL': 'create_internal',
            'CA_CREATE_IMPORTED': 'create_imported_ca',
            'CA_CREATE_INTERMEDIATE': 'create_intermediate_ca',
        }
@private
def cert_extend_context(self, rows, extra):
context = {
'cas': {c['id']: c for c in self.middleware.call_sync(
'datastore.query', 'system.certificateauthority', [], {'prefix': 'cert_'}
)},
'certs': {
c['id']: c for c in self.middleware.call_sync(
'datastore.query', 'system.certificate', [], {'prefix': 'cert_'}
)
},
}
signed_mapping = {}
for ca in context['cas'].values():
signed_mapping[ca['id']] = 0
for cert in context['certs'].values():
if cert['signedby'] and cert['signedby']['id'] == ca['id']:
signed_mapping[ca['id']] += 1
context['signed_mapping'] = signed_mapping
return context
    @private
    def cert_extend(self, cert, context):
        """
        Normalize a raw CA row in place: resolve its signing chain, derived
        paths/flags (via `normalize_cert_attrs`), signed-certificate count and
        the revoked members of its chain. Returns the same dict.
        """
        if cert['signedby']:
            # Recursively extend the parent CA so `signedby` is fully normalized too
            cert['signedby'] = self.cert_extend(context['cas'][cert['signedby']['id']], context)

        normalize_cert_attrs(cert)

        cert['signed_certificates'] = context['signed_mapping'][cert['id']]

        cert.update({
            'revoked_certs': list(filter(
                lambda c: c['revoked_date'],
                get_ca_chain(cert['id'], context['certs'].values(), context['cas'].values())
            )),
        })

        return cert
    @private
    async def validate_common_attributes(self, data, schema_name):
        """
        Run the shared certificate validations and additionally require the
        BasicConstraints (with `ca`) and KeyUsage (with `key_cert_sign`)
        extensions, which are mandatory for a CA.

        Returns the accumulated ValidationErrors without raising; the caller
        decides when to `check()`.
        """
        verrors = ValidationErrors()

        await _validate_common_attributes(self.middleware, data, verrors, schema_name)

        if not data['cert_extensions']['BasicConstraints']['enabled']:
            verrors.add(
                f'{schema_name}.cert_extensions.BasicConstraints.enabled',
                'This must be enabled for a Certificate Authority.'
            )
        elif not data['cert_extensions']['BasicConstraints']['ca']:
            verrors.add(
                f'{schema_name}.cert_extensions.BasicConstraints.ca',
                '"ca" must be enabled for a Certificate Authority.'
            )

        if not data['cert_extensions']['KeyUsage']['enabled']:
            verrors.add(
                f'{schema_name}.cert_extensions.KeyUsage.enabled',
                'This must be enabled for a Certificate Authority.'
            )
        elif not data['cert_extensions']['KeyUsage']['key_cert_sign']:
            verrors.add(
                f'{schema_name}.cert_extensions.KeyUsage.key_cert_sign',
                '"key_cert_sign" must be enabled for a Certificate Authority.'
            )

        return verrors
    @private
    def get_serial_for_certificate(self, ca_id):
        """
        Return the next free serial number for a certificate signed by CA `ca_id`.

        Walks up to the root of the CA chain, gathers the serials of every CA
        and signed certificate in that whole chain, and returns max + 1
        (falling back to the root CA's own serial + 1 when nothing usable is
        found).
        """
        ca_data = self.middleware.call_sync(
            'datastore.query', 'system.certificateauthority', [['id', '=', ca_id]], {'get': True, 'prefix': 'cert_'}
        )

        if ca_data.get('signedby'):
            # Recursively call the same function for it's parent and let the function gather all serials in a chain
            return self.get_serial_for_certificate(ca_data['signedby']['id'])
        else:
            def cert_serials(ca_id):
                # Serials of all certificates directly signed by `ca_id`
                serials = []
                for cert in filter(
                    lambda c: c['certificate'],
                    self.middleware.call_sync(
                        'datastore.query', 'system.certificate', [['signedby', '=', ca_id]], {'prefix': 'cert_'}
                    )
                ):
                    serial = get_serial_from_certificate_safe(cert['certificate'])
                    if serial is not None:
                        serials.append(serial)
                return serials

            ca_signed_certs = cert_serials(ca_id)

            def child_serials(ca):
                # Serials of `ca` itself plus everything beneath it, depth first
                serials = []
                for child in filter(
                    lambda c: c['certificate'],
                    self.middleware.call_sync(
                        'datastore.query', 'system.certificateauthority',
                        [['signedby', '=', ca['id']]], {'prefix': 'cert_'}
                    )
                ):
                    serials.extend(child_serials(child))
                serials.extend(cert_serials(ca['id']))
                serial = get_serial_from_certificate_safe(ca['certificate'])
                if serial is not None:
                    serials.append(serial)
                return serials

            ca_signed_certs.extend(child_serials(ca_data))

            # This is for a case where the user might have a malformed certificate and serial value returns None
            ca_signed_certs = list(filter(None, ca_signed_certs))

            if not ca_signed_certs:
                return int(get_serial_from_certificate_safe(ca_data['certificate']) or 0) + 1
            else:
                return max(ca_signed_certs) + 1
def _set_enum(name):
def set_enum(attr):
attr.enum = ['CA_CREATE_INTERNAL', 'CA_CREATE_IMPORTED', 'CA_CREATE_INTERMEDIATE']
return {'name': name, 'method': set_enum}
def _set_cert_extensions_defaults(name):
def set_defaults(attr):
for ext, keys, values in (
('BasicConstraints', ('enabled', 'ca', 'extension_critical'), [True] * 3),
('KeyUsage', ('enabled', 'key_cert_sign', 'crl_sign', 'extension_critical'), [True] * 4),
('ExtendedKeyUsage', ('enabled', 'usages'), (True, ['SERVER_AUTH']))
):
for k, v in zip(keys, values):
attr.attrs[ext].attrs[k].default = v
return {'name': name, 'method': set_defaults}
# CREATE METHODS FOR CREATING CERTIFICATE AUTHORITIES
# "do_create" IS CALLED FIRST AND THEN BASED ON THE TYPE OF CA WHICH IS TO BE CREATED, THE
# APPROPRIATE METHOD IS CALLED
# FOLLOWING TYPES ARE SUPPORTED
# CREATE_TYPE ( STRING ) - METHOD CALLED
# CA_CREATE_INTERNAL - create_internal
# CA_CREATE_IMPORTED - create_imported_ca
# CA_CREATE_INTERMEDIATE - create_intermediate_ca
    @accepts(
        Patch(
            'certificate_create', 'ca_create',
            ('edit', _set_enum('create_type')),
            ('edit', _set_cert_extensions_defaults('cert_extensions')),
            ('rm', {'name': 'dns_mapping'}),
            register=True
        ),
    )
    async def do_create(self, data):
        """
        Create a new Certificate Authority

        Certificate Authorities are classified under following types with the necessary keywords to be passed
        for `create_type` attribute to create the respective type of certificate authority

        1) Internal Certificate Authority       -  CA_CREATE_INTERNAL

        2) Imported Certificate Authority       -  CA_CREATE_IMPORTED

        3) Intermediate Certificate Authority   -  CA_CREATE_INTERMEDIATE

        Created certificate authorities use RSA keys by default. If an Elliptic Curve Key is desired, then it can be
        specified with the `key_type` attribute. If the `ec_curve` attribute is not specified for the Elliptic
        Curve Key, default to using "SECP384R1" curve.

        A type is selected by the Certificate Authority Service based on `create_type`. The rest of the values
        are validated accordingly and finally a certificate is made based on the selected type.

        `cert_extensions` can be specified to set X509v3 extensions.

        .. examples(websocket)::

          Create an Internal Certificate Authority

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificateauthority.create",
                "params": [{
                    "name": "internal_ca",
                    "key_length": 2048,
                    "lifetime": 3600,
                    "city": "Nashville",
                    "common": "domain1.com",
                    "country": "US",
                    "email": "dev@ixsystems.com",
                    "organization": "iXsystems",
                    "state": "Tennessee",
                    "digest_algorithm": "SHA256"
                    "create_type": "CA_CREATE_INTERNAL"
                }]
            }

          Create an Imported Certificate Authority

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificateauthority.create",
                "params": [{
                    "name": "imported_ca",
                    "certificate": "Certificate string",
                    "privatekey": "Private key string",
                    "create_type": "CA_CREATE_IMPORTED"
                }]
            }
        """
        create_type = data.pop('create_type')
        if create_type == 'CA_CREATE_IMPORTED':
            # Key generation parameters are meaningless for an imported CA
            for key in ('key_length', 'key_type', 'ec_curve'):
                data.pop(key, None)
        verrors = await self.validate_common_attributes(data, 'certificate_authority_create')

        if create_type == 'CA_CREATE_IMPORTED' and not load_certificate(data['certificate']):
            verrors.add('certificate_authority_create.certificate', 'Unable to parse certificate')

        await validate_cert_name(
            self.middleware, data['name'], self._config.datastore,
            verrors, 'certificate_authority_create.name'
        )

        verrors.check()

        # Dispatch to the create_type-specific generator, then keep only the
        # fields that belong in the datastore row
        data = {
            k: v for k, v in (
                await self.middleware.call(f'certificateauthority.{self.map_create_functions[create_type]}', data)
            ).items()
            if k in ['name', 'certificate', 'privatekey', 'type', 'signedby', 'add_to_trusted_store']
        }

        pk = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        # Regenerate certificate files on disk
        await self.middleware.call('service.start', 'ssl')

        return await self.get_instance(pk)
    @accepts(
        Patch(
            'ca_create_internal', 'ca_create_intermediate',
            ('add', {'name': 'signedby', 'type': 'int', 'required': True}),
        ),
    )
    @private
    async def create_intermediate_ca(self, data):
        """
        Generate an intermediate CA signed by the CA referenced by `signedby`,
        returning `data` augmented with the new certificate and private key.
        """
        signing_cert = await self.get_instance(data['signedby'])

        # Serial must be unique within the signing CA's chain
        serial = await self.middleware.call('certificateauthority.get_serial_for_certificate', signing_cert['id'])

        data['type'] = CA_TYPE_INTERMEDIATE

        cert_info = get_cert_info_from_data(data)
        cert_info.update({
            'ca_privatekey': signing_cert['privatekey'],
            'ca_certificate': signing_cert['certificate'],
            'serial': serial,
            'cert_extensions': data['cert_extensions']
        })

        cert, key = await self.middleware.call(
            'cryptokey.generate_certificate_authority',
            cert_info
        )

        data['certificate'] = cert
        data['privatekey'] = key

        return data
@accepts(
Patch(
'ca_create', 'ca_create_imported',
('edit', _set_required('certificate')),
('rm', {'name': 'create_type'}),
)
)
@private
def create_imported_ca(self, data):
data['type'] = CA_TYPE_EXISTING
if all(k in data for k in ('passphrase', 'privatekey')):
data['privatekey'] = export_private_key(data['privatekey'], data['passphrase'])
return data
    @accepts(
        Patch(
            'ca_create', 'ca_create_internal',
            ('edit', _set_required('lifetime')),
            ('edit', _set_required('country')),
            ('edit', _set_required('state')),
            ('edit', _set_required('city')),
            ('edit', _set_required('organization')),
            ('edit', _set_required('email')),
            ('edit', _set_required('san')),
            ('rm', {'name': 'create_type'}),
            register=True
        )
    )
    @private
    async def create_internal(self, data):
        """
        Generate a self-signed (root) CA from `data`, returning `data`
        augmented with the generated certificate and private key.
        """
        cert_info = get_cert_info_from_data(data)
        # Random serial for the root; descendants derive serials from the chain
        cert_info['serial'] = random.getrandbits(24)

        cert_info['cert_extensions'] = data['cert_extensions']
        (cert, key) = await self.middleware.call(
            'cryptokey.generate_self_signed_ca',
            cert_info
        )

        data['type'] = CA_TYPE_INTERNAL
        data['certificate'] = cert
        data['privatekey'] = key

        return data
    @accepts(
        Int('id', required=True),
        Dict(
            'ca_update',
            Bool('revoked'),
            Bool('add_to_trusted_store'),
            Int('ca_id'),
            Int('csr_cert_id'),
            Str('create_type', enum=['CA_SIGN_CSR']),
            Str('name'),
        ),
    )
    async def do_update(self, id_, data):
        """
        Update Certificate Authority of `id`

        Only `name`, `revoked` and `add_to_trusted_store` attributes can be updated.

        If `revoked` is enabled, the CA and its complete chain is marked as revoked and added to the CA's
        certificate revocation list. Revocation cannot be reversed.

        .. examples(websocket)::

          Update a Certificate Authority of `id`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificateauthority.update",
                "params": [
                    1,
                    {
                        "name": "updated_ca_name"
                    }
                ]
            }
        """
        if data.pop('create_type', '') == 'CA_SIGN_CSR':
            # BEING USED BY OLD LEGACY FOR SIGNING CSR'S. THIS CAN BE REMOVED WHEN LEGACY UI IS REMOVED
            data['ca_id'] = id_
            return await self.middleware.call(
                'certificateauthority.ca_sign_csr_impl', data, 'certificate_authority_update'
            )
        else:
            # `ca_id`/`csr_cert_id` are only meaningful for the legacy CSR-signing path above
            for key in ['ca_id', 'csr_cert_id']:
                data.pop(key, None)
        old = await self.get_instance(id_)
        # signedby is changed back to integer from a dict
        old['signedby'] = old['signedby']['id'] if old.get('signedby') else None
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        # Validation only needed when one of the mutable attributes actually changed
        if any(new[k] != old[k] for k in ('name', 'revoked', 'add_to_trusted_store')):
            if new['name'] != old['name']:
                await validate_cert_name(
                    self.middleware, new['name'], self._config.datastore,
                    verrors, 'certificate_authority_update.name'
                )
            # Revoking requires the CA's private key to be present
            if old['revoked'] != new['revoked'] and new['revoked'] and not new['privatekey']:
                verrors.add(
                    'certificate_authority_update.revoked',
                    'Only Certificate Authorities with a privatekey can be marked as revoked.'
                )
            elif old['revoked'] and not new['revoked']:
                # Revocation is one-way and cannot be undone
                verrors.add(
                    'certificate_authority_update.revoked',
                    'Certificate Authority has already been revoked and this cannot be reversed'
                )
            if not verrors and new['revoked'] and new['add_to_trusted_store']:
                verrors.add(
                    'certificate_authority_update.add_to_trusted_store',
                    'Revoked certificates cannot be added to system\'s trusted store'
                )
        verrors.check()
        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id_,
            {'name': new['name'], 'add_to_trusted_store': new['add_to_trusted_store']},
            {'prefix': self._config.datastore_prefix}
        )
        if old['revoked'] != new['revoked'] and new['revoked']:
            # Mark this CA and everything signed by it as revoked
            await self.middleware.call('certificateauthority.revoke_ca_chain', id_)
        # Regenerate the system SSL configuration to pick up the change
        await self.middleware.call('service.start', 'ssl')
        return await self.get_instance(id_)
async def do_delete(self, id_):
"""
Delete a Certificate Authority of `id`
.. examples(websocket)::
Delete a Certificate Authority of `id`
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "certificateauthority.delete",
"params": [
1
]
}
"""
await self.get_instance(id_)
await self.middleware.call('certificateauthority.check_dependencies', id_)
response = await self.middleware.call(
'datastore.delete',
self._config.datastore,
id_
)
await self.middleware.call('service.start', 'ssl')
return response
| 18,871 | Python | .py | 417 | 33.023981 | 116 | 0.559562 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,993 | renew_certs.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/renew_certs.py | import datetime
from .generate_self_signed import generate_self_signed_certificate
from middlewared.service import job, periodic, private, Service
from middlewared.utils.time_utils import utc_now
class CertificateService(Service):
    @periodic(86400)
    @private
    @job(lock='acme_cert_renewal')
    def renew_certs(self, job):
        """
        Daily job that renews certificates approaching expiry.

        Renewal candidates are all ACME-issued certificates and, when the UI is
        still using the stock self-signed certificate (organization 'iXsystems',
        SAN 'DNS:localhost', unsigned, existing type), that certificate too.
        A certificate is renewed once it is within `renew_days` (default 5)
        days of its `until` expiry timestamp.
        """
        system_cert = self.middleware.call_sync('system.general.config')['ui_certificate']
        # Detect the default self-signed UI cert: if still in use, include it
        # in the renewal query alongside all ACME certificates.
        if system_cert and all(
            system_cert[k] == v for k, v in (
                ('organization', 'iXsystems'),
                ('san', ['DNS:localhost']),
                ('signedby', None),
                ('cert_type_existing', True),
            )
        ):
            filters = [(
                'OR', (('acme', '!=', None), ('id', '=', system_cert['id']))
            )]
        else:
            filters = [('acme', '!=', None)]
        certs = self.middleware.call_sync('certificate.query', filters)
        progress = 0
        changed_certs = []
        for cert in certs:
            # Progress advances per certificate examined, not per renewal
            progress += (100 / len(certs))
            # Skip certificates not yet inside their renewal window
            if not (
                datetime.datetime.strptime(cert['until'], '%a %b %d %H:%M:%S %Y') - utc_now()
            ).days < cert.get('renew_days', 5):
                continue
            # renew cert
            self.logger.debug(f'Renewing certificate {cert["name"]}')
            if not cert.get('acme'):
                # Non-ACME (the default UI cert): mint a fresh self-signed pair
                cert_str, key = generate_self_signed_certificate()
                cert_payload = {
                    'certificate': cert_str,
                    'privatekey': key,
                }
            else:
                # ACME cert: re-run the order against the same directory and
                # domain->authenticator mapping used originally
                final_order = self.middleware.call_sync(
                    'acme.issue_certificate',
                    job, progress / 4, {
                        'tos': True,
                        'acme_directory_uri': cert['acme']['directory'],
                        'dns_mapping': cert['domains_authenticators'],
                    },
                    cert
                )
                cert_payload = {
                    'certificate': final_order.fullchain_pem,
                    'acme_uri': final_order.uri,
                }
            self.middleware.call_sync(
                'datastore.update',
                'system.certificate',
                cert['id'],
                cert_payload,
                {'prefix': 'cert_'}
            )
            changed_certs.append(cert)
        # Regenerate SSL config once, then redeploy services per renewed cert
        self.middleware.call_sync('etc.generate', 'ssl')
        for cert in changed_certs:
            try:
                self.middleware.call_sync('certificate.redeploy_cert_attachments', cert['id'])
            except Exception:
                # Best effort: a failed service reload must not abort the job
                self.logger.error(
                    'Failed to reload services dependent on %r certificate', cert['name'], exc_info=True
                )
        job.set_progress(progress)
| 2,861 | Python | .py | 71 | 26.225352 | 104 | 0.489921 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,994 | cryptokey_validate.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cryptokey_validate.py | import itertools
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from OpenSSL import crypto, SSL
from middlewared.service import Service
from .load_utils import load_private_key
from .utils import RE_CERTIFICATE
class CryptoKeyService(Service):

    class Config:
        private = True

    def validate_cert_with_chain(self, cert, chain):
        """Return True if PEM `cert` verifies against the CA certificates contained in `chain`."""
        target = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
        trust_store = crypto.X509Store()
        # Each entry in `chain` may hold several concatenated PEM certificates;
        # add every one of them to the trust store.
        for blob in chain:
            for pem in RE_CERTIFICATE.findall(blob):
                trust_store.add_cert(crypto.load_certificate(crypto.FILETYPE_PEM, pem))
        try:
            crypto.X509StoreContext(trust_store, target).verify_certificate()
        except crypto.X509StoreContextError:
            return False
        return True

    def validate_certificate_with_key(self, certificate, private_key, schema_name, verrors, passphrase=None):
        """Add an error to `verrors` when `private_key` does not match `certificate`; returns `verrors`."""
        if not certificate or not private_key:
            return verrors
        # Skip the pairing check when either field already failed validation
        if any(k in verrors for k in (f'{schema_name}.certificate', f'{schema_name}.privatekey')):
            return verrors
        cert_obj = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
        key_obj = crypto.load_privatekey(
            crypto.FILETYPE_PEM,
            private_key,
            passphrase=passphrase.encode() if passphrase else None
        )
        try:
            # Let OpenSSL confirm the key belongs to the certificate
            ctx = SSL.Context(SSL.TLSv1_2_METHOD)
            ctx.use_certificate(cert_obj)
            ctx.use_privatekey(key_obj)
            ctx.check_privatekey()
        except SSL.Error as e:
            verrors.add(
                f'{schema_name}.privatekey',
                f'Private key does not match certificate: {e}'
            )
        return verrors

    def validate_private_key(self, private_key, verrors, schema_name, passphrase=None):
        """Validate that `private_key` loads and, on creation, meets the minimum key size."""
        key = load_private_key(private_key, passphrase)
        if not key:
            verrors.add(
                f'{schema_name}.privatekey',
                'A valid private key is required, with a passphrase if one has been set.'
            )
            return
        # When a cert/ca is being created, disallow keys with size less then 1024.
        # Update is allowed for now for keeping compatibility with very old cert/keys.
        # EC-based keys are exempt from this check.
        is_ec_key = isinstance(key, (ec.EllipticCurvePrivateKey, Ed25519PrivateKey))
        if 'create' in schema_name and not is_ec_key and key.key_size < 1024:
            verrors.add(
                f'{schema_name}.privatekey',
                'Key size must be greater than or equal to 1024 bits.'
            )
24,995 | key_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/key_utils.py | import typing
from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed25519, ed448, rsa
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from middlewared.schema import accepts, Bool, Dict, Int, Str
from .load_utils import load_private_key
from .utils import EC_CURVES, EC_CURVE_DEFAULT
def retrieve_signing_algorithm(data: dict, signing_key: typing.Union[
    ed25519.Ed25519PrivateKey,
    ed448.Ed448PrivateKey,
    rsa.RSAPrivateKey,
    dsa.DSAPrivateKey,
    ec.EllipticCurvePrivateKey,
]) -> typing.Optional[hashes.HashAlgorithm]:
    """
    Return the hash algorithm instance to use when signing with `signing_key`.

    Edwards-curve keys (Ed25519/Ed448) hash internally and `cryptography`
    requires `algorithm=None` when signing with them; previously only Ed25519
    was handled, so an Ed448 key would incorrectly receive a digest. All other
    key types use the digest named by `data['digest_algorithm']`, defaulting
    to SHA256.
    """
    if isinstance(signing_key, (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)):
        # Ed25519/Ed448 signing rejects an explicit hash algorithm
        return None
    return getattr(hashes, data.get('digest_algorithm') or 'SHA256')()
@accepts(
    Dict(
        'generate_private_key',
        Bool('serialize', default=False),
        Int('key_length', default=2048),
        Str('type', default='RSA', enum=['RSA', 'EC']),
        Str('curve', enum=EC_CURVES, default=EC_CURVE_DEFAULT)
    )
)
def generate_private_key(options: dict) -> typing.Union[
    str,
    ed25519.Ed25519PrivateKey,
    ed448.Ed448PrivateKey,
    rsa.RSAPrivateKey,
    dsa.DSAPrivateKey,
    ec.EllipticCurvePrivateKey,
]:
    """
    Generate a new private key according to `options` and return either the
    key object or, when `serialize` is set, its PEM string form.

    PEM output uses the PKCS8 layout rather than legacy PKCS1:
    https://stackoverflow.com/questions/48958304/pkcs1-and-pkcs8-format-for-rsa-private-key
    """
    if options.get('type') == 'EC':
        curve_name = options['curve']
        if curve_name == 'ed25519':
            key = Ed25519PrivateKey.generate()
        else:
            key = ec.generate_private_key(getattr(ec, curve_name), default_backend())
    else:
        key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=options.get('key_length'),
            backend=default_backend()
        )
    if not options.get('serialize'):
        return key
    return key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()
def export_private_key(buffer: str, passphrase: typing.Optional[str] = None) -> typing.Optional[str]:
    """
    Load the PEM private key in `buffer` (decrypting with `passphrase` when
    given) and return it re-serialized without encryption; returns None when
    the key cannot be loaded.
    """
    key = load_private_key(buffer, passphrase)
    return export_private_key_object(key) if key else None
def export_private_key_object(key: typing.Union[
    ed25519.Ed25519PrivateKey,
    ed448.Ed448PrivateKey,
    rsa.RSAPrivateKey,
    dsa.DSAPrivateKey,
    ec.EllipticCurvePrivateKey,
]) -> str:
    """Serialize `key` to an unencrypted PKCS8 PEM string."""
    pem_bytes = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    )
    return pem_bytes.decode()
| 2,810 | Python | .py | 77 | 30.025974 | 101 | 0.691912 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,996 | generate_certs.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/generate_certs.py | import typing
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from .extensions_utils import add_extensions
from .generate_utils import generate_builder, normalize_san
from .key_utils import export_private_key_object, generate_private_key, retrieve_signing_algorithm
from .load_utils import load_certificate, load_private_key
from .utils import CERT_BACKEND_MAPPINGS, EC_CURVE_DEFAULT
def generate_certificate(data: dict) -> typing.Tuple[str, str]:
    """
    Generate a certificate/private-key pair described by `data`.

    Returns a `(certificate_pem, private_key_pem)` tuple. When
    `ca_certificate`/`ca_privatekey` are present the certificate is signed by
    that CA; otherwise it is self-signed with the newly generated key.
    """
    key = generate_private_key({
        'type': data.get('key_type') or 'RSA',
        'curve': data.get('ec_curve') or EC_CURVE_DEFAULT,
        'key_length': data.get('key_length') or 2048
    })
    if data.get('ca_privatekey'):
        ca_key = load_private_key(data['ca_privatekey'])
    else:
        ca_key = None
    # Guard against a missing `san` — keeps behavior consistent with
    # generate_certificate_authority(), which already defaults to [].
    san_list = normalize_san(data.get('san') or [])
    builder_data = {
        'crypto_subject_name': {
            k: data.get(v) for k, v in CERT_BACKEND_MAPPINGS.items()
        },
        'san': san_list,
        'serial': data.get('serial'),
        'lifetime': data.get('lifetime')
    }
    if data.get('ca_certificate'):
        # Issuer name comes from the parsed CA certificate's subject fields
        ca_data = load_certificate(data['ca_certificate'])
        builder_data['crypto_issuer_name'] = {
            k: ca_data.get(v) for k, v in CERT_BACKEND_MAPPINGS.items()
        }
        issuer = x509.load_pem_x509_certificate(data['ca_certificate'].encode(), default_backend())
    else:
        issuer = None
    cert = add_extensions(generate_builder(builder_data), data.get('cert_extensions'), key, issuer)
    # Sign with the CA key when available, otherwise self-sign with the new key
    cert = cert.sign(
        ca_key or key, retrieve_signing_algorithm(data, ca_key or key), default_backend()
    )
    return cert.public_bytes(serialization.Encoding.PEM).decode(), export_private_key_object(key)
| 1,837 | Python | .py | 41 | 38.585366 | 99 | 0.680291 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,997 | cryptokey_load.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cryptokey_load.py | from middlewared.schema import accepts, Str
from middlewared.service import Service
from .load_utils import load_certificate, load_certificate_request
class CryptoKeyService(Service):

    class Config:
        private = True

    @accepts(Str('certificate', required=True, max_length=None))
    def load_certificate(self, certificate):
        """Parse a PEM certificate via the module-level `load_certificate` helper and return the result."""
        parsed = load_certificate(certificate)
        return parsed

    @accepts(Str('csr', required=True, max_length=None))
    def load_certificate_request(self, csr):
        """Parse a PEM CSR via the module-level `load_certificate_request` helper and return the result."""
        parsed = load_certificate_request(csr)
        return parsed
| 533 | Python | .py | 12 | 39.25 | 66 | 0.757282 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,998 | cryptokey_extensions.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cryptokey_extensions.py | from cryptography import x509
from middlewared.schema import accepts, Ref, Str
from middlewared.service import Service, ValidationErrors
from .extensions_utils import get_extension_params
class CryptoKeyService(Service):

    class Config:
        private = True

    @accepts(
        Ref('cert_extensions'),
        Str('schema')
    )
    def validate_extensions(self, extensions_data, schema):
        """Validate the requested x509 extensions and return a ValidationErrors collecting any problems."""
        # AuthorityKeyIdentifier is derived from the cert/ca's public key
        # contents, so there is nothing user-supplied to validate for it.
        skip_extension = ['AuthorityKeyIdentifier']
        verrors = ValidationErrors()
        for name, config in extensions_data.items():
            if name in skip_extension or not config['enabled']:
                continue
            extension_class = getattr(x509.extensions, name)
            try:
                # Instantiating the extension performs cryptography's own validation
                extension_class(*get_extension_params((name, config)))
            except Exception as e:
                verrors.add(
                    f'{schema}.{name}',
                    f'Please provide valid values for {name}: {e}'
                )
        key_usage = extensions_data['KeyUsage']
        if key_usage['enabled'] and key_usage['key_cert_sign']:
            basic = extensions_data['BasicConstraints']
            # RFC 5280: key_cert_sign requires the CA basic constraint
            if not (basic['enabled'] and basic['ca']):
                verrors.add(
                    f'{schema}.BasicConstraints',
                    'Please enable ca when key_cert_sign is set in KeyUsage as per RFC 5280.'
                )
        eku = extensions_data['ExtendedKeyUsage']
        if eku['enabled'] and not eku['usages']:
            verrors.add(
                f'{schema}.ExtendedKeyUsage.usages',
                'Please specify at least one USAGE for this extension.'
            )
        return verrors
| 1,827 | Python | .py | 37 | 38.135135 | 115 | 0.62507 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,999 | generate_ca.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/generate_ca.py | import typing
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from .extensions_utils import add_extensions
from .generate_utils import generate_builder, normalize_san
from .key_utils import export_private_key_object, generate_private_key, retrieve_signing_algorithm
from .load_utils import load_certificate, load_private_key
from .utils import CERT_BACKEND_MAPPINGS, EC_CURVE_DEFAULT
def generate_certificate_authority(data: dict) -> typing.Tuple[str, str]:
    """
    Generate a CA certificate/private-key pair described by `data`.

    Returns a `(certificate_pem, private_key_pem)` tuple. When
    `ca_certificate`/`ca_privatekey` are supplied the new CA is signed by that
    parent CA; otherwise it is self-signed (a root CA).
    """
    new_key = generate_private_key({
        'type': data.get('key_type') or 'RSA',
        'curve': data.get('ec_curve') or EC_CURVE_DEFAULT,
        'key_length': data.get('key_length') or 2048
    })
    ca_key = load_private_key(data['ca_privatekey']) if data.get('ca_privatekey') else None
    builder_data = {
        'crypto_subject_name': {
            k: data.get(v) for k, v in CERT_BACKEND_MAPPINGS.items()
        },
        'san': normalize_san(data.get('san') or []),
        'serial': data.get('serial'),
        'lifetime': data.get('lifetime'),
    }
    issuer = None
    if data.get('ca_certificate'):
        # Issuer name comes from the parsed parent CA's subject fields
        parent = load_certificate(data['ca_certificate'])
        builder_data['crypto_issuer_name'] = {
            k: parent.get(v) for k, v in CERT_BACKEND_MAPPINGS.items()
        }
        issuer = x509.load_pem_x509_certificate(data['ca_certificate'].encode(), default_backend())
    builder = add_extensions(generate_builder(builder_data), data.get('cert_extensions'), new_key, issuer)
    # Sign with the parent CA key when present, otherwise self-sign
    signing_key = ca_key or new_key
    certificate = builder.sign(signing_key, retrieve_signing_algorithm(data, signing_key), default_backend())
    return certificate.public_bytes(serialization.Encoding.PEM).decode(), export_private_key_object(new_key)
| 1,853 | Python | .py | 41 | 38.97561 | 99 | 0.680355 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |