id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,800 | smartd.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/smartd.py | import logging
from .base import SimpleService
logger = logging.getLogger(__name__)
class SMARTDService(SimpleService):
    """Manage the smartd (S.M.A.R.T. disk monitoring) daemon."""

    name = "smartd"
    reloadable = True
    # etc files regenerated around service actions.
    etc = ["rc", "smartd"]
    # systemd unit name differs from the middleware service name.
    systemd_unit = "smartmontools"
    systemd_async_start = True

    async def after_start(self):
        # Restart netdata once smartd is up — presumably so it re-attaches to
        # S.M.A.R.T. data sources; TODO confirm against netdata plugin.
        await self.middleware.call('service.restart', 'netdata')
| 360 | Python | .py | 11 | 28.181818 | 64 | 0.710526 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,801 | ups.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/ups.py | from .base import SimpleService
class UPSService(SimpleService):
    """Manage the NUT (Network UPS Tools) stack used for UPS monitoring."""

    name = "ups"
    etc = ["ups"]
    # The primary unit is the monitor; additional units depend on mode.
    systemd_unit = "nut-monitor"

    async def systemd_extra_units(self):
        # In MASTER mode this host also runs the NUT server and driver
        # enumerator; otherwise only the shared target is relevant.
        if (await self.middleware.call("ups.config"))["mode"] == "MASTER":
            return ["nut-driver-enumerator", "nut-server", "nut.target"]
        else:
            return ["nut.target"]

    async def before_start(self):
        await self.middleware.call("ups.dismiss_alerts")

    async def start(self):
        # Server-side units must come up before the monitor when MASTER.
        if (await self.middleware.call("ups.config"))["mode"] == "MASTER":
            await self._systemd_unit("nut-server", "start")
            await self._systemd_unit("nut-driver-enumerator", "start")

        await self._unit_action("Start")

    async def after_start(self):
        # Reconfigure mdns (add nut service)
        await self.middleware.call('service.reload', 'mdns', {'ha_propagate': False})

    async def before_stop(self):
        await self.middleware.call("ups.dismiss_alerts")

    async def stop(self):
        # Tear down in reverse order of startup.
        await self._unit_action("Stop")
        await self._systemd_unit("nut-driver-enumerator", "stop")
        await self._systemd_unit("nut-server", "stop")
        await self._systemd_unit("nut-driver.target", "stop")

    async def after_stop(self):
        # Reconfigure mdns (remove nut service)
        await self.middleware.call('service.reload', 'mdns', {'ha_propagate': False})
| 1,413 | Python | .py | 30 | 39.133333 | 85 | 0.63901 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,802 | docker.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/docker.py | import asyncio
from middlewared.plugins.docker.state_utils import Status
from .base import SimpleService
class DockerService(SimpleService):
    """Manage the docker daemon and keep middleware's docker state in sync."""

    name = 'docker'
    etc = ['docker']
    systemd_unit = 'docker'

    async def before_start(self):
        await self.middleware.call('docker.state.set_status', Status.INITIALIZING.value)
        await self.middleware.call('docker.state.before_start_check')
        # Tune kernel memory behaviour for container workloads.
        for key, value in (
            ('vm.panic_on_oom', 0),
            ('vm.overcommit_memory', 1),
        ):
            await self.middleware.call('sysctl.set_value', key, value)

    async def start(self):
        try:
            await super().start()
            timeout = 120  # We have this at 120 because HDDs are notorious and docker can take more time there
            # First time when docker is started, it takes a bit more time to initialise itself properly
            # and we need to have sleep here so that after start is called post_start is not dismissed
            while timeout > 0:
                if not await self.middleware.call('service.started', 'docker'):
                    await asyncio.sleep(2)
                    timeout -= 2
                else:
                    break
        finally:
            # Schedule the post-start check shortly after returning. We are
            # inside a coroutine so a running loop is guaranteed;
            # asyncio.get_event_loop() is deprecated here since Python 3.10.
            asyncio.get_running_loop().call_later(
                2,
                lambda: self.middleware.create_task(self.middleware.call('docker.state.after_start_check')),
            )

    async def stop(self):
        await super().stop()
        # Stop the socket unit too, otherwise docker could be
        # socket-activated right back up.
        await self._systemd_unit('docker.socket', 'stop')

    async def after_start(self):
        await self.middleware.call('docker.state.set_status', Status.RUNNING.value)
        self.middleware.create_task(self.middleware.call('docker.events.setup'))
        if (await self.middleware.call('docker.config'))['enable_image_updates']:
            self.middleware.create_task(self.middleware.call('app.image.op.check_update'))

    async def before_stop(self):
        await self.middleware.call('docker.state.set_status', Status.STOPPING.value)

    async def after_stop(self):
        await self.middleware.call('docker.state.set_status', Status.STOPPED.value)
| 2,154 | Python | .py | 44 | 38.727273 | 111 | 0.638095 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,803 | keepalived.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/keepalived.py | from .base import SimpleService
class KeepalivedService(SimpleService):
    """Manage keepalived (VRRP), used for HA address failover."""

    name = 'keepalived'
    systemd_unit = 'keepalived'
    reloadable = True
    restartable = True
    etc = ['keepalived']

    async def restart(self):
        # NOTE: this causes all interfaces on the node
        # to send an advertisement with priority 0
        # which means transition from MASTER to BACKUP
        await self._systemd_unit('keepalived', 'restart')

    async def reload(self):
        # Reload re-reads config without the MASTER->BACKUP transition above.
        await self._systemd_unit('keepalived', 'reload')
| 537 | Python | .py | 14 | 32.071429 | 57 | 0.690522 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,804 | cifs.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/cifs.py | import errno
from .base import SimpleService
from middlewared.service_exception import CallError
class CIFSService(SimpleService):
    """Manage the Samba smbd daemon."""

    name = "cifs"
    reloadable = True
    etc = ["smb"]
    systemd_unit = "smbd"

    async def start(self):
        # Do not start smbd until SMB configuration is ready; bail out
        # silently if configuration never completes.
        if not await self.middleware.call("smb.configure_wait"):
            return

        await self._systemd_unit("smbd", "start")

    async def after_start(self):
        # We reconfigure mdns (add SMB service, possibly also ADISK)
        await self.middleware.call('service.reload', 'mdns')

    async def stop(self):
        await self._systemd_unit("smbd", "stop")

    async def after_stop(self):
        # reconfigure mdns (remove SMB service, possibly also ADISK)
        await self.middleware.call('service.reload', 'mdns')

    async def before_reload(self):
        # Sync the share registry first so a reload picks up current shares.
        sync_job = await self.middleware.call("sharing.smb.sync_registry")
        await sync_job.wait()
| 927 | Python | .py | 23 | 33.608696 | 74 | 0.678611 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,805 | libvirtd.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/pseudo/libvirtd.py | from middlewared.plugins.service_.services.base import SimpleService
class LibvirtdService(SimpleService):
    """Manage the libvirt daemon."""

    name = "libvirtd"
    systemd_unit = "libvirtd"
    etc = ["libvirt"]

    async def after_start(self):
        # libvirt-guests is started/stopped in lockstep with the daemon.
        await self.middleware.call("service.start", "libvirt-guests")

    async def before_stop(self):
        await self.middleware.call("service.stop", "libvirt-guests")


class LibvirtGuestService(SimpleService):
    """Manage the libvirt-guests helper unit (guest suspend/resume)."""

    name = "libvirt-guests"
    systemd_unit = "libvirt-guests"
    systemd_async_start = True
    etc = ["libvirt_guests"]
| 558 | Python | .py | 14 | 34.714286 | 69 | 0.72119 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,806 | misc.py | truenas_middleware/src/middlewared/middlewared/plugins/service_/services/pseudo/misc.py | from middlewared.plugins.service_.services.base import SimpleService, systemd_unit
from middlewared.plugins.service_.services.base_interface import ServiceInterface
from middlewared.plugins.service_.services.base_state import ServiceState
class PseudoServiceBase(ServiceInterface):
    """Base for services with no real daemon: state is always 'running'."""

    async def get_state(self):
        # Always running, no associated PIDs.
        return ServiceState(True, [])
class CronService(PseudoServiceBase):
    name = "cron"
    etc = ["cron"]
    restartable = True

    async def restart(self):
        # Intentionally a no-op — presumably regenerating /etc (via `etc`)
        # is sufficient and cron needs no process restart; TODO confirm.
        pass
class KmipService(PseudoServiceBase):
    name = "kmip"

    async def start(self):
        # KMIP needs certificates in place before its config is generated.
        await self.middleware.call("service.start", "ssl")
        await self.middleware.call("etc.generate", "kmip")

    async def get_state(self):
        # Reported as running whenever KMIP is enabled in its configuration.
        return ServiceState(
            (await self.middleware.call('kmip.config'))['enabled'],
            [],
        )
class LoaderService(PseudoServiceBase):
    name = "loader"
    etc = ["loader"]
    reloadable = True

    async def reload(self):
        # No-op: regenerating the loader config (via `etc`) is the reload.
        pass


class HostnameService(PseudoServiceBase):
    name = "hostname"
    reloadable = True

    async def reload(self):
        await self.middleware.call("etc.generate", "hostname")
        # mdns advertises the hostname, so restart it to pick up the change.
        await self.middleware.call("service.restart", "mdns")
class HttpService(PseudoServiceBase):
    """The web UI's nginx front end."""

    name = "http"
    etc = ["nginx"]
    restartable = True
    reloadable = True

    async def restart(self):
        # Refresh the UI allowlist before touching nginx so new rules apply.
        await self.middleware.call("system.general.update_ui_allowlist")
        await systemd_unit("nginx", "restart")

    async def reload(self):
        await self.middleware.call("system.general.update_ui_allowlist")
        await systemd_unit("nginx", "reload")
class NetworkService(PseudoServiceBase):
    name = "network"

    async def start(self):
        # "Starting" networking means syncing interface and route config.
        await self.middleware.call("interface.sync")
        await self.middleware.call("route.sync")


class NetworkGeneralService(PseudoServiceBase):
    name = "networkgeneral"
    reloadable = True

    async def reload(self):
        await self.middleware.call("service.reload", "resolvconf")
        await self.middleware.call("service.restart", "routing")
class NfsMountdService(PseudoServiceBase):
    '''
    Used in HA mode to stop nfs-mountd on the standby node
    '''
    name = "mountd"

    async def stop(self):
        await systemd_unit("nfs-mountd", "stop")
class NtpdService(SimpleService):
    # Historical name "ntpd" is kept, but the actual daemon is chrony.
    name = "ntpd"
    etc = ["ntpd"]
    restartable = True
    systemd_unit = "chronyd"


class OpenVmToolsService(SimpleService):
    name = "open-vm-tools"
    systemd_unit = "open-vm-tools"


class PowerdService(SimpleService):
    name = "powerd"
    etc = ["rc"]
    # FIXME: Linux
class RcService(PseudoServiceBase):
    name = "rc"
    etc = ["rc"]
    reloadable = True

    async def reload(self):
        # No-op: regenerating rc files (via `etc`) is the reload.
        pass


class ResolvConfService(PseudoServiceBase):
    name = "resolvconf"
    reloadable = True

    async def reload(self):
        await self.middleware.call("service.reload", "hostname")
        await self.middleware.call("dns.sync")
class RoutingService(SimpleService):
    name = "routing"
    etc = ["rc"]
    restartable = True

    async def get_state(self):
        # Routing has no daemon; always reported as running.
        return ServiceState(True, [])

    async def restart(self):
        await self.middleware.call("staticroute.sync")


class SslService(PseudoServiceBase):
    name = "ssl"
    etc = ["ssl"]

    async def start(self):
        # No-op: generating certificates via `etc` is the whole job.
        pass
class SyslogdService(SimpleService):
    name = "syslogd"
    etc = ["syslogd"]
    restartable = True
    reloadable = True
    systemd_unit = "syslog-ng"


class TimeservicesService(PseudoServiceBase):
    name = "timeservices"
    etc = ["localtime"]
    reloadable = True

    async def reload(self):
        await self.middleware.call("service.restart", "ntpd")

        # Propagate the configured timezone into middleware's environment.
        settings = await self.middleware.call("datastore.config", "system.settings")
        await self.middleware.call("core.environ_update", {"TZ": settings["stg_timezone"]})
class UserService(PseudoServiceBase):
    name = "user"
    etc = ["user"]
    reloadable = True

    async def reload(self):
        # No-op: regenerating user files (via `etc`) is the reload.
        pass
| 4,105 | Python | .py | 120 | 28.25 | 91 | 0.686701 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,807 | stats_util.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/stats_util.py | from middlewared.utils.cpu import cpu_info
from .ix_apps.metadata import get_collective_metadata
from .ix_apps.utils import get_app_name_from_project_name
NANO_SECOND = 1000000000
def normalize_projects_stats(all_projects_stats: dict, old_stats: dict, interval: int) -> list[dict]:
    """
    Convert raw per-project docker stats into per-app rates.

    `all_projects_stats`/`old_stats` map compose project name to raw counters;
    `interval` is the seconds between the two samples. Returns one entry per
    configured app; apps with no running project get zeroed stats.
    """
    normalized_projects_stats = []
    all_configured_apps = get_collective_metadata()
    core_count = cpu_info()['core_count']
    for project, data in all_projects_stats.items():
        app_name = get_app_name_from_project_name(project)
        if app_name not in all_configured_apps:
            continue
        else:
            all_configured_apps.pop(app_name)

        normalized_data = {
            'app_name': app_name,
            'memory': data['memory'],
            'blkio': data['blkio'],
        }
        # A project that appeared after the previous sample has no old stats;
        # fall back to the current sample so all rates start at zero instead
        # of raising KeyError.
        old_project_stats = old_stats.get(project, data)
        # Docker provides CPU usage time in nanoseconds.
        # To calculate the CPU usage percentage:
        # 1. Calculate the difference in CPU usage (`cpu_delta`) between the current and previous stats.
        # 2. Normalize this delta over the given time interval by dividing by (interval * NANO_SECOND).
        # 3. Multiply by 100 to convert to percentage.
        cpu_delta = data['cpu_usage'] - old_project_stats['cpu_usage']
        if cpu_delta >= 0:
            normalized_data['cpu_usage'] = (cpu_delta / (interval * NANO_SECOND * core_count)) * 100
        else:
            # This will happen when there were multiple containers and an app is being stopped
            # and old stats contain cpu usage times of multiple containers and current stats
            # only contains the stats of the containers which are still running which means collectively
            # current cpu usage time will be obviously low then what old stats contain
            normalized_data['cpu_usage'] = 0

        networks = []
        old_networks = old_project_stats['networks']
        for net_name, network_data in data['networks'].items():
            # An interface absent from the previous sample gets a zero rate.
            old_network_data = old_networks.get(net_name, network_data)
            networks.append({
                'interface_name': net_name,
                'rx_bytes': int(
                    (network_data['rx_bytes'] - old_network_data['rx_bytes']) / interval
                ),
                'tx_bytes': int(
                    (network_data['tx_bytes'] - old_network_data['tx_bytes']) / interval
                ),
            })

        normalized_data['networks'] = networks
        normalized_projects_stats.append(normalized_data)

    # Any configured app without a running compose project reports zeros.
    for stopped_app in all_configured_apps:
        normalized_projects_stats.append({
            'app_name': stopped_app,
            'memory': 0,
            'cpu_usage': 0,
            'networks': [],
            'blkio': {'read': 0, 'write': 0},
        })

    return normalized_projects_stats
| 2,697 | Python | .py | 54 | 39.37037 | 114 | 0.610099 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,808 | resources.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/resources.py | from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str
from middlewared.service import private, Service
from middlewared.utils.gpu import get_nvidia_gpus
from .ix_apps.utils import ContainerState
from .resources_utils import get_normalized_gpu_choices
from .utils import IX_APPS_MOUNT_PATH
class AppService(Service):
    """Read-only helper endpoints exposing app-related choices and resources."""

    class Config:
        namespace = 'app'
        cli_namespace = 'app'

    @accepts(
        Str('app_name'),
        Dict(
            'options',
            Bool('alive_only', default=True),
        ),
        roles=['APPS_READ']
    )
    @returns(Dict(
        additional_attrs=True,
        example={
            'afb901dc53a29016c385a9de43f089117e399622c042674f82c10c911848baba': {
                'service_name': 'jellyfin',
                'image': 'jellyfin/jellyfin:10.9.7',
                'state': 'running',
                'id': 'afb901dc53a29016c385a9de43f089117e399622c042674f82c10c911848baba',
            }
        }
    ))
    async def container_ids(self, app_name, options):
        """
        Returns container IDs for `app_name`.
        """
        return {
            c['id']: {
                'service_name': c['service_name'],
                'image': c['image'],
                'state': c['state'],
                'id': c['id'],
            } for c in (
                await self.middleware.call('app.get_instance', app_name)
            )['active_workloads']['container_details'] if (
                options['alive_only'] is False or ContainerState(c['state']) == ContainerState.RUNNING
            )
        }

    @accepts(Str('app_name'), roles=['APPS_READ'])
    @returns(Dict(
        additional_attrs=True,
        example={
            'afb901dc53a29016c385a9de43f089117e399622c042674f82c10c911848baba': {
                'service_name': 'jellyfin',
                'image': 'jellyfin/jellyfin:10.9.7',
                'state': 'running',
                'id': 'afb901dc53a29016c385a9de43f089117e399622c042674f82c10c911848baba',
            }
        }
    ))
    async def container_console_choices(self, app_name):
        """
        Returns container console choices for `app_name`.
        """
        # Console attach only makes sense for running containers.
        return await self.container_ids(app_name, {'alive_only': True})

    @accepts(roles=['APPS_READ'])
    @returns(List(items=[Ref('certificate_entry')]))
    async def certificate_choices(self):
        """
        Returns certificates which can be used by applications.
        """
        return await self.middleware.call(
            'certificate.query', [['revoked', '=', False], ['cert_type_CSR', '=', False], ['parsed', '=', True]],
            {'select': ['name', 'id']}
        )

    @accepts(roles=['APPS_READ'])
    @returns(List(items=[Ref('certificateauthority_entry')]))
    async def certificate_authority_choices(self):
        """
        Returns certificate authorities which can be used by applications.
        """
        return await self.middleware.call(
            'certificateauthority.query', [['revoked', '=', False], ['parsed', '=', True]], {'select': ['name', 'id']}
        )

    @accepts(roles=['APPS_READ'])
    @returns(List(items=[Int('used_port')]))
    async def used_ports(self):
        """
        Returns ports in use by applications.
        """
        # The set comprehension already de-duplicates; the previous
        # `sorted(list(set({...})))` wrapped it in redundant set() and list()
        # calls — sorted() takes any iterable and returns a list.
        return sorted({
            host_port['host_port']
            for app in await self.middleware.call('app.query')
            for port_entry in app['active_workloads']['used_ports']
            for host_port in port_entry['host_ports']
        })

    @accepts(roles=['APPS_READ'])
    @returns(Dict(Str('ip_choice')))
    async def ip_choices(self):
        """
        Returns IP choices which can be used by applications.
        """
        return {
            ip['address']: ip['address']
            for ip in await self.middleware.call('interface.ip_in_use', {'static': True, 'any': True})
        }

    @accepts(roles=['CATALOG_READ'])
    @returns(Int())
    async def available_space(self):
        """
        Returns space available in bytes in the configured apps pool which apps can consume
        """
        await self.middleware.call('docker.state.validate')
        return (await self.middleware.call('filesystem.statfs', IX_APPS_MOUNT_PATH))['avail_bytes']

    @accepts(roles=['APPS_READ'])
    @returns(Dict('gpu_choices', additional_attrs=True))
    async def gpu_choices(self):
        """
        Returns GPU choices which can be used by applications.
        """
        return {
            gpu['pci_slot']: {
                k: gpu[k] for k in ('vendor', 'description', 'vendor_specific_config', 'pci_slot')
            }
            for gpu in await self.gpu_choices_internal()
            if not gpu['error']
        }

    @private
    async def gpu_choices_internal(self):
        # get_nvidia_gpus does blocking work, so push it off the event loop.
        return get_normalized_gpu_choices(
            await self.middleware.call('device.get_gpus'),
            await self.middleware.run_in_thread(get_nvidia_gpus),
        )
| 4,994 | Python | .py | 130 | 28.969231 | 118 | 0.579175 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,809 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/crud.py | import contextlib
import errno
import os
import shutil
import textwrap
from catalog_reader.custom_app import get_version_details
from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str
from middlewared.service import (
CallError, CRUDService, filterable, InstanceNotFound, job, pass_app, private, ValidationErrors
)
from middlewared.utils import filter_list
from middlewared.validators import Match, Range
from .compose_utils import compose_action
from .custom_app_utils import validate_payload
from .ix_apps.lifecycle import add_context_to_values, get_current_app_config, update_app_config
from .ix_apps.metadata import update_app_metadata, update_app_metadata_for_portals
from .ix_apps.path import get_app_parent_volume_ds, get_installed_app_path, get_installed_app_version_path
from .ix_apps.query import list_apps
from .ix_apps.setup import setup_install_app_dir
from .ix_apps.utils import AppState
from .version_utils import get_latest_version_from_app_versions
class AppService(CRUDService):
    """CRUD service for installing, updating and deleting docker-compose based apps."""

    class Config:
        namespace = 'app'
        datastore_primary_key_type = 'string'
        event_send = False
        cli_namespace = 'app'
        role_prefix = 'APPS'

    # Schema of an app entry returned by query/get_instance.
    ENTRY = Dict(
        'app_entry',
        Str('name'),
        Str('id'),
        Str('state', enum=[state.value for state in AppState]),
        Bool('upgrade_available'),
        Str('human_version'),
        Str('version'),
        Dict('metadata', additional_attrs=True),
        Dict(
            'active_workloads',
            Int('containers'),
            List('used_ports', items=[Dict(
                'used_port',
                Str('container_port'),
                Str('protocol'),
                List('host_ports', items=[Dict(
                    'host_port',
                    Str('host_port'),
                    Str('host_ip'),
                )]),
                additional_attrs=True,
            )]),
            List('container_details', items=[Dict(
                'container_detail',
                Str('id'),
                Str('service_name'),
                Str('image'),
                List('port_config'),
                Str('state', enum=['running', 'starting', 'exited']),
                List('volume_mounts'),
                additional_attrs=True,
            )]),
            List('volumes', items=[Dict(
                'volume',
                Str('source'),
                Str('destination'),
                Str('mode'),
                Str('type'),
                additional_attrs=True,
            )]),
            additional_attrs=True,
        ),
        additional_attrs=True,
    )

    @filterable
    @pass_app(rest=True)
    def query(self, app, filters, options):
        """
        Query all apps with `query-filters` and `query-options`.

        `query-options.extra.host_ip` is a string which can be provided to override portal IP address
        if it is a wildcard.

        `query-options.extra.include_app_schema` is a boolean which can be set to include app schema in the response.

        `query-options.extra.retrieve_config` is a boolean which can be set to retrieve app configuration
        used to install/manage app.
        """
        # When docker is not usable, behave as if no apps exist.
        if not self.middleware.call_sync('docker.state.validate', False):
            return filter_list([], filters, options)

        extra = options.get('extra', {})
        host_ip = extra.get('host_ip')
        if not host_ip:
            # Derive the portal host IP from the requesting connection when
            # possible; `app` here is the request app from @pass_app.
            try:
                if app.origin.is_tcp_ip_family:
                    host_ip = app.origin.loc_addr
            except AttributeError:
                pass

        retrieve_app_schema = extra.get('include_app_schema', False)
        kwargs = {
            'host_ip': host_ip,
            'retrieve_config': extra.get('retrieve_config', False),
            'image_update_cache': self.middleware.call_sync('app.image.op.get_update_cache', True),
        }
        # Fast path: a single equality filter on id/name lets list_apps look
        # at only that app.
        if len(filters) == 1 and filters[0][0] in ('id', 'name') and filters[0][1] == '=':
            kwargs['specific_app'] = filters[0][2]

        available_apps_mapping = self.middleware.call_sync('catalog.train_to_apps_version_mapping')

        apps = list_apps(available_apps_mapping, **kwargs)
        if not retrieve_app_schema:
            return filter_list(apps, filters, options)

        questions_context = self.middleware.call_sync('catalog.get_normalized_questions_context')
        # NOTE(review): this loop reuses the name `app`, shadowing the request
        # app parameter — safe since the parameter is no longer needed here.
        for app in apps:
            if app['custom_app']:
                version_details = get_version_details()
            else:
                version_details = self.middleware.call_sync(
                    'catalog.app_version_details', get_installed_app_version_path(app['name'], app['version']),
                    questions_context,
                )
            app['version_details'] = version_details

        return filter_list(apps, filters, options)

    @accepts(Str('app_name'), roles=['APPS_READ'])
    @returns(Dict('app_config', additional_attrs=True))
    def config(self, app_name):
        """
        Retrieve user specified configuration of `app_name`.
        """
        app = self.get_instance__sync(app_name)
        return get_current_app_config(app_name, app['version'])

    @accepts(Str('app_name'), roles=['APPS_WRITE'])
    @returns(Ref('app_entry'))
    @job(lock=lambda args: f'app_start_{args[0]}')
    async def convert_to_custom(self, job, app_name):
        """
        Convert `app_name` to a custom app.
        """
        return await self.middleware.call('app.custom.convert', job, app_name)

    @accepts(
        Dict(
            'app_create',
            Bool('custom_app', default=False),
            Dict('values', additional_attrs=True, private=True),
            Dict('custom_compose_config', additional_attrs=True, private=True),
            Str('custom_compose_config_string', private=True, max_length=2**31),
            Str('catalog_app', required=False),
            Str(
                'app_name', required=True, validators=[Match(
                    r'^[a-z]([-a-z0-9]*[a-z0-9])?$',
                    explanation=textwrap.dedent(
                        '''
                        Application name must have the following:
                        1) Lowercase alphanumeric characters can be specified
                        2) Name must start with an alphabetic character and can end with alphanumeric character
                        3) Hyphen '-' is allowed but not as the first or last character
                        e.g abc123, abc, abcd-1232
                        '''
                    )
                ), Range(min_=1, max_=40)]
            ),
            Str('train', default='stable'),
            Str('version', default='latest'),
        )
    )
    @job(lock=lambda args: f'app_create_{args[0]["app_name"]}')
    def do_create(self, job, data):
        """
        Create an app with `app_name` using `catalog_app` with `train` and `version`.

        TODO: Add support for advanced mode which will enable users to use their own compose files
        """
        self.middleware.call_sync('docker.state.validate')

        if self.middleware.call_sync('app.query', [['id', '=', data['app_name']]]):
            raise CallError(f'Application with name {data["app_name"]} already exists', errno=errno.EEXIST)

        if data['custom_app']:
            # Custom apps (user-supplied compose) take an entirely separate path.
            return self.middleware.call_sync('app.custom.create', data, job)

        verrors = ValidationErrors()
        if not data.get('catalog_app'):
            verrors.add('app_create.catalog_app', 'This field is required')
        verrors.check()

        app_name = data['app_name']
        complete_app_details = self.middleware.call_sync('catalog.get_app_details', data['catalog_app'], {
            'train': data['train'],
        })
        version = data['version']
        if version == 'latest':
            version = get_latest_version_from_app_versions(complete_app_details['versions'])

        if version not in complete_app_details['versions']:
            raise CallError(f'Version {version} not found in {data["catalog_app"]} app', errno=errno.ENOENT)

        return self.create_internal(job, app_name, version, data['values'], complete_app_details)

    @private
    def create_internal(
        self, job, app_name, version, user_values, complete_app_details, dry_run=False, migrated_app=False,
    ):
        # Core install flow, shared by create, migration and dry-run callers.
        app_version_details = complete_app_details['versions'][version]
        self.middleware.call_sync('catalog.version_supported_error_check', app_version_details)

        # Remember whether the ix-volumes dataset pre-existed so a failed
        # install only removes what it created itself.
        app_volume_ds_exists = bool(self.get_app_volume_ds(app_name))
        # The idea is to validate the values provided first and if it passes our validation test, we
        # can move forward with setting up the datasets and installing the catalog item
        new_values = self.middleware.call_sync(
            'app.schema.normalize_and_validate_values', app_version_details, user_values, False,
            get_installed_app_path(app_name), None, dry_run is False,
        )

        job.set_progress(25, 'Initial Validation completed')

        # Now that we have completed validation for the app in question wrt values provided,
        # we will now perform the following steps
        # 1) Create relevant dir for app
        # 2) Copy app version into app dir
        # 3) Have docker compose deploy the app in question
        try:
            setup_install_app_dir(app_name, app_version_details)
            app_version_details = self.middleware.call_sync(
                'catalog.app_version_details', get_installed_app_version_path(app_name, version)
            )
            new_values = add_context_to_values(app_name, new_values, app_version_details['app_metadata'], install=True)
            update_app_config(app_name, version, new_values)
            update_app_metadata(app_name, app_version_details, migrated_app)
            self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
            # At this point the app exists
            self.middleware.send_event('app.query', 'ADDED', id=app_name, fields=self.get_instance__sync(app_name))

            job.set_progress(60, 'App installation in progress, pulling images')
            if dry_run is False:
                compose_action(app_name, version, 'up', force_recreate=True, remove_orphans=True)
        except Exception as e:
            job.set_progress(80, f'Failure occurred while installing {app_name!r}, cleaning up')
            # We only want to remove app volume ds if it did not exist before the installation
            # and was created during this installation process
            self.remove_failed_resources(app_name, version, app_volume_ds_exists is False)
            self.middleware.send_event('app.query', 'REMOVED', id=app_name)
            raise e from None
        else:
            if dry_run is False:
                job.set_progress(100, f'{app_name!r} installed successfully')
            return self.get_instance__sync(app_name)

    @private
    def remove_failed_resources(self, app_name, version, remove_ds=False):
        # Best-effort cleanup after a failed install; individual failures are
        # suppressed/logged so cleanup continues as far as possible.
        apps_volume_ds = self.get_app_volume_ds(app_name) if remove_ds else None
        with contextlib.suppress(Exception):
            compose_action(app_name, version, 'down', remove_orphans=True)

        shutil.rmtree(get_installed_app_path(app_name), ignore_errors=True)
        if apps_volume_ds and remove_ds:
            try:
                self.middleware.call_sync('zfs.dataset.delete', apps_volume_ds, {'recursive': True})
            except Exception:
                self.logger.error('Failed to remove %r app volume dataset', apps_volume_ds, exc_info=True)

        self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
        self.middleware.send_event('app.query', 'REMOVED', id=app_name)

    @accepts(
        Str('app_name'),
        Dict(
            'app_update',
            Dict('values', additional_attrs=True, private=True),
            Dict('custom_compose_config', additional_attrs=True, private=True),
            Str('custom_compose_config_string', private=True, max_length=2**31),
        )
    )
    @job(lock=lambda args: f'app_update_{args[0]}')
    def do_update(self, job, app_name, data):
        """
        Update `app_name` app with new configuration.
        """
        app = self.get_instance__sync(app_name)
        # Only re-run compose for apps that are not stopped.
        app = self.update_internal(job, app, data, trigger_compose=app['state'] != 'STOPPED')
        self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
        return app

    @private
    def update_internal(self, job, app, data, progress_keyword='Update', trigger_compose=True):
        # Shared by update and rollback-style callers (progress_keyword varies).
        app_name = app['id']
        if app['custom_app']:
            if progress_keyword == 'Update':
                new_values = validate_payload(data, 'app_update')
            else:
                new_values = get_current_app_config(app_name, app['version'])
        else:
            config = get_current_app_config(app_name, app['version'])
            config.update(data['values'])
            # We use update=False because we want defaults to be populated again if they are not present in the payload
            # Why this is not dangerous is because the defaults will be added only if they are not present/configured
            # for the app in question
            app_version_details = self.middleware.call_sync(
                'catalog.app_version_details', get_installed_app_version_path(app_name, app['version'])
            )
            new_values = self.middleware.call_sync(
                'app.schema.normalize_and_validate_values', app_version_details, config, True,
                get_installed_app_path(app_name), app
            )
            new_values = add_context_to_values(app_name, new_values, app['metadata'], update=True)

        job.set_progress(25, 'Initial Validation completed')

        update_app_config(app_name, app['version'], new_values, custom_app=app['custom_app'])
        if app['custom_app'] is False:
            # TODO: Eventually we would want this to be executed for custom apps as well
            update_app_metadata_for_portals(app_name, app['version'])
        job.set_progress(60, 'Configuration updated')
        self.middleware.send_event('app.query', 'CHANGED', id=app_name, fields=self.get_instance__sync(app_name))
        if trigger_compose:
            job.set_progress(70, 'Updating docker resources')
            compose_action(app_name, app['version'], 'up', force_recreate=True, remove_orphans=True)

        job.set_progress(100, f'{progress_keyword} completed for {app_name!r}')
        return self.get_instance__sync(app_name)

    @accepts(
        Str('app_name'),
        Dict(
            'options',
            Bool('remove_images', default=True),
            Bool('remove_ix_volumes', default=False),
        )
    )
    @job(lock=lambda args: f'app_delete_{args[0]}')
    def do_delete(self, job, app_name, options):
        """
        Delete `app_name` app.
        """
        app_config = self.get_instance__sync(app_name)
        return self.delete_internal(job, app_name, app_config, options)

    @private
    def delete_internal(self, job, app_name, app_config, options):
        job.set_progress(20, f'Deleting {app_name!r} app')
        compose_action(
            app_name, app_config['version'], 'down', remove_orphans=True,
            remove_volumes=True, remove_images=options['remove_images'],
        )
        # Remove app from metadata first as if someone tries to query filesystem info of the app
        # where the app resources have been nuked from filesystem, it will error out
        self.middleware.call_sync('app.metadata.generate', [app_name]).wait_sync(raise_error=True)
        job.set_progress(80, 'Cleaning up resources')
        shutil.rmtree(get_installed_app_path(app_name))
        if options['remove_ix_volumes'] and (apps_volume_ds := self.get_app_volume_ds(app_name)):
            self.middleware.call_sync('zfs.dataset.delete', apps_volume_ds, {'recursive': True})

        # `send_event` is an internal-only option (not part of the accepted
        # schema), hence the .get with default True.
        if options.get('send_event', True):
            self.middleware.send_event('app.query', 'REMOVED', id=app_name)
        self.middleware.call_sync('alert.oneshot_delete', 'AppUpdate', app_name)
        job.set_progress(100, f'Deleted {app_name!r} app')
        return True

    @private
    def get_app_volume_ds(self, app_name):
        # This will return volume dataset of app if it exists, otherwise null
        apps_volume_ds = get_app_parent_volume_ds(self.middleware.call_sync('docker.config')['dataset'], app_name)
        with contextlib.suppress(InstanceNotFound):
            return self.middleware.call_sync(
                'zfs.dataset.get_instance', apps_volume_ds, {
                    'extra': {'retrieve_children': False, 'retrieve_properties': False}
                }
            )['id']
| 16,910 | Python | .py | 337 | 39.290801 | 119 | 0.612163 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,810 | cert_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/cert_attachments.py | from middlewared.common.attachment.certificate import CertificateCRUDServiceAttachmentDelegate
from middlewared.service import Service
from .ix_apps.metadata import get_collective_config
class AppCertificateAttachmentDelegate(CertificateCRUDServiceAttachmentDelegate):
    """Report which apps consume a certificate and redeploy them on change."""

    HUMAN_NAME = 'Applications'
    NAMESPACE = 'app'

    async def consuming_cert_human_output(self, cert_id):
        # Human-readable summary like "'app1, app2' Applications", or None
        # when no app consumes the certificate.
        attachments = await self.attachments(cert_id)
        return f'{", ".join(app["id"] for app in attachments)!r} {self.HUMAN_NAME}' if attachments else None

    async def attachments(self, cert_id):
        # An app consumes the cert when its stored config references the cert
        # id under 'ix_certificates'.
        config = await self.middleware.run_in_thread(get_collective_config)
        apps_consuming_cert = [
            app_name for app_name, app_config in config.items() if cert_id in app_config.get('ix_certificates', {})
        ]
        return await self.middleware.call(f'{self.NAMESPACE}.query', [['id', 'in', apps_consuming_cert]])

    async def redeploy(self, cert_id):
        apps = [r['name'] for r in await self.attachments(cert_id)]
        bulk_job = await self.middleware.call('core.bulk', 'app.redeploy', [[app] for app in apps])
        # Best effort: log failures rather than raise, so one bad app does not
        # block redeploying the remaining ones.
        for index, status in enumerate(await bulk_job.wait()):
            if status['error']:
                self.middleware.logger.error(
                    'Failed to redeploy %r app: %s', apps[index], status['error']
                )
class AppCertificateService(Service):
    """Helpers to detect and refresh apps whose stored certificates are stale."""

    class Config:
        namespace = 'app.certificate'
        private = True

    async def get_apps_consuming_outdated_certs(self, filters=None):
        """Return names of deployed apps whose stored certificate text differs from the current one."""
        outdated = []
        certs = {c['id']: c for c in await self.middleware.call('certificate.query')}
        collective = await self.middleware.run_in_thread(get_collective_config)
        deployed = {app['name'] for app in await self.middleware.call('app.query', filters or [])}
        for app_name, app_config in collective.items():
            if app_name not in deployed or not app_config.get('ix_certificates'):
                continue
            # Compare the certificate text stored at install/upgrade time with the live one.
            stale = any(
                stored['certificate'] != certs[cert_id]['certificate']
                for cert_id, stored in app_config['ix_certificates'].items()
                if cert_id in certs
            )
            if stale:
                outdated.append(app_name)
        return outdated

    async def redeploy_apps_consuming_outdated_certs(self):
        """Kick off a bulk redeploy of every app holding an outdated certificate."""
        outdated = await self.get_apps_consuming_outdated_certs()
        return await self.middleware.call('core.bulk', 'app.redeploy', [[name] for name in outdated])
async def setup(middleware):
    """Register the app delegate so certificate operations know about consuming apps."""
    delegate = AppCertificateAttachmentDelegate(middleware)
    await middleware.call('certificate.register_attachment_delegate', delegate)
| 2,751 | Python | .py | 51 | 44.54902 | 115 | 0.660581 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,811 | fs_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/fs_attachments.py | from middlewared.common.attachment import FSAttachmentDelegate
class AppFSAttachmentDelegate(FSAttachmentDelegate):
    """Connects apps to the datasets/paths their volumes live on."""

    name = 'apps'
    title = 'Apps'

    async def query(self, path, enabled, options=None):
        """Return apps with host volumes under `path`.

        With `enabled` true only running apps are considered; otherwise only
        stopped ones. Apps without volumes are never reported.
        """
        attached = []
        for app in await self.middleware.call('app.query'):
            volumes = app['active_workloads']['volumes']
            if not volumes:
                continue
            stopped = app['state'] == 'STOPPED'
            # enabled -> skip stopped apps; not enabled -> skip running apps
            if (enabled and stopped) or (not enabled and not stopped):
                continue
            sources = [volume['source'] for volume in volumes]
            if await self.middleware.call('filesystem.is_child', sources, path):
                attached.append({
                    'id': app['name'],
                    'name': app['name'],
                })
        return attached

    async def delete(self, attachments):
        """Stop each attached app; failures are logged rather than raised."""
        for attachment in attachments:
            try:
                stop_job = await self.middleware.call('app.stop', attachment['id'])
                await stop_job.wait(raise_error=True)
            except Exception:
                self.middleware.logger.error('Unable to stop %r app', attachment['id'], exc_info=True)

    async def toggle(self, attachments, enabled):
        # if enabled is true - we are going to ignore that as we don't want to scale up releases
        # automatically when a path becomes available
        action = 'start' if enabled else 'stop'
        for attachment in ([] if enabled else attachments):
            try:
                toggle_job = await self.middleware.call(f'app.{action}', attachment['id'])
                await toggle_job.wait(raise_error=True)
            except Exception:
                self.middleware.logger.error('Unable to %s %r app', action, attachment['id'], exc_info=True)

    async def stop(self, attachments):
        await self.toggle(attachments, False)

    async def start(self, attachments):
        await self.toggle(attachments, True)
async def setup(middleware):
    """Register the delegate so pool/dataset operations can act on attached apps."""
    delegate = AppFSAttachmentDelegate(middleware)
    middleware.create_task(
        middleware.call('pool.dataset.register_attachment_delegate', delegate)
    )
| 2,448 | Python | .py | 49 | 38.265306 | 112 | 0.603687 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,812 | custom_app_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/custom_app_utils.py | import yaml
from middlewared.service import ValidationErrors
def validate_payload(data: dict, schema: str) -> dict:
    """Validate a custom-app payload and return its parsed compose config.

    Exactly one of `custom_compose_config` (a dict) or
    `custom_compose_config_string` (YAML text) must be supplied in `data`.

    Raises `ValidationErrors` (prefixed with `schema`) when neither/both are
    provided or when the YAML string cannot be parsed.
    """
    verrors = ValidationErrors()
    compose_keys = ('custom_compose_config', 'custom_compose_config_string')
    if all(not data.get(k) for k in compose_keys):
        verrors.add(f'{schema}.custom_compose_config', 'This field is required')
    elif all(data.get(k) for k in compose_keys):
        verrors.add(f'{schema}.custom_compose_config_string', 'Only one of these fields should be provided')

    compose_config = data.get('custom_compose_config')
    if data.get('custom_compose_config_string'):
        try:
            compose_config = yaml.safe_load(data['custom_compose_config_string'])
        except yaml.YAMLError:
            # Bug fix: previously this error was added under a hardcoded
            # 'app_create' prefix instead of the caller-supplied `schema`.
            verrors.add(f'{schema}.custom_compose_config_string', 'Invalid YAML provided')

    verrors.check()
    return compose_config
| 890 | Python | .py | 17 | 45.823529 | 108 | 0.705882 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,813 | custom_app.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/custom_app.py | import contextlib
import shutil
from catalog_reader.custom_app import get_version_details
from middlewared.service import CallError, Service
from .compose_utils import compose_action
from .custom_app_utils import validate_payload
from .ix_apps.lifecycle import get_rendered_template_config_of_app, update_app_config
from .ix_apps.metadata import update_app_metadata
from .ix_apps.path import get_installed_app_path
from .ix_apps.setup import setup_install_app_dir
class AppCustomService(Service):
    """Creation of custom (user supplied docker-compose) apps and conversion of catalog apps to custom ones."""

    class Config:
        namespace = 'app.custom'
        private = True

    def convert(self, job, app_name):
        """Convert installed catalog app `app_name` into a custom (compose based) app.

        Raises CallError when the app is already custom or when no rendered
        compose config exists for the currently installed version.
        """
        app = self.middleware.call_sync('app.get_instance', app_name)
        if app['custom_app'] is True:
            raise CallError(f'{app_name!r} is already a custom app')

        rendered_config = get_rendered_template_config_of_app(app_name, app['version'])
        if not rendered_config:
            raise CallError(f'No rendered config found for {app_name!r}')

        job.set_progress(10, 'Completed initial validation for conversion of app to custom app')
        # What needs to happen here is the following:
        # Merge all available compose files into one of the app and hold on to it
        # Do an uninstall of the app and create it again with the new compose file
        # Update metadata to reflect that this is a custom app
        # Finally update collective metadata
        job.set_progress(20, 'Removing existing app\'s docker resources')
        # The anonymous object below mimics a job so delete_internal's progress updates are no-ops.
        self.middleware.call_sync(
            'app.delete_internal', type('dummy_job', (object,), {'set_progress': lambda *args: None})(),
            app_name, app, {'remove_images': False, 'remove_ix_volumes': False, 'send_event': False}
        )

        return self.create({
            'app_name': app_name,
            'custom_compose_config': rendered_config,
            'conversion': True,
        }, job)

    def create(self, data, job=None, progress_base=0):
        """
        Create a custom app.

        `data` must contain `app_name` plus exactly one of `custom_compose_config` /
        `custom_compose_config_string`; `conversion` is set internally by convert().
        On failure, partially created resources are removed before re-raising.
        """
        compose_config = validate_payload(data, 'app_create')
        app_being_converted = data.get('conversion', False)

        def update_progress(percentage_done, message):
            # Scale progress into the [progress_base, 100] window of the outer job.
            nonlocal progress_base
            job.set_progress(int((100 - progress_base) * (percentage_done / 100)) + progress_base, message)

        # For debug purposes
        job = job or type('dummy_job', (object,), {'set_progress': lambda *args: None})()

        update_progress(25, 'Initial validation completed for custom app creation')
        app_name = data['app_name']
        app_version_details = get_version_details()
        version = app_version_details['version']
        try:
            update_progress(35, 'Setting up App directory')
            setup_install_app_dir(app_name, app_version_details, custom_app=True)
            update_app_config(app_name, version, compose_config, custom_app=True)
            update_app_metadata(app_name, app_version_details, migrated=False, custom_app=True)
            if app_being_converted:
                msg = 'App conversion in progress, pulling images'
            else:
                msg = 'App installation in progress, pulling images'
            update_progress(60, msg)
            compose_action(app_name, version, 'up', force_recreate=True, remove_orphans=True)
        except Exception as e:
            update_progress(
                80,
                'Failure occurred while '
                f'{"converting" if app_being_converted else "installing"} {app_name!r}, cleaning up'
            )
            self.middleware.call_sync('app.remove_failed_resources', app_name, version)
            raise e from None
        else:
            # Refresh collective metadata so queries reflect the new app before events fire.
            self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
            app_info = self.middleware.call_sync('app.get_instance', app_name)
            if app_being_converted is False:
                # We only want to send this when a new custom app is being installed, not when an
                # existing app is being converted to a custom app
                self.middleware.send_event('app.query', 'ADDED', id=app_name, fields=app_info)
            job.set_progress(
                100, f'{app_name!r} {"converted to custom app" if app_being_converted else "installed"} successfully'
            )
            return app_info
| 4,366 | Python | .py | 82 | 43.060976 | 117 | 0.644012 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,814 | schema_normalization.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/schema_normalization.py | import os
from collections.abc import Callable
from middlewared.schema import Cron, Dict, Int, List, Str
from middlewared.service import Service
from .ix_apps.path import get_app_volume_path
from .schema_utils import get_list_item_from_value, RESERVED_NAMES
REF_MAPPING = {
'definitions/certificate': 'certificate',
'definitions/certificate_authority': 'certificate_authorities',
'definitions/gpu_configuration': 'gpu_configuration',
'normalize/acl': 'acl',
'normalize/ix_volume': 'ix_volume',
}
class AppSchemaService(Service):
    """Normalization of user supplied app config values (certificates, GPUs, ix-volumes, ACLs)."""

    class Config:
        namespace = 'app.schema'
        private = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sanity check: every target in REF_MAPPING must have a matching normalize_* method here.
        for method in REF_MAPPING.values():
            assert isinstance(getattr(self, f'normalize_{method}'), Callable) is True

    async def normalize_and_validate_values(
        self, item_details, values, update, app_dir, app_data=None, perform_actions=True,
    ):
        """Validate `values` against the app's schema, then normalize them.

        When `perform_actions` is true, side effects queued during normalization
        (ix-volume dataset creation, ACL application) are executed as well.
        """
        dict_obj = await self.middleware.call(
            'app.schema.validate_values', item_details, values, update, app_data,
        )
        new_values, context = await self.normalize_values(dict_obj, values, update, {
            'app': {
                'name': app_dir.split('/')[-1],  # app directory basename doubles as the app name
                'path': app_dir,
            },
            'actions': [],
        })
        if perform_actions:
            await self.perform_actions(context)
        return new_values

    async def perform_actions(self, context):
        # Volumes must exist before ACLs can be applied to paths inside them,
        # so 'update_volumes' actions are ordered first.
        for action in sorted(context['actions'], key=lambda d: 0 if d['method'] == 'update_volumes' else 1):
            await self.middleware.call(f'app.schema.action.{action["method"]}', *action['args'])

    async def normalize_values(self, dict_obj, values, update, context):
        for k in RESERVED_NAMES:
            # We reset reserved names from configuration as these are automatically going to
            # be added by middleware during the process of normalising the values
            values[k[0]] = k[1]()

        for attr in filter(lambda v: v.name in values, dict_obj.attrs.values()):
            values[attr.name] = await self.normalize_question(attr, values[attr.name], update, values, context)

        return values, context

    async def normalize_question(self, question_attr, value, update, complete_config, context):
        """Recursively normalize a single question's value, honoring any mapped $ref handlers."""
        if value is None and isinstance(question_attr, (Dict, List)):
            # This shows that the value provided has been explicitly specified as null and if validation
            # was okay with it, we shouldn't try to normalize it
            return value

        if isinstance(question_attr, Dict) and not isinstance(question_attr, Cron):
            for attr in filter(lambda v: v.name in value, question_attr.attrs.values()):
                value[attr.name] = await self.normalize_question(
                    attr, value[attr.name], update, complete_config, context
                )

        if isinstance(question_attr, List):
            for index, item in enumerate(value):
                _, attr = get_list_item_from_value(item, question_attr)
                if attr:
                    value[index] = await self.normalize_question(attr, item, update, complete_config, context)

        for ref in filter(lambda k: k in REF_MAPPING, question_attr.ref):
            value = await self.middleware.call(
                f'app.schema.normalize_{REF_MAPPING[ref]}', question_attr, value, complete_config, context
            )

        return value

    async def normalize_certificate(self, attr, value, complete_config, context):
        """Embed full certificate details for the referenced certificate id into the config."""
        assert isinstance(attr, Int) is True
        if not value:
            return value

        complete_config['ix_certificates'][value] = await self.middleware.call('certificate.get_instance', value)

        return value

    async def normalize_certificate_authorities(self, attr, value, complete_config, context):
        """Embed full CA details for the referenced certificate authority id into the config."""
        assert isinstance(attr, Int) is True
        if not value:
            return value

        complete_config['ix_certificate_authorities'][value] = await self.middleware.call(
            'certificateauthority.get_instance', value
        )

        return value

    async def normalize_gpu_configuration(self, attr, value, complete_config, context):
        """Drop GPU selections that are no longer valid choices on this system."""
        gpu_choices = {
            gpu['pci_slot']: gpu
            for gpu in await self.middleware.call('app.gpu_choices_internal') if not gpu['error']
        }
        # NOTE(review): this disables use_all_gpus when every usable GPU is NVIDIA
        # (or none exist) — confirm that is the intended condition.
        if not any(gpu['vendor'] != 'NVIDIA' for gpu in gpu_choices.values()):
            value['use_all_gpus'] = False

        for nvidia_gpu_pci_slot in list(value['nvidia_gpu_selection']):
            if nvidia_gpu_pci_slot not in gpu_choices or gpu_choices[nvidia_gpu_pci_slot]['vendor'] != 'NVIDIA':
                value['nvidia_gpu_selection'].pop(nvidia_gpu_pci_slot)

        return value

    async def normalize_ix_volume(self, attr, value, complete_config, context):
        # Let's allow ix volume attr to be a string as well making it easier to define a volume in questions.yaml
        assert isinstance(attr, (Dict, Str)) is True

        if isinstance(attr, Dict):
            vol_data = {'name': value['dataset_name'], 'properties': value.get('properties') or {}}
            acl_dict = value.get('acl_entries', {})
        else:
            vol_data = {'name': value, 'properties': {}}
            acl_dict = None

        ds_name = vol_data['name']

        # Queue (or extend) a single 'update_volumes' action shared by all ix-volume questions.
        action_dict = next((d for d in context['actions'] if d['method'] == 'update_volumes'), None)
        if not action_dict:
            context['actions'].append({
                'method': 'update_volumes',
                'args': [context['app']['name'], [vol_data]],
            })
        elif ds_name not in [v['name'] for v in action_dict['args'][-1]]:
            action_dict['args'][-1].append(vol_data)
        else:
            # We already have this in action dict, let's not add a duplicate
            return value

        host_path = os.path.join(get_app_volume_path(context['app']['name']), ds_name)
        complete_config['ix_volumes'][ds_name] = host_path

        if acl_dict:
            # ACL entries declared on the volume are applied to the volume's host path.
            acl_dict['path'] = host_path
            await self.normalize_acl(Dict(), acl_dict, complete_config, context)

        return value

    async def normalize_acl(self, attr, value, complete_config, context):
        assert isinstance(attr, Dict) is True
        if not value or any(not value[k] for k in ('entries', 'path')):
            return value

        # Queue (or extend) a single 'apply_acls' action keyed by path.
        if (action_dict := next((d for d in context['actions'] if d['method'] == 'apply_acls'), None)) is None:
            context['actions'].append({
                'method': 'apply_acls',
                'args': [{value['path']: value}],
            })
        elif value['path'] not in action_dict['args'][-1]:
            action_dict['args'][-1][value['path']] = value
        else:
            # We already have this in action dict, let's not add a duplicate
            return value

        return value
| 7,013 | Python | .py | 135 | 41.562963 | 113 | 0.618365 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,815 | schema_validation.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/schema_validation.py | from pathlib import Path
from middlewared.schema import Dict
from middlewared.service import Service
from middlewared.utils import filter_list
from .schema_utils import construct_schema, get_list_item_from_value, NOT_PROVIDED, RESERVED_NAMES
VALIDATION_REF_MAPPING = {
'definitions/certificate': 'certificate',
'definitions/certificateAuthority': 'certificate_authority',
'definitions/port': 'port_available_on_node',
'normalize/acl': 'acl_entries',
}
# FIXME: See which are no longer valid
# https://github.com/truenas/middleware/blob/249ed505a121e5238e225a89d3a1fa60f2e55d27/src/middlewared/middlewared/
# plugins/chart_releases_linux/validation.py#L13
class AppSchemaService(Service):
    """Schema-driven validation of user supplied app config values."""

    class Config:
        namespace = 'app.schema'
        private = True

    async def validate_values(self, app_version_details, new_values, update, app_data=None):
        """Validate `new_values` against the questions schema of `app_version_details`.

        Returns the constructed Dict schema object; raises ValidationErrors on
        any schema or question-specific failure.
        """
        for k in RESERVED_NAMES:
            new_values.pop(k[0], None)

        # NOTE: relies on construct_schema() returning its result dict in exactly this key order.
        verrors, new_values, dict_obj, schema_name = (
            construct_schema(
                app_version_details, new_values, update, (app_data or {}).get('config', NOT_PROVIDED)
            )
        ).values()
        verrors.check()

        # If schema is okay, we see if we have question specific validation to be performed
        questions = {}
        for variable in app_version_details['schema']['questions']:
            questions[variable['variable']] = variable

        for key in filter(lambda k: k in questions, new_values):
            await self.validate_question(
                verrors=verrors,
                parent_value=new_values,
                value=new_values[key],
                question=questions[key],
                parent_attr=dict_obj,
                var_attr=dict_obj.attrs[key],
                schema_name=f'{schema_name}.{questions[key]["variable"]}',
                app_data=app_data,
            )

        verrors.check()
        return dict_obj

    async def validate_question(
        self, verrors, parent_value, value, question, parent_attr, var_attr, schema_name, app_data=None
    ):
        """Recursively run question-specific ($ref-mapped) validators, collecting errors in `verrors`."""
        schema = question['schema']

        if schema['type'] == 'dict' and value:
            dict_attrs = {v['variable']: v for v in schema['attrs']}
            for k in filter(lambda k: k in dict_attrs, value):
                await self.validate_question(
                    verrors, value, value[k], dict_attrs[k],
                    var_attr, var_attr.attrs[k], f'{schema_name}.{k}', app_data,
                )

        elif schema['type'] == 'list' and value:
            for index, item in enumerate(value):
                item_index, attr = get_list_item_from_value(item, var_attr)
                if attr:
                    await self.validate_question(
                        verrors, value, item, schema['items'][item_index],
                        var_attr, attr, f'{schema_name}.{index}', app_data,
                    )

        # FIXME: See if this is valid or not and port appropriately
        '''
        if schema['type'] == 'hostpath':
            await self.validate_host_path_field(value, verrors, schema_name)
        '''

        for validator_def in filter(lambda k: k in VALIDATION_REF_MAPPING, schema.get('$ref', [])):
            await self.middleware.call(
                f'app.schema.validate_{VALIDATION_REF_MAPPING[validator_def]}',
                verrors, value, question, schema_name, app_data,
            )

        subquestions_enabled = (
            schema['show_subquestions_if'] == value
            if 'show_subquestions_if' in schema else 'subquestions' in schema
        )
        if subquestions_enabled:
            for sub_question in schema.get('subquestions', []):
                # TODO: Add support for nested subquestions validation for List schema types.
                if isinstance(parent_attr, Dict) and sub_question['variable'] in parent_value:
                    item_key, attr = sub_question['variable'], parent_attr.attrs[sub_question['variable']]
                    await self.validate_question(
                        verrors, parent_value, parent_value[sub_question['variable']], sub_question,
                        parent_attr, attr, f'{schema_name}.{item_key}', app_data,
                    )

        return verrors

    async def validate_certificate(self, verrors, value, question, schema_name, app_data):
        """Ensure the referenced certificate id is a valid app certificate choice."""
        if not value:
            return

        if not filter_list(await self.middleware.call('app.certificate_choices'), [['id', '=', value]]):
            verrors.add(schema_name, 'Unable to locate certificate.')

    async def validate_certificate_authority(self, verrors, value, question, schema_name, app_data):
        """Ensure the referenced CA id is a valid app certificate authority choice."""
        if not value:
            return

        if not filter_list(
            await self.middleware.call('app.certificate_authority_choices'), [['id', '=', value]]
        ):
            verrors.add(schema_name, 'Unable to locate certificate authority.')

    def validate_acl_entries(self, verrors, value, question, schema_name, app_data):
        """Reject ACLs on non-empty paths unless `force` is set; reject missing paths."""
        try:
            if value.get('path') and not value.get('options', {}).get('force') and next(
                Path(value['path']).iterdir(), None
            ):
                verrors.add(schema_name, f'{value["path"]}: path contains existing data and `force` was not specified')
        except FileNotFoundError:
            verrors.add(schema_name, f'{value["path"]}: path does not exist')

    async def validate_port_available_on_node(self, verrors, value, question, schema_name, app_data):
        """Flag `value` when the host port is already used by another app or service."""
        for port_entry in (app_data['active_workloads']['used_ports'] if app_data else []):
            for host_port in port_entry['host_ports']:
                if value == host_port['host_port']:
                    # TODO: This still leaves a case where user has multiple ports in a single app and mixes
                    # them to the same value however in this case we will still get an error raised by docker.
                    return

        if value in await self.middleware.call('app.used_ports') or value in await self.middleware.call(
            'port.ports_mapping', 'app'
        ):
            verrors.add(schema_name, 'Port is already in use.')
| 6,229 | Python | .py | 118 | 40.923729 | 119 | 0.606409 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,816 | upgrade.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/upgrade.py | from pkg_resources import parse_version
from middlewared.schema import accepts, Dict, List, Str, Ref, returns
from middlewared.service import CallError, job, private, Service, ValidationErrors
from .compose_utils import compose_action
from .ix_apps.lifecycle import add_context_to_values, get_current_app_config, update_app_config
from .ix_apps.path import get_installed_app_path
from .ix_apps.upgrade import upgrade_config
from .version_utils import get_latest_version_from_app_versions
class AppService(Service):
    """App upgrade workflow: version discovery, upgrade execution and update alerts."""

    class Config:
        namespace = 'app'
        cli_namespace = 'app'

    @accepts(
        Str('app_name'),
        Dict(
            'options',
            Dict('values', additional_attrs=True, private=True),
            Str('app_version', empty=False, default='latest'),
        ),
        roles=['APPS_WRITE'],
    )
    @returns(Ref('app_entry'))
    @job(lock=lambda args: f'app_upgrade_{args[0]}')
    def upgrade(self, job, app_name, options):
        """
        Upgrade `app_name` app to `app_version`.
        """
        app = self.middleware.call_sync('app.get_instance', app_name)
        if app['state'] == 'STOPPED':
            raise CallError('In order to upgrade an app, it must not be in stopped state')

        if app['upgrade_available'] is False:
            raise CallError(f'No upgrade available for {app_name!r}')

        if app['custom_app']:
            # Custom apps have no catalog versions; "upgrade" just re-pulls images and redeploys.
            job.set_progress(20, 'Pulling app images')
            self.middleware.call_sync('app.pull_images_internal', app_name, app, {'redeploy': True})
            job.set_progress(100, 'App successfully upgraded and redeployed')
            return

        job.set_progress(0, f'Retrieving versions for {app_name!r} app')
        versions_config = self.middleware.call_sync('app.get_versions', app, options)
        upgrade_version = versions_config['specified_version']

        job.set_progress(
            20, f'Validating {app_name!r} app upgrade to {upgrade_version["version"]!r} version'
        )
        # In order for upgrade to complete, following must happen
        # 1) New version should be copied over to app config's dir
        # 2) Metadata should be updated to reflect new version
        # 3) Necessary config changes should be added like context and new user specified values
        # 4) New compose files should be rendered with the config changes
        # 5) Docker should be notified to recreate resources and to let upgrade to commence
        # 6) Update collective metadata config to reflect new version
        # 7) Finally create ix-volumes snapshot for rollback
        with upgrade_config(app_name, upgrade_version):
            config = get_current_app_config(app_name, app['version'])
            config.update(options['values'])
            new_values = self.middleware.call_sync(
                'app.schema.normalize_and_validate_values', upgrade_version, config, False,
                get_installed_app_path(app_name), app,
            )
            new_values = add_context_to_values(
                app_name, new_values, upgrade_version['app_metadata'], upgrade=True, upgrade_metadata={
                    'old_version_metadata': app['metadata'],
                    'new_version_metadata': upgrade_version['app_metadata'],
                }
            )
            update_app_config(app_name, upgrade_version['version'], new_values)

            job.set_progress(40, f'Configuration updated for {app_name!r}, upgrading app')

        self.middleware.send_event(
            'app.query', 'CHANGED', id=app_name, fields=self.middleware.call_sync('app.get_instance', app_name)
        )
        try:
            compose_action(
                app_name, upgrade_version['version'], 'up', force_recreate=True, remove_orphans=True, pull_images=True,
            )
        finally:
            # Always refresh collective metadata, even if the compose action failed.
            self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)

        job.set_progress(50, 'Created snapshot for upgrade')
        if app_volume_ds := self.middleware.call_sync('app.get_app_volume_ds', app_name):
            # Snapshot ix-volumes at the old version so a rollback target exists.
            snap_name = f'{app_volume_ds}@{app["version"]}'
            if self.middleware.call_sync('zfs.snapshot.query', [['id', '=', snap_name]]):
                self.middleware.call_sync('zfs.snapshot.delete', snap_name, {'recursive': True})

            self.middleware.call_sync(
                'zfs.snapshot.create', {
                    'dataset': app_volume_ds, 'name': app['version'], 'recursive': True
                }
            )
        job.set_progress(100, 'Upgraded app successfully')
        app = self.middleware.call_sync('app.get_instance', app_name)
        if app['upgrade_available'] is False:
            # We have this conditional for the case if user chose not to upgrade to latest version
            # and jump to some intermediate version which is not latest
            self.middleware.call_sync('alert.oneshot_delete', 'AppUpdate', app_name)

        return app

    @accepts(
        Str('app_name'),
        Dict(
            'options',
            Str('app_version', empty=False, default='latest'),
        ),
        roles=['APPS_READ'],
    )
    @returns(Dict(
        Str('latest_version', description='Latest version available for the app'),
        Str('latest_human_version', description='Latest human readable version available for the app'),
        Str('upgrade_version', description='Version user has requested to be upgraded at'),
        Str('upgrade_human_version', description='Human readable version user has requested to be upgraded at'),
        Str('changelog', max_length=None, null=True, description='Changelog for the upgrade version'),
        List('available_versions_for_upgrade', items=[
            Dict(
                'version_info',
                Str('version', description='Version of the app'),
                Str('human_version', description='Human readable version of the app'),
            )
        ], description='List of available versions for upgrade'),
    ))
    async def upgrade_summary(self, app_name, options):
        """
        Retrieve upgrade summary for `app_name`.
        """
        app = await self.middleware.call('app.get_instance', app_name)
        if app['upgrade_available'] is False:
            raise CallError(f'No upgrade available for {app_name!r}')

        versions_config = await self.get_versions(app, options)
        return {
            'latest_version': versions_config['latest_version']['version'],
            'latest_human_version': versions_config['latest_version']['human_version'],
            'upgrade_version': versions_config['specified_version']['version'],
            'upgrade_human_version': versions_config['specified_version']['human_version'],
            'changelog': versions_config['specified_version']['changelog'],
            'available_versions_for_upgrade': [
                {'version': v['version'], 'human_version': v['human_version']}
                for v in versions_config['versions'].values()
                if parse_version(v['version']) > parse_version(app['version'])
            ],
        }

    @private
    async def get_versions(self, app, options):
        """Resolve `options['app_version']` (or 'latest') against catalog versions for `app`.

        Accepts an app dict or an app name; raises when the requested version
        does not exist or is not newer than the installed one.
        """
        if isinstance(app, str):
            app = await self.middleware.call('app.get_instance', app)
        metadata = app['metadata']
        app_details = await self.middleware.call(
            'catalog.get_app_details', metadata['name'], {'train': metadata['train']}
        )
        new_version = options['app_version']
        if new_version == 'latest':
            new_version = get_latest_version_from_app_versions(app_details['versions'])

        if new_version not in app_details['versions']:
            raise CallError(f'Unable to locate {new_version!r} version for {metadata["name"]!r} app')

        verrors = ValidationErrors()
        if parse_version(new_version) <= parse_version(app['version']):
            verrors.add('options.app_version', 'Upgrade version must be greater than current version')
        verrors.check()

        return {
            'specified_version': app_details['versions'][new_version],
            'versions': app_details['versions'],
            'latest_version': app_details['versions'][get_latest_version_from_app_versions(app_details['versions'])],
        }

    @private
    async def clear_upgrade_alerts_for_all(self):
        """Dismiss AppUpdate alerts for every app."""
        for app in await self.middleware.call('app.query'):
            await self.middleware.call('alert.oneshot_delete', 'AppUpdate', app['id'])

    @private
    async def check_upgrade_alerts(self):
        """Raise/dismiss AppUpdate alerts based on each app's upgrade availability."""
        for app in await self.middleware.call('app.query'):
            if app['upgrade_available']:
                await self.middleware.call('alert.oneshot_create', 'AppUpdate', {'name': app['id']})
            else:
                await self.middleware.call('alert.oneshot_delete', 'AppUpdate', app['id'])
| 8,872 | Python | .py | 167 | 42.586826 | 119 | 0.62523 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
def get_gpu_base_dict() -> dict:
    """Return a fresh skeleton entry describing a single GPU choice."""
    return dict(
        vendor='',
        description='',
        error=None,
        vendor_specific_config={},
        gpu_details={},
        pci_slot=None,
    )


def get_normalized_gpu_choices(all_gpus_info: list[dict], nvidia_gpus: dict) -> list[dict]:
    """Normalize raw GPU info (deduped by PCI slot) into app GPU choice entries.

    NVIDIA entries are enriched from `nvidia_gpus` (keyed by PCI slot); entries
    with missing/malformed details or unavailable to the host get `error` set.
    """
    deduped = {info['addr']['pci_slot']: info for info in all_gpus_info}
    normalized = []
    for slot, info in deduped.items():
        entry = get_gpu_base_dict()
        entry.update({
            'vendor': info['vendor'],
            'description': info['description'],
            'gpu_details': info,
            'pci_slot': slot,
        })
        normalized.append(entry)

        if info['vendor'] == 'NVIDIA':
            nvidia_gpu = nvidia_gpus.get(slot)
            if nvidia_gpu is None:
                entry['error'] = 'Unable to locate GPU details from procfs'
                continue

            uuid = nvidia_gpu.get('gpu_uuid')
            if not uuid:
                detail_error = 'GPU UUID not found'
            elif '?' in uuid:
                detail_error = 'Malformed GPU UUID found'
            else:
                detail_error = None

            if detail_error is not None:
                entry['error'] = detail_error
                entry['nvidia_gpu_details'] = nvidia_gpu
                continue

            entry['vendor_specific_config'] = {'uuid': uuid}
            entry['description'] = nvidia_gpu.get('model') or entry['description']

        if not info['available_to_host']:
            entry['error'] = 'GPU not available to host'

    return normalized
| 1,814 | Python | .py | 49 | 24.612245 | 91 | 0.483504 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,818 | logs.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/logs.py | import errno
import docker.errors
from dateutil.parser import parse, ParserError
from middlewared.event import EventSource
from middlewared.schema import Dict, Int, Str
from middlewared.service import CallError
from middlewared.validators import Range
from .ix_apps.utils import AppState
from .ix_apps.docker.utils import get_docker_client
class AppContainerLogsFollowTailEventSource(EventSource):

    """
    Retrieve logs of a container/service in an app.

    Name of app and id of container/service is required.
    Optionally `tail_lines` and `limit_bytes` can be specified.

    `tail_lines` is an option to select how many lines of logs to retrieve for the said container. It
    defaults to 500. If set to `null`, it will retrieve complete logs of the container.
    """

    ACCEPTS = Dict(
        Int('tail_lines', default=500, validators=[Range(min_=1)], null=True),
        Str('app_name', required=True),
        Str('container_id', required=True),
    )
    RETURNS = Dict(
        Str('data', required=True),
        Str('timestamp', required=True, null=True)
    )

    def __init__(self, *args, **kwargs):
        super(AppContainerLogsFollowTailEventSource, self).__init__(*args, **kwargs)
        # Active docker log stream; kept so cancel() can close it.
        self.logs_stream = None

    def validate_log_args(self, app_name, container_id):
        # Ensure the app exists, is not stopped and actually owns `container_id`.
        app = self.middleware.call_sync('app.get_instance', app_name)
        if app['state'] not in (AppState.CRASHED.value, AppState.RUNNING.value, AppState.DEPLOYING.value):
            raise CallError(f'Unable to retrieve logs of stopped {app_name!r} app')

        if not any(c['id'] == container_id for c in app['active_workloads']['container_details']):
            raise CallError(f'Container "{container_id}" not found in app "{app_name}"', errno=errno.ENOENT)

    def run_sync(self):
        """Stream docker logs for the requested container, emitting one event per line."""
        app_name = self.arg['app_name']
        container_id = self.arg['container_id']
        # A null tail_lines means full logs; docker SDK expects the string 'all' for that.
        tail_lines = self.arg['tail_lines'] or 'all'
        self.validate_log_args(app_name, container_id)

        with get_docker_client() as docker_client:
            try:
                container = docker_client.containers.get(container_id)
            except docker.errors.NotFound:
                raise CallError(f'Container "{container_id}" not found')

            self.logs_stream = container.logs(stream=True, follow=True, timestamps=True, tail=tail_lines)

            for log_entry in map(bytes.decode, self.logs_stream):
                # Event should contain a timestamp in RFC3339 format, we should parse it and supply it
                # separately so UI can highlight the timestamp giving us a cleaner view of the logs
                timestamp = log_entry.split(maxsplit=1)[0].strip()
                try:
                    timestamp = str(parse(timestamp))
                except (TypeError, ParserError):
                    timestamp = None
                else:
                    # Timestamp parsed successfully; strip it from the log line itself.
                    log_entry = log_entry.split(maxsplit=1)[-1].lstrip()

                self.send_event('ADDED', fields={'data': log_entry, 'timestamp': timestamp})

    async def cancel(self):
        await super().cancel()
        # Closing the stream unblocks the run_sync() iteration.
        if self.logs_stream:
            await self.middleware.run_in_thread(self.logs_stream.close)

    async def on_finish(self):
        self.logs_stream = None
def setup(middleware):
middleware.register_event_source(
'app.container_log_follow', AppContainerLogsFollowTailEventSource, roles=['APPS_READ']
)
| 3,434 | Python | .py | 67 | 42.313433 | 108 | 0.661189 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,819 | schema_action_context.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/schema_action_context.py | import os
from middlewared.service import CallError, Service
from .ix_apps.path import get_app_parent_volume_ds_name
from .utils import DatasetDefaults
class AppSchemaActions(Service):
class Config:
namespace = 'app.schema.action'
private = True
async def update_volumes(self, app_name, volumes):
app_volume_ds = get_app_parent_volume_ds_name(
(await self.middleware.call('docker.config'))['dataset'], app_name
)
user_wants = {app_volume_ds: {'properties': {}}} | {os.path.join(app_volume_ds, v['name']): v for v in volumes}
existing_datasets = {
d['id'] for d in await self.middleware.call(
'zfs.dataset.query', [['id', 'in', list(user_wants)]], {'extra': {'retrieve_properties': False}}
)
}
for create_ds in sorted(set(user_wants) - existing_datasets):
await self.middleware.call(
'zfs.dataset.create', {
'properties': user_wants[create_ds]['properties'] | DatasetDefaults.create_time_props(),
'name': create_ds, 'type': 'FILESYSTEM',
}
)
await self.middleware.call('zfs.dataset.mount', create_ds)
async def apply_acls(self, acls_to_apply):
bulk_job = await self.middleware.call(
'core.bulk', 'filesystem.add_to_acl', [[acls_to_apply[acl_path]] for acl_path in acls_to_apply],
)
await bulk_job.wait()
failures = []
for status, acl_path in zip(bulk_job.result, acls_to_apply):
if status['error']:
failures.append((acl_path, status['error']))
if failures:
err_str = 'Failed to apply ACLs to the following paths: \n'
for index, entry in enumerate(failures):
err_str += f'{index + 1}) {entry[0]}: {entry[1]}\n'
raise CallError(err_str)
| 1,921 | Python | .py | 40 | 37.35 | 119 | 0.588235 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,820 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/utils.py | import os
import subprocess
from middlewared.plugins.docker.state_utils import DatasetDefaults, IX_APPS_MOUNT_PATH # noqa
PROJECT_PREFIX = 'ix-'
def get_app_stop_cache_key(app_name: str) -> str:
return f'app_stop_{app_name}'
def run(*args, **kwargs) -> subprocess.CompletedProcess:
shell = isinstance(args[0], str)
if isinstance(args[0], list):
args = tuple(args[0])
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
kwargs.setdefault('timeout', 60)
check = kwargs.pop('check', False)
env = kwargs.pop('env', None) or os.environ
proc = subprocess.Popen(
args, stdout=kwargs['stdout'], stderr=kwargs['stderr'], shell=shell,
encoding='utf8', errors='ignore', env=env,
)
stdout = ''
try:
stdout, stderr = proc.communicate(timeout=kwargs['timeout'])
except subprocess.TimeoutExpired:
proc.kill()
stderr = 'Timed out waiting for response'
proc.returncode = -1
cp = subprocess.CompletedProcess(args, proc.returncode, stdout=stdout, stderr=stderr)
if check and cp.returncode:
raise subprocess.CalledProcessError(cp.returncode, cp.args, stderr=stderr)
return cp
| 1,232 | Python | .py | 30 | 35.5 | 94 | 0.692372 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,821 | app_scale.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/app_scale.py | from middlewared.schema import accepts, Str, returns
from middlewared.service import job, Service
from .compose_utils import compose_action
from .ix_apps.query import get_default_workload_values
from .utils import get_app_stop_cache_key
class AppService(Service):
class Config:
namespace = 'app'
cli_namespace = 'app'
@accepts(Str('app_name'), roles=['APPS_WRITE'])
@returns()
@job(lock=lambda args: f'app_stop_{args[0]}')
def stop(self, job, app_name):
"""
Stop `app_name` app.
"""
app_config = self.middleware.call_sync('app.get_instance', app_name)
cache_key = get_app_stop_cache_key(app_name)
try:
self.middleware.call_sync('cache.put', cache_key, True)
self.middleware.send_event(
'app.query', 'CHANGED', id=app_name,
fields=app_config | {'state': 'STOPPING', 'active_workloads': get_default_workload_values()},
)
job.set_progress(20, f'Stopping {app_name!r} app')
compose_action(
app_name, app_config['version'], 'down', remove_orphans=True, remove_images=False, remove_volumes=False,
)
job.set_progress(100, f'Stopped {app_name!r} app')
finally:
self.middleware.send_event(
'app.query', 'CHANGED', id=app_name,
fields=app_config | {'state': 'STOPPED', 'active_workloads': get_default_workload_values()},
)
self.middleware.call_sync('cache.pop', cache_key)
@accepts(Str('app_name'), roles=['APPS_WRITE'])
@returns()
@job(lock=lambda args: f'app_start_{args[0]}')
def start(self, job, app_name):
"""
Start `app_name` app.
"""
app_config = self.middleware.call_sync('app.get_instance', app_name)
job.set_progress(20, f'Starting {app_name!r} app')
compose_action(app_name, app_config['version'], 'up', force_recreate=True, remove_orphans=True)
job.set_progress(100, f'Started {app_name!r} app')
@accepts(Str('app_name'), roles=['APPS_WRITE'])
@returns()
@job(lock=lambda args: f'app_redeploy_{args[0]}')
async def redeploy(self, job, app_name):
"""
Redeploy `app_name` app.
"""
app = await self.middleware.call('app.get_instance', app_name)
return await self.middleware.call('app.update_internal', job, app, {'values': {}}, 'Redeployment')
| 2,470 | Python | .py | 55 | 36.145455 | 120 | 0.607558 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,822 | pull_images.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/pull_images.py | from middlewared.plugins.apps_images.utils import normalize_reference
from middlewared.schema import accepts, Bool, Dict, List, returns, Str
from middlewared.service import job, private, Service
from .compose_utils import compose_action
class AppService(Service):
class Config:
namespace = 'app'
cli_namespace = 'app'
@accepts(Str('name'), roles=['APPS_READ'])
@returns(List('images', items=[Str('image')]))
async def outdated_docker_images(self, app_name):
"""
Returns a list of outdated docker images for the specified app `name`.
"""
app = await self.middleware.call('app.get_instance', app_name)
image_update_cache = await self.middleware.call('app.image.op.get_update_cache', True)
images = []
for image_tag in app['active_workloads']['images']:
if image_update_cache.get(normalize_reference(image_tag)['complete_tag']):
images.append(image_tag)
return images
@accepts(
Str('name'),
Dict(
'options',
Bool('redeploy', default=True),
),
roles=['APPS_WRITE']
)
@returns()
@job(lock=lambda args: f'pull_images_{args[0]}')
def pull_images(self, job, app_name, options):
"""
Pulls docker images for the specified app `name`.
"""
app = self.middleware.call_sync('app.get_instance', app_name)
return self.pull_images_internal(app_name, app, options, job)
@private
def pull_images_internal(self, app_name, app, options, job=None):
job = job or type('dummy_job', (object,), {'set_progress': lambda *args: None})()
job.set_progress(20, 'Pulling app images')
compose_action(app_name, app['version'], action='pull')
job.set_progress(80 if options['redeploy'] else 100, 'Images pulled successfully')
if options['redeploy']:
self.middleware.call_sync('app.redeploy', app_name).wait_sync(raise_error=True)
job.set_progress(100, 'App redeployed successfully')
| 2,069 | Python | .py | 46 | 37 | 94 | 0.64002 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,823 | metadata.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/metadata.py | import os
import yaml
from middlewared.service import job, Service
from .ix_apps.lifecycle import get_current_app_config
from .ix_apps.metadata import get_app_metadata
from .ix_apps.path import get_app_parent_config_path, get_collective_config_path, get_collective_metadata_path
class AppMetadataService(Service):
class Config:
namespace = 'app.metadata'
private = True
@job(lock='app_metadata_generate', lock_queue_size=1)
def generate(self, job, blacklisted_apps=None):
config = {}
metadata = {}
blacklisted_apps = blacklisted_apps or []
with os.scandir(get_app_parent_config_path()) as scan:
for entry in filter(lambda e: e.name not in blacklisted_apps and e.is_dir(), scan):
if not (app_metadata := get_app_metadata(entry.name)):
# The app is malformed or something is seriously wrong with it
continue
metadata[entry.name] = app_metadata
config[entry.name] = get_current_app_config(entry.name, app_metadata['version'])
with open(get_collective_metadata_path(), 'w') as f:
f.write(yaml.safe_dump(metadata))
with open(get_collective_config_path(), 'w') as f:
f.write(yaml.safe_dump(config))
job.set_progress(100, 'Updated metadata configuration for apps')
| 1,376 | Python | .py | 27 | 41.888889 | 110 | 0.660941 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,824 | ix_volumes.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_volumes.py | import collections
from middlewared.schema import accepts, Bool, Dict, returns, Str
from middlewared.service import filterable, filterable_returns, Service
from middlewared.utils import filter_list
from .ix_apps.path import get_app_mounts_ds
class AppsIxVolumeService(Service):
class Config:
namespace = 'app.ix_volume'
event_send = False
cli_namespace = 'app.ix_volume'
@filterable(roles=['APPS_READ'])
@filterable_returns(Dict(
'ix-volumes_query',
Str('app_name'),
Str('name'),
additional_attrs=True,
))
async def query(self, filters, options):
"""
Query ix-volumes with `filters` and `options`.
"""
if not await self.middleware.call('docker.state.validate', False):
return filter_list([], filters, options)
docker_ds = (await self.middleware.call('docker.config'))['dataset']
datasets = await self.middleware.call(
'zfs.dataset.query', [['id', '^', f'{get_app_mounts_ds(docker_ds)}/']], {
'extra': {'retrieve_properties': False, 'flat': True}
}
)
apps = collections.defaultdict(list)
for ds_name in filter(lambda d: d.count('/') > 3, map(lambda d: d['id'], datasets)):
name_split = ds_name.split('/', 4)
apps[name_split[3]].append(name_split[-1])
volumes = []
for app, app_volumes in apps.items():
for volume in app_volumes:
volumes.append({
'name': volume,
'app_name': app,
})
return filter_list(volumes, filters, options)
@accepts(Str('app_name'))
@returns(Bool('ix_volumes_exist'))
async def exists(self, app_name):
"""
Check if ix-volumes exist for `app_name`.
"""
return bool(await self.query([['app_name', '=', app_name]]))
| 1,919 | Python | .py | 48 | 30.9375 | 92 | 0.587319 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,825 | events.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/events.py | from middlewared.service import Service
from .ix_apps.utils import get_app_name_from_project_name
from .utils import get_app_stop_cache_key
PROCESSING_APP_EVENT = set()
class AppEvents(Service):
class Config:
namespace = 'app.events'
private = True
async def process(self, app_name, container_event):
cache_key = get_app_stop_cache_key(app_name)
if (app := await self.middleware.call('app.query', [['id', '=', app_name]])) and not await self.middleware.call(
'cache.has_key', cache_key
):
self.middleware.send_event(
'app.query', 'CHANGED', id=app_name, fields=app[0],
)
async def app_event(middleware, event_type, args):
app_name = get_app_name_from_project_name(args['id'])
if app_name in PROCESSING_APP_EVENT:
return
PROCESSING_APP_EVENT.add(app_name)
try:
await middleware.call('app.events.process', app_name, args['fields'])
except Exception as e:
middleware.logger.warning('Unhandled exception: %s', e)
finally:
PROCESSING_APP_EVENT.remove(app_name)
async def setup(middleware):
middleware.event_subscribe('docker.events', app_event)
| 1,213 | Python | .py | 29 | 34.862069 | 120 | 0.666097 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,826 | compose_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/compose_utils.py | import itertools
import logging
import typing
from middlewared.service_exception import CallError
from .ix_apps.lifecycle import get_rendered_templates_of_app
from .utils import PROJECT_PREFIX, run
logger = logging.getLogger('app_lifecycle')
def compose_action(
app_name: str, app_version: str, action: typing.Literal['up', 'down', 'pull'], *,
force_recreate: bool = False, remove_orphans: bool = False, remove_images: bool = False,
remove_volumes: bool = False, pull_images: bool = False,
):
compose_files = list(itertools.chain(
*[('-f', item) for item in get_rendered_templates_of_app(app_name, app_version)]
))
if not compose_files:
raise CallError(f'No compose files found for app {app_name!r}')
args = ['-p', f'{PROJECT_PREFIX}{app_name}', action]
if action == 'up':
args.append('-d')
if force_recreate:
args.append('--force-recreate')
# This needs to happen because --force-recreate doesn't recreate docker networks
# So for example, an app was running and then system has been rebooted - the docker network
# remains there but the relevant interfaces it created do not and if the app didn't had a restart
# policy of always, when attempting to start the app again - it will fail because the network
# is not recreated with compose up action and we need an explicit down
compose_action(app_name, app_version, 'down', remove_orphans=True)
if remove_orphans:
args.append('--remove-orphans')
if pull_images:
args.append('--pull=always')
elif action == 'down':
if remove_orphans:
args.append('--remove-orphans')
if remove_images:
args.extend(['--rmi', 'all'])
if remove_volumes:
args.append('-v')
elif action == 'pull':
args.extend(['--policy', 'always'])
else:
raise CallError(f'Invalid action {action!r} for app {app_name!r}')
# TODO: We will likely have a configurable timeout on this end
cp = run(['docker', 'compose'] + compose_files + args, timeout=1200)
if cp.returncode != 0:
logger.error('Failed %r action for %r app: %s', action, app_name, cp.stderr)
raise CallError(
f'Failed {action!r} action for {app_name!r} app, please check /var/log/app_lifecycle.log for more details'
)
| 2,423 | Python | .py | 50 | 40.8 | 118 | 0.648477 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,827 | port_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/port_attachments.py | from middlewared.common.ports import PortDelegate
class AppPortDelegate(PortDelegate):
name = 'applications'
namespace = 'app'
title = 'Applications'
async def get_ports(self):
ports = []
for app in filter(
lambda a: a['active_workloads']['used_ports'],
await self.middleware.call('app.query')
):
app_ports = []
for port_entry in app['active_workloads']['used_ports']:
for host_port in port_entry['host_ports']:
app_ports.append(('0.0.0.0', host_port['host_port']))
ports.append({
'description': f'{app["id"]!r} application',
'ports': app_ports,
})
return ports
async def setup(middleware):
await middleware.call('port.register_attachment_delegate', AppPortDelegate(middleware))
| 879 | Python | .py | 22 | 29.863636 | 91 | 0.586572 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,828 | stats.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/stats.py | import time
from middlewared.event import EventSource
from middlewared.plugins.docker.state_utils import Status
from middlewared.schema import Dict, Int, Str, List
from middlewared.service import CallError
from middlewared.validators import Range
from .ix_apps.docker.stats import list_resources_stats_by_project
from .stats_util import normalize_projects_stats
class AppStatsEventSource(EventSource):
"""
Retrieve statistics of apps.
"""
ACCEPTS = Dict(
Int('interval', default=2, validators=[Range(min_=2)]),
)
RETURNS = List(
'apps_stats',
items=[
Dict(
'stats',
Str('app_name'),
Int('cpu_usage', description='Percentage of cpu used by an app'),
Int('memory', description='Current memory(in bytes) used by an app'),
List(
'networks',
items=[
Dict(
'interface_stats',
Str('interface_name', description='Name of the interface use by the app'),
Int('rx_bytes', description='Received bytes/s by an interface'),
Int('tx_bytes', description='Transmitted bytes/s by an interface')
),
]
),
Dict(
'blkio',
Int('read', description='Blkio read bytes'),
Int('write', description='Blkio write bytes')
)
)
]
)
def run_sync(self):
if not self.middleware.call_sync('docker.state.validate', False):
raise CallError('Apps are not available')
old_projects_stats = list_resources_stats_by_project()
interval = self.arg['interval']
time.sleep(interval)
while not self._cancel_sync.is_set():
try:
project_stats = list_resources_stats_by_project()
self.send_event(
'ADDED', fields=normalize_projects_stats(project_stats, old_projects_stats, interval)
)
old_projects_stats = project_stats
time.sleep(interval)
except Exception:
if self.middleware.call_sync('docker.status')['status'] != Status.RUNNING.value:
return
raise
def setup(middleware):
middleware.register_event_source('app.stats', AppStatsEventSource, roles=['APPS_READ'])
| 2,552 | Python | .py | 62 | 28.225806 | 105 | 0.553672 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,829 | rollback.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/rollback.py | from middlewared.schema import accepts, Bool, Dict, List, Ref, returns, Str
from middlewared.service import job, Service, ValidationErrors
from .compose_utils import compose_action
from .ix_apps.lifecycle import add_context_to_values, get_current_app_config, update_app_config
from .ix_apps.metadata import update_app_metadata
from .ix_apps.path import get_installed_app_path, get_installed_app_version_path
from .ix_apps.rollback import clean_newer_versions, get_rollback_versions
class AppService(Service):
class Config:
namespace = 'app'
cli_namespace = 'app'
@accepts(
Str('app_name'),
Dict(
'options',
Str('app_version', empty=False, required=True),
Bool('rollback_snapshot', default=True),
),
roles=['APPS_WRITE'],
)
@returns(Ref('app_entry'))
@job(lock=lambda args: f'app_rollback_{args[0]}')
def rollback(self, job, app_name, options):
"""
Rollback `app_name` app to previous version.
"""
app = self.middleware.call_sync('app.get_instance', app_name)
verrors = ValidationErrors()
if options['app_version'] == app['version']:
verrors.add('options.app_version', 'Cannot rollback to same version')
elif options['app_version'] not in get_rollback_versions(app_name, app['version']):
verrors.add('options.app_version', 'Specified version is not available for rollback')
verrors.check()
rollback_version = self.middleware.call_sync(
'catalog.app_version_details', get_installed_app_version_path(app_name, options['app_version'])
)
config = get_current_app_config(app_name, options['app_version'])
new_values = self.middleware.call_sync(
'app.schema.normalize_and_validate_values', rollback_version, config, False,
get_installed_app_path(app_name), app,
)
new_values = add_context_to_values(app_name, new_values, rollback_version['app_metadata'], rollback=True)
update_app_config(app_name, options['app_version'], new_values)
job.set_progress(
20, f'Completed validation for {app_name!r} app rollback to {options["app_version"]!r} version'
)
# Rollback steps would be
# 1) Config should be updated
# 2) Compose files should be rendered
# 3) Metadata should be updated to reflect new version
# 4) Docker should be notified to recreate resources and to let rollback commence
# 5) Roll back ix_volume dataset's snapshots if available
# 6) Finally update collective metadata config to reflect new version
update_app_metadata(app_name, rollback_version)
self.middleware.send_event(
'app.query', 'CHANGED', id=app_name, fields=self.middleware.call_sync('app.get_instance', app_name)
)
try:
if options['rollback_snapshot'] and (
app_volume_ds := self.middleware.call_sync('app.get_app_volume_ds', app_name)
):
snap_name = f'{app_volume_ds}@{options["app_version"]}'
if self.middleware.call_sync('zfs.snapshot.query', [['id', '=', snap_name]]):
job.set_progress(40, f'Rolling back {app_name!r} app to {options["app_version"]!r} version')
self.middleware.call_sync(
'zfs.snapshot.rollback', snap_name, {
'force': True,
'recursive': True,
'recursive_clones': True,
'recursive_rollback': True,
}
)
compose_action(app_name, options['app_version'], 'up', force_recreate=True, remove_orphans=True)
finally:
self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
clean_newer_versions(app_name, options['app_version'])
job.set_progress(100, f'Rollback completed for {app_name!r} app to {options["app_version"]!r} version')
return self.middleware.call_sync('app.get_instance', app_name)
@accepts(Str('app_name'), roles=['APPS_READ'])
@returns(List('rollback_versions', items=[Str('version')]))
def rollback_versions(self, app_name):
"""
Retrieve versions available for rollback for `app_name` app.
"""
app = self.middleware.call_sync('app.get_instance', app_name)
return get_rollback_versions(app_name, app['version'])
| 4,561 | Python | .py | 86 | 42.337209 | 113 | 0.624524 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,830 | schema_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/schema_utils.py | import itertools
from middlewared.service import ValidationErrors
from middlewared.schema import (
Attribute, Bool, Cron, Dict, Dir, File, HostPath, Int, IPAddr, List, NOT_PROVIDED, Path, Str, URI,
)
from middlewared.validators import Match, Range, validate_schema
CONTEXT_KEY_NAME = 'ix_context'
RESERVED_NAMES = [
('ix_certificates', dict),
('ix_certificate_authorities', dict),
('ix_volumes', dict),
(CONTEXT_KEY_NAME, dict),
]
SCHEMA_MAPPING = {
'string': Str,
'int': Int,
'boolean': Bool,
'path': Path,
# to support large text / toml data of upto 1MiB
'text': lambda *args, **kwargs: Str(*args, **kwargs, max_length=1024 * 1024),
'hostpath': HostPath,
'hostpathdirectory': Dir,
'hostpathfile': File,
'list': List,
'dict': Dict,
'ipaddr': IPAddr,
'cron': Cron,
'uri': URI,
}
def construct_schema(
item_version_details: dict, new_values: dict, update: bool, old_values: dict | object = NOT_PROVIDED
) -> dict:
schema_name = f'app_{"update" if update else "create"}'
attrs = list(itertools.chain.from_iterable(
get_schema(q, False, old_values) for q in item_version_details['schema']['questions']
))
dict_obj = update_conditional_defaults(
Dict(schema_name, *attrs, update=False, additional_attrs=True), {
'schema': {'attrs': item_version_details['schema']['questions']}
}
)
verrors = ValidationErrors()
verrors.add_child('values', validate_schema(
attrs, new_values, True, dict_kwargs={
'conditional_defaults': dict_obj.conditional_defaults, 'update': False,
}
))
return {
'verrors': verrors,
'new_values': new_values,
'dict_obj': dict_obj,
'schema_name': schema_name,
}
def update_conditional_defaults(dict_obj: Dict, variable_details: dict) -> Dict:
schema = variable_details['schema']
for var in filter(lambda k: any(c in k['schema'] for c in ('show_subquestions_if', 'show_if')), schema['attrs']):
var_schema = var['schema']
attrs = []
filters = []
if 'show_subquestions_if' in var_schema:
filters.append([var['variable'], '=', var_schema['show_subquestions_if']])
attrs.extend([a['variable'] for a in var_schema['subquestions']])
if 'show_if' in var_schema:
filters.extend(var_schema['show_if'])
attrs.append(var['variable'])
dict_obj.conditional_defaults[var['variable']] = {'filters': filters, 'attrs': attrs}
return dict_obj
def get_schema(variable_details: dict, update: bool, existing: dict | object = NOT_PROVIDED) -> list:
schema_details = variable_details['schema']
schema_class = SCHEMA_MAPPING[schema_details['type']]
cur_val = existing.get(variable_details['variable'], NOT_PROVIDED) if isinstance(existing, dict) else NOT_PROVIDED
# Validation is ensured at chart level to ensure that we don't have enum for say boolean
obj_kwargs = {k: schema_details[k] for k in filter(
lambda k: k in schema_details,
('required', 'default', 'private', 'ipv4', 'ipv6', 'cidr', 'null', 'additional_attrs', 'editable', 'empty')
)}
if schema_details.get('immutable') and cur_val is not NOT_PROVIDED:
obj_kwargs['default'] = cur_val
obj_kwargs['editable'] = False
if schema_class not in (Cron, Dict):
obj = schema_class(variable_details['variable'], **obj_kwargs)
else:
obj = schema_class(
variable_details['variable'],
*list(itertools.chain.from_iterable(
get_schema(var, update, cur_val or NOT_PROVIDED) for var in schema_details.get('attrs', [])
)),
update=update, **obj_kwargs
)
if schema_class == Dict:
obj = update_conditional_defaults(obj, variable_details)
result = []
obj.ref = schema_details.get('$ref', [])
if schema_class in (Str, Int):
range_vars = ['min', 'max'] if schema_class == Int else ['min_length', 'max_length']
range_args = {f'{k}_': schema_details[v] for k, v in zip(['min', 'max'], range_vars) if schema_details.get(v)}
if range_args:
obj.validators.append(Range(**range_args))
if 'enum' in schema_details:
obj.enum = [v['value'] for v in schema_details['enum']]
if schema_class == Str:
if range_args.get('max_'):
# This needs to be done as string schema has built in support for max length as
# well apart from the range validator we add
obj.max_length = range_args['max_']
if 'valid_chars' in schema_details:
obj.validators.append(Match(
schema_details['valid_chars'], explanation=schema_details.get('valid_chars_error')
))
if schema_class == List:
obj.items = list(itertools.chain.from_iterable(get_schema(i, update) for i in schema_details['items']))
elif 'subquestions' in schema_details:
result.extend(list(itertools.chain.from_iterable(
get_schema(i, update, existing) for i in schema_details['subquestions']
)))
result.insert(0, obj)
return result
def get_list_item_from_value(value: list, question_attr: List) -> tuple[int, Attribute]:
for index, attr in enumerate(question_attr.items):
try:
attr.validate(value)
except ValidationErrors:
pass
else:
return index, attr
| 5,544 | Python | .py | 125 | 36.632 | 118 | 0.626135 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,831 | version_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/version_utils.py | import errno
from pkg_resources import parse_version
from middlewared.service import CallError
def get_latest_version_from_app_versions(app_versions: dict) -> str:
if not app_versions:
raise CallError('No versions found', errno=errno.ENOENT)
elif all(not app_version['healthy'] for app_version in app_versions.values()):
raise CallError('No healthy app version found', errno=errno.ENOENT)
return str(sorted(map(parse_version, app_versions) or ['latest'], reverse=True)[0])
| 506 | Python | .py | 9 | 51.555556 | 87 | 0.75 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,832 | setup.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/setup.py | import os
import shutil
import textwrap
import yaml
from middlewared.utils.io import write_if_changed
from .metadata import update_app_yaml_for_last_update
from .path import get_app_parent_config_path, get_installed_app_version_path
def setup_install_app_dir(app_name: str, app_version_details: dict, custom_app: bool = False):
os.makedirs(os.path.join(get_app_parent_config_path(), app_name, 'versions'), exist_ok=True)
to_install_app_version = os.path.basename(app_version_details['version'])
destination = get_installed_app_version_path(app_name, to_install_app_version)
if custom_app:
# TODO: See if it makes sense to creat a dummy app on apps side instead
os.makedirs(os.path.join(destination, 'templates/rendered'), exist_ok=True)
with open(os.path.join(destination, 'README.md'), 'w') as f:
f.write(textwrap.dedent('''
# Custom App
This is a custom app where user can use his/her own docker compose file for deploying services.
'''))
f.flush()
write_if_changed(
os.path.join(destination, 'app.yaml'),
yaml.safe_dump(app_version_details['app_metadata']),
perms=0o600,
raise_error=False
)
else:
shutil.copytree(app_version_details['location'], destination)
update_app_yaml_for_last_update(destination, app_version_details['last_update'])
| 1,429 | Python | .py | 29 | 41.862069 | 107 | 0.682927 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,833 | portals.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/portals.py | from apps_validation.portals import IX_NOTES_KEY, IX_PORTAL_KEY, validate_portals_and_notes, ValidationErrors
from .lifecycle import get_rendered_template_config_of_app
def normalized_port_value(scheme: str, port: int) -> str:
return '' if ((scheme == 'http' and port == 80) or (scheme == 'https' and port == 443)) else f':{port}'
def get_portals_and_app_notes(app_name: str, version: str) -> dict:
rendered_config = get_rendered_template_config_of_app(app_name, version)
portal_and_notes_config = {
k: rendered_config[k]
for k in (IX_NOTES_KEY, IX_PORTAL_KEY)
if k in rendered_config
}
config = {
'portals': {},
'notes': None,
}
if portal_and_notes_config:
try:
validate_portals_and_notes('portal', portal_and_notes_config)
except ValidationErrors:
return config
portals = {}
for portal in portal_and_notes_config.get(IX_PORTAL_KEY, []):
port_value = normalized_port_value(portal['scheme'], portal['port'])
portals[portal['name']] = f'{portal["scheme"]}://{portal["host"]}{port_value}{portal.get("path", "")}'
return {
'portals': portals,
'notes': portal_and_notes_config.get(IX_NOTES_KEY),
}
| 1,257 | Python | .py | 28 | 38.071429 | 110 | 0.638298 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,834 | path.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/path.py | import os
from .utils import IX_APPS_MOUNT_PATH
def get_collective_config_path() -> str:
    """Return the path of the YAML file aggregating every app's user config."""
    return os.path.join(IX_APPS_MOUNT_PATH, 'user_config.yaml')


def get_collective_metadata_path() -> str:
    """Return the path of the YAML file aggregating every app's metadata."""
    return os.path.join(IX_APPS_MOUNT_PATH, 'metadata.yaml')


def get_app_mounts_ds(docker_ds: str) -> str:
    """Return the name of the dataset holding per-app volume datasets."""
    return os.path.join(docker_ds, 'app_mounts')


def get_app_parent_volume_ds(docker_ds: str, app_name: str) -> str:
    """Return the name of the dataset holding `app_name`'s volumes."""
    return os.path.join(get_app_mounts_ds(docker_ds), app_name)


def get_app_parent_config_path() -> str:
    """Return the directory containing the per-app config directories."""
    return os.path.join(IX_APPS_MOUNT_PATH, 'app_configs')


def get_app_parent_volume_ds_name(docker_ds: str, app_name: str) -> str:
    # Same value as get_app_parent_volume_ds(); delegate instead of duplicating
    # the join so the two can never drift apart.
    return get_app_parent_volume_ds(docker_ds, app_name)


def get_app_parent_volume_path() -> str:
    """Return the mountpoint of the directory holding per-app volumes."""
    return os.path.join(IX_APPS_MOUNT_PATH, 'app_mounts')


def get_app_volume_path(app_name: str) -> str:
    """Return the mountpoint of `app_name`'s volumes."""
    return os.path.join(get_app_parent_volume_path(), app_name)


def get_installed_app_path(app_name: str) -> str:
    """Return the config directory of the installed app `app_name`."""
    return os.path.join(get_app_parent_config_path(), app_name)


def get_installed_app_metadata_path(app_name: str) -> str:
    """Return the path of `app_name`'s metadata YAML file."""
    return os.path.join(get_installed_app_path(app_name), 'metadata.yaml')


def get_installed_app_versions_dir_path(app_name: str) -> str:
    """Return the directory containing one sub-directory per installed version."""
    return os.path.join(get_installed_app_path(app_name), 'versions')


def get_installed_app_version_path(app_name: str, version: str) -> str:
    """Return the directory of a specific installed version of `app_name`."""
    return os.path.join(get_installed_app_versions_dir_path(app_name), version)


def get_installed_app_config_path(app_name: str, version: str) -> str:
    """Return the path of the user config YAML for `app_name`/`version`."""
    return os.path.join(get_installed_app_version_path(app_name, version), 'user_config.yaml')


def get_installed_custom_app_compose_file(app_name: str, version: str) -> str:
    """Return the rendered docker-compose file path for a custom app."""
    return os.path.join(get_installed_app_rendered_dir_path(app_name, version), 'docker-compose.yaml')


def get_installed_app_rendered_dir_path(app_name: str, version: str) -> str:
    """Return the directory holding the rendered compose templates."""
    return os.path.join(get_installed_app_version_path(app_name, version), 'templates/rendered')
| 2,012 | Python | .py | 32 | 59.03125 | 102 | 0.72704 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,835 | upgrade.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/upgrade.py | import contextlib
import shutil
from .metadata import update_app_metadata, update_app_yaml_for_last_update
from .path import get_installed_app_version_path
@contextlib.contextmanager
def upgrade_config(app_name: str, upgrade_version: dict):
    """Stage the new version's files for an app upgrade.

    Copies `upgrade_version['location']` into the app's versions directory
    (first removing any leftover from a previous attempt), stamps the copied
    app.yaml with `upgrade_version['last_update']` and yields the staged path.
    On success the app's metadata is updated to the new version; on failure
    the staged directory is removed and the exception re-raised.
    """
    version_path = get_installed_app_version_path(app_name, upgrade_version['version'])
    # Remove any stale copy of this version before copying fresh files
    shutil.rmtree(version_path, ignore_errors=True)
    shutil.copytree(upgrade_version['location'], version_path)
    update_app_yaml_for_last_update(version_path, upgrade_version['last_update'])
    try:
        yield version_path
    except Exception:
        # Failed upgrade must leave no half-staged version behind
        shutil.rmtree(version_path, ignore_errors=True)
        raise
    else:
        update_app_metadata(app_name, upgrade_version)
| 722 | Python | .py | 17 | 37.764706 | 87 | 0.753561 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,836 | lifecycle.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py | import copy
import contextlib
import pathlib
import typing
import yaml
from middlewared.service_exception import CallError
from middlewared.utils.io import write_if_changed
from .path import (
get_installed_app_config_path, get_installed_app_rendered_dir_path, get_installed_app_version_path,
get_installed_custom_app_compose_file,
)
from .utils import CONTEXT_KEY_NAME, run
def get_rendered_template_config_of_app(app_name: str, version: str) -> dict:
    """Merge the contents of every rendered template YAML file into one dict.

    Files that are missing or contain invalid YAML are silently skipped;
    later files overwrite duplicate top-level keys from earlier ones.
    """
    rendered_config = {}
    for rendered_file in get_rendered_templates_of_app(app_name, version):
        with contextlib.suppress(FileNotFoundError, yaml.YAMLError):
            with open(rendered_file, 'r') as f:
                # safe_load returns None for an empty document — skip those
                if (data := yaml.safe_load(f)) is not None:
                    rendered_config.update(data)
    return rendered_config
def get_rendered_templates_of_app(app_name: str, version: str) -> list[str]:
    """Return the paths of the rendered YAML files for `app_name`/`version`."""
    rendered_dir = pathlib.Path(get_installed_app_rendered_dir_path(app_name, version))
    return [
        entry.as_posix()
        for entry in rendered_dir.iterdir()
        if entry.is_file() and entry.name.endswith('.yaml')
    ]
def write_new_app_config(app_name: str, version: str, values: dict[str, typing.Any]) -> None:
    """Persist `values` as the app's user config YAML (mode 0600, best-effort)."""
    app_config_path = get_installed_app_config_path(app_name, version)
    write_if_changed(app_config_path, yaml.safe_dump(values), perms=0o600, raise_error=False)


def get_current_app_config(app_name: str, version: str) -> dict:
    """Return the app's stored user config; {} when the file is empty.

    Raises FileNotFoundError if the config file does not exist.
    """
    with open(get_installed_app_config_path(app_name, version), 'r') as f:
        return yaml.safe_load(f) or {}


def render_compose_templates(app_version_path: str, values_file_path: str):
    """Run the external renderer to produce compose files from the templates.

    Raises `CallError` when the renderer exits non-zero.
    """
    cp = run(['/usr/bin/apps_render_app', 'render', '--path', app_version_path, '--values', values_file_path])
    if cp.returncode != 0:
        # FIXME: We probably want to log app related issues to it's own logging file so as to not spam middleware
        raise CallError(f'Failed to render compose templates: {cp.stderr}')
def update_app_config(app_name: str, version: str, values: dict[str, typing.Any], custom_app: bool = False) -> None:
    """Write the app's user config and regenerate its compose files.

    For a custom app `values` ARE the compose definition and get written
    straight to the compose file; otherwise the app's templates are
    re-rendered against the freshly written config.
    """
    write_new_app_config(app_name, version, values)
    if custom_app:
        compose_file_path = get_installed_custom_app_compose_file(app_name, version)
        write_if_changed(compose_file_path, yaml.safe_dump(values), perms=0o600, raise_error=False)
    else:
        render_compose_templates(
            get_installed_app_version_path(app_name, version), get_installed_app_config_path(app_name, version)
        )
def get_action_context(app_name: str) -> dict[str, typing.Any]:
    """Return a fresh, mutable context dict describing a pending app action.

    The dict (and its nested mutables) is built from literals on every call,
    so callers may mutate the result freely; the previous `copy.deepcopy`
    of the literal was redundant and has been dropped.
    """
    # TODO: See what needs to be added/removed here
    return {
        'operation': None,
        'is_install': False,
        'is_rollback': False,
        'is_update': False,
        'is_upgrade': False,
        'upgrade_metadata': {},
        'app_name': app_name,
        'app_metadata': {},
    }
def add_context_to_values(
    app_name: str, values: dict[str, typing.Any], app_metadata: dict, *, install: bool = False, update: bool = False,
    upgrade: bool = False, upgrade_metadata: dict[str, typing.Any] = None, rollback: bool = False,
) -> dict[str, typing.Any]:
    """Inject the action context into `values` under the reserved context key.

    Exactly one of the `install`/`update`/`upgrade`/`rollback` keyword flags
    must be set; `upgrade` additionally requires `upgrade_metadata`. The
    context records the operation name, the app metadata and per-operation
    boolean flags. Returns the mutated `values`.
    """
    assert install or update or upgrade or rollback, 'At least one of install, update, rollback or upgrade must be True'
    assert sum([install, rollback, update, upgrade]) <= 1, 'Only one of install, update, or upgrade can be True.'
    if upgrade:
        assert upgrade_metadata is not None, 'upgrade_metadata must be specified if upgrade is True.'

    action_context = get_action_context(app_name)
    operation_map = {
        'INSTALL': install,
        'ROLLBACK': rollback,
        'UPDATE': update,
        'UPGRADE': upgrade,
    }
    # Only the single truthy flag survives the filter, so this runs once
    for operation, _ in filter(lambda i: i[1], operation_map.items()):
        action_context.update({
            'operation': operation,
            'app_metadata': app_metadata,
            f'is_{operation.lower()}': True,
            **({'upgrade_metadata': upgrade_metadata} if operation == 'UPGRADE' else {})
        })

    values[CONTEXT_KEY_NAME] = action_context
    return values
| 4,120 | Python | .py | 82 | 43.682927 | 120 | 0.675635 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,837 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/utils.py | import enum
from catalog_reader.library import RE_VERSION # noqa
from middlewared.plugins.apps_images.utils import normalize_reference # noqa
from middlewared.plugins.apps.schema_utils import CONTEXT_KEY_NAME # noqa
from middlewared.plugins.apps.utils import IX_APPS_MOUNT_PATH, PROJECT_PREFIX, run # noqa
class AppState(enum.Enum):
    # Aggregate state of an app, derived from its containers' states
    CRASHED = 'CRASHED'
    DEPLOYING = 'DEPLOYING'
    RUNNING = 'RUNNING'
    STOPPED = 'STOPPED'
    STOPPING = 'STOPPING'


class ContainerState(enum.Enum):
    # Normalized state of a single docker container
    CRASHED = 'crashed'
    CREATED = 'created'
    EXITED = 'exited'
    RUNNING = 'running'
    STARTING = 'starting'


def get_app_name_from_project_name(project_name: str) -> str:
    """Strip the compose project prefix to recover the app name.

    Assumes `project_name` actually starts with PROJECT_PREFIX — the slice is
    applied unconditionally.
    """
    return project_name[len(PROJECT_PREFIX):]
| 731 | Python | .py | 19 | 34.789474 | 90 | 0.741844 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,838 | metadata.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/metadata.py | import os
import typing
import yaml
from middlewared.utils.io import write_if_changed
from .path import get_collective_config_path, get_collective_metadata_path, get_installed_app_metadata_path
from .portals import get_portals_and_app_notes
def _load_app_yaml(yaml_path: str) -> dict[str, typing.Any]:
    """Load ``yaml_path``, returning ``{}`` when missing, invalid or empty."""
    try:
        with open(yaml_path, 'r') as f:
            contents = yaml.safe_load(f)
    except (FileNotFoundError, yaml.YAMLError):
        return {}
    # yaml.safe_load may return None if file empty
    return {} if contents is None else contents
def get_app_metadata(app_name: str) -> dict[str, typing.Any]:
    """Return the stored metadata of an installed app ({} if unreadable)."""
    return _load_app_yaml(get_installed_app_metadata_path(app_name))


def update_app_metadata(
    app_name: str, app_version_details: dict, migrated: bool | None = None, custom_app: bool = False,
):
    """Write the app's metadata YAML from catalog version details.

    When `migrated` is None the previously stored flag (default False) is
    preserved. The write is best-effort (raise_error=False) at mode 0600.
    """
    migrated = get_app_metadata(app_name).get('migrated', False) if migrated is None else migrated
    write_if_changed(get_installed_app_metadata_path(app_name), yaml.safe_dump({
        'metadata': app_version_details['app_metadata'],
        'migrated': migrated,
        'custom_app': custom_app,
        **{k: app_version_details[k] for k in ('version', 'human_version')},
        **get_portals_and_app_notes(app_name, app_version_details['version']),
        # TODO: We should not try to get portals for custom apps for now
    }), perms=0o600, raise_error=False)


def update_app_metadata_for_portals(app_name: str, version: str):
    """Refresh only the portals/notes portion of the app's metadata YAML."""
    # This should be called after config of app has been updated as that will render compose files
    app_metadata = get_app_metadata(app_name)
    # Using write_if_changed ensures atomicity of the write via writing to a temporary
    # file then renaming over existing one.
    write_if_changed(get_installed_app_metadata_path(app_name), yaml.safe_dump({
        **app_metadata,
        **get_portals_and_app_notes(app_name, version),
    }), perms=0o600, raise_error=False)
def get_collective_config() -> dict[str, dict]:
    """Return the aggregated user config of all apps keyed by app name."""
    return _load_app_yaml(get_collective_config_path())


def get_collective_metadata() -> dict[str, dict]:
    """Return the aggregated metadata of all apps keyed by app name."""
    return _load_app_yaml(get_collective_metadata_path())


def update_app_yaml_for_last_update(version_path: str, last_update: str):
    """Record `last_update` in the version's app.yaml (best-effort write)."""
    app_yaml_path = os.path.join(version_path, 'app.yaml')
    app_config = _load_app_yaml(app_yaml_path)
    app_config['last_update'] = last_update
    write_if_changed(app_yaml_path, yaml.safe_dump(app_config), perms=0o600, raise_error=False)
| 2,611 | Python | .py | 48 | 47.895833 | 107 | 0.689737 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,839 | query.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/query.py | import os
from collections import defaultdict
from dataclasses import dataclass
from pkg_resources import parse_version
from .docker.query import list_resources_by_project
from .metadata import get_collective_config, get_collective_metadata
from .lifecycle import get_current_app_config
from .path import get_app_parent_config_path
from .utils import AppState, ContainerState, get_app_name_from_project_name, normalize_reference, PROJECT_PREFIX
COMPOSE_SERVICE_KEY: str = 'com.docker.compose.service'
@dataclass(frozen=True, eq=True)
class VolumeMount:
    """A container mount; hashable so mounts can be de-duplicated in a set."""
    source: str
    destination: str
    mode: str
    type: str

    def __hash__(self):
        # `mode` is excluded from the hash while dataclass-generated equality
        # still compares it — legal, since the hash uses a subset of eq fields.
        return hash((self.source, self.destination, self.type))
def upgrade_available_for_app(
    version_mapping: dict[str, dict[str, dict[str, str]]], app_metadata: dict, image_updates_available: bool = False,
) -> bool:
    """Report whether a newer version exists for an installed app.

    Catalog apps compare their installed version against `version_mapping`
    (train -> app name -> {'version': ...}); custom apps instead report True
    when `image_updates_available` says any of their images has an update.
    """
    # TODO: Eventually we would want this to work as well but this will always require middleware changes
    # depending on what new functionality we want introduced for custom app, so let's take care of this at that point
    catalog_app_metadata = app_metadata['metadata']
    if app_metadata['custom_app'] is False and version_mapping.get(
        catalog_app_metadata['train'], {}
    ).get(catalog_app_metadata['name']):
        return parse_version(catalog_app_metadata['version']) < parse_version(
            version_mapping[catalog_app_metadata['train']][catalog_app_metadata['name']]['version']
        )
    elif app_metadata['custom_app'] and image_updates_available:
        return True
    else:
        return False
def normalize_portal_uri(portal_uri: str, host_ip: str | None) -> str:
if not host_ip or '0.0.0.0' not in portal_uri:
return portal_uri
return portal_uri.replace('0.0.0.0', host_ip)
def get_config_of_app(app_data: dict, collective_config: dict, retrieve_config: bool) -> dict:
    """Return {'config': ...} for an app, or {} when config was not requested.

    Prefers the pre-loaded collective config; falls back to reading the app's
    own config file when a version is known, else {}.
    """
    return {
        'config': collective_config.get(app_data['name']) or (
            get_current_app_config(app_data['name'], app_data['version']) if app_data['version'] else {}
        )
    } if retrieve_config else {}


def normalize_portal_uris(portals: dict[str, str], host_ip: str | None) -> dict[str, str]:
    """Apply normalize_portal_uri to every portal URI in the mapping."""
    return {name: normalize_portal_uri(uri, host_ip) for name, uri in portals.items()}
def list_apps(
    train_to_apps_version_mapping: dict[str, dict[str, dict[str, str]]],
    specific_app: str | None = None,
    host_ip: str | None = None,
    retrieve_config: bool = False,
    image_update_cache: dict | None = None,
) -> list[dict]:
    """Build the list of installed apps with state, workloads and upgrade info.

    Apps with docker resources (running/deploying) are discovered first; apps
    with none are then picked up from the on-disk config directories and
    reported as STOPPED. `specific_app` restricts the listing to one app,
    `host_ip` rewrites wildcard portal URIs, `retrieve_config` adds each
    app's user config to its entry.
    """
    apps = []
    image_update_cache = image_update_cache or {}
    app_names = set()
    metadata = get_collective_metadata()
    collective_config = get_collective_config() if retrieve_config else {}
    # This will only give us apps which are running or in deploying state
    for app_name, app_resources in list_resources_by_project(
        project_name=f'{PROJECT_PREFIX}{specific_app}' if specific_app else None,
    ).items():
        app_name = get_app_name_from_project_name(app_name)
        app_names.add(app_name)
        if app_name not in metadata:
            # The app is malformed or something is seriously wrong with it
            continue

        workloads = translate_resources_to_desired_workflow(app_resources)

        # When we stop docker service and start it again - the containers can be in exited
        # state which means we need to account for this.
        state = AppState.STOPPED
        workload_stats = defaultdict(int)
        workloads_len = len(workloads['container_details'])
        for container in workloads['container_details']:
            workload_stats[container['state']] += 1

        if workload_stats[ContainerState.CRASHED.value]:
            state = AppState.CRASHED
        elif workload_stats[ContainerState.CREATED.value] or workload_stats[ContainerState.STARTING.value]:
            state = AppState.DEPLOYING
        elif 0 < workloads_len == sum(
            workload_stats[k.value] for k in (ContainerState.RUNNING, ContainerState.EXITED)
        ) and workload_stats[ContainerState.RUNNING.value]:
            # Every container is either running or cleanly exited, and at
            # least one is running
            state = AppState.RUNNING

        state = state.value
        app_metadata = metadata[app_name]
        active_workloads = get_default_workload_values() if state == 'STOPPED' else workloads
        image_updates_available = any(
            image_update_cache.get(normalize_reference(k)['complete_tag']) for k in active_workloads['images']
        )
        app_data = {
            'name': app_name,
            'id': app_name,
            'active_workloads': active_workloads,
            'state': state,
            'upgrade_available': upgrade_available_for_app(train_to_apps_version_mapping, app_metadata),
            'image_updates_available': image_updates_available,
            **app_metadata | {'portals': normalize_portal_uris(app_metadata['portals'], host_ip)}
        }
        apps.append(app_data | get_config_of_app(app_data, collective_config, retrieve_config))

    if specific_app and specific_app in app_names:
        return apps

    # We should now retrieve apps which are in stopped state
    with os.scandir(get_app_parent_config_path()) as scan:
        for entry in filter(
            lambda e: e.is_dir() and ((specific_app and e.name == specific_app) or e.name not in app_names), scan
        ):
            app_names.add(entry.name)
            if entry.name not in metadata:
                # The app is malformed or something is seriously wrong with it
                continue
            app_metadata = metadata[entry.name]
            app_data = {
                'name': entry.name,
                'id': entry.name,
                'active_workloads': get_default_workload_values(),
                'state': AppState.STOPPED.value,
                'upgrade_available': upgrade_available_for_app(train_to_apps_version_mapping, app_metadata),
                'image_updates_available': False,
                **app_metadata | {'portals': normalize_portal_uris(app_metadata['portals'], host_ip)}
            }
            apps.append(app_data | get_config_of_app(app_data, collective_config, retrieve_config))

    return apps
def get_default_workload_values() -> dict:
    """Return a fresh 'no active workloads' structure for an app."""
    return dict(
        containers=0,
        used_ports=[],
        container_details=[],  # service name and image in use per container
        volumes=[],  # docker volumes
        images=[],
    )
def translate_resources_to_desired_workflow(app_resources: dict) -> dict:
    """Condense raw docker container attrs into the app's workload summary.

    Produces container counts, per-container port/volume/state details, the
    set of images in use and the union of volume mounts.
    """
    # We are looking for following data points
    # No of containers
    # Used ports
    # Networks
    # Volumes
    # Container mounts
    workloads = get_default_workload_values()
    volumes = set()
    images = set()
    workloads['containers'] = len(app_resources['containers'])
    for container in app_resources['containers']:
        service_name = container['Config']['Labels'][COMPOSE_SERVICE_KEY]
        container_ports_config = []
        images.add(container['Config']['Image'])
        for container_port, host_config in container.get('NetworkSettings', {}).get('Ports', {}).items():
            if not host_config:
                # This will happen for ports which are not exposed on the host side
                continue

            # container_port is docker's 'port/protocol' notation, e.g. '80/tcp'
            port_config = {
                'container_port': int(container_port.split('/')[0]),
                'protocol': container_port.split('/')[1],
                'host_ports': [
                    {'host_port': int(host_port['HostPort']), 'host_ip': host_port['HostIp']}
                    for host_port in host_config
                ]
            }
            container_ports_config.append(port_config)

        volume_mounts = []
        for volume_mount in container.get('Mounts', []):
            volume_mounts.append(VolumeMount(
                source=volume_mount['Source'],
                destination=volume_mount['Destination'],
                mode=volume_mount['Mode'],
                # anything that is not a bind mount is reported as a volume
                type='bind' if volume_mount['Type'] == 'bind' else 'volume',
            ))

        if container['State']['Status'].lower() == 'running':
            if health_config := container['State'].get('Health'):
                if health_config['Status'] == 'healthy':
                    state = ContainerState.RUNNING.value
                else:
                    # Running but not (yet) healthy counts as still starting
                    state = ContainerState.STARTING.value
            else:
                state = ContainerState.RUNNING.value
        elif container['State']['Status'].lower() == 'created':
            state = ContainerState.CREATED.value
        elif container['State']['Status'] == 'exited' and container['State']['ExitCode'] != 0:
            # Non-zero exit code is treated as a crash
            state = ContainerState.CRASHED.value
        else:
            state = 'exited'

        workloads['container_details'].append({
            'service_name': service_name,
            'image': container['Config']['Image'],
            'port_config': container_ports_config,
            'state': state,
            'volume_mounts': [v.__dict__ for v in volume_mounts],
            'id': container['Id'],
        })
        workloads['used_ports'].extend(container_ports_config)
        # set dedups mounts across containers (hash ignores mode, see VolumeMount)
        volumes.update(volume_mounts)

    workloads.update({
        'images': list(images),
        'volumes': [v.__dict__ for v in volumes],
    })
    return workloads
| 9,348 | Python | .py | 195 | 38.687179 | 118 | 0.630154 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,840 | rollback.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/rollback.py | import os
import shutil
from pkg_resources import parse_version
from .path import get_installed_app_versions_dir_path
from .utils import RE_VERSION
def get_rollback_versions(app_name: str, current_version: str) -> list[str]:
    """Return installed versions older than `current_version`, ascending.

    Only directory names matching the version regex are considered.
    """
    rollback_versions = []
    with os.scandir(get_installed_app_versions_dir_path(app_name)) as scan:
        for entry in filter(
            lambda e: e.name != current_version and e.is_dir() and RE_VERSION.findall(e.name) and (
                parse_version(e.name) < parse_version(current_version)
            ), scan
        ):
            rollback_versions.append(entry.name)

    return sorted(rollback_versions, key=parse_version)


def clean_newer_versions(app_name: str, current_version: str):
    """
    Any versions above current_version will be removed from app's config
    """
    with os.scandir(get_installed_app_versions_dir_path(app_name)) as scan:
        for entry in filter(
            lambda e: e.name != current_version and e.is_dir() and RE_VERSION.findall(e.name) and (
                parse_version(e.name) > parse_version(current_version)
            ), scan
        ):
            # best-effort removal; leftover files are ignored
            shutil.rmtree(entry.path, ignore_errors=True)
| 1,188 | Python | .py | 26 | 38.115385 | 99 | 0.668398 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,841 | networks.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/docker/networks.py | from .casing import convert_case_for_dict_or_list
from .utils import get_docker_client
def list_networks() -> list[dict]:
    """Return the attrs of all docker networks with snake_case keys."""
    networks = []
    with get_docker_client() as client:
        for network in client.networks.list(greedy=False):
            attrs = network.attrs | {'short_id': network.short_id}
            # 'EnableIPv6' is popped and re-added under the exact desired key
            # before the generic case conversion runs
            attrs['enable_ipv6'] = attrs.pop('EnableIPv6', False)
            networks.append(convert_case_for_dict_or_list(attrs))

    return networks
| 461 | Python | .py | 10 | 39.2 | 66 | 0.674107 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,842 | images.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/docker/images.py | import typing
import docker.errors
from middlewared.service import CallError
from .casing import convert_case_for_dict_or_list
from .utils import get_docker_client
def list_images() -> list[dict]:
    """Return the attrs of every docker image known to the local daemon.

    Keys in the returned dicts are converted from docker's CamelCase to
    snake_case.
    """
    with get_docker_client() as client:
        # plain comprehension replaces the redundant list-around-map wrapper
        return [convert_case_for_dict_or_list(image.attrs) for image in client.images.list()]
def pull_image(
    image_tag: str, callback: typing.Callable = None, username: str | None = None, password: str | None = None
):
    """Pull `image_tag`, optionally authenticating and streaming progress.

    `callback`, when provided, is invoked with each decoded progress line
    emitted by the docker daemon.

    Raises `CallError` when only one of username/password is given or when
    the pull itself fails.
    """
    if username and not password:
        raise CallError('Password is required when username is provided')
    if password and not username:
        raise CallError('Username is required when password is provided')

    auth_config = {
        'username': username,
        'password': password,
    } if username else None

    with get_docker_client() as client:
        try:
            response = client.api.pull(image_tag, auth_config=auth_config, stream=True, decode=True)
            for line in response:
                if callback:
                    callback(line)
        except docker.errors.APIError as e:
            # chain the original exception so the docker error isn't lost
            raise CallError(f'Failed to pull {image_tag!r} image: {e!s}') from e
def delete_image(image_id: str, force: bool = False):
    """Remove a docker image, raising `CallError` on failure.

    `force` is passed through to docker's image removal.
    """
    with get_docker_client() as client:
        try:
            client.images.remove(image=image_id, force=force)
        except docker.errors.ImageNotFound:
            # "not found" is self-explanatory; suppress the docker traceback
            raise CallError(f'{image_id!r} image not found') from None
        except docker.errors.APIError as e:
            # chain the original exception so the docker error isn't lost
            raise CallError(f'Failed to delete {image_id!r} image: {e!s}') from e
| 1,630 | Python | .py | 39 | 33.615385 | 110 | 0.647878 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,843 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/docker/utils.py | import contextlib
import docker
PROJECT_KEY: str = 'com.docker.compose.project'
@contextlib.contextmanager
def get_docker_client() -> docker.DockerClient:
    """Yield a docker client built from the environment, closing it on exit."""
    client = docker.from_env()
    try:
        yield client
    finally:
        client.close()
| 256 | Python | .py | 10 | 21.4 | 47 | 0.72314 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,844 | query.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/docker/query.py | import docker.errors
from collections import defaultdict
from itertools import chain, repeat
from .utils import get_docker_client, PROJECT_KEY
def list_resources_by_project(project_name: str | None = None) -> dict[str, dict[str, list]]:
    """List docker resources grouped by compose project, retrying once.

    A resource can disappear between being listed and being inspected (e.g.
    while an app is stopping), surfacing as docker's NotFound — so one retry
    is attempted before the error is propagated.
    """
    for attempt in range(2):
        try:
            return list_resources_by_project_internal(project_name)
        except docker.errors.NotFound:
            if attempt == 1:
                raise
def list_resources_by_project_internal(project_name: str | None = None) -> dict[str, dict[str, list]]:
    """Group docker containers/networks/volumes by their compose project label.

    Returns {project: {'containers': [...], 'networks': [...], 'volumes':
    [...]}} where each list holds raw docker attrs dicts.
    """
    with get_docker_client() as client:
        # without a specific project, match any resource carrying the label
        label_filter = {'label': f'{PROJECT_KEY}={project_name}' if project_name else PROJECT_KEY}
        containers = client.containers.list(all=True, filters=label_filter, sparse=False)
        networks = client.networks.list(filters=label_filter)
        volumes = client.volumes.list(filters=label_filter)

        projects = defaultdict(lambda: {'containers': [], 'networks': [], 'volumes': []})
        for resource_type, resource in chain(
            zip(repeat('containers'), containers), zip(repeat('networks'), networks), zip(repeat('volumes'), volumes)
        ):
            # containers expose labels directly; networks/volumes keep them in attrs
            projects[
                resource.labels[PROJECT_KEY] if resource_type == 'containers' else resource.attrs['Labels'][PROJECT_KEY]
            ][resource_type].append(resource.attrs)

        return projects
| 1,414 | Python | .py | 27 | 43.962963 | 120 | 0.662074 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,845 | stats.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/docker/stats.py | from collections import defaultdict
import requests
from .utils import get_docker_client, PROJECT_KEY
def get_default_stats():
    """Return a defaultdict producing zeroed per-project stats entries."""
    def _default_network_stats():
        return {'rx_bytes': 0, 'tx_bytes': 0}

    def _default_project_stats():
        return {
            'cpu_usage': 0,
            'memory': 0,
            'networks': defaultdict(_default_network_stats),
            'blkio': {'read': 0, 'write': 0},
        }

    return defaultdict(_default_project_stats)
def list_resources_stats_by_project(project_name: str | None = None) -> dict:
    """Aggregate container stats per compose project, retrying once on a race."""
    retries = 2
    while retries > 0:
        # We do this because when an app is being stopped, we can run into a race condition
        # where the container got listed but when we queried it's stats we were not able
        # to get them as the container by that time had been nuked (this is similar to what we
        # do when we list resources by project)
        try:
            return list_resources_stats_by_project_internal(project_name)
        except requests.exceptions.HTTPError:
            retries -= 1
            if retries == 0:
                raise


def list_resources_stats_by_project_internal(project_name: str | None = None) -> dict:
    """Sum cpu/memory/network/blkio stats of each project's containers."""
    projects = get_default_stats()
    with get_docker_client() as client:
        label_filter = {'label': f'{PROJECT_KEY}={project_name}' if project_name else PROJECT_KEY}
        for container in client.containers.list(all=True, filters=label_filter, sparse=False):
            # one_shot=True: take a single stats sample rather than streaming
            stats = container.stats(stream=False, decode=None, one_shot=True)
            project = container.labels.get(PROJECT_KEY)
            if not project:
                continue

            blkio_container_stats = stats.get('blkio_stats', {}).get('io_service_bytes_recursive') or {}
            project_stats = projects[project]
            project_stats['cpu_usage'] += stats.get('cpu_stats', {}).get('cpu_usage', {}).get('total_usage', 0)
            project_stats['memory'] += stats.get('memory_stats', {}).get('usage', 0)
            for entry in filter(lambda x: x['op'] in ('read', 'write'), blkio_container_stats):
                project_stats['blkio'][entry['op']] += entry['value']

            for net_name, net_values in stats.get('networks', {}).items():
                project_stats['networks'][net_name]['rx_bytes'] += net_values.get('rx_bytes', 0)
                project_stats['networks'][net_name]['tx_bytes'] += net_values.get('tx_bytes', 0)

    return projects
| 2,333 | Python | .py | 42 | 46.02381 | 111 | 0.622096 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,846 | casing.py | truenas_middleware/src/middlewared/middlewared/plugins/apps/ix_apps/docker/casing.py | import re
_CAMEL_BOUNDARY_RE = re.compile(r'(.)([A-Z][a-z]+)')
_LOWER_UPPER_RE = re.compile(r'([a-z0-9])([A-Z])')


def change_case(value: str) -> str:
    """Convert a CamelCase/mixedCase identifier to snake_case."""
    with_boundaries = _CAMEL_BOUNDARY_RE.sub(r'\1_\2', value)
    return _LOWER_UPPER_RE.sub(r'\1_\2', with_boundaries).lower()
def convert_case_for_dict_or_list(data: dict | list) -> dict | list:
if isinstance(data, dict):
new_data = {}
for key, value in data.items():
new_key = change_case(key)
if isinstance(value, dict):
new_value = convert_case_for_dict_or_list(value)
elif isinstance(value, list):
new_value = [convert_case_for_dict_or_list(item) if isinstance(item, dict) else item for item in value]
else:
new_value = value
new_data[new_key] = new_value
return new_data
elif isinstance(data, list):
return [convert_case_for_dict_or_list(item) for item in data]
else:
return data
| 885 | Python | .py | 21 | 32.952381 | 119 | 0.567442 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,847 | cert_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/system/cert_attachments.py | from middlewared.common.attachment.certificate import CertificateServiceAttachmentDelegate
class SystemGeneralCertificateAttachmentDelegate(CertificateServiceAttachmentDelegate):
    # Associates system.general.ui_certificate (the web UI certificate)
    # with the http service
    CERT_FIELD = 'ui_certificate'
    HUMAN_NAME = 'UI Service'
    NAMESPACE = 'system.general'
    SERVICE = 'http'


class SystemAdvancedCertificateAttachmentDelegate(CertificateServiceAttachmentDelegate):
    # Associates system.advanced.syslog_tls_certificate with the syslogd service
    CERT_FIELD = 'syslog_tls_certificate'
    HUMAN_NAME = 'Syslog Service'
    NAMESPACE = 'system.advanced'
    SERVICE = 'syslogd'


async def setup(middleware):
    """Register both certificate attachment delegates at plugin load."""
    await middleware.call(
        'certificate.register_attachment_delegate', SystemGeneralCertificateAttachmentDelegate(middleware)
    )
    await middleware.call(
        'certificate.register_attachment_delegate', SystemAdvancedCertificateAttachmentDelegate(middleware)
    )
| 838 | Python | .py | 18 | 41.555556 | 107 | 0.802956 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,848 | cli.py | truenas_middleware/src/middlewared/middlewared/plugins/system/cli.py | import contextlib
import signal
import psutil
from middlewared.service import Service, private
class SystemService(Service):

    @private
    def reload_cli(self):
        """Signal every running `/usr/bin/cli` process to reload (SIGUSR1).

        Best-effort: processes that exit or become inaccessible while we
        iterate are silently skipped.
        """
        for process in psutil.process_iter(['pid', 'cmdline']):
            with contextlib.suppress(Exception):
                # Use the info pre-fetched by process_iter(); calling
                # process.cmdline() again could raise NoSuchProcess for a
                # process that exited after being listed — previously that
                # call sat outside the suppress() guard.
                cmdline = process.info['cmdline'] or []
                if len(cmdline) >= 2 and cmdline[1] == '/usr/bin/cli':
                    process.send_signal(signal.SIGUSR1)
| 448 | Python | .py | 12 | 29.666667 | 66 | 0.657407 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,849 | dmi.py | truenas_middleware/src/middlewared/middlewared/plugins/system/dmi.py | from functools import cache
from ixhardware import parse_dmi
from middlewared.service import private, Service
class SystemService(Service):

    @private
    @cache
    def dmidecode_info(self):
        """Return selected DMI fields as a flat dict.

        NOTE(review): @cache on a method keys on `self` and keeps it alive for
        the process lifetime — acceptable assuming middleware Service objects
        are effectively singletons (confirm).
        """
        dmi_info = self.dmidecode_info_internal()
        return {
            # falsy release date is normalized to an empty string
            'bios-release-date': dmi_info.bios_release_date or "",
            'ecc-memory': dmi_info.ecc_memory,
            'baseboard-manufacturer': dmi_info.baseboard_manufacturer,
            'baseboard-product-name': dmi_info.baseboard_product_name,
            'system-manufacturer': dmi_info.system_manufacturer,
            'system-product-name': dmi_info.system_product_name,
            'system-serial-number': dmi_info.system_serial_number,
            'system-version': dmi_info.system_version,
            'has-ipmi': dmi_info.has_ipmi,
        }

    @private
    @cache
    def dmidecode_info_internal(self):
        """Parse and memoize the raw DMI table via ixhardware."""
        return parse_dmi()
| 918 | Python | .py | 23 | 31.521739 | 70 | 0.656918 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,850 | time.py | truenas_middleware/src/middlewared/middlewared/plugins/system/time.py | import subprocess
import time
from middlewared.schema import accepts, Int
from middlewared.service import private, Service, ValidationErrors
from middlewared.service_exception import CallError
MAX_PERMITTED_SLEW = 86400 * 30
class SystemService(Service):

    @private
    @accepts(Int('new_time', required=True))
    def set_time(self, ts):
        """
        This endpoint sets RTC to UTC and then sets the time to the specified
        value.

        `ts` is a unix timestamp in seconds; it must be positive and within
        30 days of the current clock.

        Raises `ValidationErrors` for a bad timestamp and `CallError` when
        `timedatectl` fails.
        """
        verr = ValidationErrors()
        if ts < 0:
            verr.add('system_set_time.new_time', 'timestamp must be positive value')
        if abs(ts - time.time()) > MAX_PERMITTED_SLEW:
            verr.add(
                'system_set_time.new_time',
                'new timestamp requires slewing clock more than maximum permitted value of 30 days'
            )
        verr.check()

        # stop NTP service before making clock changes; the finally block
        # guarantees it is restarted on every path (previously the restart was
        # duplicated before each raise, and an unexpected exception would have
        # left ntpd stopped)
        self.middleware.call_sync('service.stop', 'ntpd')
        try:
            # Make sure RTC is set to UTC
            timedatectl = subprocess.run(['timedatectl', 'set-local-rtc', '0'], capture_output=True, check=False)
            if timedatectl.returncode:
                raise CallError(f'Failed to set RTC to UTC: {timedatectl.stderr.decode()}')

            # Set to our new timestamp
            timedatectl = subprocess.run(['timedatectl', 'set-time', f'@{int(ts)}'], capture_output=True, check=False)
            if timedatectl.returncode:
                raise CallError(f'Failed to set clock to ({ts}): {timedatectl.stderr.decode()}')
        finally:
            self.middleware.call_sync('service.start', 'ntpd')
| 1,710 | Python | .py | 36 | 38.861111 | 114 | 0.651834 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,851 | lifecycle.py | truenas_middleware/src/middlewared/middlewared/plugins/system/lifecycle.py | import asyncio
from middlewared.api import api_method
from middlewared.api.current import SystemRebootArgs, SystemRebootResult, SystemShutdownArgs, SystemShutdownResult
from middlewared.schema import accepts, Bool, returns, Str
from middlewared.service import job, private, Service, no_auth_required, pass_app
from middlewared.utils import run
from .utils import lifecycle_conf, RE_KDUMP_CONFIGURED
class SystemService(Service):

    @private
    async def first_boot(self):
        """Return True when this is the first boot since installation."""
        return lifecycle_conf.SYSTEM_FIRST_BOOT

    @no_auth_required
    @accepts()
    @returns(Str('system_boot_identifier'))
    @pass_app()
    async def boot_id(self, app):
        """
        Returns a unique boot identifier.

        It is supposed to be unique every system boot.
        """
        # NOTE: this is used, at time of writing, by the UI
        # team to handle caching of web page assets. This
        # doesn't require authentication since our login page
        # also has information that is cached. Security team
        # is aware and the risk is minimal
        return lifecycle_conf.SYSTEM_BOOT_ID

    @accepts()
    @returns(Bool('system_ready'))
    async def ready(self):
        """
        Returns whether the system completed boot and is ready to use
        """
        return await self.middleware.call('system.state') != 'BOOTING'

    @accepts()
    @returns(Str('system_state', enum=['SHUTTING_DOWN', 'READY', 'BOOTING']))
    async def state(self):
        """
        Returns system state:
        "BOOTING" - System is booting
        "READY" - System completed boot and is ready to use
        "SHUTTING_DOWN" - System is shutting down
        """
        # SHUTTING_DOWN wins over READY: both flags can be set at once
        if lifecycle_conf.SYSTEM_SHUTTING_DOWN:
            return 'SHUTTING_DOWN'
        if lifecycle_conf.SYSTEM_READY:
            return 'READY'
        return 'BOOTING'

    @api_method(SystemRebootArgs, SystemRebootResult)
    @job()
    @pass_app(rest=True)
    async def reboot(self, app, job, reason, options):
        """
        Reboots the operating system.

        Emits an "added" event of name "system" and id "reboot".
        """
        await self.middleware.log_audit_message(app, 'REBOOT', {'reason': reason}, True)
        self.middleware.send_event('system.reboot', 'ADDED', fields={'reason': reason})

        # optional grace period before the actual reboot
        if options['delay'] is not None:
            await asyncio.sleep(options['delay'])

        await run(['/sbin/shutdown', '-r', 'now'])

    @api_method(SystemShutdownArgs, SystemShutdownResult)
    @job()
    @pass_app(rest=True)
    async def shutdown(self, app, job, reason, options):
        """
        Shuts down the operating system.

        An "added" event of name "system" and id "shutdown" is emitted when shutdown is initiated.
        """
        await self.middleware.log_audit_message(app, 'SHUTDOWN', {'reason': reason}, True)
        self.middleware.send_event('system.shutdown', 'ADDED', fields={'reason': reason})

        # optional grace period before powering off
        if options['delay'] is not None:
            await asyncio.sleep(options['delay'])

        await run(['/sbin/poweroff'])
async def _event_system_ready(middleware, event_type, args):
    """Mark the system ready and run post-boot checks (kdump health, first-boot usage)."""
    lifecycle_conf.SYSTEM_READY = True

    advanced = await middleware.call('system.advanced.config')
    if not advanced['kdump_enabled']:
        # kdump not in use; clear any stale alert.
        await middleware.call('alert.oneshot_delete', 'KdumpNotReady', None)
    else:
        proc = await run(['kdump-config', 'status'], check=False)
        if proc.returncode:
            middleware.logger.error('Failed to retrieve kdump-config status: %s', proc.stderr.decode())
        elif RE_KDUMP_CONFIGURED.findall(proc.stdout.decode()):
            # kdump reports it is ready; drop the alert if one was raised earlier.
            await middleware.call('alert.oneshot_delete', 'KdumpNotReady', None)
        else:
            await middleware.call('alert.oneshot_create', 'KdumpNotReady', None)

    if await middleware.call('system.first_boot'):
        # Fire-and-forget usage reporting on the first boot after install.
        middleware.create_task(middleware.call('usage.firstboot'))
async def _event_system_shutdown(middleware, event_type, args):
    # Checked by system.state so API consumers observe 'SHUTTING_DOWN'.
    lifecycle_conf.SYSTEM_SHUTTING_DOWN = True
async def setup(middleware):
    # Wire the lifecycle flags to the corresponding middleware events.
    middleware.event_subscribe('system.ready', _event_system_ready)
    middleware.event_subscribe('system.shutdown', _event_system_shutdown)
| 4,209 | Python | .py | 93 | 37.645161 | 114 | 0.667156 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,852 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/system/utils.py | import enum
import os
import re
import typing
from middlewared.utils import MIDDLEWARE_RUN_DIR
DEBUG_MAX_SIZE = 30
FIRST_INSTALL_SENTINEL = '/data/first-boot'
RE_KDUMP_CONFIGURED = re.compile(r'current state\s*:\s*(ready to kdump)', flags=re.M)
class VMProvider(enum.Enum):
    """VM providers recognized by system.vm_provider (value is the API string)."""
    AZURE = 'AZURE'
    NONE = 'NONE'
class Lifecycle:
    """Mutable, process-wide record of the system's boot lifecycle state."""

    def __init__(self):
        """Start as a freshly booting system with nothing determined yet."""
        # Kernel boot identifier; filled in during plugin setup.
        self.SYSTEM_BOOT_ID = None
        # SYSTEM_FIRST_BOOT: set when the first-install sentinel is found at boot.
        # SYSTEM_READY: flag telling whether the system completed boot and is ready to use.
        # SYSTEM_SHUTTING_DOWN: flag telling whether the system is shutting down.
        for flag in ('SYSTEM_FIRST_BOOT', 'SYSTEM_READY', 'SYSTEM_SHUTTING_DOWN'):
            setattr(self, flag, False)
def get_debug_execution_dir(system_dataset_path: str, iteration: typing.Optional[int] = 0) -> str:
    """Return the directory ixdiagnose should execute in.

    Debugs are placed on the system dataset when one is available, otherwise
    under the middleware runtime directory.  A truthy `iteration` yields a
    numbered directory so retries do not clobber a previous attempt.
    """
    if iteration:
        debug_name = f'ixdiagnose-{iteration}'
    else:
        debug_name = 'ixdiagnose'
    base_dir = MIDDLEWARE_RUN_DIR if system_dataset_path is None else system_dataset_path
    return os.path.join(base_dir, debug_name)
lifecycle_conf = Lifecycle()
| 1,004 | Python | .py | 25 | 35.52 | 105 | 0.719008 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,853 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/system/__init__.py | import os
import uuid
from middlewared.utils import BOOTREADY
from .utils import FIRST_INSTALL_SENTINEL, lifecycle_conf
def firstboot(middleware):
    """Classify this boot: already-booted, first boot after install, or mid-boot.

    On the first boot the install sentinel is consumed and, on enterprise
    systems, autotune is switched on by default.
    """
    if os.path.exists(BOOTREADY):
        # Boot already completed at least once; nothing first-boot related to do.
        lifecycle_conf.SYSTEM_READY = True
        return

    if not os.path.exists(FIRST_INSTALL_SENTINEL):
        return

    lifecycle_conf.SYSTEM_FIRST_BOOT = True
    # Delete sentinel file before making clone as we do not
    # want the clone to have the file in it.
    os.unlink(FIRST_INSTALL_SENTINEL)
    if middleware.call_sync('system.is_enterprise'):
        advanced = middleware.call_sync('datastore.config', 'system.advanced')
        middleware.call_sync('datastore.update', 'system.advanced', advanced['id'], {'adv_autotune': True})
def read_system_boot_id(middleware):
    """Return the kernel's per-boot UUID, falling back to a random one.

    `/proc/sys/kernel/random/boot_id` is regenerated by the kernel on every
    boot; if it cannot be read we log the problem and synthesize a UUID so
    callers always receive a usable identifier.
    """
    try:
        with open('/proc/sys/kernel/random/boot_id', 'r') as f:
            raw = f.read()
    except FileNotFoundError:
        middleware.logger.error('Failed to read boot_id from /proc/sys/kernel/random/boot_id')
        return str(uuid.uuid4())
    return raw.strip()
async def setup(middleware):
    # Cache the kernel boot id once; it is constant for the lifetime of this boot.
    lifecycle_conf.SYSTEM_BOOT_ID = await middleware.run_in_thread(read_system_boot_id, middleware)

    middleware.event_register('system.ready', 'Finished boot process')
    middleware.event_register('system.reboot', 'Started reboot process')
    middleware.event_register('system.shutdown', 'Started shutdown process')

    # File access in firstboot() is synchronous; keep the event loop free.
    await middleware.run_in_thread(firstboot, middleware)

    settings = await middleware.call('system.general.config')
    middleware.logger.debug('Setting timezone to %r', settings['timezone'])
    await middleware.call('core.environ_update', {'TZ': settings['timezone']})
    await middleware.call('system.general.set_language')
    # NOTE(review): 2 appears to correspond to zvol volmode 'dev' — confirm
    # against the sysctl plugin before relying on this.
    await middleware.call('sysctl.set_zvol_volmode', 2)
| 1,780 | Python | .py | 33 | 47.636364 | 109 | 0.720046 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,854 | product.py | truenas_middleware/src/middlewared/middlewared/plugins/system/product.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import os
from datetime import date
from licenselib.license import ContractType, Features, License
from middlewared.plugins.truenas import EULA_PENDING_PATH
from middlewared.schema import accepts, Bool, returns, Str
from middlewared.service import no_authz_required, private, Service, ValidationError
from middlewared.utils import ProductType, sw_info
from middlewared.utils.license import LICENSE_ADDHW_MAPPING
LICENSE_FILE = '/data/license'
LICENSE_FILE_MODE = 0o600
PRODUCT_NAME = 'TrueNAS'
class SystemService(Service):
    """Product identity: product type, version strings, and license management."""

    # Cached result of product_type(); license_update() resets this to None so
    # the type is re-derived after a new license is installed.
    PRODUCT_TYPE = None

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Str('product_type'))
    async def product_type(self):
        """
        Returns the type of the product.

        SCALE - TrueNAS SCALE, community version
        SCALE_ENTERPRISE - TrueNAS SCALE Enterprise, appliance version
        """
        if SystemService.PRODUCT_TYPE is None:
            if await self.is_ha_capable():
                # HA capable hardware
                SystemService.PRODUCT_TYPE = ProductType.SCALE_ENTERPRISE
            else:
                if license_ := await self.middleware.call('system.license'):
                    if license_['model'].lower().startswith('freenas'):
                        # legacy freenas certified
                        SystemService.PRODUCT_TYPE = ProductType.SCALE
                    else:
                        # the license has been issued for a "certified" line
                        # of hardware which is considered enterprise
                        SystemService.PRODUCT_TYPE = ProductType.SCALE_ENTERPRISE
                else:
                    # no license
                    SystemService.PRODUCT_TYPE = ProductType.SCALE

        return SystemService.PRODUCT_TYPE

    @private
    async def is_ha_capable(self):
        # Any detected failover hardware other than 'MANUAL' means HA-capable.
        return await self.middleware.call('failover.hardware') != 'MANUAL'

    @private
    async def is_enterprise(self):
        """Return True when the product type is SCALE_ENTERPRISE."""
        return await self.middleware.call('system.product_type') == ProductType.SCALE_ENTERPRISE

    @no_authz_required
    @accepts()
    @returns(Str('truenas_version_shortname'))
    def version_short(self):
        """Returns the short name of the software version of the system."""
        return sw_info()['version']

    @accepts(Str('version_str', default=None, required=False))
    @returns(Str('truenas_release_notes_url', null=True))
    def release_notes_url(self, version_str):
        """Returns the release notes URL for a version of SCALE.

        `version_str` str: represents a version to check against

        If `version` is not provided, then the release notes URL will return
        a link for the currently installed version of SCALE.

        Returns None (implicitly) when fewer than two version components can
        be parsed out of the given string.
        """
        to_format = self.version_short() if version_str is None else version_str
        to_format = to_format.split('-')[0].split('.')  # looks like ['23', '10', '0', '1']
        len_to_format = len(to_format)
        if len_to_format >= 2:
            maj_vers = '.'.join(to_format[0:2])
            base_url = f'https://www.truenas.com/docs/scale/{maj_vers}/gettingstarted/scalereleasenotes'
            if len_to_format == 2:
                return base_url
            else:
                # Point releases link to an anchor within the major-version page.
                return f'{base_url}/#{"".join(to_format)}'

    @no_authz_required
    @accepts()
    @returns(Str('truenas_version'))
    def version(self):
        """Returns the full name of the software version of the system."""
        return sw_info()['fullname']

    @no_authz_required
    @accepts()
    @returns(Str('is_stable'))
    def is_stable(self):
        """
        Returns whether software version of the system is stable.
        """
        return sw_info()['stable']

    @private
    async def platform(self):
        return 'LINUX'

    @private
    def license(self):
        """Return the parsed license as a dict, or None when absent/invalid."""
        return self._get_license()

    @staticmethod
    def _get_license():
        """Read and deserialize LICENSE_FILE; return None on any failure."""
        try:
            with open(LICENSE_FILE) as f:
                licenseobj = License.load(f.read().strip('\n'))
        except Exception:
            return

        license_ = {
            'model': licenseobj.model,
            'system_serial': licenseobj.system_serial,
            'system_serial_ha': licenseobj.system_serial_ha,
            'contract_type': ContractType(licenseobj.contract_type).name.upper(),
            'contract_start': licenseobj.contract_start,
            'contract_end': licenseobj.contract_end,
            'legacy_contract_hardware': (
                licenseobj.contract_hardware.name.upper()
                if licenseobj.contract_type == ContractType.legacy
                else None
            ),
            'legacy_contract_software': (
                licenseobj.contract_software.name.upper()
                if licenseobj.contract_type == ContractType.legacy
                else None
            ),
            'customer_name': licenseobj.customer_name,
            'expired': licenseobj.expired,
            'features': [i.name.upper() for i in licenseobj.features],
            'addhw': licenseobj.addhw,
            'addhw_detail': [],
        }

        # Render additional-hardware entries as human-readable strings.
        for quantity, code in licenseobj.addhw:
            try:
                license_['addhw_detail'].append(f'{quantity} x {LICENSE_ADDHW_MAPPING[code]} Expansion shelf')
            except KeyError:
                license_['addhw_detail'].append(f'<Unknown hardware {code}>')

        if Features.fibrechannel not in licenseobj.features and licenseobj.contract_start < date(2017, 4, 14):
            # Licenses issued before 2017-04-14 had a bug in the feature bit for fibrechannel, which
            # means they were issued having dedup+jails instead.
            if Features.dedup in licenseobj.features and Features.jails in licenseobj.features:
                license_['features'].append(Features.fibrechannel.name.upper())

        return license_

    @private
    def license_path(self):
        return LICENSE_FILE

    @accepts(Str('license'))
    @returns()
    def license_update(self, license_):
        """Update license file"""
        try:
            dser_license = License.load(license_)
        except Exception:
            raise ValidationError('system.license', 'This is not a valid license.')
        else:
            if dser_license.system_serial_ha:
                if not self.middleware.call_sync('system.is_ha_capable'):
                    raise ValidationError('system.license', 'This is not an HA capable system.')

        # Remember the previous type so the post-update hook can detect upgrades.
        prev_product_type = self.middleware.call_sync('system.product_type')

        with open(LICENSE_FILE, 'w+') as f:
            f.write(license_)
            os.fchmod(f.fileno(), LICENSE_FILE_MODE)

        self.middleware.call_sync('etc.generate', 'rc')
        # Invalidate the cached product type; it may change with the new license.
        SystemService.PRODUCT_TYPE = None
        if self.middleware.call_sync('system.is_enterprise'):
            # Enterprise installs must (re-)accept the EULA; create the pending marker.
            with open(EULA_PENDING_PATH, 'a+') as f:
                os.fchmod(f.fileno(), 0o600)

        self.middleware.call_sync('alert.alert_source_clear_run', 'LicenseStatus')
        # Propagate the new license to the standby controller (if any).
        self.middleware.call_sync('failover.configure.license', dser_license)
        self.middleware.run_coroutine(
            self.middleware.call_hook('system.post_license_update', prev_product_type=prev_product_type), wait=False,
        )

    @accepts(Str('feature', enum=['DEDUP', 'FIBRECHANNEL', 'VM']))
    @returns(Bool('feature_enabled'))
    async def feature_enabled(self, name):
        """
        Returns whether the `feature` is enabled or not
        """
        license_ = await self.middleware.call('system.license')
        if license_ and name in license_['features']:
            return True
        return False
async def hook_license_update(middleware, prev_product_type, *args, **kwargs):
    """Post-license-change hook: enable autotune when a system becomes Enterprise."""
    # NOTE(review): system.product_type returns ProductType values (e.g.
    # 'SCALE_ENTERPRISE'); confirm 'ENTERPRISE' is the intended comparison target.
    if prev_product_type == 'ENTERPRISE':
        return
    if await middleware.call('system.product_type') == 'ENTERPRISE':
        await middleware.call('system.advanced.update', {'autotune': True})
async def setup(middleware):
    # Invoked via call_hook('system.post_license_update', ...) after a license change.
    middleware.register_hook('system.post_license_update', hook_license_update)
| 8,145 | Python | .py | 177 | 36.067797 | 117 | 0.629223 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,855 | debug.py | truenas_middleware/src/middlewared/middlewared/plugins/system/debug.py | import contextlib
import io
import os
import requests
import shutil
import tarfile
import time
from middlewared.schema import accepts, returns
from middlewared.service import CallError, job, private, Service
from ixdiagnose.config import conf
from ixdiagnose.event import event_callbacks
from ixdiagnose.run import generate_debug
from .utils import DEBUG_MAX_SIZE, get_debug_execution_dir
class SystemService(Service):
    """Debug (ixdiagnose) archive generation and streaming download."""

    @private
    @job(lock='system.debug_generate', lock_queue_size=1)
    def debug_generate(self, job):
        """
        Generate system debug file.

        Result value will be the absolute path of the file.
        """
        system_dataset_path = self.middleware.call_sync('systemdataset.config')['path']
        i = 0
        while True:
            execution_dir = get_debug_execution_dir(system_dataset_path, i)
            dump = os.path.join(execution_dir, 'ixdiagnose.tgz')

            # Be extra safe in case we have left over from previous run
            try:
                with contextlib.suppress(FileNotFoundError):
                    shutil.rmtree(execution_dir, ignore_errors=False)
            except Exception as e:
                # Retry with a numbered directory; give up after 5 attempts.
                i += 1
                if i >= 5:
                    raise CallError(f'Failed to generate ixdiagnose debug: {e!r}')
                else:
                    self.logger.warning('Failed to generate ixdiagnose debug: %r', str(e))
            else:
                break

        conf.apply({
            'compress': True,
            'debug_path': os.path.join(execution_dir, 'debug'),
            'clean_debug_path': True,
            'compressed_path': dump,
        })

        def progress_callback(percent, desc):
            # Forward ixdiagnose progress into the middleware job.
            job.set_progress(percent, desc)

        event_callbacks.register(progress_callback)

        try:
            return generate_debug()
        except Exception as e:
            raise CallError(f'Failed to generate debug: {e!r}')

    @accepts(roles=['READONLY_ADMIN'])
    @returns()
    @job(lock='system.debug', lock_queue_size=0, pipes=['output'])
    def debug(self, job):
        """
        Download a debug file.
        """
        # Generation accounts for the first 90% of this job's progress.
        job.set_progress(0, 'Generating debug file')
        debug_job = self.middleware.call_sync(
            'system.debug_generate',
            job_on_progress_cb=lambda encoded: job.set_progress(int(encoded['progress']['percent'] * 0.9),
                                                                encoded['progress']['description'])
        )

        standby_debug = None
        if self.middleware.call_sync('failover.licensed'):
            # On HA, also grab the standby controller's debug (best effort).
            try:
                standby_debug = self.middleware.call_sync(
                    'failover.call_remote', 'system.debug_generate', [], {'job': True}
                )
            except Exception:
                # fix: Logger.warn is a deprecated alias of Logger.warning
                self.logger.warning('Failed to get debug from standby node', exc_info=True)
            else:
                remote_ip = self.middleware.call_sync('failover.remote_ip')
                url = self.middleware.call_sync(
                    'failover.call_remote', 'core.download', ['filesystem.get', [standby_debug], 'debug.txz'],
                )[1]
                url = f'http://{remote_ip}:6000{url}'
                # no reason to honor proxy settings in this
                # method since we're downloading the debug
                # archive directly across the heartbeat
                # interface which is point-to-point
                proxies = {'http': '', 'https': ''}
                standby_debug = io.BytesIO()
                with requests.get(url, stream=True, proxies=proxies) as r:
                    for i in r.iter_content(chunk_size=1048576):
                        if standby_debug.tell() > DEBUG_MAX_SIZE * 1048576:
                            raise CallError(f'Standby debug file is bigger than {DEBUG_MAX_SIZE}MiB.')
                        standby_debug.write(i)

        debug_job.wait_sync()
        if debug_job.error:
            raise CallError(debug_job.error)

        job.set_progress(90, 'Preparing debug file for streaming')

        if standby_debug:
            # Debug file cannot be big on HA because we put both debugs in memory
            # so they can be downloaded at once.
            try:
                if os.stat(debug_job.result).st_size > DEBUG_MAX_SIZE * 1048576:
                    raise CallError(f'Debug file is bigger than {DEBUG_MAX_SIZE}MiB.')
            except FileNotFoundError:
                raise CallError('Debug file was not found, try again.')

            network = self.middleware.call_sync('network.configuration.config')
            node = self.middleware.call_sync('failover.node')
            tario = io.BytesIO()
            host_status = self.middleware.call_sync('failover.status')
            with tarfile.open(fileobj=tario, mode='w') as tar:
                if node == 'A':
                    my_hostname = network['hostname']
                    remote_hostname = network['hostname_b']
                else:
                    my_hostname = network['hostname_b']
                    remote_hostname = network['hostname']

                # Mark which controller was active when the debug was taken.
                tar.add(debug_job.result, f'{my_hostname}{"_active" if host_status == "MASTER" else ""}.txz')

                tarinfo = tarfile.TarInfo(f'{remote_hostname}.txz')
                tarinfo.size = standby_debug.tell()
                # need to set a valid modify time because `standby_debug`
                # is an io.BytesIO object which doesn't have any type of
                # file metadata on it.
                tarinfo.mtime = time.time()
                standby_debug.seek(0)
                tar.addfile(tarinfo, fileobj=standby_debug)

            tario.seek(0)
            shutil.copyfileobj(tario, job.pipes.output.w)
        else:
            with open(debug_job.result, 'rb') as f:
                shutil.copyfileobj(f, job.pipes.output.w)

        job.pipes.output.w.close()
| 5,938 | Python | .py | 127 | 33.716535 | 110 | 0.571206 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,856 | globalid.py | truenas_middleware/src/middlewared/middlewared/plugins/system/globalid.py | import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Dict, Int, returns, Str
from middlewared.service import Service
class SystemGlobalID(sa.Model):
    """Single-row table storing this installation's permanent global UUID."""
    __tablename__ = 'system_globalid'

    id = sa.Column(sa.Integer(), primary_key=True)
    # 32 hex characters (a UUID without dashes) — presumably generated at
    # install time; confirm against the datastore migrations.
    system_uuid = sa.Column(sa.String(32))
class SystemGlobalIDService(Service):
    """Read-only access to the system-wide global identifier."""

    class Config:
        datastore_prefix = 'system_globalid'
        namespace = 'system.global'
        cli_namespace = 'system.global'

    ENTRY = Dict(
        'system_globalid_entry',
        Int('id'),
        Str('system_uuid', required=True),
        register=True
    )

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Str('system_uuid'))
    async def id(self):
        """
        Retrieve a 128 bit hexadecimal UUID value unique for each TrueNAS system.
        """
        return (await self.middleware.call('datastore.config', 'system.globalid'))['system_uuid']
| 930 | Python | .py | 25 | 30.92 | 97 | 0.672241 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,857 | health.py | truenas_middleware/src/middlewared/middlewared/plugins/system/health.py | import psutil
import time
from middlewared.event import EventSource
from middlewared.utils.threading import start_daemon_thread
CACHE_POOLS_STATUSES = 'system.system_health_pools'
class SystemHealthEventSource(EventSource):

    """
    Notifies of current system health which include statistics about consumption of memory and CPU, pools and
    if updates are available. An integer `delay` argument can be specified to determine the delay
    on when the periodic event should be generated.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Latest 'update.check_available' status; refreshed by a daemon thread.
        self._check_update = None
        start_daemon_thread(target=self.check_update)

    def check_update(self):
        """Background loop: refresh the cached update status once a day until cancelled."""
        while not self._cancel_sync.is_set():
            try:
                self._check_update = self.middleware.call_sync('update.check_available')['status']
            except Exception:
                # fix: Logger.warn is a deprecated alias of Logger.warning
                self.middleware.logger.warning(
                    'Failed to check available update for system.health event', exc_info=True,
                )
            finally:
                self._cancel_sync.wait(timeout=60 * 60 * 24)

    def pools_statuses(self):
        """Return a mapping of pool name -> {'status': ...} for every pool."""
        return {
            p['name']: {'status': p['status']}
            for p in self.middleware.call_sync('pool.query')
        }

    def run_sync(self):
        # The event argument, when given, is the polling interval in seconds.
        try:
            if self.arg:
                delay = int(self.arg)
            else:
                delay = 10
        except ValueError:
            return

        # Delay too slow
        if delay < 5:
            return

        cp_time = psutil.cpu_times()
        cp_old = cp_time

        while not self._cancel_sync.is_set():
            time.sleep(delay)

            cp_time = psutil.cpu_times()
            # Per-interval CPU time deltas; busy % = (total - idle) / total.
            cp_diff = type(cp_time)(*map(lambda x: x[0] - x[1], zip(cp_time, cp_old)))
            cp_old = cp_time

            cpu_percent = round(((sum(cp_diff) - cp_diff.idle) / sum(cp_diff)) * 100, 2)

            # Pool statuses are expensive to gather; cache them for 30 minutes.
            pools = self.middleware.call_sync(
                'cache.get_or_put',
                CACHE_POOLS_STATUSES,
                1800,
                self.pools_statuses,
            )

            self.send_event('ADDED', fields={
                'cpu_percent': cpu_percent,
                'memory': psutil.virtual_memory()._asdict(),
                'pools': pools,
                'update': self._check_update,
            })
async def setup(middleware):
    # Expose the periodic health feed to subscribers as 'system.health'.
    middleware.register_event_source('system.health', SystemHealthEventSource)
| 2,503 | Python | .py | 63 | 28.825397 | 109 | 0.572727 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,858 | coredump.py | truenas_middleware/src/middlewared/middlewared/plugins/system/coredump.py | from os.path import exists
from systemd import journal
from middlewared.service import private, Service
class SystemService(Service):
    """Expose coredump records collected by systemd-coredump."""

    @private
    def coredumps(self):
        """Return a list of coredump records gathered from the systemd journal.

        Each record carries time/pid/uid/gid/unit/signal/exe plus a 'corefile'
        field of 'none', 'present' or 'missing', depending on whether the core
        file referenced by the journal entry still exists on disk.  Returns a
        possibly-empty (or partial) list if the journal cannot be read.
        """
        coredumps = []
        try:
            with journal.Reader() as reader:
                # Entries produced by systemd-coredump's submit_coredump().
                reader.add_match(CODE_FUNC='submit_coredump')
                for core in reader:
                    coredump = {
                        'time': core['COREDUMP_TIMESTAMP'].strftime('%c'),
                        'pid': core['COREDUMP_PID'],
                        'uid': core['COREDUMP_UID'],
                        'gid': core['COREDUMP_GID'],
                        'unit': core.get('COREDUMP_UNIT'),
                        'sig': core['COREDUMP_SIGNAL'],
                        'exe': core.get('COREDUMP_EXE'),
                    }
                    if 'COREDUMP_FILENAME' not in core or not isinstance(core['COREDUMP_FILENAME'], str):
                        coredump['corefile'] = 'none'
                    else:
                        coredump['corefile'] = 'present' if exists(core['COREDUMP_FILENAME']) else 'missing'
                    coredumps.append(coredump)
        except Exception:
            # Best effort: an unreadable journal should not break callers.
            self.logger.warning('Failed to obtain coredump information', exc_info=True)

        return coredumps
| 1,299 | Python | .py | 28 | 30.464286 | 108 | 0.513834 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,859 | vm_info.py | truenas_middleware/src/middlewared/middlewared/plugins/system/vm_info.py | # -*- coding=utf-8 -*-
import logging
from middlewared.service import private, Service
from middlewared.utils import run
from .utils import VMProvider
logger = logging.getLogger(__name__)
class SystemService(Service):
    """Virtual-machine detection helpers."""

    # Lazily computed caches; virtualization status cannot change at runtime.
    is_vm = None
    vm_hypervisor = None

    @private
    async def vm(self):
        """Return True when the OS is running inside a virtual machine."""
        if self.is_vm is None:
            # systemd-detect-virt prints 'none' on bare metal.
            p = await run(["systemd-detect-virt"], check=False, encoding="utf-8", errors="ignore")
            self.is_vm = p.stdout.strip() != "none"
        return self.is_vm

    @private
    async def vm_provider(self):
        """Return the detected VM provider name ('AZURE' or 'NONE')."""
        if self.vm_hypervisor is None:
            self.vm_hypervisor = VMProvider.NONE
            if await self.vm():
                # Azure is currently identified solely by the DMI manufacturer string.
                dmi_info = await self.middleware.call("system.dmidecode_info")
                if dmi_info["system-manufacturer"] == "Microsoft Corporation":
                    self.vm_hypervisor = VMProvider.AZURE
        return self.vm_hypervisor.value
| 936 | Python | .py | 24 | 31.125 | 98 | 0.641196 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,860 | reboot.py | truenas_middleware/src/middlewared/middlewared/plugins/system/reboot.py | import enum
from middlewared.api import api_method
from middlewared.api.current import SystemRebootInfoArgs, SystemRebootInfoResult
from middlewared.service import private, Service
class RebootReason(enum.Enum):
    """Well-known pending-reboot reasons (value is the user-facing explanation)."""
    FIPS = 'FIPS configuration was changed.'
    UPGRADE = 'This system needs to be rebooted in order for the system upgrade to finish.'
class SystemRebootService(Service):
    """Track and report the reasons for which this system requires a reboot."""

    class Config:
        cli_namespace = 'system.reboot_required'
        namespace = 'system.reboot'

    # Class-level registry used by the singleton service instance:
    # code -> human-readable reason why a reboot is pending.
    reboot_reasons: dict[str, str] = {}

    @api_method(SystemRebootInfoArgs, SystemRebootInfoResult, roles=['SYSTEM_GENERAL_READ'])
    async def info(self):
        """Report the current boot id together with all pending reboot reasons."""
        pending = [
            {'code': code, 'reason': reason}
            for code, reason in self.reboot_reasons.items()
        ]
        return {
            'boot_id': await self.middleware.call('system.boot_id'),
            'reboot_required_reasons': pending,
        }

    @private
    async def add_reason(self, code: str, reason: str):
        """
        Adds a reason for why this system needs a reboot.
        :param code: unique identifier for the reason.
        :param reason: text explanation for the reason.
        """
        self.reboot_reasons[code] = reason
        await self._send_event()

    @private
    async def toggle_reason(self, code: str, reason: str):
        """
        Adds a reason for why this system needs a reboot if it does not exist, removes it otherwise.
        :param code: unique identifier for the reason.
        :param reason: text explanation for the reason.
        """
        if code not in self.reboot_reasons:
            self.reboot_reasons[code] = reason
        else:
            del self.reboot_reasons[code]
        await self._send_event()

    @private
    async def list_reasons(self):
        """
        List reasons code for why this system needs a reboot.
        :return: a list of reason codes
        """
        return [*self.reboot_reasons]

    @private
    async def remove_reason(self, code: str):
        """
        Removes a reason for why this system needs a reboot.
        :param code: unique identifier for the reason that was used to add it.
        """
        self.reboot_reasons.pop(code, None)
        await self._send_event()

    async def _send_event(self):
        # Broadcast the refreshed info payload whenever the registry changes.
        self.middleware.send_event('system.reboot.info', 'CHANGED', id=None, fields=await self.info())
async def setup(middleware):
    # Event emitted whenever the set of pending reboot reasons changes.
    middleware.event_register('system.reboot.info', 'Sent when a system reboot is required.',
                              roles=['SYSTEM_GENERAL_READ'])
| 2,653 | Python | .py | 65 | 32 | 102 | 0.632685 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,861 | info.py | truenas_middleware/src/middlewared/middlewared/plugins/system/info.py | import hashlib
import os
import psutil
import re
import socket
import time
from datetime import datetime, timedelta, timezone
from middlewared.schema import accepts, Bool, Datetime, Dict, Float, Int, List, returns, Str
from middlewared.service import private, Service
from middlewared.utils import sw_buildtime
RE_CPU_MODEL = re.compile(r'^model name\s*:\s*(.*)', flags=re.M)
class SystemService(Service):
    """Basic system information: CPU, memory, uptime, and host identifiers."""

    # Lazily-populated caches; CPU topology and the hostid hash cannot change
    # while the system is running.
    CPU_INFO = {'cpu_model': None, 'core_count': None, 'physical_core_count': None}
    HOST_ID = None

    class Config:
        cli_namespace = 'system'

    @private
    def mem_info(self):
        """Return {'physmem_size': bytes or None} parsed from /proc/meminfo."""
        result = {'physmem_size': None}
        try:
            with open('/proc/meminfo') as f:
                for line in filter(lambda x: x.find('MemTotal') != -1, f):
                    fields = line.split()
                    # procfs reports in kB
                    result['physmem_size'] = int(fields[1]) * 1024
        except (FileNotFoundError, ValueError, IndexError):
            # Best effort: leave physmem_size as None if /proc is unreadable.
            pass
        return result

    @private
    def get_cpu_model(self):
        # First 'model name' entry in /proc/cpuinfo, or None when absent.
        with open('/proc/cpuinfo', 'r') as f:
            model = RE_CPU_MODEL.search(f.read())
            return model.group(1) if model else None

    @private
    async def cpu_info(self):
        """
        CPU info doesn't change after boot so cache the results
        """
        if self.CPU_INFO['cpu_model'] is None:
            self.CPU_INFO['cpu_model'] = await self.middleware.call('system.get_cpu_model')
        if self.CPU_INFO['core_count'] is None:
            self.CPU_INFO['core_count'] = psutil.cpu_count(logical=True)
        if self.CPU_INFO['physical_core_count'] is None:
            self.CPU_INFO['physical_core_count'] = psutil.cpu_count(logical=False)
        return self.CPU_INFO

    @private
    async def time_info(self):
        """Return uptime plus boot/current time as tz-aware UTC datetimes."""
        # CLOCK_MONOTONIC_RAW is unaffected by NTP slewing/adjustments.
        uptime_seconds = time.clock_gettime(time.CLOCK_MONOTONIC_RAW)
        current_time = time.time()

        return {
            'uptime_seconds': uptime_seconds,
            'uptime': str(timedelta(seconds=uptime_seconds)),
            'boot_time': datetime.fromtimestamp((current_time - uptime_seconds), timezone.utc),
            'datetime': datetime.fromtimestamp(current_time, timezone.utc),
        }

    @private
    @accepts()
    @returns(Str('hostname'))
    async def hostname(self):
        return socket.gethostname()

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Str('system_host_identifier'))
    def host_id(self):
        """
        Retrieve a hex string that is generated based
        on the contents of the `/etc/hostid` file. This
        is a permanent value that persists across
        reboots/upgrades and can be used as a unique
        identifier for the machine.
        """
        if self.HOST_ID is None:
            with open('/etc/hostid', 'rb') as f:
                id_ = f.read().strip()
                if id_:
                    # sha256 of the raw file, cached for the process lifetime.
                    self.HOST_ID = hashlib.sha256(id_).hexdigest()

        return self.HOST_ID

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Datetime('system_build_time'))
    async def build_time(self):
        """Retrieve build time of the system."""
        # NOTE: at time of writing, UI team is using this value
        # for the "copyright" section
        buildtime = sw_buildtime()
        return datetime.fromtimestamp(int(buildtime)) if buildtime else buildtime

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Dict(
        'system_info',
        Str('version', required=True, title='TrueNAS Version'),
        Datetime('buildtime', required=True, title='TrueNAS build time'),
        Str('hostname', required=True, title='System host name'),
        Int('physmem', required=True, title='System physical memory'),
        Str('model', required=True, title='CPU Model'),
        Int('cores', required=True, title='CPU Cores'),
        Int('physical_cores', required=True, title='CPU Physical Cores'),
        List('loadavg', required=True),
        Str('uptime', required=True),
        Float('uptime_seconds', required=True),
        Str('system_serial', required=True, null=True),
        Str('system_product', required=True, null=True),
        Str('system_product_version', required=True, null=True),
        Dict('license', additional_attrs=True, null=True),  # TODO: Fill this in please
        Datetime('boottime', required=True),
        Datetime('datetime', required=True),
        Str('timezone', required=True),
        Str('system_manufacturer', required=True, null=True),
        Bool('ecc_memory', required=True),
    ))
    async def info(self):
        """
        Returns basic system information.
        """
        time_info = await self.time_info()
        dmidecode = await self.middleware.call('system.dmidecode_info')
        cpu_info = await self.cpu_info()
        # mem_info() reads /proc synchronously; keep the event loop free.
        mem_info = await self.middleware.run_in_thread(self.mem_info)
        timezone_setting = (await self.middleware.call('datastore.config', 'system.settings'))['stg_timezone']

        return {
            'version': await self.middleware.call('system.version'),
            'buildtime': await self.build_time(),
            'hostname': await self.hostname(),
            'physmem': mem_info['physmem_size'],
            'model': cpu_info['cpu_model'],
            'cores': cpu_info['core_count'],
            'physical_cores': cpu_info['physical_core_count'],
            'loadavg': list(os.getloadavg()),
            'uptime': time_info['uptime'],
            'uptime_seconds': time_info['uptime_seconds'],
            'system_serial': dmidecode['system-serial-number'] if dmidecode['system-serial-number'] else None,
            'system_product': dmidecode['system-product-name'] if dmidecode['system-product-name'] else None,
            'system_product_version': dmidecode['system-version'] if dmidecode['system-version'] else None,
            'license': await self.middleware.call('system.license'),
            'boottime': time_info['boot_time'],
            'datetime': time_info['datetime'],
            'timezone': timezone_setting,
            'system_manufacturer': dmidecode['system-manufacturer'] if dmidecode['system-manufacturer'] else None,
            'ecc_memory': dmidecode['ecc-memory'],
        }

    @private
    def get_synced_clock_time(self):
        """
        Will return synced clock time if ntpd has synced with ntp servers
        otherwise will return none
        """
        threshold = 300.0  # seconds (Microsoft AD is 5mins, so if it's good enough for them, good enough for us)
        for ntp in filter(lambda x: x['active'], self.middleware.call_sync('system.ntpserver.peers')):
            if abs(ntp['offset']) <= threshold:
                return datetime.now(timezone.utc)
| 6,724 | Python | .py | 147 | 36.55102 | 114 | 0.620632 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,862 | dataset_details.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_details.py | import os
import pathlib
from middlewared.plugins.zfs_.utils import zvol_path_to_name, TNUserProp
from middlewared.service import Service, private
from middlewared.schema import accepts, List, returns
from middlewared.utils.mount import getmntinfo
class PoolDatasetService(Service):
class Config:
namespace = 'pool.dataset'
@accepts(roles=['DATASET_READ', 'READONLY_ADMIN'])
@returns(List(
'dataset_details',
example=[{
'id': 'tank',
'type': 'FILESYSTEM',
'name': 'tank',
'pool': 'tank',
'encrypted': False,
'encryption_root': None,
'key_loaded': False,
'children': [
{
'id': 'tank/soemthing',
'type': 'VOLUME',
'name': 'tank/soemthing',
'pool': 'tank',
'encrypted': False,
'encryption_root': None,
'key_loaded': False,
'children': [],
'managed_by': {
'value': '10.231.1.155',
'rawvalue': '10.231.1.155',
'source': 'LOCAL',
'parsed': '10.231.1.155'
},
'quota_warning': {'value': '80', 'rawvalue': '80', 'source': 'LOCAL', 'parsed': '80'},
'quota_critical': {'value': '95', 'rawvalue': '95', 'source': 'LOCAL', 'parsed': '95'},
'refquota_warning': {'value': '80', 'rawvalue': '80', 'source': 'LOCAL', 'parsed': '80'},
'refquota_critical': {'value': '95', 'rawvalue': '95', 'source': 'LOCAL', 'parsed': '95'},
'reservation': {
'parsed': None, 'rawvalue': '0', 'value': None, 'source': 'DEFAULT', 'source_info': None
},
'refreservation': {
'parsed': None, 'rawvalue': '0', 'value': None, 'source': 'DEFAULT', 'source_info': None
},
'key_format': {
'parsed': 'none', 'rawvalue': 'none', 'value': None, 'source': 'DEFAULT', 'source_info': None
},
'volsize': {
'parsed': 57344, 'rawvalue': '57344', 'value': '56K', 'source': 'LOCAL', 'source_info': None
},
'encryption_algorithm': {
'parsed': 'off', 'rawvalue': 'off', 'value': None, 'source': 'DEFAULT', 'source_info': None
},
'used': {
'parsed': 57344, 'rawvalue': '57344', 'value': '56K', 'source': 'NONE', 'source_info': None
},
'usedbychildren': {
'parsed': 0, 'rawvalue': '0', 'value': '0B', 'source': 'NONE', 'source_info': None
},
'usedbydataset': {
'parsed': 57344, 'rawvalue': '57344', 'value': '56K', 'source': 'NONE', 'source_info': None
},
'usedbysnapshots': {
'parsed': 0, 'rawvalue': '0', 'value': '0B', 'source': 'NONE', 'source_info': None
},
'available': {
'parsed': 14328811520, 'rawvalue': '14328811520',
'value': '13.3G', 'source': 'NONE', 'source_info': None
},
'mountpoint': '/mnt/tank/something',
'sync': {
'parsed': 'standard', 'rawvalue': 'standard',
'value': 'STANDARD', 'source': 'DEFAULT', 'source_info': None
},
'compression': {
'parsed': 'lz4', 'rawvalue': 'lz4',
'value': 'LZ4', 'source': 'INHERITED', 'source_info': 'tank',
},
'deduplication': {
'parsed': 'on', 'rawvalue': 'on',
'value': 'ON', 'source': 'LOCAL', 'source_info': None,
},
'user_properties': {},
'snapshot_count': 0,
'locked': False,
'thick_provisioned': True,
'nfs_shares': [{
'enabled': True,
'path': '/mnt/tank/something'
}],
'smb_shares': [{
'enabled': False,
'path': '/mnt/tank/something/smbshare01',
'share_name': 'Home Pictures',
}],
'iscsi_shares': [{
'enabled': False,
'type': 'DISK',
'path': '/mnt/tank/something',
}],
'vms': [{
'name': 'deb01',
'path': '/dev/zvol/tank/something',
}],
'apps': [{
'name': 'diskoverdata',
'path': '/mnt/tank/something'
}],
'replication_tasks_count': 0,
'snapshot_tasks_count': 0,
'cloudsync_tasks_count': 0,
'rsync_tasks_count': 0
}
],
'mountpoint': '/mnt/tank',
'quota': {'parsed': None, 'rawvalue': '0', 'value': None, 'source': 'DEFAULT', 'source_info': None},
'refquota': {'parsed': None, 'rawvalue': '0', 'value': None, 'source': 'DEFAULT', 'source_info': None},
'reservation': {'parsed': None, 'rawvalue': '0', 'value': None, 'source': 'DEFAULT', 'source_info': None},
'refreservation': {
'parsed': None, 'rawvalue': '0', 'value': None, 'source': 'DEFAULT', 'source_info': None
},
'encryption_algorithm': {
'parsed': 'off', 'rawvalue': 'off', 'value': None, 'source': 'DEFAULT', 'source_info': None
},
'origin': {
'parsed': '', 'rawvalue': '', 'value': '', 'source': 'NONE', 'source_info': None
},
'used': {
'parsed': 3874467840, 'rawvalue': '3874467840', 'value': '3.61G', 'source': 'NONE', 'source_info': None
},
'usedbychildren': {
'parsed': 3874369536, 'rawvalue': '3874369536', 'value': '3.61G', 'source': 'NONE', 'source_info': None
},
'usedbydataset': {
'parsed': 98304, 'rawvalue': '98304', 'value': '96K', 'source': 'NONE', 'source_info': None
},
'usedbysnapshots': {'parsed': 0, 'rawvalue': '0', 'value': '0B', 'source': 'NONE', 'source_info': None},
'available': {
'parsed': 14328811520, 'rawvalue': '14328811520',
'value': '13.3G', 'source': 'NONE', 'source_info': None
},
'user_properties': {},
'snapshot_count': 0,
'locked': False,
'atime': False,
'casesensitive': True,
'readonly': False,
'nfs_shares': [],
'smb_shares': [],
'iscsi_shares': [],
'vms': [],
'virt_instances': [],
'apps': [{
'name': 'plex',
'path': '/mnt/evo/data',
}],
'replication_tasks_count': 0,
'snapshot_tasks_count': 0,
'cloudsync_tasks_count': 0,
'rsync_tasks_count': 0,
}]
))
    def details(self):
        """
        Retrieve all dataset(s) details outlining any services/tasks which might be consuming the dataset(s).
        """
        # Query every dataset ordered by name, restricted to the properties
        # this endpoint actually reports (plus the snapshot count).
        options = {
            'extra': {
                'flat': True,
                'order_by': 'name',
                'properties': [
                    'used',
                    'available',
                    'usedbysnapshots',
                    'usedbydataset',
                    'usedbychildren',
                    'refquota',
                    'origin',
                    TNUserProp.REFQUOTA_CRIT.value,
                    TNUserProp.REFQUOTA_WARN.value,
                    'quota',
                    TNUserProp.QUOTA_CRIT.value,
                    TNUserProp.QUOTA_WARN.value,
                    'refreservation',
                    'reservation',
                    'mountpoint',
                    'encryption',
                    'encryptionroot',
                    'keyformat',
                    'keystatus',
                    'volsize',
                    'sync',
                    'compression',
                    'dedup',
                ],
                'snapshots_count': True,
            }
        }
        datasets = self.middleware.call_sync('pool.dataset.query', [], options)
        mnt_info = getmntinfo()
        # Gather every potential consumer (shares, tasks, vms, apps, ...) ONCE
        # up-front, then annotate each dataset tree in place against that map.
        info = self.build_details(mnt_info)
        for dataset in datasets:
            self.collapse_datasets(dataset, info, mnt_info)
        return datasets
@private
def normalize_dataset(self, dataset, info, mnt_info):
atime, case, readonly = self.get_mntinfo(dataset, mnt_info)
dataset['locked'] = dataset['locked']
dataset['atime'] = atime
dataset['casesensitive'] = case
dataset['readonly'] = readonly
dataset['thick_provisioned'] = any((dataset['reservation']['value'], dataset['refreservation']['value']))
dataset['nfs_shares'] = self.get_nfs_shares(dataset, info['nfs'])
dataset['smb_shares'] = self.get_smb_shares(dataset, info['smb'])
dataset['iscsi_shares'] = self.get_iscsi_shares(dataset, info['iscsi'])
dataset['vms'] = self.get_vms(dataset, info['vm'])
dataset['apps'] = self.get_apps(dataset, info['app'])
dataset['virt_instances'] = self.get_virt_instances(dataset, info['virt_instance'])
dataset['replication_tasks_count'] = self.get_repl_tasks_count(dataset, info['repl'])
dataset['snapshot_tasks_count'] = self.get_snapshot_tasks_count(dataset, info['snap'])
dataset['cloudsync_tasks_count'] = self.get_cloudsync_tasks_count(dataset, info['cloud'])
dataset['rsync_tasks_count'] = self.get_rsync_tasks_count(dataset, info['rsync'])
@private
def collapse_datasets(self, dataset, info, mnt_info):
self.normalize_dataset(dataset, info, mnt_info)
for child in dataset.get('children', []):
self.collapse_datasets(child, info, mnt_info)
@private
def get_mount_info(self, path, mntinfo):
mount_info = {}
try:
devid = os.stat(path).st_dev
except Exception:
# path deleted/umounted/locked etc
pass
else:
if devid in mntinfo:
mount_info = mntinfo[devid]
return mount_info
@private
def get_mntinfo(self, ds, mntinfo):
atime = case = True
readonly = False
for devid, info in filter(lambda x: x[1]['mountpoint'] == ds['mountpoint'], mntinfo.items()):
atime = not ('NOATIME' in info['mount_opts'])
readonly = 'RO' in info['mount_opts']
case = any((i for i in ('CASESENSITIVE', 'CASEMIXED') if i in info['super_opts']))
# case sensitivity is either on or off (sensitive or insensitve)
# the "mixed" property is silently ignored in our use case because it
# only applies to illumos kernel when using the in-kernel SMB server.
# if it's set to "mixed" on linux, it's treated as case sensitive.
return atime, case, readonly
    @private
    def build_details(self, mntinfo):
        """
        Build, in one pass, the mapping of every potential dataset consumer:
        iscsi/nfs/smb shares, replication/snapshot/cloudsync/rsync tasks,
        vm devices, apps and virt instance disks. Each entry that refers to a
        filesystem path also carries its `mount_info` so callers can match a
        path back to its source dataset.
        """
        results = {
            'iscsi': [], 'nfs': [], 'smb': [],
            'repl': [], 'snap': [], 'cloud': [],
            'rsync': [], 'vm': [], 'app': [],
            'virt_instance': [],
        }
        # iscsi
        t_to_e = self.middleware.call_sync('iscsi.targetextent.query')
        t = {i['id']: i for i in self.middleware.call_sync('iscsi.target.query')}
        e = {i['id']: i for i in self.middleware.call_sync('iscsi.extent.query')}
        for i in filter(lambda x: x['target'] in t and t[x['target']]['groups'] and x['extent'] in e, t_to_e):
            """
            1. make sure target's and extent's id exist in the target to extent table
            2. make sure the target has `groups` entry since, without it, it's impossible
                that it's being shared via iscsi
            """
            results['iscsi'].append({
                'extent': e[i['extent']],
                'target': t[i['target']],
                'mount_info': self.get_mount_info(e[i['extent']]['path'], mntinfo),
            })
        # nfs and smb
        for key in ('nfs', 'smb'):
            for share in self.middleware.call_sync(f'sharing.{key}.query'):
                share['mount_info'] = self.get_mount_info(share['path'], mntinfo)
                results[key].append(share)
        # replication
        options = {'prefix': 'repl_'}
        for task in self.middleware.call_sync('datastore.query', 'storage.replication', [], options):
            # replication can only be configured on a dataset so getting mount info is unnecessary
            results['repl'].append(task)
        # snapshots
        for task in self.middleware.call_sync('datastore.query', 'storage.task', [], {'prefix': 'task_'}):
            # snapshots can only be configured on a dataset so getting mount info is unnecessary
            results['snap'].append(task)
        # cloud sync
        for task in self.middleware.call_sync('datastore.query', 'tasks.cloudsync'):
            task['mount_info'] = self.get_mount_info(task['path'], mntinfo)
            results['cloud'].append(task)
        # rsync
        for task in self.middleware.call_sync('rsynctask.query'):
            task['mount_info'] = self.get_mount_info(task['path'], mntinfo)
            results['rsync'].append(task)
        # vm
        for vm in self.middleware.call_sync('datastore.query', 'vm.device', [['dtype', 'in', ['RAW', 'DISK']]]):
            if vm['dtype'] == 'DISK':
                # disk type is always a zvol
                vm['zvol'] = zvol_path_to_name(vm['attributes']['path'])
            else:
                # raw type is always a file
                vm['mount_info'] = self.get_mount_info(vm['attributes']['path'], mntinfo)
            results['vm'].append(vm)
        # apps: only host-path volumes under /mnt that are not internal ix datasets
        for app in self.middleware.call_sync('app.query'):
            for path_config in filter(
                lambda p: p.get('source', '').startswith('/mnt/') and not p['source'].startswith('/mnt/.ix-'),
                app['active_workloads']['volumes']
            ):
                results['app'].append({
                    'name': app['name'],
                    'path': path_config['source'],
                    'mount_info': self.get_mount_info(path_config['source'], mntinfo),
                })
        # virt instance
        for instance in self.middleware.call_sync('virt.instance.query'):
            for device in self.middleware.call_sync('virt.instance.device_list', instance['id']):
                if device['dev_type'] != 'DISK':
                    continue
                if not device['source']:
                    continue
                device['instance'] = instance['id']
                if device['source'].startswith('/dev/zvol/'):
                    # disk type is always a zvol
                    device['zvol'] = zvol_path_to_name(device['source'])
                else:
                    # raw type is always a file
                    device['mount_info'] = self.get_mount_info(device['source'], mntinfo)
                results['virt_instance'].append(device)
        return results
@private
def get_nfs_shares(self, ds, nfsshares):
nfs_shares = []
for share in nfsshares:
if share['path'] == ds['mountpoint'] or share['mount_info'].get('mount_source') == ds['id']:
nfs_shares.append({'enabled': share['enabled'], 'path': share['path']})
return nfs_shares
@private
def get_smb_shares(self, ds, smbshares):
smb_shares = []
for share in smbshares:
if share['path'] == ds['mountpoint'] or share['mount_info'].get('mount_source') == ds['id']:
smb_shares.append({
'enabled': share['enabled'],
'path': share['path'],
'share_name': share['name']
})
return smb_shares
@private
def get_iscsi_shares(self, ds, iscsishares):
iscsi_shares = []
for share in iscsishares:
if share['extent']['type'] == 'DISK' and share['extent']['path'].removeprefix('zvol/') == ds['id']:
# we store extent information prefixed with `zvol/` (i.e. zvol/tank/zvol01).
iscsi_shares.append({
'enabled': share['extent']['enabled'],
'type': 'DISK',
'path': f'/dev/{share["extent"]["path"]}',
})
elif share['extent']['type'] == 'FILE' and share['mount_info'].get('mount_source') == ds['id']:
# this isn't common but possible, you can share a "file"
# via iscsi which means it's not a dataset but a file inside
# a dataset so we need to find the source dataset for the file
iscsi_shares.append({
'enabled': share['extent']['enabled'],
'type': 'FILE',
'path': share['extent']['path'],
})
return iscsi_shares
@private
def get_repl_tasks_count(self, ds, repltasks):
count = 0
for repl in filter(lambda x: x['direction'] == 'PUSH', repltasks):
# we only care about replication tasks that are configured to push
for src_ds in filter(lambda x: x == ds['id'], repl['source_datasets']):
count += 1
return count
@private
def get_snapshot_tasks_count(self, ds, snaptasks):
return len([i for i in snaptasks if i['dataset'] == ds['id']])
@private
def get_cloudsync_tasks_count(self, ds, cldtasks):
return self._get_push_tasks_count(ds, cldtasks)
@private
def get_rsync_tasks_count(self, ds, rsynctasks):
return self._get_push_tasks_count(ds, rsynctasks)
def _get_push_tasks_count(self, ds, tasks):
count = 0
if ds['mountpoint']:
for i in filter(lambda x: x['direction'] == 'PUSH', tasks):
# we only care about cloud sync tasks that are configured to push
if pathlib.Path(ds['mountpoint']).is_relative_to(i['path']):
count += 1
return count
@private
def get_vms(self, ds, _vms):
vms = []
for i in _vms:
if (
'zvol' in i and i['zvol'] == ds['id'] or
i['attributes']['path'] == ds['mountpoint'] or
i.get('mount_info', {}).get('mount_source') == ds['id']
):
vms.append({'name': i['vm']['name'], 'path': i['attributes']['path']})
return vms
@private
def get_virt_instances(self, ds, _instances):
instances = []
for i in _instances:
if (
'zvol' in i and i['zvol'] == ds['id'] or
i['source'] == ds['mountpoint'] or
i.get('mount_info', {}).get('mount_source') == ds['id']
):
instances.append({'name': i['instance'], 'path': i['source']})
return instances
@private
def get_apps(self, ds, _apps):
apps = []
for app in _apps:
if app['path'] == ds['mountpoint'] or app['mount_info'].get('mount_source') == ds['id']:
apps.append({'name': app['name'], 'path': app['path']})
return apps
| 20,097 | Python | .py | 427 | 32.344262 | 119 | 0.482957 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,863 | replace_disk.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/replace_disk.py | import errno
from middlewared.schema import accepts, Bool, Dict, Int, returns, Str
from middlewared.service import item_method, job, Service, ValidationErrors
from middlewared.service_exception import MatchNotFound
def find_disk_from_identifier(disks, ident):
    """
    Return the first disk info dict whose 'identifier' equals `ident`.

    `disks` maps device names to lists of disk info dicts (as returned by
    `disk.details`). Returns None when no disk matches.
    """
    for infos in disks.values():
        for info in infos:
            if info['identifier'] == ident:
                return info
    return None
class PoolService(Service):
    @item_method
    @accepts(Int('id'), Dict(
        'options',
        Str('label', required=True),
        Str('disk', required=True),
        Bool('force', default=False),
        Bool('preserve_settings', default=True),
        Bool('preserve_description', default=True),
    ))
    @returns(Bool('replaced_successfully'))
    @job(lock='pool_replace')
    async def replace(self, job, oid, options):
        """
        Replace a disk on a pool.
        `label` is the ZFS guid or a device name
        `disk` is the identifier of a disk
        If `preserve_settings` is true, then settings (power management, S.M.A.R.T., etc.) of a disk being replaced
        will be applied to a new disk.
        .. examples(websocket)::
          Replace missing ZFS device with disk {serial}FOO.
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.replace",
                "params": [1, {
                    "label": "80802394992848654",
                    "disk": "{serial}FOO"
                }]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        disk = find_disk_from_identifier(await self.middleware.call('disk.details'), options['disk'])
        if disk is None:
            verrors.add('options.disk', f'Disk {options["disk"]!r} not found.', errno.ENOENT)
        # bail out now: every check below dereferences `disk`
        verrors.check()
        if disk['imported_zpool'] is not None:
            # disk belongs to a currently imported pool: refuse outright
            # (force does not override this)
            verrors.add(
                'options.disk',
                f'Disk {options["disk"]!r} is in use by zpool {disk["imported_zpool"]!r}.',
                errno.EBUSY
            )
        elif not options['force']:
            # `force` is required to overwrite a disk associated with an
            # exported pool or one that still carries partitions
            msg = ' Force must be specified.'
            if disk['exported_zpool'] is not None:
                verrors.add(
                    'options.force',
                    f'Disk {options["disk"]!r} is associated to exported zpool {disk["exported_zpool"]!r}.{msg}'
                )
            elif not await self.middleware.call('disk.check_clean', disk['devname']):
                verrors.add(
                    'options.force',
                    f'Disk {options["disk"]!r} is not clean, partitions were found.{msg}'
                )
        if not await self.middleware.call(
            'pool.find_disk_from_topology', options['label'], pool, {'include_siblings': True}
        ):
            verrors.add('options.label', f'Label {options["label"]} not found.', errno.ENOENT)
        verrors.check()
        vdev = []
        # pool.format_disks partitions the new disk and appends the resulting
        # /dev path(s) to `vdev`
        await self.middleware.call('pool.format_disks', job, {
            disk['devname']: {
                'vdev': vdev,
                'size': None,  # pool.format_disks checks size of disk
            },
        }, 0, 25)
        # NOTE: the previous `try/except Exception: raise` wrapper around the
        # replace call was a no-op and has been removed.
        job.set_progress(30, 'Replacing disk')
        new_devname = vdev[0].replace('/dev/', '')
        await self.middleware.call('zfs.pool.replace', pool['name'], options['label'], new_devname)
        if options['preserve_settings']:
            try:
                old_disk = await self.middleware.call(
                    'disk.query',
                    [['zfs_guid', '=', options['label']]],
                    {'extra': {'include_expired': True}, 'get': True},
                )
                job.set_progress(98, 'Copying old disk settings to new')
                await self.middleware.call('disk.copy_settings', old_disk, disk, options['preserve_settings'],
                                           options['preserve_description'])
            except MatchNotFound:
                # the replaced device was never a tracked disk; nothing to copy
                pass
        return True
| 4,152 | Python | .py | 95 | 31.357895 | 115 | 0.544307 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,864 | unlock.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/unlock.py | from middlewared.plugins.zfs_.utils import zvol_name_to_path
from middlewared.schema import Dict, returns, Str
from middlewared.service import accepts, private, Service
from .utils import dataset_mountpoint
class PoolDatasetService(Service):
    class Config:
        namespace = 'pool.dataset'
    @accepts(Str('dataset'), roles=['DATASET_READ'])
    @returns(Dict('services_to_restart', additional_attrs=True))
    async def unlock_services_restart_choices(self, dataset):
        """
        Get a mapping of services identifiers and labels that can be restart on dataset unlock.
        """
        dataset_instance = await self.middleware.call('pool.dataset.get_instance_quick', dataset)
        services = {
            'cifs': 'SMB',
            'ftp': 'FTP',
            'iscsitarget': 'iSCSI',
            'nfs': 'NFS',
        }
        result = {}
        # offer any of the known services that is running or enabled
        for k, v in services.items():
            if await self.middleware.call('service.started_or_enabled', k):
                result[k] = v
        # also offer services that have attachments on this dataset, even if
        # not currently started/enabled
        result.update({
            k: services[k] for k in map(
                lambda a: a['service'], await self.middleware.call('pool.dataset.attachments', dataset)
            ) if k in services
        })
        if await self.middleware.call('pool.dataset.unlock_restarted_vms', dataset_instance):
            result['vms'] = 'Virtual Machines'
        return result
    @private
    async def unlock_restarted_vms(self, dataset):
        """
        Return autostart VMs that have a DISK/RAW device backed by `dataset`:
        a file under the dataset's mountpoint, or a zvol that is (or lives
        under) the dataset.
        """
        result = []
        for vm in await self.middleware.call('vm.query', [('autostart', '=', True)]):
            for device in vm['devices']:
                if device['dtype'] not in ('DISK', 'RAW'):
                    continue
                path = device['attributes'].get('path')
                if not path:
                    continue
                unlock = False
                if dataset['type'] == 'FILESYSTEM' and (mountpoint := dataset_mountpoint(dataset)):
                    # file under the mountpoint, or a zvol that is a child of
                    # this filesystem dataset
                    unlock = path.startswith(mountpoint + '/') or path.startswith(
                        zvol_name_to_path(dataset['name']) + '/'
                    )
                elif dataset['type'] == 'VOLUME' and zvol_name_to_path(dataset['name']) == path:
                    unlock = True
                if unlock:
                    # one matching device is enough; move on to the next VM
                    result.append(vm)
                    break
        return result
    @private
    async def restart_vms_after_unlock(self, dataset):
        """Stop (if running) and start every VM affected by unlocking `dataset`."""
        for vm in await self.middleware.call('pool.dataset.unlock_restarted_vms', dataset):
            if (await self.middleware.call('vm.status', vm['id']))['state'] == 'RUNNING':
                stop_job = await self.middleware.call('vm.stop', vm['id'])
                await stop_job.wait()
                if stop_job.error:
                    self.logger.error('Failed to stop %r VM: %s', vm['name'], stop_job.error)
            # best-effort start: log and continue with the remaining VMs
            try:
                await self.middleware.call('vm.start', vm['id'])
            except Exception:
                self.logger.error('Failed to start %r VM after %r unlock', vm['name'], dataset['name'], exc_info=True)
| 3,073 | Python | .py | 65 | 34.969231 | 118 | 0.564985 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,865 | dataset_recordsize.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_recordsize.py | from middlewared.schema import accepts, returns, List, Str
from middlewared.service import Service
class PoolDatasetService(Service):
    class Config:
        namespace = 'pool.dataset'
    # https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Module%20Parameters.html#zfs-max-recordsize
    # Maximum supported (at time of writing) is 16MB.
    # Each entry is (size_in_bytes, human_label); '512' and '512B' are two
    # accepted spellings of the same 512-byte recordsize, hence the duplicate
    # 1 << 9 key.
    MAPPING = [
        (1 << 9, '512'),
        (1 << 9, '512B'),
        (1 << 10, '1K'),
        (1 << 11, '2K'),
        (1 << 12, '4K'),
        (1 << 13, '8K'),
        (1 << 14, '16K'),
        (1 << 15, '32K'),
        (1 << 16, '64K'),
        (1 << 17, '128K'),
        (1 << 18, '256K'),
        (1 << 19, '512K'),
        (1 << 20, '1M'),
        (1 << 21, '2M'),
        (1 << 22, '4M'),
        (1 << 23, '8M'),
        (1 << 24, '16M'),
    ]
    @accepts(Str('pool_name', default=None, null=True), roles=['DATASET_READ'])
    @returns(List(items=[Str('recordsize_value')]))
    def recordsize_choices(self, pool_name):
        """
        Retrieve recordsize choices for datasets.
        """
        minimum_recordsize = self.MAPPING[0][0]
        if pool_name and self.middleware.call_sync('pool.is_draid_pool', pool_name):
            minimum_recordsize = 1 << 17  # We want minimum of 128k for dRAID pools
        # the upper bound is the zfs module's currently configured maximum
        with open('/sys/module/zfs/parameters/zfs_max_recordsize') as f:
            val = int(f.read().strip())
        return [v for k, v in self.MAPPING if minimum_recordsize <= k <= val]
| 1,488 | Python | .py | 38 | 31.052632 | 115 | 0.543629 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,866 | attach_disk.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/attach_disk.py | import asyncio
from middlewared.schema import accepts, Bool, Dict, Int, returns, Str
from middlewared.service import job, Service, ValidationErrors
class PoolService(Service):
    @accepts(
        Int('oid'),
        Dict(
            'pool_attach',
            Str('target_vdev', required=True),
            Str('new_disk', required=True),
            Bool('allow_duplicate_serials', default=False),
        )
    )
    @returns()
    @job(lock=lambda args: f'pool_attach_{args[0]}')
    async def attach(self, job, oid, options):
        """
        `target_vdev` is the GUID of the vdev where the disk needs to be attached. In case of STRIPED vdev, this
        is the STRIPED disk GUID which will be converted to mirror. If `target_vdev` is mirror, it will be converted
        into a n-way mirror.
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        topology = pool['topology']
        topology_type = vdev = None
        # locate the target vdev across all topology groups; the for/else
        # below only fires when the outer loop finishes without a match
        for i in topology:
            for v in topology[i]:
                if v['guid'] == options['target_vdev']:
                    topology_type = i
                    vdev = v
                    break
            if topology_type:
                break
        else:
            verrors.add('pool_attach.target_vdev', 'Unable to locate VDEV')
        verrors.check()
        if topology_type in ('cache', 'spares'):
            verrors.add('pool_attach.target_vdev', f'Attaching disks to {topology_type} not allowed.')
        elif topology_type == 'data':
            # We would like to make sure here that we don't have inconsistent vdev types across data
            if vdev['type'] not in ('DISK', 'MIRROR', 'RAIDZ1', 'RAIDZ2', 'RAIDZ3'):
                verrors.add('pool_attach.target_vdev', f'Attaching disk to {vdev["type"]} vdev is not allowed.')
        # Let's validate new disk now
        verrors.add_child(
            'pool_attach',
            await self.middleware.call('disk.check_disks_availability', [options['new_disk']],
                                       options['allow_duplicate_serials']),
        )
        verrors.check()
        job.set_progress(3, 'Completed validation')
        # for mirrors attach to the first child; otherwise attach to the vdev itself
        guid = vdev['guid'] if vdev['type'] in ['DISK', 'RAIDZ1', 'RAIDZ2', 'RAIDZ3'] else vdev['children'][0]['guid']
        disks = {options['new_disk']: {'vdev': []}}
        job.set_progress(5, 'Formatting disks')
        await self.middleware.call('pool.format_disks', job, disks, 5, 20)
        job.set_progress(22, 'Extending pool')
        # format_disks filled in the new partition's /dev path
        devname = disks[options['new_disk']]['vdev'][0]
        extend_job = await self.middleware.call('zfs.pool.extend', pool['name'], None, [
            {'target': guid, 'type': 'DISK', 'path': devname}
        ])
        await extend_job.wait(raise_error=True)
        if vdev['type'] in ('RAIDZ1', 'RAIDZ2', 'RAIDZ3'):
            # attaching to a raidz vdev triggers raidz expansion; poll its
            # state and mirror the progress onto this job until it completes
            while True:
                expand = await self.middleware.call('zfs.pool.expand_state', pool['name'])
                if expand['state'] is None:
                    job.set_progress(25, 'Waiting for expansion to start')
                    await asyncio.sleep(1)
                    continue
                if expand['state'] == 'FINISHED':
                    job.set_progress(100, '')
                    break
                if expand['waiting_for_resilver']:
                    message = 'Paused for resilver or clear'
                else:
                    message = 'Expanding'
                # clamp progress to the 25..95 window reserved for expansion
                job.set_progress(max(min(expand['percentage'], 95), 25), message)
                await asyncio.sleep(10 if expand['total_secs_left'] > 60 else 1)
| 3,657 | Python | .py | 76 | 35.802632 | 118 | 0.560258 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,867 | dataset_quota.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_quota.py | from middlewared.schema import accepts, Dict, Int, List, Ref, returns, Str
from middlewared.service import item_method, Service, ValidationErrors
from middlewared.utils import filter_list
class PoolDatasetService(Service):
    class Config:
        namespace = 'pool.dataset'
    # Full behavior of the quota types is documented in the method docstring below.
    @accepts(
        Str('ds', required=True),
        Str('quota_type', enum=['USER', 'GROUP', 'DATASET', 'PROJECT']),
        Ref('query-filters'),
        Ref('query-options'),
        roles=['DATASET_READ']
    )
    @item_method
    async def get_quota(self, ds, quota_type, filters, options):
        """
        Return a list of the specified `quota_type` of quotas on the ZFS dataset `ds`.
        Support `query-filters` and `query-options`. used_bytes may not instantly
        update as space is used.
        When quota_type is not DATASET, each quota entry has these fields:
        `id` - the uid or gid to which the quota applies.
        `name` - the user or group name to which the quota applies. Value is
        null if the id in the quota cannot be resolved to a user or group. This
        indicates that the user or group does not exist on the server.
        `quota` - the quota size in bytes. Absent if no quota is set.
        `used_bytes` - the amount of bytes the user has written to the dataset.
        A value of zero means unlimited.
        `obj_quota` - the number of objects that may be owned by `id`.
        A value of zero means unlimited. Absent if no objquota is set.
        `obj_used` - the number of objects currently owned by `id`.
        Note: SMB client requests to set a quota granting no space will result
        in an on-disk quota of 1 KiB.
        """
        # resolve to the canonical dataset name before asking zfs
        dataset = (await self.middleware.call('pool.dataset.get_instance_quick', ds))['name']
        quota_list = await self.middleware.call(
            'zfs.dataset.get_quota', dataset, quota_type.lower()
        )
        return filter_list(quota_list, filters, options)
@accepts(
Str('ds', required=True),
List('quotas', items=[
Dict(
'quota_entry',
Str('quota_type',
enum=['DATASET', 'USER', 'USEROBJ', 'GROUP', 'GROUPOBJ'],
required=True),
Str('id', required=True),
Int('quota_value', required=True, null=True),
)
], default=[{
'quota_type': 'USER',
'id': '0',
'quota_value': 0
}]),
roles=['DATASET_WRITE']
)
@returns()
@item_method
async def set_quota(self, ds, data):
"""
There are three over-arching types of quotas for ZFS datasets.
1) dataset quotas and refquotas. If a DATASET quota type is specified in
this API call, then the API acts as a wrapper for `pool.dataset.update`.
2) User and group quotas. These limit the amount of disk space consumed
by files that are owned by the specified users or groups. If the respective
"object quota" type is specfied, then the quota limits the number of objects
that may be owned by the specified user or group.
3) Project quotas. These limit the amount of disk space consumed by files
that are owned by the specified project. Project quotas are not yet implemended.
This API allows users to set multiple quotas simultaneously by submitting a
list of quotas. The list may contain all supported quota types.
`ds` the name of the target ZFS dataset.
`quotas` specifies a list of `quota_entry` entries to apply to dataset.
`quota_entry` entries have these required parameters:
`quota_type`: specifies the type of quota to apply to the dataset. Possible
values are USER, USEROBJ, GROUP, GROUPOBJ, and DATASET. USEROBJ and GROUPOBJ
quotas limit the number of objects consumed by the specified user or group.
`id`: the uid, gid, or name to which the quota applies. If quota_type is
'DATASET', then `id` must be either `QUOTA` or `REFQUOTA`.
`quota_value`: the quota size in bytes. Setting a value of `0` removes
the user or group quota.
"""
MAX_QUOTAS = 100
verrors = ValidationErrors()
if len(data) > MAX_QUOTAS:
# no reason to continue
raise ValidationErrors(
'quotas',
f'The number of user or group quotas that can be set in single API call is limited to {MAX_QUOTAS}.'
)
quotas = []
ignore = ('PROJECT', 'PROJECTOBJ') # TODO: not implemented
for i, q in filter(lambda x: x[1]['quota_type'] not in ignore, enumerate(data)):
quota_type = q['quota_type'].lower()
if q['quota_type'] == 'DATASET':
if q['id'] not in ('QUOTA', 'REFQUOTA'):
verrors.add(f'quotas.{i}.id', 'id for quota_type DATASET must be either "QUOTA" or "REFQUOTA"')
else:
xid = q['id'].lower()
if any((i.get(xid, False) for i in quotas)):
verrors.add(
f'quotas.{i}.id',
f'Setting multiple values for {xid} for quota_type DATASET is not permitted'
)
else:
if not q['quota_value']:
q['quota_value'] = 'none'
xid = None
id_type = 'user' if quota_type.startswith('user') else 'group'
if not q['id'].isdigit():
try:
xid_obj = await self.middleware.call(f'{id_type}.get_{id_type}_obj',
{f'{id_type}name': q['id']})
xid = xid_obj['pw_uid'] if id_type == 'user' else xid_obj['gr_gid']
except Exception:
self.logger.debug('Failed to convert %s [%s] to id.', id_type, q['id'], exc_info=True)
verrors.add(f'quotas.{i}.id', f'{quota_type} {q["id"]} is not valid.')
else:
xid = int(q['id'])
if xid == 0:
verrors.add(
f'quotas.{i}.id', f'Setting {quota_type} quota on {id_type[0]}id [{xid}] is not permitted'
)
quotas.append({xid: q})
verrors.check()
if quotas:
await self.middleware.call('zfs.dataset.set_quota', ds, quotas)
| 6,564 | Python | .py | 128 | 38.71875 | 116 | 0.573412 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,868 | pool.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/pool.py | import errno
import os
import middlewared.sqlalchemy as sa
from fenced.fence import ExitCode as FencedExitCodes
from middlewared.plugins.boot import BOOT_POOL_NAME_VALID
from middlewared.plugins.zfs_.validation_utils import validate_pool_name
from middlewared.schema import Bool, Dict, Int, List, Patch, Ref, Str
from middlewared.service import accepts, CallError, CRUDService, job, private, returns, ValidationErrors
from middlewared.service_exception import InstanceNotFound
from middlewared.utils.size import format_size
from middlewared.validators import Range
from .utils import (
ZFS_CHECKSUM_CHOICES, ZFS_ENCRYPTION_ALGORITHM_CHOICES, ZPOOL_CACHE_FILE, RE_DRAID_DATA_DISKS, RE_DRAID_SPARE_DISKS
)
class PoolModel(sa.Model):
    # Only the pool's name and guid are persisted in the database; runtime
    # state (status, topology, etc.) is filled in live via the service's
    # datastore_extend hook.
    __tablename__ = 'storage_volume'
    id = sa.Column(sa.Integer(), primary_key=True)
    vol_name = sa.Column(sa.String(120), unique=True)
    vol_guid = sa.Column(sa.String(50))
class PoolService(CRUDService):
    """CRUD service for ZFS storage pools backed by the `storage.volume` datastore."""

    # Response schema for pool entries returned by query/get_instance.
    # Most live-state fields are null when the pool is not imported
    # (see `pool.pool_normalize_info` for the defaults).
    ENTRY = Dict(
        'pool_entry',
        Int('id', required=True),
        Str('name', required=True),
        Str('guid', required=True),
        Str('status', required=True),
        Str('path', required=True),
        Dict(
            'scan',
            additional_attrs=True,
            required=True,
            null=True,
            example={
                'function': None,
                'state': None,
                'start_time': None,
                'end_time': None,
                'percentage': None,
                'bytes_to_process': None,
                'bytes_processed': None,
                'bytes_issued': None,
                'pause': None,
                'errors': None,
                'total_secs_left': None,
            }
        ),
        Dict(
            'expand',
            additional_attrs=True,
            required=True,
            null=True,
            example={
                'state': 'FINISHED',
                'expanding_vdev': 0,
                'start_time': None,
                'end_time': None,
                'bytes_to_reflow': 835584,
                'bytes_reflowed': 978944,
                'waiting_for_resilver': 0,
                'total_secs_left': None,
                'percentage': 85.35564853556485,
            },
        ),
        Bool('is_upgraded'),
        Bool('healthy', required=True),
        Bool('warning', required=True),
        Str('status_code', required=True, null=True),
        Str('status_detail', required=True, null=True),
        Int('size', required=True, null=True),
        Int('allocated', required=True, null=True),
        Int('free', required=True, null=True),
        Int('freeing', required=True, null=True),
        Str('fragmentation', required=True, null=True),
        # *_str fields carry the raw (unparsed) zpool property values.
        Str('size_str', required=True, null=True),
        Str('allocated_str', required=True, null=True),
        Str('free_str', required=True, null=True),
        Str('freeing_str', required=True, null=True),
        Dict(
            'autotrim',
            required=True,
            additional_attrs=True,
            example={
                'parsed': 'off',
                'rawvalue': 'off',
                'source': 'DEFAULT',
                'value': 'off',
            }
        ),
        Dict(
            'topology',
            List('data', required=True),
            List('log', required=True),
            List('cache', required=True),
            List('spare', required=True),
            List('special', required=True),
            List('dedup', required=True),
            required=True,
            null=True,
        )
    )

    class Config:
        datastore = 'storage.volume'
        datastore_extend = 'pool.pool_extend'
        datastore_extend_context = 'pool.pool_extend_context'
        datastore_prefix = 'vol_'
        event_send = False
        cli_namespace = 'storage.pool'
@accepts(Str('name'))
@returns(Ref('pool_entry'))
async def get_instance_by_name(self, name):
"""
Returns pool with name `name`. If `name` is not found, Validation error is raised.
"""
pool = await self.query([['name', '=', name]])
if not pool:
raise InstanceNotFound(f'Pool {name} does not exist')
return pool[0]
    @private
    @accepts(Str('pool_name'))
    @returns(Patch('pool_entry', 'pool_normalize', ('rm', {'name': 'id'}), ('rm', {'name': 'guid'})))
    async def pool_normalize_info(self, pool_name):
        """
        Returns the current state of 'pool_name' including all vdevs, properties and datasets.

        Common method for `pool.pool_extend` and `boot.get_state` returning a uniform
        data structure for its consumers.
        """
        # Defaults describe a pool that exists only in the database, i.e. is not
        # currently imported: status OFFLINE and all live-state fields null.
        rv = {
            'name': pool_name,
            # Boot pools are mounted at '/', everything else under /mnt/<name>.
            'path': '/' if pool_name in BOOT_POOL_NAME_VALID else f'/mnt/{pool_name}',
            'status': 'OFFLINE',
            'scan': None,
            'expand': None,
            'topology': None,
            'healthy': False,
            'warning': False,
            'status_code': None,
            'status_detail': None,
            'size': None,
            'allocated': None,
            'free': None,
            'freeing': None,
            'fragmentation': None,
            'size_str': None,
            'allocated_str': None,
            'free_str': None,
            'freeing_str': None,
            'autotrim': {
                'parsed': 'off',
                'rawvalue': 'off',
                'source': 'DEFAULT',
                'value': 'off'
            },
        }
        # Only imported pools are visible to zfs.pool.query; otherwise keep defaults.
        if info := await self.middleware.call('zfs.pool.query', [('name', '=', pool_name)]):
            info = info[0]
            # `zpool.c` uses `zpool_get_state_str` to print pool status.
            # This function return value is exposed as `health` property.
            # `SUSPENDED` is the only differing status at the moment.
            status = info['status']
            if info['properties']['health']['value'] == 'SUSPENDED':
                status = 'SUSPENDED'
            rv.update({
                'status': status,
                'scan': info['scan'],
                'expand': info['expand'],
                'topology': await self.middleware.call('pool.transform_topology', info['groups']),
                'healthy': info['healthy'],
                'warning': info['warning'],
                'status_code': info['status_code'],
                'status_detail': info['status_detail'],
                'size': info['properties']['size']['parsed'],
                'allocated': info['properties']['allocated']['parsed'],
                'free': info['properties']['free']['parsed'],
                'freeing': info['properties']['freeing']['parsed'],
                'fragmentation': info['properties']['fragmentation']['parsed'],
                'size_str': info['properties']['size']['rawvalue'],
                'allocated_str': info['properties']['allocated']['rawvalue'],
                'free_str': info['properties']['free']['rawvalue'],
                'freeing_str': info['properties']['freeing']['rawvalue'],
                'autotrim': info['properties']['autotrim'],
            })
        return rv
@private
def pool_extend_context(self, rows, extra):
return {
"extra": extra,
}
@private
def pool_extend(self, pool, context):
if context['extra'].get('is_upgraded'):
pool['is_upgraded'] = self.middleware.call_sync('pool.is_upgraded_by_name', pool['name'])
# WebUI expects the same data as in `boot.get_state`
pool |= self.middleware.call_sync('pool.pool_normalize_info', pool['name'])
return pool
    async def __convert_topology_to_vdevs(self, topology):
        # Gather all disks transversing the topology so we can
        # format all disks in one pass, allowing it to be performed
        # in parallel if we wish to do so.
        #
        # Returns (disks, vdevs). Note the deliberate aliasing: each
        # disks[disk]['vdev'] list is THE SAME object as the owning vdev's
        # 'devices' list, so appending device names to a disk's list (done by
        # the disk-formatting step — presumably pool.format_disks; confirm)
        # populates the vdev layout in place.
        disks = {}
        vdevs = []
        for i in ('data', 'cache', 'log', 'special', 'dedup'):
            t_vdevs = topology.get(i)
            if not t_vdevs:
                continue
            for t_vdev in t_vdevs:
                vdev_devs_list = []
                vdev = {
                    'root': i.upper(),
                    'type': t_vdev['type'],
                    'devices': vdev_devs_list,
                }
                if t_vdev['type'].startswith('DRAID'):
                    # dRAID vdevs carry their data/spare split through to zpool create.
                    vdev['draid_data_disks'] = t_vdev['draid_data_disks']
                    vdev['draid_spare_disks'] = t_vdev['draid_spare_disks']
                vdevs.append(vdev)
                for disk in t_vdev['disks']:
                    disks[disk] = {'vdev': vdev_devs_list}
        if topology.get('spares'):
            # Hot spares are modelled as one STRIPE vdev under root SPARE.
            vdev_devs_list = []
            vdevs.append({
                'root': 'SPARE',
                'type': 'STRIPE',
                'devices': vdev_devs_list,
            })
            for disk in topology['spares']:
                disks[disk] = {'vdev': vdev_devs_list}
        return disks, vdevs
@private
async def restart_services(self):
# regenerate crontab because of scrub
await self.middleware.call('service.restart', 'cron')
    async def _process_topology(self, schema_name, data, old=None):
        """
        Validate `data['topology']` and translate it into the (disks, vdevs)
        structures consumed by pool creation/extension.

        `old` is the existing pool entry when extending (pool.do_update);
        None when creating. Raises ValidationErrors on any failed check.
        """
        verrors = ValidationErrors()
        # Pass 1: structural validation (vdev types, member counts, dRAID rules).
        verrors.add_child(
            schema_name,
            await self._validate_topology(data, old),
        )
        verrors.check()
        disks, vdevs = await self.__convert_topology_to_vdevs(data['topology'])
        # Pass 2: make sure none of the requested disks are already in use.
        verrors.add_child(
            schema_name,
            await self.middleware.call('disk.check_disks_availability', list(disks),
                                       data['allow_duplicate_serials']),
        )
        verrors.check()

        disks_cache = dict(map(lambda x: (x['devname'], x), await self.middleware.call('disk.query')))
        # Smallest data disk across the new vdevs plus (when extending) the
        # existing pool's DISK-type data members; spares must be at least this big.
        # NOTE(review): `min()` raises ValueError if no candidate disk is found in
        # disks_cache — presumably prevented by the earlier availability check; confirm.
        min_data_size = min([
            disks_cache[disk]['size']
            for disk in (
                sum([vdev['disks'] for vdev in data['topology'].get('data', [])], []) +
                (
                    [
                        device['disk']
                        for device in await self.middleware.call(
                            'pool.flatten_topology',
                            {'data': old['topology']['data']},
                        )
                        if device['type'] == 'DISK'
                    ]
                    if old else []
                )
            )
            if disk in disks_cache
        ])
        for spare_disk in data['topology'].get('spares') or []:
            spare_size = disks_cache[spare_disk]['size']
            if spare_size < min_data_size:
                verrors.add(
                    f'{schema_name}.topology',
                    f'Spare {spare_disk} ({format_size(spare_size)}) is smaller than the smallest data disk '
                    f'({format_size(min_data_size)})'
                )
        verrors.check()

        return disks, vdevs
async def _validate_topology(self, data, old=None):
verrors = ValidationErrors()
def disk_to_stripe(topology_type):
"""
We need to convert the original topology to use STRIPE
instead of DISK to match the user input data
"""
rv = []
spare = None
for i in old['topology'][topology_type]:
if i['type'] == 'DISK':
if spare is None:
spare = {
'type': 'STRIPE',
'disks': [i['path']],
}
rv.append(spare)
else:
spare['disks'].append(i['path'])
else:
entry = {
'type': i['type'],
'disks': [j['type'] for j in i['children']],
}
if i['type'] == 'DRAID':
# This needs to happen because type here says draid only and we need to
# normalize it so that it reflects the parity as well i.e DRAID1, DRAID2, etc.
# sample value of name here is: draid1:1d:2c:0s-0
entry['type'] = f'{i["type"]}{i["name"][len("draid"):len("draid") + 1]}'
entry['draid_spare_disks'] = int(RE_DRAID_SPARE_DISKS.findall(i['name'])[0][1:-1])
entry['draid_data_disks'] = int(RE_DRAID_DATA_DISKS.findall(i['name'])[0][1:-1])
rv.append(entry)
return rv
for topology_type in ('data', 'special', 'dedup'):
lastdatatype = None
topology_data = list(data['topology'].get(topology_type) or [])
if old:
topology_data += disk_to_stripe(topology_type)
for i, vdev in enumerate(topology_data):
numdisks = len(vdev['disks'])
minmap = {
'STRIPE': 1,
'MIRROR': 2,
'DRAID1': 2,
'DRAID2': 3,
'DRAID3': 4,
'RAIDZ1': 3,
'RAIDZ2': 4,
'RAIDZ3': 5,
}
mindisks = minmap[vdev['type']]
if numdisks < mindisks:
verrors.add(
f'topology.{topology_type}.{i}.disks',
f'You need at least {mindisks} disk(s) for this vdev type.',
)
if vdev['type'].startswith('DRAID'):
vdev.update({
'draid_data_disks': vdev.get('draid_data_disks'),
'draid_spare_disks': vdev.get('draid_spare_disks', 0),
})
nparity = int(vdev['type'][-1:])
verrors.extend(await self.middleware.call(
'zfs.pool.validate_draid_configuration', f'{topology_type}.{i}', numdisks, nparity, vdev
))
if data['topology'].get('spare'):
verrors.add(
'topology.spare',
'Dedicated spare disks should not be used with dRAID.'
)
else:
for k in ('draid_data_disks', 'draid_spare_disks'):
if k in vdev:
verrors.add(
f'topology.{topology_type}.{i}.{k}',
'This property is only valid with dRAID vdevs.',
)
if lastdatatype and lastdatatype != vdev['type']:
verrors.add(
f'topology.{topology_type}.{i}.type',
f'You are not allowed to create a pool with different {topology_type} vdev types '
f'({lastdatatype} and {vdev["type"]}).',
)
lastdatatype = vdev['type']
for i in ('cache', 'log', 'spare'):
value = data['topology'].get(i)
if value and len(value) > 1:
verrors.add(
f'topology.{i}',
f'Only one row for the virtual device of type {i} is allowed.',
)
return verrors
    @accepts(Dict(
        'pool_create',
        Str('name', max_length=50, required=True),
        Bool('encryption', default=False),
        Str('deduplication', enum=[None, 'ON', 'VERIFY', 'OFF'], default=None, null=True),
        Str('checksum', enum=[None] + ZFS_CHECKSUM_CHOICES, default=None, null=True),
        Dict(
            'encryption_options',
            Bool('generate_key', default=False),
            Int('pbkdf2iters', default=350000, validators=[Range(min_=100000)]),
            Str('algorithm', default='AES-256-GCM', enum=ZFS_ENCRYPTION_ALGORITHM_CHOICES),
            Str('passphrase', default=None, null=True, validators=[Range(min_=8)], empty=False, private=True),
            Str('key', default=None, null=True, validators=[Range(min_=64, max_=64)], private=True),
            register=True
        ),
        Dict(
            'topology',
            List('data', items=[
                Dict(
                    'datavdevs',
                    Str('type', enum=[
                        'DRAID1', 'DRAID2', 'DRAID3', 'RAIDZ1', 'RAIDZ2', 'RAIDZ3', 'MIRROR', 'STRIPE'
                    ], required=True),
                    List('disks', items=[Str('disk')], required=True),
                    Int('draid_data_disks'),
                    Int('draid_spare_disks'),
                ),
            ], required=True),
            List('special', items=[
                Dict(
                    'specialvdevs',
                    Str('type', enum=['MIRROR', 'STRIPE'], required=True),
                    List('disks', items=[Str('disk')], required=True),
                ),
            ]),
            List('dedup', items=[
                Dict(
                    'dedupvdevs',
                    Str('type', enum=['MIRROR', 'STRIPE'], required=True),
                    List('disks', items=[Str('disk')], required=True),
                ),
            ]),
            List('cache', items=[
                Dict(
                    'cachevdevs',
                    Str('type', enum=['STRIPE'], required=True),
                    List('disks', items=[Str('disk')], required=True),
                ),
            ]),
            List('log', items=[
                Dict(
                    'logvdevs',
                    Str('type', enum=['STRIPE', 'MIRROR'], required=True),
                    List('disks', items=[Str('disk')], required=True),
                ),
            ]),
            List('spares', items=[Str('disk')]),
            required=True,
        ),
        Bool('allow_duplicate_serials', default=False),
        register=True,
    ), audit='Pool create', audit_extended=lambda data: data['name'])
    @job(lock='pool_createupdate')
    async def do_create(self, job, data):
        """
        Create a new ZFS Pool.

        `topology` is a object which requires at least one `data` entry.
        All of `data` entries (vdevs) require to be of the same type.

        `deduplication` when set to ON or VERIFY makes sure that no block of data is duplicated in the pool. When
        VERIFY is specified, if two blocks have similar signatures, byte to byte comparison is performed to ensure that
        the blocks are identical. This should be used in special circumstances as it carries a significant overhead.

        `encryption` when enabled will create an ZFS encrypted root dataset for `name` pool.

        `encryption_options` specifies configuration for encryption of root dataset for `name` pool.
        `encryption_options.passphrase` must be specified if encryption for root dataset is desired with a passphrase
        as a key.
        Otherwise a hex encoded key can be specified by providing `encryption_options.key`.
        `encryption_options.generate_key` when enabled automatically generates the key to be used
        for dataset encryption.

        It should be noted that keys are stored by the system for automatic locking/unlocking
        on import/export of encrypted datasets. If that is not desired, dataset should be created
        with a passphrase as a key.

        Example of `topology`:

            {
                "data": [
                    {"type": "RAIDZ1", "disks": ["da1", "da2", "da3"]}
                ],
                "cache": [
                    {"type": "STRIPE", "disks": ["da4"]}
                ],
                "log": [
                    {"type": "STRIPE", "disks": ["da5"]}
                ],
                "spares": ["da6"]
            }

        .. examples(websocket)::

          Create a pool named "tank", raidz1 with 3 disks, 1 cache disk, 1 ZIL/log disk
          and 1 hot spare disk.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.create",
                "params": [{
                    "name": "tank",
                    "topology": {
                        "data": [
                            {"type": "RAIDZ1", "disks": ["da1", "da2", "da3"]}
                        ],
                        "cache": [
                            {"type": "STRIPE", "disks": ["da4"]}
                        ],
                        "log": [
                            {"type": "RAIDZ1", "disks": ["da5"]}
                        ],
                        "spares": ["da6"]
                    }
                }]
            }
        """
        verrors = ValidationErrors()

        # Name must be unique and satisfy the zpool naming rules.
        if await self.middleware.call('pool.query', [('name', '=', data['name'])]):
            verrors.add('pool_create.name', 'A pool with this name already exists.', errno.EEXIST)
        elif not validate_pool_name(data['name']):
            verrors.add('pool_create.name', 'Invalid pool name', errno.EINVAL)

        if not data['topology']['data']:
            verrors.add('pool_create.topology.data', 'At least one data vdev is required')

        # Note: pops 'encryption'/'encryption_options' out of `data` — they are
        # consumed here and must not reach the datastore insert below.
        encryption_dict = await self.middleware.call(
            'pool.dataset.validate_encryption_data', None, verrors, {
                'enabled': data.pop('encryption'), **data.pop('encryption_options'), 'key_file': False,
            }, 'pool_create.encryption_options',
        )

        verrors.check()

        # On HA systems fenced must hold the disk reservations before we touch
        # the disks; ALREADY_RUNNING just means it needs a config reload.
        is_ha = await self.middleware.call('failover.licensed')
        if is_ha and (rc := await self.middleware.call('failover.fenced.start')):
            if rc == FencedExitCodes.ALREADY_RUNNING.value:
                try:
                    await self.middleware.call('failover.fenced.signal', {'reload': True})
                except Exception:
                    self.logger.error('Unhandled exception reloading fenced', exc_info=True)
            else:
                err = 'Unexpected error starting fenced'
                # Map the exit code back to a symbolic name for the error message.
                for i in filter(lambda x: x.value == rc, FencedExitCodes):
                    err = i.name
                raise CallError(err)

        disks, vdevs = await self._process_topology('pool_create', data)

        # Optionally overprovision (resize down) SLOG devices before use.
        if osize := (await self.middleware.call('system.advanced.config'))['overprovision']:
            if log_disks := {disk: osize
                             for disk in sum([vdev['disks'] for vdev in data['topology'].get('log', [])], [])}:
                # will log errors if there are any so it won't crash here (this matches CORE behavior)
                await (await self.middleware.call('disk.resize', log_disks, True)).wait()

        await self.middleware.call('pool.format_disks', job, disks, 0, 30)

        options = {
            'feature@lz4_compress': 'enabled',
            'altroot': '/mnt',
            'cachefile': ZPOOL_CACHE_FILE,
            'failmode': 'continue',
            'autoexpand': 'on',
            'ashift': 12,
        }

        fsoptions = {
            'atime': 'off',
            'aclmode': 'discard',
            'acltype': 'posix',
            'compression': 'lz4',
            'aclinherit': 'passthrough',
            'xattr': 'sa',
            'mountpoint': f'/{data["name"]}',
            **encryption_dict
        }

        if any(topology['type'].startswith('DRAID') for topology in data['topology']['data']):
            fsoptions['recordsize'] = '1M'

        dedup = data.get('deduplication')
        if dedup:
            fsoptions['dedup'] = dedup.lower()

        if data['checksum'] is not None:
            fsoptions['checksum'] = data['checksum'].lower()

        cachefile_dir = os.path.dirname(ZPOOL_CACHE_FILE)
        if not os.path.isdir(cachefile_dir):
            os.makedirs(cachefile_dir)

        # Everything inside the try block is rolled back on failure: the zpool
        # itself, the datastore row, and the stored encryption record.
        pool_id = z_pool = encrypted_dataset_pk = None
        try:
            job.set_progress(90, 'Creating ZFS Pool')

            z_pool = await self.middleware.call('zfs.pool.create', {
                'name': data['name'],
                'vdevs': vdevs,
                'options': options,
                'fsoptions': fsoptions,
            })

            job.set_progress(95, 'Setting pool options')

            # Inherit mountpoint after create because we set mountpoint on creation
            # making it a "local" source.
            await self.middleware.call('zfs.dataset.update', data['name'], {
                'properties': {
                    'mountpoint': {'source': 'INHERIT'},
                },
            })
            await self.middleware.call('zfs.dataset.mount', data['name'])

            pool = {
                'name': data['name'],
                'guid': z_pool['guid'],
            }
            pool_id = await self.middleware.call(
                'datastore.insert',
                'storage.volume',
                pool,
                {'prefix': 'vol_'},
            )

            encrypted_dataset_data = {
                'name': data['name'], 'encryption_key': encryption_dict.get('key'),
                'key_format': encryption_dict.get('keyformat')
            }
            encrypted_dataset_pk = await self.middleware.call(
                'pool.dataset.insert_or_update_encrypted_record', encrypted_dataset_data
            )

            await self.middleware.call('datastore.insert', 'storage.scrub', {'volume': pool_id}, {'prefix': 'scrub_'})
        except Exception as e:
            # Something wrong happened, we need to rollback and destroy pool.
            self.logger.debug('Pool %s failed to create with topology %s', data['name'], data['topology'])
            if z_pool:
                try:
                    await self.middleware.call('zfs.pool.delete', data['name'])
                except Exception:
                    self.logger.warning('Failed to delete pool on pool.create rollback', exc_info=True)
            if pool_id:
                await self.middleware.call('datastore.delete', 'storage.volume', pool_id)
            if encrypted_dataset_pk:
                await self.middleware.call(
                    'pool.dataset.delete_encrypted_datasets_from_db', [['id', '=', encrypted_dataset_pk]]
                )
            raise e

        # There is really no point in waiting all these services to reload so do them
        # in background.
        self.middleware.create_task(self.middleware.call('pool.restart_services'))

        pool = await self.get_instance(pool_id)
        await self.middleware.call_hook('pool.post_create', pool=pool)
        await self.middleware.call_hook('pool.post_create_or_update', pool=pool)
        await self.middleware.call_hook(
            'dataset.post_create', {'encrypted': bool(encryption_dict), **encrypted_dataset_data}
        )
        self.middleware.send_event('pool.query', 'ADDED', id=pool_id, fields=pool)

        return pool
    @accepts(Int('id'), Patch(
        'pool_create', 'pool_update',
        ('add', {'name': 'autotrim', 'type': 'str', 'enum': ['ON', 'OFF']}),
        ('rm', {'name': 'name'}),
        ('rm', {'name': 'encryption'}),
        ('rm', {'name': 'encryption_options'}),
        ('rm', {'name': 'deduplication'}),
        ('rm', {'name': 'checksum'}),
        ('edit', {'name': 'topology', 'method': lambda x: setattr(x, 'update', True)}),
    ), audit='Pool update', audit_callback=True)
    @job(lock='pool_createupdate')
    async def do_update(self, job, audit_callback, id_, data):
        """
        Update pool of `id`, adding the new topology.

        The `type` of `data` must be the same of existing vdevs.

        .. examples(websocket)::

          Add a new set of raidz1 to pool of id 1.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.update",
                "params": [1, {
                    "topology": {
                        "data": [
                            {"type": "RAIDZ1", "disks": ["da7", "da8", "da9"]}
                        ]
                    }
                }]
            }
        """
        pool = await self.get_instance(id_)
        audit_callback(pool['name'])

        # Validate the new vdevs against the pool's existing topology.
        disks = vdevs = None
        if 'topology' in data:
            disks, vdevs = await self._process_topology('pool_update', data, pool)

        if disks and vdevs:
            await self.middleware.call('pool.format_disks', job, disks, 0, 80)

            job.set_progress(90, 'Extending ZFS Pool')
            # Extension runs as a sub-job; surface its failure as our own.
            extend_job = await self.middleware.call('zfs.pool.extend', pool['name'], vdevs)
            await extend_job.wait()

            if extend_job.error:
                raise CallError(extend_job.error)

        properties = {}
        if 'autotrim' in data:
            properties['autotrim'] = {'value': data['autotrim'].lower()}
        if (
            zfs_pool := await self.middleware.call('zfs.pool.query', [['name', '=', pool['name']]])
        ) and zfs_pool[0]['properties']['ashift']['source'] == 'DEFAULT':
            # Pin ashift=12 on pools still using the ZFS default.
            # https://ixsystems.atlassian.net/browse/NAS-112093
            properties['ashift'] = {'value': '12'}
        if properties:
            await self.middleware.call('zfs.pool.update', pool['name'], {'properties': properties})

        pool = await self.get_instance(id_)
        await self.middleware.call_hook('pool.post_create_or_update', pool=pool)
        return pool
@accepts(Str('pool_name'), roles=['READONLY_ADMIN'])
@returns()
def validate_name(self, pool_name):
"""
Validates `pool_name` is a valid name for a pool.
"""
verrors = ValidationErrors()
if not validate_pool_name(pool_name):
verrors.add(
'pool_name',
'Invalid pool name (please refer to https://openzfs.github.io/openzfs-docs/'
'man/8/zpool-create.8.html#DESCRIPTION for valid rules for pool name)',
errno.EINVAL
)
verrors.check()
return True
@private
async def is_draid_pool(self, pool_name):
if pool := await self.middleware.call('zfs.pool.query', [['name', '=', pool_name]]):
if any(group['type'] == 'draid' for group in pool[0]['groups']['data']):
return True
return False
async def retaste_disks_on_standby_hook(middleware, *args, **kwargs):
    """After a pool is created/updated, ask the standby controller to re-taste disks."""
    licensed = await middleware.call('failover.licensed')
    if not licensed:
        # Not an HA system; there is no standby controller to notify.
        return

    try:
        await middleware.call('failover.call_remote', 'disk.retaste', [], {'raise_connect_error': False})
    except Exception:
        # Best effort only — never fail the pool operation over this.
        middleware.logger.warning('Failed to retaste disks on standby controller', exc_info=True)
async def setup(middleware):
    # Plugin entry point: notify the standby controller whenever a pool is
    # created or updated so it re-reads disk state.
    middleware.register_hook('pool.post_create_or_update', retaste_disks_on_standby_hook)
| 30,922 | Python | .py | 694 | 30.613833 | 119 | 0.508164 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,869 | dataset_info.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_info.py | from middlewared.schema import accepts, Dict, returns, Str
from middlewared.service import Service
from .utils import ZFS_CHECKSUM_CHOICES, ZFS_COMPRESSION_ALGORITHM_CHOICES, ZFS_ENCRYPTION_ALGORITHM_CHOICES
class PoolDatasetService(Service):
    """Informational helpers for dataset creation (choice lists, blocksize hints)."""

    class Config:
        namespace = 'pool.dataset'

    @accepts(roles=['DATASET_READ'])
    @returns(Dict(
        *[Str(k, enum=[k]) for k in ZFS_CHECKSUM_CHOICES if k != 'OFF'],
    ))
    async def checksum_choices(self):
        """
        Retrieve checksums supported for ZFS dataset.
        """
        # 'OFF' is deliberately excluded — disabling checksums is not offered.
        return {choice: choice for choice in ZFS_CHECKSUM_CHOICES if choice != 'OFF'}

    @accepts(roles=['DATASET_READ'])
    @returns(Dict(
        *[Str(k, enum=[k]) for k in ZFS_COMPRESSION_ALGORITHM_CHOICES],
    ))
    async def compression_choices(self):
        """
        Retrieve compression algorithm supported by ZFS.
        """
        return {choice: choice for choice in ZFS_COMPRESSION_ALGORITHM_CHOICES}

    @accepts(roles=['DATASET_READ'])
    @returns(Dict(
        *[Str(k, enum=[k]) for k in ZFS_ENCRYPTION_ALGORITHM_CHOICES],
    ))
    async def encryption_algorithm_choices(self):
        """
        Retrieve encryption algorithms supported for ZFS dataset encryption.
        """
        return {choice: choice for choice in ZFS_ENCRYPTION_ALGORITHM_CHOICES}

    @accepts(Str('pool'), roles=['DATASET_READ'])
    @returns(Str())
    async def recommended_zvol_blocksize(self, pool):
        """
        Helper method to get recommended size for a new zvol (dataset of type VOLUME).

        .. examples(websocket)::

          Get blocksize for pool "tank".

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.dataset.recommended_zvol_blocksize",
                "params": ["tank"]
            }
        """
        pool = await self.middleware.call('pool.query', [['name', '=', pool]], {'get': True})

        # Cheatsheat for blocksizes is as follows:
        # 2w/3w mirror = 16K
        # 3wZ1, 4wZ2, 5wZ3 = 16K
        # 4w/5wZ1, 5w/6wZ2, 6w/7wZ3 = 32K
        # 6w/7w/8w/9wZ1, 7w/8w/9w/10wZ2, 8w/9w/10w/11wZ3 = 64K
        # 10w+Z1, 11w+Z2, 12w+Z3 = 128K
        #
        # If the zpool was forcefully created with mismatched vdev geometry
        # (i.e. 3wZ1 and a 5wZ1) then we calculate the blocksize based on the
        # largest vdev of the zpool.
        parity = {'RAIDZ1': 1, 'RAIDZ2': 2, 'RAIDZ3': 3}
        maxdisks = 1
        for vdev in pool['topology']['data']:
            if vdev['type'] == 'MIRROR':
                # Mirror width never changes the recommendation.
                disks = maxdisks
            else:
                # RAIDZn contributes (width - parity) data disks; anything else
                # (e.g. stripes) contributes its full width.
                disks = len(vdev['children']) - parity.get(vdev['type'], 0)
            maxdisks = max(maxdisks, disks)

        return f'{max(16, min(128, 2 ** ((maxdisks * 8) - 1).bit_length()))}K'
| 3,040 | Python | .py | 75 | 31.04 | 108 | 0.57661 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,870 | export.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/export.py | import errno
import os
import shutil
from middlewared.schema import accepts, Bool, Dict, Int, returns
from middlewared.service import CallError, item_method, job, private, Service, ValidationError
from middlewared.utils.asyncio_ import asyncio_map
class PoolService(Service):
    """Pool export/disconnect job and its cleanup helper."""

    class Config:
        cli_namespace = 'storage.pool'
        event_send = False

    @private
    def cleanup_after_export(self, poolinfo, opts):
        """Best-effort removal of the leftover mountpoint directory under /mnt."""
        try:
            if all((opts['destroy'], opts['cascade'])) and (contents := os.listdir(poolinfo['path'])):
                if len(contents) == 1 and contents[0] in ('ix-applications', 'ix-apps'):
                    # This means:
                    # 1. zpool was destroyed (disks were wiped)
                    # 2. end-user chose to delete all share configuration associated
                    #   to said zpool
                    # 3. somehow ix-applications was the only top-level directory that
                    #   got left behind
                    #
                    # Since all 3 above are true, then we just need to remove this directory
                    # so we don't leave dangling directory(ies) in /mnt.
                    # (i.e. it'll leave something like /mnt/tank/ix-application/blah)
                    shutil.rmtree(poolinfo['path'])
                else:
                    # remove top-level directory for zpool (i.e. /mnt/tank (ONLY if it's empty))
                    os.rmdir(poolinfo['path'])
        except FileNotFoundError:
            # means the pool was exported and the path where the
            # root dataset (zpool) was mounted was removed
            return
        except Exception:
            self.logger.warning('Failed to remove remaining directories after export', exc_info=True)

    @item_method
    @accepts(
        Int('id'),
        Dict(
            'options',
            Bool('cascade', default=False),
            Bool('restart_services', default=False),
            Bool('destroy', default=False),
        ),
    )
    @returns()
    @job(lock='pool_export')
    async def export(self, job, oid, options):
        """
        Export pool of `id`.

        `cascade` will delete all attachments of the given pool (`pool.attachments`).
        `restart_services` will restart services that have open files on given pool.
        `destroy` will also PERMANENTLY destroy the pool/data.

        .. examples(websocket)::

          Export pool of id 1.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.export,
                "params": [1, {
                    "cascade": true,
                    "destroy": false
                }]
            }

        If this is an HA system and failover is enabled and the last zpool is
        exported/disconnected, then this will raise EOPNOTSUPP. Failover must
        be disabled before exporting the last zpool on the system.
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        root_ds = await self.middleware.call('pool.dataset.query', [['id', '=', pool['name']]])
        if root_ds and root_ds[0]['locked'] and os.path.exists(root_ds[0]['mountpoint']):
            # We should be removing immutable flag in this case if the path exists
            await self.middleware.call('filesystem.set_immutable', False, root_ds[0]['mountpoint'])

        # Refuse to remove the last pool while failover is still enabled on HA.
        pool_count = await self.middleware.call('pool.query', [], {'count': True})
        if pool_count == 1 and await self.middleware.call('failover.licensed'):
            if not (await self.middleware.call('failover.config'))['disabled']:
                raise CallError('Disable failover before exporting last pool on system.', errno.EOPNOTSUPP)

        # When attachments are only disabled (not deleted), remember them so
        # they can be re-enabled on the next import of this pool.
        enable_on_import_key = f'pool:{pool["name"]}:enable_on_import'
        enable_on_import = {}
        if not options['cascade']:
            if await self.middleware.call('keyvalue.has_key', enable_on_import_key):
                enable_on_import = await self.middleware.call('keyvalue.get', enable_on_import_key)

        for i, delegate in enumerate(await self.middleware.call('pool.dataset.get_attachment_delegates')):
            job.set_progress(
                i, f'{"Deleting" if options["cascade"] else "Disabling"} pool attachments: {delegate.title}')

            attachments = await delegate.query(pool['path'], True)
            if attachments:
                if options["cascade"]:
                    await delegate.delete(attachments)
                else:
                    await delegate.toggle(attachments, False)
                    enable_on_import[delegate.name] = list(
                        set(enable_on_import.get(delegate.name, [])) |
                        {attachment['id'] for attachment in attachments}
                    )

        if enable_on_import:
            await self.middleware.call('keyvalue.set', enable_on_import_key, enable_on_import)
        else:
            await self.middleware.call('keyvalue.delete', enable_on_import_key)

        job.set_progress(20, 'Terminating processes that are using this pool')
        try:
            await self.middleware.call('pool.dataset.kill_processes', pool['name'],
                                       options.get('restart_services', False))
        except ValidationError as e:
            if e.errno == errno.ENOENT:
                # Dataset might not exist (e.g. pool is not decrypted), this is not an error
                pass
            else:
                raise

        await self.middleware.call('iscsi.global.terminate_luns_for_pool', pool['name'])

        job.set_progress(30, 'Running pre-export actions')
        disks = await self.middleware.call('pool.get_disks', oid)
        await self.middleware.call_hook('pool.pre_export', pool=pool['name'], options=options, job=job)

        if pool['status'] == 'OFFLINE':
            # Pool exists only in database, it's not imported
            pass
        elif options['destroy']:
            job.set_progress(60, 'Destroying pool')
            await self.middleware.call('zfs.pool.delete', pool['name'])

            # Include topology members that pool.get_disks missed (if any).
            disks_to_clean = disks + [
                vdev['disk']
                for vdev in await self.middleware.call('pool.flatten_topology', pool['topology'])
                if vdev['type'] == 'DISK' and vdev['disk'] is not None and vdev['disk'] not in disks
            ]

            async def unlabel(disk):
                # Quick-wipe one disk; failures are logged, not fatal.
                wipe_job = await self.middleware.call(
                    'disk.wipe', disk, 'QUICK', False
                )
                await wipe_job.wait()
                if wipe_job.error:
                    self.logger.warning('Failed to wipe disk %r: {%r}', disk, wipe_job.error)
            job.set_progress(80, 'Cleaning disks')
            await asyncio_map(unlabel, disks_to_clean, limit=16)

            if await self.middleware.call('failover.licensed'):
                try:
                    await self.middleware.call('failover.call_remote', 'disk.retaste', [],
                                               {'raise_connect_error': False})
                except Exception:
                    self.logger.warning('Failed to retaste disks on standby controller', exc_info=True)

            job.set_progress(85, 'Syncing disk changes')
            djob = await self.middleware.call('disk.sync_all')
            await djob.wait()
            if djob.error:
                self.logger.warning('Failed syncing all disks: %r', djob.error)
        else:
            job.set_progress(80, 'Exporting pool')
            await self.middleware.call('zfs.pool.export', pool['name'])

        job.set_progress(90, 'Cleaning up after export')
        await self.middleware.run_in_thread(self.cleanup_after_export, pool, options)

        # Drop the pool row plus any stored encryption records for its datasets.
        await self.middleware.call('datastore.delete', 'storage.volume', oid)
        await self.middleware.call(
            'pool.dataset.delete_encrypted_datasets_from_db',
            [['OR', [['name', '=', pool['name']], ['name', '^', f'{pool["name"]}/']]]],
        )
        await self.middleware.call_hook('dataset.post_delete', pool['name'])

        # scrub needs to be regenerated in crontab
        await self.middleware.call('service.restart', 'cron')

        await self.middleware.call_hook('pool.post_export', pool=pool['name'], options=options)
        self.middleware.send_event('pool.query', 'REMOVED', id=oid)
| 8,470 | Python | .py | 160 | 39.725 | 109 | 0.581159 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,871 | resilver.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/resilver.py | from datetime import time
import middlewared.sqlalchemy as sa
from middlewared.schema import Bool, Dict, Int, List, Time
from middlewared.service import ConfigService, private, ValidationErrors
from middlewared.validators import Range
class PoolResilverModel(sa.Model):
    """`storage_resilver` table: single-row config for the resilver-priority window."""

    __tablename__ = 'storage_resilver'

    id = sa.Column(sa.Integer(), primary_key=True)
    enabled = sa.Column(sa.Boolean(), default=True)
    # Window during which resilver priority is raised; begin > end means the
    # window rolls over midnight (see do_update docstring).
    begin = sa.Column(sa.Time(), default=time(hour=18))
    end = sa.Column(sa.Time(), default=time(hour=9))
    # Comma-separated weekday numbers, e.g. '1,2,3,4,5,6,7'.
    weekday = sa.Column(sa.String(120), default='1,2,3,4,5,6,7')
class PoolResilverService(ConfigService):
    """Single-record config service controlling the resilver priority window."""

    class Config:
        namespace = 'pool.resilver'
        datastore = 'storage.resilver'
        datastore_extend = 'pool.resilver.resilver_extend'
        cli_namespace = 'storage.resilver'

    ENTRY = Dict(
        'pool_resilver_entry',
        Int('id', required=True),
        Time('begin', required=True),
        Time('end', required=True),
        Bool('enabled', required=True),
        # Note: the schema accepts weekdays 1-7 (Range validator below).
        List('weekday', required=True, items=[Int('weekday', validators=[Range(min_=1, max_=7)])])
    )

    @private
    async def resilver_extend(self, data):
        # Convert DB representation (time objects, CSV weekday string) to the
        # API representation ('HH:MM' strings, list of ints).
        data['begin'] = data['begin'].strftime('%H:%M')
        data['end'] = data['end'].strftime('%H:%M')
        data['weekday'] = [int(v) for v in data['weekday'].split(',') if v]
        return data

    @private
    async def validate_fields_and_update(self, data, schema):
        # Validate weekday selection and serialize it back to the CSV form the
        # datastore expects. Returns (verrors, data) — caller checks verrors.
        verrors = ValidationErrors()

        weekdays = data.get('weekday')
        if not weekdays:
            verrors.add(
                f'{schema}.weekday',
                'At least one weekday should be selected'
            )
        else:
            data['weekday'] = ','.join([str(day) for day in weekdays])

        return verrors, data

    async def do_update(self, data):
        """
        Configure Pool Resilver Priority.

        If `begin` time is greater than `end` time it means it will rollover the day, e.g.
        begin = "19:00", end = "05:00" will increase pool resilver priority from 19:00 of one day
        until 05:00 of the next day.

        `weekday` follows crontab(5) values 0-7 (0 or 7 is Sun).

        .. examples(websocket)::

          Enable pool resilver priority all business days from 7PM to 5AM.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.resilver.update",
                "params": [{
                    "enabled": true,
                    "begin": "19:00",
                    "end": "05:00",
                    "weekday": [1, 2, 3, 4, 5]
                }]
            }
        """
        config = await self.config()
        original_config = config.copy()
        config.update(data)

        verrors, new_config = await self.validate_fields_and_update(config, 'pool_resilver_update')
        verrors.check()

        # before checking if any changes have been made, original_config needs to be mapped to new_config
        # NOTE(review): begin/end are converted back to `time` objects here while
        # new_config may still hold 'HH:MM' strings for unchanged fields, which
        # would make the inequality check below always true — confirm how the
        # Time schema normalizes incoming values.
        original_config['weekday'] = ','.join([str(day) for day in original_config['weekday']])
        original_config['begin'] = time(*(int(value) for value in original_config['begin'].split(':')))
        original_config['end'] = time(*(int(value) for value in original_config['end'].split(':')))

        if len(set(original_config.items()) ^ set(new_config.items())) > 0:
            # data has changed
            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                new_config['id'],
                new_config
            )

            # crontab drives the priority window, so regenerate and re-apply.
            await self.middleware.call('service.restart', 'cron')
            await self.middleware.call('pool.configure_resilver_priority')

        return await self.config()
async def setup(middleware):
    """Plugin entry point: apply the configured resilver priority at startup."""
    # Fire-and-forget so plugin setup does not block middleware boot.
    configure_call = middleware.call('pool.configure_resilver_priority')
    middleware.create_task(configure_call)
| 3,959 | Python | .py | 88 | 35.045455 | 105 | 0.596724 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,872 | dataset_encryption_lock.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_encryption_lock.py | import asyncio
import errno
import os
import shutil
import uuid
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from middlewared.schema import accepts, Bool, Dict, List, returns, Str
from middlewared.service import CallError, job, private, Service, ValidationErrors
from middlewared.utils.filesystem.directory import directory_is_empty
from middlewared.validators import Range
from .utils import dataset_mountpoint, dataset_can_be_mounted, retrieve_keys_from_file, ZFSKeyFormat
class PoolDatasetService(Service):
    """Lock/unlock operations for ZFS-encrypted datasets."""

    class Config:
        namespace = 'pool.dataset'
@accepts(
    Str('id'),
    Dict(
        'lock_options',
        Bool('force_umount', default=False),
    )
)
@returns(Bool('locked'))
@job(lock=lambda args: 'dataset_lock')
async def lock(self, job, id_, options):
    """
    Locks `id` dataset. It will unmount the dataset and its children before locking.

    After the dataset has been unmounted, system will set immutable flag on the dataset's mountpoint where
    the dataset was mounted before it was locked making sure that the path cannot be modified. Once the dataset
    is unlocked, it will not be affected by this change and consumers can continue consuming it.
    """
    ds = await self.middleware.call('pool.dataset.get_instance_quick', id_, {
        'encryption': True,
    })

    # Only passphrase-encrypted encryption roots may be locked.
    if not ds['encrypted']:
        raise CallError(f'{id_} is not encrypted')
    elif ds['locked']:
        raise CallError(f'Dataset {id_} is already locked')
    elif ZFSKeyFormat(ds['key_format']['value']) != ZFSKeyFormat.PASSPHRASE:
        raise CallError('Only datasets which are encrypted with passphrase can be locked')
    elif id_ != ds['encryption_root']:
        raise CallError(f'Please lock {ds["encryption_root"]}. Only encryption roots can be locked.')

    mountpoint = dataset_mountpoint(ds)

    async def detach(delegate):
        # Stop every attachment (share/service/etc.) that lives on this
        # mountpoint so the unmount below does not fail with "busy".
        if mountpoint:
            await delegate.stop(await delegate.query(mountpoint, True))

    try:
        await self.middleware.call('cache.put', 'about_to_lock_dataset', id_)
        # Invalidate locked datasets cache if something got locked
        await self.middleware.call('cache.pop', 'zfs_locked_datasets')
        # Stop all attachment delegates concurrently before unloading the key.
        coroutines = [detach(dg) for dg in await self.middleware.call('pool.dataset.get_attachment_delegates')]
        await asyncio.gather(*coroutines)

        await self.middleware.call(
            'zfs.dataset.unload_key', id_, {
                'umount': True, 'force_umount': options['force_umount'], 'recursive': True
            }
        )
    finally:
        # Always clear the sentinel, even if unloading the key failed.
        await self.middleware.call('cache.pop', 'about_to_lock_dataset')

    if ds['mountpoint']:
        # Freeze the now-empty mountpoint so nothing can write into the bare directory.
        await self.middleware.call('filesystem.set_immutable', True, ds['mountpoint'])

    await self.middleware.call_hook('dataset.post_lock', id_)

    return True
@accepts(
    Str('id'),
    Dict(
        'unlock_options',
        Bool('force', default=False),
        Bool('key_file', default=False),
        Bool('recursive', default=False),
        Bool('toggle_attachments', default=True),
        List(
            'datasets', items=[
                Dict(
                    'dataset',
                    Bool('force', required=True, default=False),
                    Str('name', required=True, empty=False),
                    Str('key', validators=[Range(min_=64, max_=64)], private=True),
                    Str('passphrase', empty=False, private=True),
                    Bool('recursive', default=False),
                )
            ],
        ),
    )
)
@returns(Dict(
    List('unlocked', items=[Str('dataset')], required=True),
    Dict(
        'failed',
        required=True,
        additional_attrs=True,
        example={'vol1/enc': {'error': 'Invalid Key', 'skipped': []}},
    ),
))
@job(lock=lambda args: f'dataset_unlock_{args[0]}', pipes=['input'], check_pipes=False)
def unlock(self, job, id_, options):
    """
    Unlock dataset `id` (and its children if `unlock_options.recursive` is `true`).

    If `id` dataset is not encrypted an exception will be raised. There is one exception:
    when `id` is a root dataset and `unlock_options.recursive` is specified, encryption
    validation will not be performed for `id`. This allow unlocking encrypted children for the entire pool `id`.

    There are two ways to supply the key(s)/passphrase(s) for unlocking a dataset:

    1. Upload a json file which contains encrypted dataset keys (it will be read from the input pipe if
    `unlock_options.key_file` is `true`). The format is the one that is used for exporting encrypted dataset keys
    (`pool.export_keys`).

    2. Specify a key or a passphrase for each unlocked dataset using `unlock_options.datasets`.

    If `unlock_options.datasets.{i}.recursive` is `true`, a key or a passphrase is applied to all the encrypted
    children of a dataset.

    `unlock_options.toggle_attachments` controls whether attachments should be put in action after unlocking
    dataset(s). Toggling attachments can theoretically lead to service interruption when daemons configurations are
    reloaded (this should not happen, and if this happens it should be considered a bug). As TrueNAS does not have
    a state for resources that should be unlocked but are still locked, disabling this option will put the system
    into an inconsistent state so it should really never be disabled.

    In some cases it's possible that the provided key/passphrase is valid but the path where the dataset is
    supposed to be mounted after being unlocked already exists and is not empty. In this case, unlock operation
    would fail. This can be overridden by setting `unlock_options.datasets.X.force` boolean flag or by setting
    `unlock_options.force` flag. When any of these flags are set, system will rename the existing
    directory/file path where the dataset should be mounted resulting in successful unlock of the dataset.
    """
    verrors = ValidationErrors()
    dataset = self.middleware.call_sync('pool.dataset.get_instance', id_)
    keys_supplied = {}

    if options['key_file']:
        keys_supplied = retrieve_keys_from_file(job)

    # Validate per-dataset entries: exactly one of key/passphrase must be
    # supplied and, unless forced, the target mountpoint must be usable.
    for i, ds in enumerate(options['datasets']):
        if all(ds.get(k) for k in ('key', 'passphrase')):
            verrors.add(
                f'unlock_options.datasets.{i}.dataset.key',
                f'Must not be specified when passphrase for {ds["name"]} is supplied'
            )
        elif not any(ds.get(k) for k in ('key', 'passphrase')):
            verrors.add(
                f'unlock_options.datasets.{i}.dataset',
                f'Passphrase or key must be specified for {ds["name"]}'
            )

        if not options['force'] and not ds['force']:
            if self.middleware.call_sync(
                'pool.dataset.get_instance_quick', ds['name'], {'encryption': True}
            )['locked']:
                # We are only concerned to do validation here if the dataset is locked
                if err := dataset_can_be_mounted(ds['name'], os.path.join('/mnt', ds['name'])):
                    verrors.add(f'unlock_options.datasets.{i}.force', err)

        keys_supplied[ds['name']] = ds.get('key') or ds.get('passphrase')

    if '/' in id_ or not options['recursive']:
        if not dataset['locked']:
            verrors.add('id', f'{id_} dataset is not locked')
        elif dataset['encryption_root'] != id_:
            verrors.add('id', 'Only encryption roots can be unlocked')
    else:
        # Pool root with recursive unlock: `id` itself only needs a key when it
        # is an encrypted root with no key on record.
        if not bool(
            self.middleware.call_sync('pool.dataset.query_encrypted_roots_keys', [['name', '=', id_]])
        ) and id_ not in keys_supplied:
            verrors.add('unlock_options.datasets', f'Please specify key for {id_}')

    verrors.check()

    locked_datasets = []
    datasets = self.middleware.call_sync(
        'pool.dataset.query_encrypted_datasets', id_.split('/', 1)[0], {'key_loaded': False}
    )
    self._assign_supplied_recursive_keys(options['datasets'], keys_supplied, list(datasets.keys()))
    for name, ds in datasets.items():
        # Prefer a user-supplied key over whatever is stored in the database.
        ds_key = keys_supplied.get(name) or ds['encryption_key']
        if ds['locked'] and id_.startswith(f'{name}/'):
            # This ensures that `id` has locked parents and they should be unlocked first
            locked_datasets.append(name)
        elif ZFSKeyFormat(ds['key_format']['value']) == ZFSKeyFormat.RAW and ds_key:
            # This is hex encoded right now - we want to change it back to raw
            try:
                ds_key = bytes.fromhex(ds_key)
            except ValueError:
                ds_key = None

        datasets[name] = {'key': ds_key, **ds}

    if locked_datasets:
        raise CallError(f'{id_} has locked parents {",".join(locked_datasets)} which must be unlocked first')

    failed = defaultdict(lambda: dict({'error': None, 'skipped': []}))
    unlocked = []
    # Process shallowest-first so parents are always unlocked before children.
    names = sorted(
        filter(
            lambda n: n and f'{n}/'.startswith(f'{id_}/') and datasets[n]['locked'],
            (datasets if options['recursive'] else [id_])
        ),
        key=lambda v: v.count('/')
    )
    for name_i, name in enumerate(names):
        # If any ancestor of `name` already failed, record `name` as skipped.
        skip = False
        for i in range(name.count('/') + 1):
            check = name.rsplit('/', i)[0]
            if check in failed:
                failed[check]['skipped'].append(name)
                skip = True
                break

        if skip:
            continue

        if not datasets[name]['key']:
            failed[name]['error'] = 'Missing key'
            continue

        job.set_progress(int(name_i / len(names) * 90 + 0.5), f'Unlocking {name!r}')
        try:
            self.middleware.call_sync(
                'zfs.dataset.load_key', name, {'key': datasets[name]['key'], 'mount': False}
            )
        except CallError as e:
            failed[name]['error'] = 'Invalid Key' if 'incorrect key provided' in str(e).lower() else str(e)
        else:
            # Before we mount the dataset in question, we should ensure that the path where it will be mounted
            # is not already being used by some other service/share. In this case, we should simply rename the
            # directory where it will be mounted
            mount_path = os.path.join('/mnt', name)
            if os.path.exists(mount_path):
                try:
                    # Immutable flag was set when the dataset got locked; drop
                    # it so the rename/mount below can proceed.
                    self.middleware.call_sync('filesystem.set_immutable', False, mount_path)
                except OSError as e:
                    # It's ok to get `EROFS` because the dataset can have `readonly=on`
                    if e.errno != errno.EROFS:
                        raise
                except Exception as e:
                    failed[name]['error'] = (
                        f'Dataset mount failed because immutable flag at {mount_path!r} could not be removed: {e}'
                    )
                    continue

                if not os.path.isdir(mount_path) or not directory_is_empty(mount_path):
                    # rename please
                    shutil.move(mount_path, f'{mount_path}-{str(uuid.uuid4())[:4]}-{datetime.now().isoformat()}')

            try:
                self.middleware.call_sync('zfs.dataset.mount', name, {'recursive': True})
            except CallError as e:
                failed[name]['error'] = f'Failed to mount dataset: {e}'
            else:
                unlocked.append(name)
                # Best-effort: clear immutable flag on the freshly mounted path.
                try:
                    self.middleware.call_sync('filesystem.set_immutable', False, mount_path)
                except Exception:
                    pass

    # Re-freeze mountpoints of datasets that stayed locked (failed or skipped).
    for failed_ds in failed:
        failed_datasets = {}
        for ds in [failed_ds] + failed[failed_ds]['skipped']:
            mount_path = os.path.join('/mnt', ds)
            if os.path.exists(mount_path):
                try:
                    self.middleware.call_sync('filesystem.set_immutable', True, mount_path)
                except OSError as e:
                    # It's ok to get `EROFS` because the dataset can have `readonly=on`
                    if e.errno != errno.EROFS:
                        raise
                except Exception as e:
                    failed_datasets[ds] = str(e)

        if failed_datasets:
            failed[failed_ds]['error'] += '\n\nFailed to set immutable flag on following datasets:\n' + '\n'.join(
                f'{i + 1}) {ds!r}: {failed_datasets[ds]}' for i, ds in enumerate(failed_datasets)
            )

    if unlocked:
        # Invalidate locked datasets cache if something got unlocked
        self.middleware.call_sync('cache.pop', 'zfs_locked_datasets')

        if options['toggle_attachments']:
            job.set_progress(91, 'Handling attachments')
            self.middleware.call_sync('pool.dataset.unlock_handle_attachments', dataset)

        job.set_progress(92, 'Updating database')

        def dataset_data(unlocked_dataset):
            return {
                'encryption_key': keys_supplied.get(unlocked_dataset), 'name': unlocked_dataset,
                'key_format': datasets[unlocked_dataset]['key_format']['value'],
            }

        # Persist user-supplied keys so future unlocks need no key input.
        for unlocked_dataset in filter(lambda d: d in keys_supplied, unlocked):
            self.middleware.call_sync(
                'pool.dataset.insert_or_update_encrypted_record', dataset_data(unlocked_dataset)
            )

        job.set_progress(94, 'Running post-unlock tasks')
        self.middleware.call_hook_sync(
            'dataset.post_unlock', datasets=[dataset_data(ds) for ds in unlocked],
        )

    return {'unlocked': unlocked, 'failed': failed}
def _assign_supplied_recursive_keys(self, request_datasets, keys_supplied, queried_datasets):
request_datasets = {ds['name']: ds for ds in request_datasets}
for name in queried_datasets:
if name not in keys_supplied:
for parent in Path(name).parents:
parent = str(parent)
if parent in request_datasets and request_datasets[parent]['recursive']:
if parent in keys_supplied:
keys_supplied[name] = keys_supplied[parent]
break
@private
async def unlock_handle_attachments(self, dataset):
    # Restart services/shares that are attached to the newly unlocked dataset.
    mountpoint = dataset_mountpoint(dataset)
    for attachment_delegate in await self.middleware.call('pool.dataset.get_attachment_delegates'):
        # FIXME: put this into `VMFSAttachmentDelegate`
        if attachment_delegate.name == 'vm':
            await self.middleware.call('pool.dataset.restart_vms_after_unlock', dataset)
            continue

        if mountpoint:
            # Only act on attachments that are no longer locked.
            if attachments := await attachment_delegate.query(mountpoint, True, {'locked': False}):
                await attachment_delegate.start(attachments)
| 15,862 | Python | .py | 292 | 40.263699 | 119 | 0.577302 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,873 | dataset_encryption_info.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_encryption_info.py | import collections
import contextlib
import errno
import json
import os
import shutil
from io import BytesIO
from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str
from middlewared.service import CallError, job, periodic, private, Service, ValidationErrors
from middlewared.utils import filter_list
from middlewared.utils.path import is_child_realpath
from middlewared.validators import Range
from .utils import DATASET_DATABASE_MODEL_NAME, dataset_can_be_mounted, retrieve_keys_from_file, ZFSKeyFormat
class PoolDatasetService(Service):
    """Encryption key bookkeeping and reporting for ZFS-encrypted datasets."""

    class Config:
        namespace = 'pool.dataset'
@private
async def locked_datasets_cached(self):
    """Return locked datasets, served from a short-lived (20s) cache when possible."""
    try:
        cached = await self.middleware.call('cache.get', 'zfs_locked_datasets')
    except KeyError:
        pass
    else:
        return cached

    result = await self.middleware.call('zfs.dataset.locked_datasets')
    # Only cache once the system is ready, to avoid stale entries during boot.
    if await self.middleware.call('system.ready'):
        await self.middleware.call('cache.put', 'zfs_locked_datasets', result, 20)
    return result
@accepts(
    Str('id'),
    Dict(
        'encryption_root_summary_options',
        Bool('key_file', default=False),
        Bool('force', default=False),
        List(
            'datasets', items=[
                Dict(
                    'dataset',
                    Bool('force', required=True, default=False),
                    Str('name', required=True, empty=False),
                    Str('key', validators=[Range(min_=64, max_=64)], private=True),
                    Str('passphrase', empty=False, private=True),
                )
            ],
        ),
    ),
    roles=['DATASET_READ']
)
@returns(List(items=[Dict(
    'dataset_encryption_summary',
    Str('name', required=True),
    Str('key_format', required=True),
    Bool('key_present_in_database', required=True),
    Bool('valid_key', required=True),
    Bool('locked', required=True),
    Str('unlock_error', required=True, null=True),
    Bool('unlock_successful', required=True),
)]))
@job(lock=lambda args: f'encryption_summary_options_{args[0]}', pipes=['input'], check_pipes=False)
def encryption_summary(self, job, id_, options):
    """
    Retrieve summary of all encrypted roots under `id`.

    Keys/passphrase can be supplied to check if the keys are valid.

    It should be noted that there are 2 keys which show if a recursive unlock operation is
    done for `id`, which dataset will be unlocked and if not why it won't be unlocked. The keys
    namely are "unlock_successful" and "unlock_error". The former is a boolean value showing if unlock
    would succeed/fail. The latter is description why it failed if it failed.

    In some cases it's possible that the provided key/passphrase is valid but the path where the dataset is
    supposed to be mounted after being unlocked already exists and is not empty. In this case, unlock operation
    would fail and `unlock_error` will reflect this error appropriately. This can be overridden by setting
    `encryption_root_summary_options.datasets.X.force` boolean flag or by setting
    `encryption_root_summary_options.force` flag. In practice, when the dataset is going to be unlocked
    and these flags have been provided to `pool.dataset.unlock`, system will rename the directory/file path
    where the dataset should be mounted resulting in successful unlock of the dataset.

    If a dataset is already unlocked, it will show up as true for "unlock_successful" regardless of what
    key user provided as the unlock keys in the output are to reflect what a real unlock operation would
    behave. If user is interested in seeing if a provided key is valid or not, then the key to look out for
    in the output is "valid_key" which based on what system has in database or if a user provided one, validates
    the key and sets a boolean value for the dataset.

    Example output:
    [
        {
            "name": "vol",
            "key_format": "PASSPHRASE",
            "key_present_in_database": false,
            "valid_key": true,
            "locked": true,
            "unlock_error": null,
            "unlock_successful": true
        },
        {
            "name": "vol/c1/d1",
            "key_format": "PASSPHRASE",
            "key_present_in_database": false,
            "valid_key": false,
            "locked": true,
            "unlock_error": "Provided key is invalid",
            "unlock_successful": false
        },
        {
            "name": "vol/c",
            "key_format": "PASSPHRASE",
            "key_present_in_database": false,
            "valid_key": false,
            "locked": true,
            "unlock_error": "Key not provided",
            "unlock_successful": false
        },
        {
            "name": "vol/c/d2",
            "key_format": "PASSPHRASE",
            "key_present_in_database": false,
            "valid_key": false,
            "locked": true,
            "unlock_error": "Child cannot be unlocked when parent \"vol/c\" is locked and provided key is invalid",
            "unlock_successful": false
        }
    ]
    """
    keys_supplied = {}
    verrors = ValidationErrors()
    if options['key_file']:
        keys_supplied = {k: {'key': v, 'force': False} for k, v in retrieve_keys_from_file(job).items()}

    for i, ds in enumerate(options['datasets']):
        if all(ds.get(k) for k in ('key', 'passphrase')):
            verrors.add(
                f'unlock_options.datasets.{i}.dataset.key',
                f'Must not be specified when passphrase for {ds["name"]} is supplied'
            )
        keys_supplied[ds['name']] = {
            'key': ds.get('key') or ds.get('passphrase'),
            'force': ds['force'],
        }

    verrors.check()
    datasets = self.query_encrypted_datasets(id_, {'all': True})

    # Build (name, key) pairs and validate every key in one bulk ZFS call.
    to_check = []
    for name, ds in datasets.items():
        ds_key = keys_supplied.get(name, {}).get('key') or ds['encryption_key']
        if ZFSKeyFormat(ds['key_format']['value']) == ZFSKeyFormat.RAW and ds_key:
            # RAW keys are stored hex-encoded; ZFS expects the raw bytes.
            with contextlib.suppress(ValueError):
                ds_key = bytes.fromhex(ds_key)
        to_check.append((name, {'key': ds_key}))

    check_job = self.middleware.call_sync('zfs.dataset.bulk_process', 'check_key', to_check)
    check_job.wait_sync()
    if check_job.error:
        raise CallError(f'Failed to retrieve encryption summary for {id_}: {check_job.error}')

    results = []
    for ds_data, status in zip(to_check, check_job.result):
        ds_name = ds_data[0]
        data = datasets[ds_name]
        results.append({
            'name': ds_name,
            'key_format': ZFSKeyFormat(data['key_format']['value']).value,
            'key_present_in_database': bool(data['encryption_key']),
            'valid_key': bool(status['result']), 'locked': data['locked'],
            'unlock_error': None,
            'unlock_successful': False,
        })

    # Walk shallowest-first: a failed parent makes every descendant fail too.
    failed = set()
    for ds in sorted(results, key=lambda d: d['name'].count('/')):
        for i in range(1, ds['name'].count('/') + 1):
            check = ds['name'].rsplit('/', i)[0]
            if check in failed:
                failed.add(ds['name'])
                ds['unlock_error'] = f'Child cannot be unlocked when parent "{check}" is locked'

        if ds['locked'] and not options['force'] and not keys_supplied.get(ds['name'], {}).get('force'):
            # Unless forced, the mountpoint must be usable once unlocked.
            err = dataset_can_be_mounted(ds['name'], os.path.join('/mnt', ds['name']))
            if ds['unlock_error'] and err:
                ds['unlock_error'] += f' and {err}'
            elif err:
                ds['unlock_error'] = err

        if ds['valid_key']:
            ds['unlock_successful'] = not bool(ds['unlock_error'])
        elif not ds['locked']:
            # For datasets which are already not locked, unlock operation for them
            # will succeed as they are not locked
            ds['unlock_successful'] = True
        else:
            key_provided = ds['name'] in keys_supplied or ds['key_present_in_database']
            if key_provided:
                if ds['unlock_error']:
                    # NOTE(review): this inner membership test repeats the
                    # `key_provided` condition and always holds here.
                    if ds['name'] in keys_supplied or ds['key_present_in_database']:
                        ds['unlock_error'] += ' and provided key is invalid'
                else:
                    ds['unlock_error'] = 'Provided key is invalid'
            elif not ds['unlock_error']:
                ds['unlock_error'] = 'Key not provided'
            failed.add(ds['name'])

    return results
@periodic(86400)
@private
@job(lock=lambda args: f'sync_encrypted_pool_dataset_keys_{args}')
def sync_db_keys(self, job, name=None):
    # Validate the encryption keys stored in the database against actual ZFS
    # state and prune entries that no longer check out. Runs daily and on
    # demand; `name` optionally scopes the sync to a dataset and its children.
    if not self.middleware.call_sync('failover.is_single_master_node'):
        # We don't want to do this for passive controller
        return
    filters = [['OR', [['name', '=', name], ['name', '^', f'{name}/']]]] if name else []

    # It is possible we have a pool configured but for some mistake/reason the pool did not import like
    # during repair disks were not plugged in and system was booted, in such cases we would like to not
    # remove the encryption keys from the database.
    for root_ds in {pool['name'] for pool in self.middleware.call_sync('pool.query')} - {
        ds['id'] for ds in self.middleware.call_sync(
            'pool.dataset.query', [], {'extra': {'retrieve_children': False, 'properties': []}}
        )
    }:
        # Exclude the un-imported pool (and its children) from the sync.
        filters.extend([['name', '!=', root_ds], ['name', '!^', f'{root_ds}/']])

    db_datasets = self.query_encrypted_roots_keys(filters)
    encrypted_roots = {
        d['name']: d for d in self.middleware.call_sync(
            'pool.dataset.query', filters, {'extra': {'properties': ['encryptionroot']}}
        ) if d['name'] == d['encryption_root']
    }
    to_remove = []
    # Validate all stored keys in one bulk ZFS call.
    check_key_job = self.middleware.call_sync('zfs.dataset.bulk_process', 'check_key', [
        (name, {'key': db_datasets[name]}) for name in db_datasets
    ])
    check_key_job.wait_sync()
    if check_key_job.error:
        self.logger.error(f'Failed to sync database keys: {check_key_job.error}')
        return

    for dataset, status in zip(db_datasets, check_key_job.result):
        if not status['result']:
            # Key no longer unlocks the dataset - stale entry.
            to_remove.append(dataset)
        elif status['error']:
            # Errors for datasets that are no longer encryption roots mean the
            # DB entry is obsolete; otherwise just log the failure.
            if dataset not in encrypted_roots:
                to_remove.append(dataset)
            else:
                self.logger.error(f'Failed to check encryption status for {dataset}: {status["error"]}')

    self.middleware.call_sync('pool.dataset.delete_encrypted_datasets_from_db', [['name', 'in', to_remove]])
@private
def path_in_locked_datasets(self, path, locked_datasets=None):
    """Return True if `path` lives under the mountpoint of any locked dataset."""
    if locked_datasets is None:
        locked_datasets = self.middleware.call_sync('zfs.dataset.locked_datasets')
    for entry in locked_datasets:
        mount = entry['mountpoint']
        if mount and is_child_realpath(path, mount):
            return True
    return False
@private
@accepts(Ref('query-filters'))
def query_encrypted_roots_keys(self, filters):
    # Map encryption-root dataset names (matching `filters`) to their keys.
    #
    # We query database first - if we are able to find an encryption key, we assume it's the correct one.
    # If we are unable to find the key in database, we see if we have it in memory with the KMIP server, if not,
    # there are 2 ways this can go, we don't retrieve the key or the user can sync KMIP keys and we will have it
    # with the KMIP service again through which we can retrieve them
    datasets = filter_list(self.middleware.call_sync('datastore.query', DATASET_DATABASE_MODEL_NAME), filters)
    zfs_keys = self.middleware.call_sync('kmip.retrieve_zfs_keys')
    keys = {}
    for ds in datasets:
        if ds['encryption_key']:
            # Database entry wins over KMIP.
            keys[ds['name']] = ds['encryption_key']
        elif ds['name'] in zfs_keys:
            keys[ds['name']] = zfs_keys[ds['name']]
    return keys
@private
def query_encrypted_datasets(self, name, options=None):
    # Common function to retrieve encrypted datasets
    #
    # Returns a mapping of encryption-root name -> dataset info, with the
    # database/KMIP key (if any) merged in under `encryption_key`. By default
    # only roots whose key is currently loaded are returned; pass
    # `options={'key_loaded': False}` for locked roots or
    # `options={'all': True}` for both.
    options = options or {}
    key_loaded = options.get('key_loaded', True)
    db_results = self.query_encrypted_roots_keys([['OR', [['name', '=', name], ['name', '^', f'{name}/']]]])

    def normalize(ds):
        # Passphrase-encrypted roots never expose a stored key here.
        passphrase = ZFSKeyFormat(ds['key_format']['value']) == ZFSKeyFormat.PASSPHRASE
        key = db_results.get(ds['name']) if not passphrase else None
        return ds['name'], {'encryption_key': key, **ds}

    def check_key(ds):
        # Keep the dataset when `all` is requested or its key-loaded state
        # matches the requested `key_loaded` value.
        return options.get('all') or (ds['key_loaded'] and key_loaded) or (not ds['key_loaded'] and not key_loaded)

    return dict(map(
        normalize,
        filter(
            lambda d: (
                d['name'] == d['encryption_root'] and d['encrypted'] and f'{d["name"]}/'.startswith(
                    f'{name}/'
                ) and check_key(d)
            ),
            self.middleware.call_sync('pool.dataset.query')
        )
    ))
@accepts(Str('id'), roles=['DATASET_WRITE', 'REPLICATION_TASK_WRITE'])
@returns()
@job(lock='dataset_export_keys', pipes=['output'])
def export_keys(self, job, id_):
    """
    Export keys for `id` and its children which are stored in the system. The exported file is a JSON file
    which has a dictionary containing dataset names as keys and their keys as the value.

    Please refer to websocket documentation for downloading the file.
    """
    # Ensure the dataset exists and the stored keys are current before export.
    self.middleware.call_sync('pool.dataset.get_instance_quick', id_)
    sync_job = self.middleware.call_sync('pool.dataset.sync_db_keys', id_)
    sync_job.wait_sync()

    key_mapping = self.query_encrypted_roots_keys([['OR', [['name', '=', id_], ['name', '^', f'{id_}/']]]])
    # Write the JSON payload straight to the job's output pipe.
    job.pipes.output.w.write(json.dumps(key_mapping).encode())
@accepts(Int('id'), roles=['DATASET_WRITE', 'REPLICATION_TASK_WRITE'])
@returns()
@job(pipes=['output'])
def export_keys_for_replication(self, job, task_id):
    """
    Export keys for replication task `id` for source dataset(s) which are stored in the system. The exported file
    is a JSON file which has a dictionary containing dataset names as keys and their keys as the value.

    Please refer to websocket documentation for downloading the file.
    """
    replication_task = self.middleware.call_sync('replication.get_instance', task_id)
    key_mapping = self.middleware.call_sync(
        'pool.dataset.export_keys_for_replication_internal', replication_task
    )
    payload = json.dumps(key_mapping).encode()
    job.pipes.output.w.write(payload)
@private
async def export_keys_for_replication_internal(
    self, replication_task_or_id, dataset_encryption_root_mapping=None, skip_syncing_db_keys=False,
):
    # Build a mapping of target-side dataset names to encryption keys for a
    # PUSH replication task, so the keys can be imported on the destination.
    if isinstance(replication_task_or_id, int):
        task = await self.middleware.call('replication.get_instance', replication_task_or_id)
    else:
        task = replication_task_or_id

    if task['direction'] != 'PUSH':
        raise CallError('Only push replication tasks are supported.', errno.EINVAL)

    if not skip_syncing_db_keys:
        # Make sure stored keys are valid before exporting them.
        await (await self.middleware.call(
            'core.bulk', 'pool.dataset.sync_db_keys', [[source] for source in task['source_datasets']]
        )).wait()

    mapping = {}
    for source_ds in task['source_datasets']:
        source_ds_details = await self.middleware.call('pool.dataset.query', [['id', '=', source_ds]], {'extra': {
            'properties': ['encryptionroot'],
            'retrieve_children': False,
        }})
        if source_ds_details and source_ds_details[0]['encryption_root'] != source_ds:
            # Source inherits encryption from an ancestor; export that root's key.
            filters = ['name', '=', source_ds_details[0]['encryption_root']]
        else:
            if task['recursive']:
                filters = ['OR', [['name', '=', source_ds], ['name', '^', f'{source_ds}/']]]
            else:
                filters = ['name', '=', source_ds]

        mapping[source_ds] = await self.middleware.call('pool.dataset.query_encrypted_roots_keys', [filters])

    # We have 3 cases to deal with
    # 1. There are no encrypted datasets in source dataset, so let's just skip in that case
    # 2. There is only 1 source dataset, in this case the destination dataset will be overwritten as is, so we
    #    generate mapping accordingly. For example if source is `tank/enc` and destination is `dest/enc`, in
    #    the destination system `dest/enc` will reflect `tank/enc` so we reflect that accordingly in the mapping
    # 3. There are multiple source datasets, in this case they will become child of destination dataset
    if not any(mapping.values()):
        return {}

    result = {}
    include_encryption_root_children = not task['replicate'] and task['recursive']
    target_ds = task['target_dataset']
    source_mapping = await self.middleware.call(
        'zettarepl.get_source_target_datasets_mapping', task['source_datasets'], target_ds
    )
    if include_encryption_root_children:
        dataset_mapping = dataset_encryption_root_mapping or await self.dataset_encryption_root_mapping()
    else:
        dataset_mapping = {}

    for source_ds in task['source_datasets']:
        for ds_name, key in mapping[source_ds].items():
            for dataset in (dataset_mapping[ds_name] if include_encryption_root_children else [{'id': ds_name}]):
                # Rewrite the source-side path to its location under the
                # replication target (first occurrence only).
                result[dataset['id'].replace(
                    source_ds if len(source_ds) <= len(dataset['id']) else dataset['id'],
                    source_mapping[source_ds], 1
                )] = key

    return result
@private
async def dataset_encryption_root_mapping(self):
    """Group every dataset on the system under its encryption root.

    Returns a mapping of encryption-root name -> list of dataset entries
    (the root itself included).
    """
    grouped = collections.defaultdict(list)
    all_datasets = await self.middleware.call(
        'pool.dataset.query', [], {'extra': {'properties': ['encryptionroot']}}
    )
    for ds in all_datasets:
        grouped[ds['encryption_root']].append(ds)
    return grouped
@accepts(
    Str('id'),
    Bool('download', default=False),
)
@returns(Str('key', null=True, private=True))
@job(lock='dataset_export_keys', pipes=['output'], check_pipes=False)
def export_key(self, job, id_, download):
    """
    Export own encryption key for dataset `id`. If `download` is `true`, key will be downloaded in a json file
    where the same file can be used to unlock the dataset, otherwise it will be returned as string.

    Please refer to websocket documentation for downloading the file.
    """
    if download:
        # The output pipe is only needed when writing the key out as a file.
        job.check_pipe('output')

    # Raises if the dataset does not exist.
    self.middleware.call_sync('pool.dataset.get_instance_quick', id_)

    keys = self.query_encrypted_roots_keys([['name', '=', id_]])
    if id_ not in keys:
        # The dataset must be its own encryption root with a stored key.
        # (Error-message grammar fixed: "it's" -> "its".)
        raise CallError('Specified dataset does not have its own encryption key.', errno.EINVAL)

    key = keys[id_]

    if download:
        job.pipes.output.w.write(json.dumps({id_: key}).encode())
    else:
        return key
| 20,145 | Python | .py | 383 | 40.707572 | 119 | 0.587964 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,874 | scrub.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/scrub.py | from datetime import datetime
import asyncio
import re
import shlex
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Cron, Dict, Int, Patch, returns, Str
from middlewared.service import CallError, CRUDService, job, private, ValidationErrors
from middlewared.utils import run
from middlewared.validators import Range
RE_HISTORY_ZPOOL_SCRUB_CREATE = re.compile(r'^([0-9\.\:\-]{19})\s+(py-libzfs: )?zpool (scrub|create)', re.MULTILINE)
class ScrubError(CallError):
    """Error raised by pool scrub scheduling/execution logic."""
    pass
class PoolScrubModel(sa.Model):
    # Database table backing scheduled pool scrub tasks (one row per pool).
    __tablename__ = 'storage_scrub'

    id = sa.Column(sa.Integer(), primary_key=True)
    # Pool this scrub belongs to; the row is removed together with the pool.
    scrub_volume_id = sa.Column(sa.Integer(), sa.ForeignKey('storage_volume.id', ondelete='CASCADE'))
    # Minimum number of days that must pass before the scrub may run again.
    scrub_threshold = sa.Column(sa.Integer(), default=35)
    scrub_description = sa.Column(sa.String(200))
    # Cron-style schedule fields; crontab(5) semantics (dayweek 0-7, 0/7 = Sunday).
    scrub_minute = sa.Column(sa.String(100), default='00')
    scrub_hour = sa.Column(sa.String(100), default='00')
    scrub_daymonth = sa.Column(sa.String(100), default='*')
    scrub_month = sa.Column(sa.String(100), default='*')
    scrub_dayweek = sa.Column(sa.String(100), default='7')
    scrub_enabled = sa.Column(sa.Boolean(), default=True)
class PoolScrubService(CRUDService):
    """CRUD service managing scheduled scrub tasks for ZFS pools."""

    class Config:
        datastore = 'storage.scrub'
        datastore_extend = 'pool.scrub.pool_scrub_extend'
        datastore_prefix = 'scrub_'
        namespace = 'pool.scrub'
        cli_namespace = 'storage.scrub'
        role_prefix = 'POOL_SCRUB'

    # API schema of a scrub task entry.
    ENTRY = Dict(
        'pool_scrub_entry',
        Int('pool', validators=[Range(min_=1)], required=True),
        # Minimum number of days between successive scrub runs.
        Int('threshold', validators=[Range(min_=0)], required=True),
        Str('description', required=True),
        Cron(
            'schedule',
            defaults={
                'minute': '00',
                'hour': '00',
                'dow': '7'
            },
            required=True,
        ),
        Bool('enabled', default=True, required=True),
        Int('id', required=True),
        Str('pool_name', required=True),
        register=True
    )
@private
async def pool_scrub_extend(self, data):
    """Convert a raw `storage.scrub` row into its API representation.

    Replaces the joined `volume` record with flat `pool`/`pool_name` fields
    and expands the individual cron columns into a `schedule` dict.
    """
    volume = data.pop('volume')
    data.update({
        'pool': volume['id'],
        'pool_name': volume['vol_name'],
    })
    Cron.convert_db_format_to_schedule(data)
    return data
@private
async def validate_data(self, data, schema):
    """Validate scrub task data.

    Checks that the referenced pool exists and that it does not already have
    a scrub task - unless we are updating the task that owns it (i.e. the
    pool was not changed, signalled via `original_pool_id`). Returns a
    `(ValidationErrors, data)` tuple; callers invoke `verrors.check()`.
    """
    verrors = ValidationErrors()

    pool_pk = data.get('pool')
    if pool_pk:
        pool_obj = await self.middleware.call(
            'datastore.query',
            'storage.volume',
            [('id', '=', pool_pk)]
        )

        if not pool_obj:
            verrors.add(
                f'{schema}.pool',
                'The specified volume does not exist'
            )
        elif 'id' not in data or (
            'original_pool_id' in data and pool_pk != data['original_pool_id']
        ):
            # On create (no `id`), or on update when the pool was changed,
            # ensure the target pool does not already have a scrub task.
            # (The redundant `'id' in data` term of the original condition
            # is dropped: `not A or (A and B and C)` == `not A or (B and C)`.)
            scrub_obj = await self.query(filters=[('pool', '=', pool_pk)])
            if scrub_obj:
                verrors.add(
                    f'{schema}.pool',
                    'A scrub with this pool already exists'
                )

    return verrors, data
@accepts(
    Patch(
        'pool_scrub_entry', 'pool_scrub_entry',
        ('rm', {'name': 'id'}),
        ('rm', {'name': 'pool_name'}),
        ('edit', {'name': 'threshold', 'method': lambda x: setattr(x, 'required', False)}),
        ('edit', {'name': 'schedule', 'method': lambda x: setattr(x, 'required', False)}),
        ('edit', {'name': 'description', 'method': lambda x: setattr(x, 'required', False)}),
    )
)
async def do_create(self, data):
    """
    Create a scrub task for a pool.

    `threshold` refers to the minimum amount of time in days has to be passed before
    a scrub can run again.

    .. examples(websocket)::

      Create a scrub task for pool of id 1, to run every sunday but with a threshold of
      35 days.
      The check will run at 3AM every sunday.

        :::javascript
        {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "pool.scrub.create"
            "params": [{
                "pool": 1,
                "threshold": 35,
                "description": "Monthly scrub for tank",
                "schedule": "0 3 * * 7",
                "enabled": true
            }]
        }
    """
    verrors, data = await self.validate_data(data, 'pool_scrub_create')
    verrors.check()

    # Map API fields back to database columns before inserting.
    data['volume'] = data.pop('pool')
    Cron.convert_schedule_to_db_format(data)

    data['id'] = await self.middleware.call(
        'datastore.insert',
        self._config.datastore,
        data,
        {'prefix': self._config.datastore_prefix}
    )

    # Regenerate crontab so the new task takes effect.
    await self.middleware.call('service.restart', 'cron')

    return await self.get_instance(data['id'])
async def do_update(self, id_, data):
"""
Update scrub task of `id`.
"""
task_data = await self.get_instance(id_)
original_data = task_data.copy()
task_data['original_pool_id'] = original_data['pool']
task_data.update(data)
verrors, task_data = await self.validate_data(task_data, 'pool_scrub_update')
verrors.check()
task_data.pop('original_pool_id')
Cron.convert_schedule_to_db_format(task_data)
Cron.convert_schedule_to_db_format(original_data)
if len(set(task_data.items()) ^ set(original_data.items())) > 0:
task_data['volume'] = task_data.pop('pool')
task_data.pop('pool_name', None)
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
task_data,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('service.restart', 'cron')
return await self.get_instance(id_)
@accepts(Int('id'))
async def do_delete(self, id_):
"""
Delete scrub task of `id`.
"""
response = await self.middleware.call(
'datastore.delete',
self._config.datastore,
id_
)
await self.middleware.call('service.restart', 'cron')
return response
@accepts(
Str('name', required=True),
Str('action', enum=['START', 'STOP', 'PAUSE'], default='START')
)
@returns()
@job(
description=lambda name, action="START": (
f"Scrub of pool {name!r}" if action == "START"
else f"{action.title()} scrubbing pool {name!r}"
),
lock=lambda i: f'{i[0]}-{i[1] if len(i) >= 2 else "START"}',
)
async def scrub(self, job, name, action):
"""
Start/Stop/Pause a scrub on pool `name`.
"""
await self.middleware.call('zfs.pool.scrub_action', name, action)
if action == 'START':
while True:
scrub = await self.middleware.call('zfs.pool.scrub_state', name)
if scrub['pause']:
job.set_progress(100, 'Scrub paused')
break
if scrub['function'] != 'SCRUB':
break
if scrub['state'] == 'FINISHED':
job.set_progress(100, 'Scrub finished')
break
if scrub['state'] == 'CANCELED':
break
if scrub['state'] == 'SCANNING':
job.set_progress(scrub['percentage'], 'Scrubbing')
await asyncio.sleep(1)
@accepts(Str('name'), Int('threshold', default=35))
@returns()
async def run(self, name, threshold):
"""
Initiate a scrub of a pool `name` if last scrub was performed more than `threshold` days before.
"""
await self.middleware.call('alert.oneshot_delete', 'ScrubNotStarted', name)
await self.middleware.call('alert.oneshot_delete', 'ScrubStarted', name)
try:
started = await self.__run(name, threshold)
except ScrubError as e:
await self.middleware.call('alert.oneshot_create', 'ScrubNotStarted', {
'pool': name,
'text': e.errmsg,
})
else:
if started:
await self.middleware.call('alert.oneshot_create', 'ScrubStarted', name)
async def __run(self, name, threshold):
if name == await self.middleware.call('boot.pool_name'):
pool = await self.middleware.call('zfs.pool.query', [['name', '=', name]], {'get': True})
else:
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'BACKUP':
return
pool = await self.middleware.call('pool.query', [['name', '=', name]], {'get': True})
if pool['status'] == 'OFFLINE':
raise ScrubError(f'Pool {name} is offline, not running scrub')
if pool['scan']['state'] == 'SCANNING':
return False
last_scrubs = (await run(
'sh', '-c', f'zpool history {shlex.quote(name)} | grep -E "zpool (scrub|create|import)"',
encoding='utf-8',
errors='ignore',
)).stdout
for match in reversed(list(RE_HISTORY_ZPOOL_SCRUB_CREATE.finditer(last_scrubs))):
last_scrub = datetime.strptime(match.group(1), '%Y-%m-%d.%H:%M:%S')
break
else:
self.logger.warning("Could not find last scrub of pool %r", name)
last_scrub = datetime.min
if (datetime.now() - last_scrub).total_seconds() < (threshold - 1) * 86400:
self.logger.debug('Pool %r last scrub %r', name, last_scrub)
return False
await self.middleware.call('pool.scrub.scrub', pool['name'])
return True
| 10,273 | Python | .py | 249 | 29.971888 | 116 | 0.543267 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,875 | format_disks.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/format_disks.py | from middlewared.service import private, Service
from middlewared.utils.asyncio_ import asyncio_map
class PoolService(Service):

    @private
    async def format_disks(self, job, disks, base_percentage=0, upper_percentage=100):
        """
        Format every disk in `disks` and record the resulting ZFS partition
        device path into each disk's target vdev.

        `disks` maps disk name -> config dict; each config must carry a
        'vdev' list, to which the `/dev/<gptid>` path of the freshly created
        ZFS partition is appended. Progress is reported on `job` within the
        [`base_percentage`, `upper_percentage`] window.
        """
        # Unlock any SED drives first so formatting does not fail on them.
        await self.middleware.call('disk.sed_unlock_all')

        total = len(disks)
        done = 0
        progress = base_percentage
        step = (upper_percentage - base_percentage) / total

        async def _format_one(item):
            nonlocal done, progress
            name, _config = item
            await self.middleware.call('disk.format', name)
            done += 1
            progress += step
            job.set_progress(progress, f'Formatting disks ({done}/{total})')

        # Format at most 16 disks concurrently.
        await asyncio_map(_format_one, disks.items(), limit=16)

        # Resync disk information before resolving partition gptids.
        sync_job = await self.middleware.call('disk.sync_all')
        await sync_job.wait(raise_error=True)

        part_type = await self.middleware.call('disk.get_zfs_part_type')
        for name, config in disks.items():
            gptid = await self.middleware.call('disk.gptid_from_part_type', name, part_type)
            config['vdev'].append(f'/dev/{gptid}')
| 1,401 | Python | .py | 27 | 42.592593 | 98 | 0.663982 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,876 | user_props.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/user_props.py | from middlewared.schema import Dict, Str
from middlewared.service import accepts, CRUDService, filterable, ValidationErrors
from middlewared.utils import filter_list
from middlewared.validators import Match
class PoolDatasetUserPropService(CRUDService):
    """CRUD operations over ZFS user properties (``namespace:name`` pairs) of datasets."""

    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'pool.dataset.userprop'
        cli_namespace = 'storage.dataset.user_prop'

    ENTRY = Dict(
        'pool_dataset_userprop_entry',
        Str('id', required=True),
        Dict('properties', additional_attrs=True, required=True),
    )

    @filterable
    def query(self, filters, options):
        """
        Query all user properties for ZFS datasets.
        """
        datasets = self.middleware.call_sync('zfs.dataset.query', [], {
            'extra': {'user_properties': True, 'properties': []}
        })
        entries = [{'id': ds['id'], 'properties': ds['properties']} for ds in datasets]
        return filter_list(entries, filters, options)

    async def __common_validation(self, dataset, data, schema, update=False):
        # On create the property must not exist yet; on update/delete it must.
        verrors = ValidationErrors()
        present = data['name'] in dataset['properties']
        if present != update:
            if update:
                msg = f'{data["name"]} does not exist in {dataset["id"]} user properties'
            else:
                msg = f'{data["name"]} exists in {dataset["id"]} user properties'
            verrors.add(f'{schema}.property.name', msg)
        return verrors

    @accepts(
        Dict(
            'dataset_user_prop_create',
            Str('id', required=True, empty=False),
            Dict(
                'property',
                Str('name', required=True, validators=[Match(r'.*:.*')]),
                Str('value', required=True),
            )
        )
    )
    async def do_create(self, data):
        """
        Create a user property for a given `id` dataset.
        """
        dataset = await self.get_instance(data['id'])
        verrors = await self.__common_validation(dataset, data['property'], 'dataset_user_prop_create')
        verrors.check()
        prop = data['property']
        await self.middleware.call(
            'zfs.dataset.update', data['id'],
            {'properties': {prop['name']: {'value': prop['value']}}},
        )
        return await self.get_instance(data['id'])

    @accepts(
        Str('id'),
        Dict(
            'dataset_user_prop_update',
            Str('name', required=True),
            Str('value', required=True),
        )
    )
    async def do_update(self, id_, data):
        """
        Update `dataset_user_prop_update.name` user property for `id` dataset.
        """
        dataset = await self.get_instance(id_)
        verrors = await self.__common_validation(dataset, data, 'dataset_user_prop_update', True)
        verrors.check()
        await self.middleware.call(
            'zfs.dataset.update', id_,
            {'properties': {data['name']: {'value': data['value']}}},
        )
        return await self.get_instance(id_)

    @accepts(
        Str('id'),
        Dict(
            'dataset_user_prop_delete',
            Str('name', required=True),
        )
    )
    async def do_delete(self, id_, options):
        """
        Delete user property `dataset_user_prop_delete.name` for `id` dataset.
        """
        dataset = await self.get_instance(id_)
        verrors = await self.__common_validation(dataset, options, 'dataset_user_prop_delete', True)
        verrors.check()
        # Setting the property source to INHERIT clears the local value.
        await self.middleware.call(
            'zfs.dataset.update', id_,
            {'properties': {options['name']: {'source': 'INHERIT'}}},
        )
        return True
| 3,797 | Python | .py | 102 | 27.294118 | 103 | 0.557065 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,877 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/utils.py | import enum
import itertools
import json
import os
import re
from pathlib import Path
from middlewared.plugins.zfs_.utils import TNUserProp
from middlewared.service_exception import CallError
from middlewared.utils.size import MB
from middlewared.utils.filesystem.directory import directory_is_empty
# Datastore model holding encryption keys of key-encrypted ZFS datasets.
DATASET_DATABASE_MODEL_NAME = 'storage.encrypteddataset'
# Pieces of a dRAID vdev specification, e.g. 'draid2:4d:10c:1s-0'.
RE_DRAID_DATA_DISKS = re.compile(r':\d*d')
RE_DRAID_SPARE_DISKS = re.compile(r':\d*s')
RE_DRAID_NAME = re.compile(r'draid\d:\d+d:\d+c:\d+s-\d+')
# Valid values for the ZFS `checksum` property (upper-cased for the API).
ZFS_CHECKSUM_CHOICES = ['ON', 'OFF', 'FLETCHER2', 'FLETCHER4', 'SHA256', 'SHA512', 'SKEIN', 'EDONR', 'BLAKE3']
# Valid values for the ZFS `compression` property, including the numbered
# ZSTD / ZSTD-FAST levels.
ZFS_COMPRESSION_ALGORITHM_CHOICES = [
    'ON', 'OFF', 'LZ4', 'GZIP', 'GZIP-1', 'GZIP-9', 'ZSTD', 'ZSTD-FAST', 'ZLE', 'LZJB',
] + [f'ZSTD-{i}' for i in range(1, 20)] + [
    f'ZSTD-FAST-{i}' for i in itertools.chain(range(1, 11), range(20, 110, 10), range(500, 1500, 500))
]
# Valid values for the ZFS `encryption` property.
ZFS_ENCRYPTION_ALGORITHM_CHOICES = [
    'AES-128-CCM', 'AES-192-CCM', 'AES-256-CCM', 'AES-128-GCM', 'AES-192-GCM', 'AES-256-GCM'
]
ZFS_MAX_DATASET_NAME_LEN = 200  # It's really 256, but we should leave some space for snapshot names
# Accepted `volblocksize` spellings mapped to their size in bytes.
ZFS_VOLUME_BLOCK_SIZE_CHOICES = {
    '512': 512,
    '512B': 512,
    '1K': 1024,
    '2K': 2048,
    '4K': 4096,
    '8K': 8192,
    '16K': 16384,
    '32K': 32768,
    '64K': 65536,
    '128K': 131072,
}
ZPOOL_CACHE_FILE = '/data/zfs/zpool.cache'
ZPOOL_KILLCACHE = '/data/zfs/killcache'
def none_normalize(x):
    """Map 0/None to the ZFS property string 'none'; pass anything else through."""
    return 'none' if x in (0, None) else x
def _null(x):
if x == 'none':
return None
return x
def dataset_mountpoint(dataset):
    """Return the dataset's effective mountpoint, or None for 'legacy' mounts.

    When the `mountpoint` property is falsy, fall back to the conventional
    `/mnt/<dataset name>` location.
    """
    mountpoint = dataset['mountpoint']
    if mountpoint == 'legacy':
        return None
    return mountpoint or os.path.join('/mnt', dataset['name'])
def dataset_can_be_mounted(ds_name, ds_mountpoint):
    """Return '' when `ds_name` can be mounted at `ds_mountpoint`, otherwise an error message."""
    if os.path.isfile(ds_mountpoint):
        error = f'A file exists at {ds_mountpoint!r} and {ds_name} cannot be mounted'
    elif os.path.isdir(ds_mountpoint) and not directory_is_empty(ds_mountpoint):
        error = f'{ds_mountpoint!r} directory is not empty'
    else:
        return ''
    # Unlock with "force" renames the offending file/directory out of the way.
    return error + (
        ' (please provide "force" flag to override this error and file/directory '
        'will be renamed once the dataset is unlocked)'
    )
def get_props_of_interest_mapping():
    """
    Return (zfs property name, api attribute name, transform) triples for the
    dataset properties the middleware exposes.

    A None api name means the zfs name is used as-is; a None transform means
    the raw value is passed through unchanged.
    """
    return [
        # TrueNAS-specific user properties.
        (TNUserProp.DESCRIPTION.value, 'comments', None),
        (TNUserProp.QUOTA_WARN.value, 'quota_warning', None),
        (TNUserProp.QUOTA_CRIT.value, 'quota_critical', None),
        (TNUserProp.REFQUOTA_WARN.value, 'refquota_warning', None),
        (TNUserProp.REFQUOTA_CRIT.value, 'refquota_critical', None),
        (TNUserProp.MANAGED_BY.value, 'managedby', None),
        # Native ZFS properties.
        ('dedup', 'deduplication', str.upper),
        ('mountpoint', None, _null),
        ('aclmode', None, str.upper),
        ('acltype', None, str.upper),
        ('xattr', None, str.upper),
        ('atime', None, str.upper),
        ('casesensitivity', None, str.upper),
        ('checksum', None, str.upper),
        ('exec', None, str.upper),
        ('sync', None, str.upper),
        ('compression', None, str.upper),
        ('compressratio', None, None),
        ('origin', None, None),
        ('quota', None, _null),
        ('refquota', None, _null),
        ('reservation', None, _null),
        ('refreservation', None, _null),
        ('copies', None, None),
        ('snapdir', None, str.upper),
        ('readonly', None, str.upper),
        ('recordsize', None, None),
        ('sparse', None, None),
        ('volsize', None, None),
        ('volblocksize', None, None),
        # 'none'/'off' are normalized to None for unencrypted datasets.
        ('keyformat', 'key_format', lambda o: o.upper() if o != 'none' else None),
        ('encryption', 'encryption_algorithm', lambda o: o.upper() if o != 'off' else None),
        ('used', None, None),
        ('usedbychildren', None, None),
        ('usedbydataset', None, None),
        ('usedbyrefreservation', None, None),
        ('usedbysnapshots', None, None),
        ('available', None, None),
        ('special_small_blocks', 'special_small_block_size', None),
        ('pbkdf2iters', None, None),
        ('creation', None, None),
        ('snapdev', None, str.upper),
    ]
def retrieve_keys_from_file(job):
    """
    Read a JSON mapping of dataset name -> encryption key from `job`'s input pipe.

    Raises CallError when the input is not valid JSON or is not a flat
    string-to-string object. At most 10 MiB are read.
    """
    job.check_pipe('input')
    raw = job.pipes.input.r.read(10 * MB)
    try:
        keys = json.loads(raw)
    except json.JSONDecodeError:
        raise CallError('Input file must be a valid JSON file')
    if not isinstance(keys, dict) or any(not isinstance(v, str) for v in keys.values()):
        raise CallError('Please specify correct format for input file')
    return keys
def get_dataset_parents(dataset: str) -> list:
    """Return all ancestor dataset names of `dataset`, nearest first.

    The trailing '.' root marker produced by PurePath.parents is dropped,
    so a top-level dataset has no parents.
    """
    ancestors = list(Path(dataset).parents)[:-1]
    return [ancestor.as_posix() for ancestor in ancestors]
class ZFSKeyFormat(enum.Enum):
    """Upper-cased values of the ZFS `keyformat` property for encrypted datasets."""
    HEX = 'HEX'
    PASSPHRASE = 'PASSPHRASE'
    RAW = 'RAW'
| 4,877 | Python | .py | 121 | 34.487603 | 110 | 0.631735 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,878 | pool_disk_operations.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/pool_disk_operations.py | import asyncio
import errno
import itertools
from middlewared.schema import accepts, Bool, Dict, Int, returns, Str
from middlewared.service import CallError, item_method, job, Service, ValidationErrors
class PoolService(Service):
    # Disk-level operations (detach/offline/online/remove) on pool vdev
    # members. All of them resolve `options.label` (vdev guid or device name)
    # through `pool.find_disk_from_topology` before acting.
    class Config:
        cli_namespace = 'storage.pool'
        event_send = False
    @item_method
    @accepts(Int('id'), Dict(
        'options',
        Str('label', required=True),
    ))
    @returns(Bool('detached'))
    async def detach(self, oid, options):
        """
        Detach a disk from pool of id `id`.
        `label` is the vdev guid or device name.
        .. examples(websocket)::
          Detach ZFS device.
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.detach,
                "params": [1, {
                    "label": "80802394992848654"
                }]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        found = await self.middleware.call('pool.find_disk_from_topology', options['label'], pool)
        if not found:
            verrors.add('options.label', f'Label {options["label"]} not found on this pool.')
        verrors.check()
        # Resolve the underlying disk up-front so it can be wiped after detach.
        disk = await self.middleware.call(
            'disk.label_to_disk', found[1]['path'].replace('/dev/', '')
        )
        await self.middleware.call('zfs.pool.detach', pool['name'], found[1]['guid'])
        if disk:
            # Quick-wipe the freed disk so stale ZFS labels are not detected later.
            wipe_job = await self.middleware.call('disk.wipe', disk, 'QUICK')
            await wipe_job.wait()
            if wipe_job.error:
                raise CallError(f'Failed to wipe disk {disk}: {wipe_job.error}')
        return True
    @item_method
    @accepts(Int('id'), Dict(
        'options',
        Str('label', required=True),
    ))
    @returns(Bool('offline_successful'))
    async def offline(self, oid, options):
        """
        Offline a disk from pool of id `id`.
        `label` is the vdev guid or device name.
        .. examples(websocket)::
          Offline ZFS device.
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.offline,
                "params": [1, {
                    "label": "80802394992848654"
                }]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        found = await self.middleware.call('pool.find_disk_from_topology', options['label'], pool)
        if not found:
            verrors.add('options.label', f'Label {options["label"]} not found on this pool.')
        verrors.check()
        await self.middleware.call('zfs.pool.offline', pool['name'], found[1]['guid'])
        return True
    @item_method
    @accepts(Int('id'), Dict(
        'options',
        Str('label', required=True),
    ))
    @returns(Bool('online_successful'))
    async def online(self, oid, options):
        """
        Online a disk from pool of id `id`.
        `label` is the vdev guid or device name.
        .. examples(websocket)::
          Online ZFS device.
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.online,
                "params": [1, {
                    "label": "80802394992848654"
                }]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        found = await self.middleware.call('pool.find_disk_from_topology', options['label'], pool)
        if not found:
            verrors.add('options.label', f'Label {options["label"]} not found on this pool.')
        verrors.check()
        await self.middleware.call('zfs.pool.online', pool['name'], found[1]['guid'])
        return True
    @item_method
    @accepts(Int('id'), Dict(
        'options',
        Str('label', required=True),
    ))
    @returns()
    @job(lock=lambda args: f'{args[0]}_remove')
    async def remove(self, job, oid, options):
        """
        Remove a disk from pool of id `id`.
        `label` is the vdev guid or device name.
        Error codes:
            EZFS_NOSPC(2032): out of space to remove a device
            EZFS_NODEVICE(2017): no such device in pool
            EZFS_NOREPLICAS(2019): no valid replicas
        .. examples(websocket)::
          Remove ZFS device.
            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.remove,
                "params": [1, {
                    "label": "80802394992848654"
                }]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        verrors = ValidationErrors()
        # Unlike detach/offline/online, removal may target a top-level vdev.
        found = await self.middleware.call('pool.find_disk_from_topology', options['label'], pool, {
            'include_top_level_vdev': True,
        })
        if not found:
            verrors.add('options.label', f'Label {options["label"]} not found on this pool.')
        verrors.check()
        job.set_progress(20, f'Initiating removal of {options["label"]!r} ZFS device')
        await self.middleware.call('zfs.pool.remove', pool['name'], found[1]['guid'])
        job.set_progress(40, 'Waiting for removal of ZFS device to complete')
        # We would like to wait for the removal to actually complete for cases where the removal might not
        # be synchronous like removing top level vdevs except for slog and l2arc
        await self.middleware.call('zfs.pool.wait', pool['name'], {'activity_type': 'REMOVE'})
        job.set_progress(60, 'Removal of ZFS device complete')
        if found[1]['type'] != 'DISK':
            # Top-level vdev (mirror/raidz/...): wipe each member disk.
            disk_paths = [d['path'] for d in found[1]['children']]
        else:
            disk_paths = [found[1]['path']]
        job.set_progress(70, 'Wiping disks')
        disks_to_wipe = set()
        for disk_path in disk_paths:
            disk = await self.middleware.call(
                'disk.label_to_disk', disk_path.replace('/dev/', '')
            )
            if disk:
                disks_to_wipe.add(disk)
        max_retries = 30
        disks_errors = {}
        # Retry loop: EBUSY wipes are retried once per second up to
        # max_retries; other failures are collected per disk and abort the
        # loop on the next pass.
        for retry in itertools.count(1):
            wipe_jobs = []
            for disk in disks_to_wipe:
                wipe_job = await self.middleware.call('disk.wipe', disk, 'QUICK', False)
                wipe_jobs.append((disk, wipe_job))
            disks_errors = {}
            for disk, wipe_job in wipe_jobs:
                try:
                    await wipe_job.wait(raise_error=True, raise_error_forward_classes=(OSError,))
                except OSError as e:
                    if not (e.errno == errno.EBUSY and retry < max_retries):
                        # Sometimes we get this error even after `zfs.pool.wait` confirms the successful device removal
                        raise
                    # EBUSY with retries left: keep the disk in the set and retry.
                except Exception as e:
                    disks_errors[disk] = str(e)
                    disks_to_wipe.remove(disk)
                else:
                    disks_to_wipe.remove(disk)
            if not disks_to_wipe or disks_errors:
                break
            await asyncio.sleep(1)
        if disks_errors:
            disks_errors = '\n'.join(sorted({f'{disk}: {error}' for disk, error in disks_errors.items()}))
            raise CallError(f'Failed to wipe disks:\n{disks_errors}')
        job.set_progress(100, 'Successfully completed wiping disks')
| 7,739 | Python | .py | 188 | 29.771277 | 119 | 0.549793 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,879 | dataset_encryption_operations.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_encryption_operations.py | import secrets
from middlewared.schema import accepts, Any, Bool, Dict, Int, returns, Str
from middlewared.service import CallError, job, private, Service, ValidationErrors
from middlewared.validators import Range
from .utils import DATASET_DATABASE_MODEL_NAME, ZFSKeyFormat
class PoolDatasetService(Service):

    class Config:
        namespace = 'pool.dataset'

    @private
    @accepts(
        Dict(
            'dataset_db_create',
            Any('encryption_key', null=True, default=None),
            Int('id', default=None, null=True),
            Str('name', required=True, empty=False),
            Str('key_format', required=True, null=True),
        )
    )
    async def insert_or_update_encrypted_record(self, data):
        """
        Persist the encryption key of a key-encrypted dataset in the database,
        syncing it to the KMIP server when KMIP key management is enabled.

        Returns the database row id, or None when there is nothing to store
        (no key supplied, or the dataset is passphrase-encrypted).
        """
        key_format = data.pop('key_format') or ZFSKeyFormat.PASSPHRASE.value
        if not data['encryption_key'] or ZFSKeyFormat(key_format.upper()) == ZFSKeyFormat.PASSPHRASE:
            # We do not want to save passphrase keys - they are only known to the user
            return
        ds_id = data.pop('id')
        # Look up an existing row by explicit id, falling back to dataset name.
        ds = await self.middleware.call(
            'datastore.query', DATASET_DATABASE_MODEL_NAME,
            [['id', '=', ds_id]] if ds_id else [['name', '=', data['name']]]
        )
        pk = ds[0]['id'] if ds else None
        if ds:
            await self.middleware.call(
                'datastore.update',
                DATASET_DATABASE_MODEL_NAME,
                ds[0]['id'], data
            )
        else:
            pk = await self.middleware.call(
                'datastore.insert',
                DATASET_DATABASE_MODEL_NAME,
                data
            )
        kmip_config = await self.middleware.call('kmip.config')
        if kmip_config['enabled'] and kmip_config['manage_zfs_keys']:
            await self.middleware.call('kmip.sync_zfs_keys', [pk])
        return pk

    @private
    async def delete_encrypted_datasets_from_db(self, filters):
        """Delete matching encrypted-dataset rows, resetting KMIP-held keys first."""
        datasets = await self.middleware.call('datastore.query', DATASET_DATABASE_MODEL_NAME, filters)
        for ds in datasets:
            if ds['kmip_uid']:
                # Destroying the key on the KMIP server is best-effort / background.
                self.middleware.create_task(self.middleware.call('kmip.reset_zfs_key', ds['name'], ds['kmip_uid']))
            await self.middleware.call('datastore.delete', DATASET_DATABASE_MODEL_NAME, ds['id'])

    @private
    def validate_encryption_data(self, job, verrors, encryption_dict, schema):
        """
        Validate encryption options, filling `verrors`, and return the
        corresponding ZFS property dict (empty when encryption is disabled or
        validation failed). When `key_file` is set, the hex key is read from
        `job`'s input pipe.
        """
        opts = {}
        if not encryption_dict['enabled']:
            return opts
        key = encryption_dict['key']
        passphrase = encryption_dict['passphrase']
        passphrase_key_format = bool(encryption_dict['passphrase'])
        if passphrase_key_format:
            # Passphrase encryption excludes every key-based option.
            for f in filter(lambda k: encryption_dict[k], ('key', 'key_file', 'generate_key')):
                verrors.add(f'{schema}.{f}', 'Must be disabled when dataset is to be encrypted with passphrase.')
        else:
            # Exactly one source of key material must be specified.
            provided_opts = [k for k in ('key', 'key_file', 'generate_key') if encryption_dict[k]]
            if not provided_opts:
                verrors.add(
                    f'{schema}.key',
                    'Please provide a key or select generate_key to automatically generate '
                    'a key when passphrase is not provided.'
                )
            elif len(provided_opts) > 1:
                for k in provided_opts:
                    verrors.add(f'{schema}.{k}', f'Only one of {", ".join(provided_opts)} must be provided.')
        if not verrors:
            key = key or passphrase
            if encryption_dict['generate_key']:
                key = secrets.token_hex(32)
            elif not key and job:
                job.check_pipe('input')
                key = job.pipes.input.r.read(64)
            # We would like to ensure key matches specified key format
            # NOTE(review): hex() drops leading zeroes, so a key whose leading
            # nibbles are zero fails the 64-char check; also, this round-trip
            # runs for passphrases too (key = key or passphrase above), so a
            # non-hex passphrase would be rejected here - confirm intended.
            try:
                key = hex(int(key, 16))[2:]
                if len(key) != 64:
                    raise ValueError('Invalid key')
            except ValueError:
                verrors.add(f'{schema}.key_file', 'Please specify a valid key')
                return {}
            opts = {
                'keyformat': (ZFSKeyFormat.PASSPHRASE if passphrase_key_format else ZFSKeyFormat.HEX).value.lower(),
                'keylocation': 'prompt',
                'encryption': encryption_dict['algorithm'].lower(),
                'key': key,
                **({'pbkdf2iters': encryption_dict['pbkdf2iters']} if passphrase_key_format else {}),
            }
        return opts

    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Bool('generate_key', default=False),
            Bool('key_file', default=False),
            Int('pbkdf2iters', default=350000, validators=[Range(min_=100000)]),
            Str('passphrase', empty=False, default=None, null=True, private=True),
            Str('key', validators=[Range(min_=64, max_=64)], default=None, null=True, private=True),
        )
    )
    @returns()
    @job(lock=lambda args: f'dataset_change_key_{args[0]}', pipes=['input'], check_pipes=False)
    async def change_key(self, job, id_, options):
        """
        Change encryption properties for `id` encrypted dataset.
        Changing dataset encryption to use passphrase instead of a key is not allowed if:
        1) It has encrypted roots as children which are encrypted with a key
        2) If it is a root dataset where the system dataset is located
        """
        ds = await self.middleware.call('pool.dataset.get_instance_quick', id_, {
            'encryption': True,
        })
        verrors = ValidationErrors()
        if not ds['encrypted']:
            verrors.add('id', 'Dataset is not encrypted')
        elif ds['locked']:
            verrors.add('id', 'Dataset must be unlocked before key can be changed')
        if not verrors:
            if options['passphrase']:
                if options['generate_key'] or options['key']:
                    verrors.add(
                        'change_key_options.key',
                        f'Must not be specified when passphrase for {id_} is supplied.'
                    )
                elif any(
                    d['name'] == d['encryption_root']
                    for d in await self.middleware.call(
                        'pool.dataset.query', [
                            ['id', '^', f'{id_}/'], ['encrypted', '=', True],
                            ['key_format.value', '!=', ZFSKeyFormat.PASSPHRASE.value]
                        ]
                    )
                ):
                    # Key-encrypted encryption roots may not live underneath a
                    # passphrase-encrypted dataset.
                    verrors.add(
                        'change_key_options.passphrase',
                        f'{id_} has children which are encrypted with a key. It is not allowed to have encrypted '
                        'roots which are encrypted with a key as children for passphrase encrypted datasets.'
                    )
                elif id_ == (await self.middleware.call('systemdataset.config'))['pool']:
                    # The system dataset must be available at boot without user
                    # interaction, which a passphrase would require.
                    verrors.add(
                        'id',
                        f'{id_} contains the system dataset. Please move the system dataset to a '
                        'different pool before changing key_format.'
                    )
            else:
                if not options['generate_key'] and not options['key']:
                    for k in ('key', 'passphrase', 'generate_key'):
                        verrors.add(
                            f'change_key_options.{k}',
                            'Either Key or passphrase must be provided.'
                        )
                elif id_.count('/') and await self.middleware.call(
                    'pool.dataset.query', [
                        ['id', 'in', [id_.rsplit('/', i)[0] for i in range(1, id_.count('/') + 1)]],
                        ['key_format.value', '=', ZFSKeyFormat.PASSPHRASE.value], ['encrypted', '=', True]
                    ]
                ):
                    verrors.add(
                        'change_key_options.key',
                        f'{id_} has parent(s) which are encrypted with a passphrase. It is not allowed to have '
                        'encrypted roots which are encrypted with a key as children for passphrase encrypted datasets.'
                    )
        verrors.check()
        encryption_dict = await self.middleware.call(
            'pool.dataset.validate_encryption_data', job, verrors, {
                'enabled': True, 'passphrase': options['passphrase'],
                'generate_key': options['generate_key'], 'key_file': options['key_file'],
                'pbkdf2iters': options['pbkdf2iters'], 'algorithm': 'on', 'key': options['key'],
            }, 'change_key_options'
        )
        verrors.check()
        # The `encryption` algorithm cannot be changed on an existing dataset.
        encryption_dict.pop('encryption')
        key = encryption_dict.pop('key')
        await self.middleware.call(
            'zfs.dataset.change_key', id_, {
                'encryption_properties': encryption_dict,
                'key': key, 'load_key': False,
            }
        )
        # TODO: Handle renames of datasets appropriately wrt encryption roots and db - this will be done when
        # devd changes are in from the OS end
        data = {'encryption_key': key, 'key_format': 'PASSPHRASE' if options['passphrase'] else 'HEX', 'name': id_}
        await self.insert_or_update_encrypted_record(data)
        if options['passphrase'] and ZFSKeyFormat(ds['key_format']['value']) != ZFSKeyFormat.PASSPHRASE:
            # Switching key -> passphrase: stored child keys may have become stale.
            await self.middleware.call('pool.dataset.sync_db_keys', id_)
        data['old_key_format'] = ds['key_format']['value']
        await self.middleware.call_hook('dataset.change_key', data)

    @accepts(Str('id'))
    @returns()
    async def inherit_parent_encryption_properties(self, id_):
        """
        Allows inheriting parent's encryption root discarding its current encryption settings. This
        can only be done where `id` has an encrypted parent and `id` itself is an encryption root.
        """
        ds = await self.middleware.call('pool.dataset.get_instance_quick', id_, {
            'encryption': True,
        })
        if not ds['encrypted']:
            raise CallError(f'Dataset {id_} is not encrypted')
        elif ds['encryption_root'] != id_:
            raise CallError(f'Dataset {id_} is not an encryption root')
        elif ds['locked']:
            raise CallError('Dataset must be unlocked to perform this operation')
        elif '/' not in id_:
            raise CallError('Root datasets do not have a parent and cannot inherit encryption settings')
        else:
            parent = await self.middleware.call(
                'pool.dataset.get_instance_quick', id_.rsplit('/', 1)[0], {
                    'encryption': True,
                }
            )
            if not parent['encrypted']:
                raise CallError('This operation requires the parent dataset to be encrypted')
            else:
                parent_encrypted_root = await self.middleware.call(
                    'pool.dataset.get_instance_quick', parent['encryption_root'], {
                        'encryption': True,
                    }
                )
                # Bug fix: compare the enum member to the enum member. The
                # previous comparison against `ZFSKeyFormat.PASSPHRASE.value`
                # (a plain str) was always False for a non-str Enum, so this
                # child check never executed.
                if ZFSKeyFormat(parent_encrypted_root['key_format']['value']) == ZFSKeyFormat.PASSPHRASE:
                    if any(
                        d['name'] == d['encryption_root']
                        for d in await self.middleware.call(
                            'pool.dataset.query', [
                                ['id', '^', f'{id_}/'], ['encrypted', '=', True],
                                ['key_format.value', '!=', ZFSKeyFormat.PASSPHRASE.value]
                            ]
                        )
                    ):
                        raise CallError(
                            f'{id_} has children which are encrypted with a key. It is not allowed to have encrypted '
                            'roots which are encrypted with a key as children for passphrase encrypted datasets.'
                        )
        await self.middleware.call('zfs.dataset.change_encryption_root', id_, {'load_key': False})
        await self.middleware.call('pool.dataset.sync_db_keys', id_)
        await self.middleware.call_hook('dataset.inherit_parent_encryption_root', id_)
| 12,428 | Python | .py | 245 | 36.134694 | 119 | 0.541759 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,880 | snapshot_count.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/snapshot_count.py | from middlewared.schema import accepts, returns, Int, Str
from middlewared.service import Service
class PoolDatasetService(Service):

    class Config:
        namespace = "pool.dataset"

    @accepts(Str("dataset"), roles=['DATASET_READ'])
    @returns(Int())
    def snapshot_count(self, dataset):
        """
        Returns snapshot count for specified `dataset`.
        """
        # zfs.snapshot.count returns a dataset -> count mapping.
        counts = self.middleware.call_sync("zfs.snapshot.count", [dataset])
        return counts[dataset]
| 465 | Python | .py | 12 | 32.75 | 82 | 0.690423 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,881 | dataset_processes.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_processes.py | import contextlib
import errno
import os
import re
from middlewared.plugins.zfs_.utils import zvol_name_to_path
from middlewared.schema import accepts, Ref, returns, Str
from middlewared.service import CallError, item_method, private, Service
from .utils import dataset_mountpoint
RE_ZD = re.compile(r'^/dev/zd[0-9]+$')  # zvol block device nodes, e.g. /dev/zd0
class PoolDatasetService(Service):
class Config:
namespace = 'pool.dataset'
@item_method
@accepts(Str('id', required=True), roles=['READONLY_ADMIN'])
@returns(Ref('processes'))
async def processes(self, oid):
"""
Return a list of processes using this dataset.
Example return value:
[
{
"pid": 2520,
"name": "smbd",
"service": "cifs"
},
{
"pid": 97778,
"name": "minio",
"cmdline": "/usr/local/bin/minio -C /usr/local/etc/minio server --address=0.0.0.0:9000 --quiet /mnt/tank/wk"
}
]
"""
dataset = await self.middleware.call('pool.dataset.get_instance_quick', oid, {'encryption': True})
if dataset['locked']:
return []
paths = [zvol_name_to_path(dataset['name'])]
if mountpoint := dataset_mountpoint(dataset):
paths.append(mountpoint)
return await self.middleware.call('pool.dataset.processes_using_paths', paths)
@private
async def kill_processes(self, oid, control_services, max_tries=5):
need_restart_services = []
need_stop_services = []
midpid = os.getpid()
for process in await self.middleware.call('pool.dataset.processes', oid):
service = process.get('service')
if service is not None:
if any(
attachment_delegate.service == service
for attachment_delegate in await self.middleware.call('pool.dataset.get_attachment_delegates')
):
need_restart_services.append(service)
else:
need_stop_services.append(service)
if (need_restart_services or need_stop_services) and not control_services:
raise CallError('Some services have open files and need to be restarted or stopped', errno.EBUSY, {
'code': 'control_services',
'restart_services': need_restart_services,
'stop_services': need_stop_services,
'services': need_restart_services + need_stop_services,
})
for i in range(max_tries):
processes = await self.middleware.call('pool.dataset.processes', oid)
if not processes:
return
for process in processes:
if process['pid'] == midpid:
self.logger.warning(
'The main middleware process %r (%r) currently is holding dataset %r',
process['pid'], process['cmdline'], oid
)
continue
service = process.get('service')
if service is not None:
if any(
attachment_delegate.service == service
for attachment_delegate in await self.middleware.call('pool.dataset.get_attachment_delegates')
):
self.logger.info('Restarting service %r that holds dataset %r', service, oid)
await self.middleware.call('service.restart', service)
else:
self.logger.info('Stopping service %r that holds dataset %r', service, oid)
await self.middleware.call('service.stop', service)
else:
self.logger.info('Killing process %r (%r) that holds dataset %r', process['pid'],
process['cmdline'], oid)
try:
await self.middleware.call('service.terminate_process', process['pid'])
except CallError as e:
self.logger.warning('Error killing process: %r', e)
processes = await self.middleware.call('pool.dataset.processes', oid)
if not processes:
return
self.logger.info('The following processes don\'t want to stop: %r', processes)
raise CallError('Unable to stop processes that have open files', errno.EBUSY, {
'code': 'unstoppable_processes',
'processes': processes,
})
@private
def processes_using_paths(self, paths, include_paths=False, include_middleware=False):
"""
Find processes using paths supplied via `paths`. Path may be an absolute path for
a directory (e.g. /var/db/system) or a path in /dev/zvol or /dev/zd*
`include_paths`: include paths that are open by the process in output. By default
this is not included in output for performance reasons.
`include_middleware`: include files opened by the middlewared process in output.
These are not included by default.
"""
exact_matches = set()
include_devs = []
for path in paths:
if RE_ZD.match(path):
exact_matches.add(path)
else:
try:
if path.startswith("/dev/zvol/"):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for f in files:
exact_matches.add(os.path.realpath(os.path.join(root, f)))
else:
exact_matches.add(os.path.realpath(path))
else:
include_devs.append(os.stat(path).st_dev)
except FileNotFoundError:
continue
result = []
if include_devs or exact_matches:
for pid in os.listdir('/proc'):
if not pid.isdigit() or (not include_middleware and (int(pid) == os.getpid())):
continue
with contextlib.suppress(FileNotFoundError, ProcessLookupError):
# FileNotFoundError for when a process is killed/exits
# while we're iterating
found = False
found_paths = set()
for f in os.listdir(f'/proc/{pid}/fd'):
fd = f'/proc/{pid}/fd/{f}'
is_link = False
realpath = None
with contextlib.suppress(FileNotFoundError):
# Have second suppression here so that we don't lose list of files
# if we have TOCTOU issue on one of files.
#
# We want to include file in list of paths in the following
# situations:
#
# 1. File is regular file and has same device id as specified path
# 2. File is a symbolic link and exactly matches a provided /dev/zvol or /dev/zd path
if (
(include_devs and os.stat(fd).st_dev in include_devs) or
(
exact_matches and
(is_link := os.path.islink(fd)) and
(realpath := os.path.realpath(fd)) in exact_matches
)
):
found = True
if include_paths:
if is_link:
# This is a path in `/dev/zvol` or `/dev/zd*`
found_paths.add(realpath)
else:
# We need to readlink to convert `/proc/<pid>/fd/<fd>` to
# the file's path name.
found_paths.add(os.readlink(fd))
if found:
with open(f'/proc/{pid}/comm') as comm:
name = comm.read().strip()
proc = {'pid': pid, 'name': name}
if svc := self.middleware.call_sync('service.identify_process', name):
proc['service'] = svc
else:
with open(f'/proc/{pid}/cmdline') as cmd:
cmdline = cmd.read().replace('\u0000', ' ').strip()
proc['cmdline'] = cmdline
if include_paths:
proc['paths'] = sorted(found_paths)
result.append(proc)
return result
| 9,024 | Python | .py | 180 | 31.966667 | 120 | 0.498015 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,882 | dataset.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset.py | import copy
import errno
import os
import middlewared.sqlalchemy as sa
from middlewared.plugins.boot import BOOT_POOL_NAME_VALID
from middlewared.plugins.zfs_.exceptions import ZFSSetPropertyError
from middlewared.plugins.zfs_.validation_utils import validate_dataset_name
from middlewared.schema import (
accepts, Any, Attribute, EnumMixin, Bool, Dict, Int, List, NOT_PROVIDED, Patch, Ref, returns, Str
)
from middlewared.service import (
CallError, CRUDService, filterable, InstanceNotFound, item_method, job, private, ValidationErrors
)
from middlewared.utils import filter_list
from middlewared.validators import Exact, Match, Or, Range
from .utils import (
dataset_mountpoint, get_dataset_parents, get_props_of_interest_mapping, none_normalize,
ZFS_COMPRESSION_ALGORITHM_CHOICES, ZFS_CHECKSUM_CHOICES, ZFSKeyFormat, ZFS_MAX_DATASET_NAME_LEN,
ZFS_VOLUME_BLOCK_SIZE_CHOICES, TNUserProp
)
class Inheritable(EnumMixin, Attribute):
    """
    Schema attribute wrapper that additionally accepts the literal string
    'INHERIT', meaning "inherit this ZFS property from the parent dataset"
    instead of a concrete value validated by the wrapped schema.
    """

    def __init__(self, schema, **kwargs):
        self.schema = schema
        # NOTE: the kwargs.pop() must remain the last clause -- due to
        # short-circuiting it only runs (and only consumes the 'has_default'
        # kwarg) when the wrapped schema has no default of its own and no
        # explicit default was passed in.
        if not self.schema.has_default and 'default' not in kwargs and kwargs.pop('has_default', True):
            kwargs['default'] = 'INHERIT'
        super(Inheritable, self).__init__(self.schema.name, **kwargs)

    def clean(self, value):
        # 'INHERIT' bypasses the wrapped schema's cleaning entirely.
        if value == 'INHERIT':
            return value
        elif value is NOT_PROVIDED and self.has_default:
            # deepcopy so callers cannot mutate the shared default.
            return copy.deepcopy(self.default)
        return self.schema.clean(value)

    def validate(self, value):
        # 'INHERIT' is always valid; anything else must satisfy the wrapped schema.
        if value == 'INHERIT':
            return
        return self.schema.validate(value)

    def to_json_schema(self, parent=None):
        # Present the attribute as: <wrapped type> OR the string 'INHERIT'.
        schema = self.schema.to_json_schema(parent)
        type_schema = schema.pop('type')
        schema['nullable'] = 'null' in type_schema
        if schema['nullable']:
            type_schema.remove('null')
            if len(type_schema) == 1:
                type_schema = type_schema[0]
        schema['anyOf'] = [{'type': type_schema}, {'type': 'string', 'enum': ['INHERIT']}]
        return schema
class PoolDatasetEncryptionModel(sa.Model):
    # Database table persisting per-dataset encryption key material.
    __tablename__ = 'storage_encrypteddataset'

    id = sa.Column(sa.Integer(), primary_key=True)
    # Full ZFS dataset name, e.g. "tank/secure".
    name = sa.Column(sa.String(255))
    # Key material stored encrypted at rest (EncryptedText); nullable --
    # presumably unset when the key is not kept locally (verify against callers).
    encryption_key = sa.Column(sa.EncryptedText(), nullable=True)
    # KMIP identifier when the key is escrowed to a KMIP server; None otherwise.
    kmip_uid = sa.Column(sa.String(255), nullable=True, default=None)
class PoolDatasetService(CRUDService):

    # API schema for a single entry returned by pool.dataset.query.
    # The splatted comprehension adds one property sub-dict (parsed/rawvalue/
    # value/source/source_info) per ZFS property of interest; 'mountpoint' is
    # excluded there because it is flattened to a plain string below.
    ENTRY = Dict(
        'pool_dataset_entry',
        Str('id', required=True),
        Str('type', required=True),
        Str('name', required=True),
        Str('pool', required=True),
        Bool('encrypted'),
        Str('encryption_root', null=True),
        Bool('key_loaded', null=True),
        List('children', required=True),
        Dict('user_properties', additional_attrs=True, required=True),
        Bool('locked'),
        *[Dict(
            p[1] or p[0],
            Any('parsed', null=True),
            Str('rawvalue', null=True),
            Str('value', null=True),
            Str('source', null=True),
            Any('source_info', null=True),
        ) for p in get_props_of_interest_mapping() if (p[1] or p[0]) != 'mountpoint'],
        Str('mountpoint', null=True),
    )

    class Config:
        cli_namespace = 'storage.dataset'
        datastore_primary_key_type = 'string'
        event_send = False
        namespace = 'pool.dataset'
        role_prefix = 'DATASET'
        role_separate_delete = True
@private
async def get_instance_quick(self, name, options=None):
options = options or {}
properties = set(options.get('properties') or [])
properties.add('mountpoint')
if options.get('encryption'):
properties.update(['encryption', 'keystatus', 'mountpoint', 'keyformat', 'encryptionroot'])
return await self.middleware.call(
'pool.dataset.get_instance', name, {
'extra': {
'retrieve_children': options.get('retrieve_children', False),
'properties': list(properties),
}
}
)
def _internal_user_props(self):
return TNUserProp.values()
    def __transform(self, datasets, retrieve_children, children_filters):
        """
        We need to transform the data zfs gives us to make it consistent/user-friendly,
        making it match whatever pool.dataset.{create,update} uses as input.
        """
        def transform(dataset):
            # Promote each ZFS property of interest out of dataset['properties']
            # to a top-level key (renamed when a new name is defined), applying
            # the optional value-normalization method. Mutates `dataset` in place.
            for orig_name, new_name, method in get_props_of_interest_mapping():
                if orig_name not in dataset['properties']:
                    continue
                i = new_name or orig_name
                dataset[i] = dataset['properties'][orig_name]
                if method:
                    dataset[i]['value'] = method(dataset[i]['value'])

            if 'mountpoint' in dataset:
                # This is treated specially to keep backwards compatibility with API
                dataset['mountpoint'] = dataset['mountpoint']['value']
            if dataset['type'] == 'VOLUME':
                dataset['mountpoint'] = None

            # Anything with ':' in its name is a user property; the internal
            # TrueNAS ones are filtered out since they surface elsewhere.
            dataset['user_properties'] = {
                k: v for k, v in dataset['properties'].items() if ':' in k and k not in self._internal_user_props()
            }
            del dataset['properties']

            if all(k in dataset for k in ('encrypted', 'key_loaded')):
                dataset['locked'] = dataset['encrypted'] and not dataset['key_loaded']

            if retrieve_children:
                # Recurse into children, dropping internal datasets along the way.
                rv = []
                for child in filter_list(dataset['children'], children_filters):
                    rv.append(transform(child))
                dataset['children'] = rv

            return dataset

        rv = []
        for dataset in datasets:
            rv.append(transform(dataset))
        return rv
@private
async def internal_datasets_filters(self):
# We get filters here which ensure that we don't match an internal dataset
return [
['pool', 'nin', BOOT_POOL_NAME_VALID],
['id', 'rnin', '/.system'],
['id', 'rnin', '/ix-applications/'],
['id', 'rnin', '/ix-apps'],
['id', 'rnin', '/.ix-virt'],
]
@private
async def is_internal_dataset(self, dataset):
pool = dataset.split('/')[0]
return not bool(filter_list([{'id': dataset, 'pool': pool}], await self.internal_datasets_filters()))
    @filterable
    def query(self, filters, options):
        """
        Query Pool Datasets with `query-filters` and `query-options`.

        We provide two ways to retrieve datasets. The first is a flat structure (default), where
        all datasets in the system are returned as separate objects which contain all data
        there is for their children. This retrieval type is slightly slower because of duplicates in each object.
        The second type is hierarchical, where only top level datasets are returned in the list. They contain all the
        children in the `children` key. This retrieval type is slightly faster.
        These options are controlled by the `query-options.extra.flat` attribute (default true).

        In some cases it might be desirable to only retrieve details of a dataset itself and not its children, in
        this case `query-options.extra.retrieve_children` should be explicitly specified and set to `false` which
        will result in children not being retrieved.

        In case only some properties are desired to be retrieved for datasets, consumer should specify
        `query-options.extra.properties` which when `null` ( which is the default ) will retrieve all properties
        and otherwise a list can be specified like `["type", "used", "available"]` to retrieve selective properties.
        If no properties are desired, in that case an empty list should be sent.

        `query-options.extra.snapshots` can be set to retrieve snapshot(s) of dataset in question.

        `query-options.extra.snapshots_recursive` can be set to retrieve snapshot(s) recursively of dataset in
        question. If `query-options.extra.snapshots_recursive` and `query-options.extra.snapshots` are set,
        snapshot(s) will be retrieved recursively.

        `query-options.extra.snapshots_properties` can be specified to list out properties which should be retrieved
        for snapshot(s) related to each dataset. By default only name of the snapshot would be retrieved, however
        if `null` is specified all properties of the snapshot would be retrieved in this case.
        """
        # Optimization for cases in which they can be filtered at zfs.dataset.query
        zfsfilters = []
        filters = filters or []
        if len(filters) == 1 and len(filters[0]) == 3 and list(filters[0][:2]) == ['id', '=']:
            # Single exact-id filter: push it down to the zfs layer.
            zfsfilters.append(copy.deepcopy(filters[0]))

        # Internal datasets are always excluded, both from the final result
        # (appended to `filters`) and when recursing into children (passed to
        # __transform below).
        internal_datasets_filters = self.middleware.call_sync('pool.dataset.internal_datasets_filters')
        filters.extend(internal_datasets_filters)

        extra = copy.deepcopy(options.get('extra', {}))
        retrieve_children = extra.get('retrieve_children', True)
        props = extra.get('properties')
        snapshots = extra.get('snapshots')
        snapshots_recursive = extra.get('snapshots_recursive')
        snapshots_count = extra.get('snapshots_count')
        return filter_list(
            self.__transform(self.middleware.call_sync(
                'zfs.dataset.query', zfsfilters, {
                    'extra': {
                        'flat': extra.get('flat', True),
                        'retrieve_children': retrieve_children,
                        'properties': props,
                        'snapshots': snapshots,
                        'snapshots_recursive': snapshots_recursive,
                        'snapshots_count': snapshots_count,
                        'snapshots_properties': extra.get('snapshots_properties', [])
                    }
                }
            ), retrieve_children, internal_datasets_filters,
            ), filters, options
        )
@private
async def get_create_update_user_props(self, user_properties, update=False):
props = {}
for prop in user_properties:
if 'value' in prop:
props[prop['key']] = {'value': prop['value']} if update else prop['value']
elif prop.get('remove'):
props[prop['key']] = {'source': 'INHERIT'}
return props
    async def __common_validation(self, verrors, schema, data, mode, parent=None, cur_dataset=None):
        """
        Shared validation for pool.dataset create/update.

        Accumulates errors into `verrors` (keys prefixed with `schema`) and may
        mutate `data` in place to fill in defaults (recordsize/volblocksize on
        dRAID pools, normalized `user_properties_update`). `parent` is the
        parent dataset (queried here when not supplied); `cur_dataset` is the
        current dataset state, supplied when mode == 'UPDATE'.
        """
        assert mode in ('CREATE', 'UPDATE')

        if parent is None:
            parent = await self.middleware.call(
                'pool.dataset.query',
                [('id', '=', data['name'].rsplit('/', 1)[0])],
                {'extra': {'retrieve_children': False}}
            )

        if await self.is_internal_dataset(data['name']):
            verrors.add(
                f'{schema}.name',
                f'{data["name"]!r} is using system internal managed dataset. Please specify a different parent.'
            )

        if not parent:
            # This will only be true on dataset creation
            if data['create_ancestors']:
                verrors.add(
                    f'{schema}.name',
                    'Please specify a pool which exists for the dataset/volume to be created'
                )
            else:
                verrors.add(f'{schema}.name', 'Parent dataset does not exist for specified name')
        else:
            parent = parent[0]
            if mode == 'CREATE' and parent['readonly']['rawvalue'] == 'on':
                # creating a zvol/dataset when the parent object is set to readonly=on
                # is allowed via ZFS. However, if it's a dataset an error will be raised
                # stating that it was unable to be mounted. If it's a zvol, then the service
                # that tries to open the zvol device will get read only related errors.
                # Currently, there is no way to mount a dataset in the webUI so we will
                # prevent this scenario from occuring by preventing creation if the parent
                # is set to readonly=on.
                verrors.add(
                    f'{schema}.readonly',
                    f'Turn off readonly mode on {parent["id"]} to create {data["name"].rsplit("/")[0]}'
                )

        # We raise validation errors here as parent could be used down to validate other aspects of the dataset
        verrors.check()

        dataset_pool_is_draid = await self.middleware.call('pool.is_draid_pool', parent['pool'])
        if data['type'] == 'FILESYSTEM':
            to_check = {'acltype': None, 'aclmode': None}
            if mode == 'UPDATE':
                # Prevent users from changing acltype settings underneath an active SMB share
                # If this dataset hosts an SMB share, then prompt the user to first delete the share,
                # make the dataset change, the recreate the share.
                keys = ('acltype',)
                if any([data.get(key) for key in keys]):
                    ds_attachments = await self.middleware.call('pool.dataset.attachments', data['name'])
                    if smb_attachments := [share for share in ds_attachments if share['type'] == "SMB Share"]:
                        share_names = [smb_share['attachments'] for smb_share in smb_attachments]
                        for key in (k for k in keys if data.get(k)):
                            if cur_dataset and (cur_dataset[key]['value'] == data.get(key)):
                                # No-op change (same value as current) is allowed.
                                continue

                            verrors.add(
                                f'{schema}.{key}',
                                'This dataset is hosting SMB shares. '
                                f'Before {key} can be updated the following shares must be disabled: '
                                f'{share_names[0]}. '
                                'The shares may be re-enabled after the change.'
                            )

            # Prevent users from setting incorrect combinations of aclmode and acltype parameters
            # The final value to be set may have one of several different possible origins
            # 1. The parameter may be provided in `data` (explicit creation or update)
            # 2. The parameter may be original value stored in dataset and not touched by update payload
            # 3. The parameter may be omitted from payload (data) in creation (defaulted to INHERIT)
            #
            # If result of 1-3 above for aclmode is INHERIT, then value will be retrieved from parent
            #
            # The configuration options we want to avoid are:
            # NFSV4 + DISCARD (this will result in ACL being stripped on chmod operation)
            #
            # POSIX / OFF + non-DISCARD (this will potentially prevent ZFS_ACL_TRIVAL ZFS pflag from being
            # set and may result in spurious permissions errors.
            for key in ('acltype', 'aclmode'):
                match (val := data.get(key) or (cur_dataset[key]['value'] if cur_dataset else 'INHERIT')):
                    case 'INHERIT':
                        to_check[key] = parent[key]['value']
                    case 'NFSV4' | 'POSIX' | 'OFF' | 'PASSTHROUGH' | 'RESTRICTED' | 'DISCARD':
                        to_check[key] = val
                    case _:
                        raise CallError(f'{val}: unexpected value for {key}')

            if to_check['acltype'] in ('POSIX', 'OFF') and to_check['aclmode'] != 'DISCARD':
                verrors.add(f'{schema}.aclmode', 'Must be set to DISCARD when acltype is POSIX or OFF')
            elif to_check['acltype'] == 'NFSV4' and to_check['aclmode'] == 'DISCARD':
                verrors.add(f'{schema}.aclmode', 'DISCARD aclmode may not be set for NFSv4 acl type')

            # Volume-only fields are rejected on filesystems.
            for i in ('force_size', 'sparse', 'volsize', 'volblocksize'):
                if i in data:
                    verrors.add(f'{schema}.{i}', 'This field is not valid for FILESYSTEM')

            # special_small_blocks must be 0 or a power of two in [512B, 1M].
            if (c_value := data.get('special_small_block_size')) is not None:
                if c_value != 'INHERIT' and not (
                    (c_value == 0 or 512 <= c_value <= 1048576) and ((c_value & (c_value - 1)) == 0)
                ):
                    verrors.add(
                        f'{schema}.special_small_block_size',
                        'This field must be zero or a power of 2 from 512B to 1M'
                    )

            if rs := data.get('recordsize'):
                if rs != 'INHERIT' and rs not in await self.middleware.call(
                    'pool.dataset.recordsize_choices', parent['pool']
                ):
                    verrors.add(f'{schema}.recordsize', f'{rs!r} is an invalid recordsize.')
            elif mode == 'CREATE' and dataset_pool_is_draid:
                # We set recordsize to 1M by default on dataset creation if not explicitly specified
                data['recordsize'] = '1M'
        elif data['type'] == 'VOLUME':
            if mode == 'CREATE':
                if 'volsize' not in data:
                    verrors.add(f'{schema}.volsize', 'This field is required for VOLUME')
                if 'volblocksize' not in data:
                    if dataset_pool_is_draid:
                        data['volblocksize'] = '128K'
                    else:
                        # with openzfs 2.2, zfs sets 16k as default https://github.com/openzfs/zfs/pull/12406
                        data['volblocksize'] = '16K'

            if dataset_pool_is_draid and 'volblocksize' in data:
                if ZFS_VOLUME_BLOCK_SIZE_CHOICES[data['volblocksize']] < 32 * 1024:
                    verrors.add(
                        f'{schema}.volblocksize',
                        'Volume block size must be greater than or equal to 32K for dRAID pools'
                    )

            # Filesystem-only fields are rejected on volumes.
            for i in (
                'aclmode', 'acltype', 'atime', 'casesensitivity', 'quota', 'refquota', 'recordsize',
            ):
                if i in data:
                    verrors.add(f'{schema}.{i}', 'This field is not valid for VOLUME')

            if 'volsize' in data and parent:
                avail_mem = int(parent['available']['rawvalue'])
                if mode == 'UPDATE':
                    # On resize the zvol's currently-used space is available to it too.
                    avail_mem += int((await self.get_instance(data['name']))['used']['rawvalue'])
                if (
                    data['volsize'] > (avail_mem * 0.80) and
                    not data.get('force_size', False)
                ):
                    verrors.add(
                        f'{schema}.volsize',
                        'It is not recommended to use more than 80% of your available space for VOLUME'
                    )

                if 'volblocksize' in data:
                    # Translate e.g. '512'/'512B' -> 512 and '16K' -> 16384.
                    if data['volblocksize'][:3] == '512':
                        block_size = 512
                    else:
                        block_size = int(data['volblocksize'][:-1]) * 1024

                    if data['volsize'] % block_size:
                        verrors.add(
                            f'{schema}.volsize',
                            'Volume size should be a multiple of volume block size'
                        )

        if mode == 'UPDATE':
            if data.get('user_properties_update') and not data.get('user_properties'):
                for index, prop in enumerate(data['user_properties_update']):
                    prop_schema = f'{schema}.user_properties_update.{index}'
                    if 'value' in prop and prop.get('remove'):
                        verrors.add(f'{prop_schema}.remove', 'When "value" is specified, this cannot be set')
                    elif not any(k in prop for k in ('value', 'remove')):
                        verrors.add(f'{prop_schema}.value', 'Either "value" or "remove" must be specified')
            elif data.get('user_properties') and data.get('user_properties_update'):
                verrors.add(
                    f'{schema}.user_properties_update',
                    'Should not be specified when "user_properties" are explicitly specified'
                )
            elif data.get('user_properties'):
                # Let's normalize this so that we create/update/remove user props accordingly
                user_props = {p['key'] for p in data['user_properties']}
                data['user_properties_update'] = data['user_properties']
                for prop_key in [k for k in cur_dataset['user_properties'] if k not in user_props]:
                    data['user_properties_update'].append({
                        'key': prop_key,
                        'remove': True,
                    })
    @accepts(Dict(
        'pool_dataset_create',
        Str('name', required=True),
        Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
        Int('volsize'),  # IN BYTES
        Str('volblocksize', enum=list(ZFS_VOLUME_BLOCK_SIZE_CHOICES)),
        Bool('sparse'),
        Bool('force_size'),
        Inheritable(Str('comments')),
        Inheritable(Str('sync', enum=['STANDARD', 'ALWAYS', 'DISABLED'])),
        Inheritable(Str('snapdev', enum=['HIDDEN', 'VISIBLE']), has_default=False),
        Inheritable(Str('compression', enum=ZFS_COMPRESSION_ALGORITHM_CHOICES)),
        Inheritable(Str('atime', enum=['ON', 'OFF']), has_default=False),
        Inheritable(Str('exec', enum=['ON', 'OFF'])),
        Inheritable(Str('managedby', empty=False)),
        Int('quota', null=True, validators=[Or(Range(min_=1024 ** 3), Exact(0))]),
        Inheritable(Int('quota_warning', validators=[Range(0, 100)])),
        Inheritable(Int('quota_critical', validators=[Range(0, 100)])),
        Int('refquota', null=True, validators=[Or(Range(min_=1024 ** 3), Exact(0))]),
        Inheritable(Int('refquota_warning', validators=[Range(0, 100)])),
        Inheritable(Int('refquota_critical', validators=[Range(0, 100)])),
        Int('reservation'),
        Int('refreservation'),
        Inheritable(Int('special_small_block_size'), has_default=False),
        Inheritable(Int('copies')),
        Inheritable(Str('snapdir', enum=['DISABLED', 'VISIBLE', 'HIDDEN'])),
        Inheritable(Str('deduplication', enum=['ON', 'VERIFY', 'OFF'])),
        Inheritable(Str('checksum', enum=ZFS_CHECKSUM_CHOICES)),
        Inheritable(Str('readonly', enum=['ON', 'OFF'])),
        Inheritable(Str('recordsize'), has_default=False),
        Inheritable(Str('casesensitivity', enum=['SENSITIVE', 'INSENSITIVE']), has_default=False),
        Inheritable(Str('aclmode', enum=['PASSTHROUGH', 'RESTRICTED', 'DISCARD']), has_default=False),
        Inheritable(Str('acltype', enum=['OFF', 'NFSV4', 'POSIX']), has_default=False),
        Str('share_type', default='GENERIC', enum=['GENERIC', 'MULTIPROTOCOL', 'NFS', 'SMB', 'APPS']),
        Ref('encryption_options'),
        Bool('encryption', default=False),
        Bool('inherit_encryption', default=True),
        List(
            'user_properties',
            items=[Dict(
                'user_property',
                Str('key', required=True, validators=[Match(r'.*:.*')]),
                Str('value', required=True),
            )],
        ),
        Bool('create_ancestors', default=False),
        register=True,
    ), audit='Pool dataset create', audit_extended=lambda data: data['name'])
    async def do_create(self, data):
        """
        Creates a dataset/zvol.

        `volsize` is required for type=VOLUME and is supposed to be a multiple of the block size.
        `sparse` and `volblocksize` are only used for type=VOLUME.

        `encryption` when enabled will create an ZFS encrypted root dataset for `name` pool.
        There is 1 case where ZFS encryption is not allowed for a dataset:
        1) If the parent dataset is encrypted with a passphrase and `name` is being created
           with a key for encrypting the dataset.

        `encryption_options` specifies configuration for encryption of dataset for `name` pool.
        `encryption_options.passphrase` must be specified if encryption for dataset is desired with a passphrase
        as a key.
        Otherwise a hex encoded key can be specified by providing `encryption_options.key`.
        `encryption_options.generate_key` when enabled automatically generates the key to be used
        for dataset encryption.

        It should be noted that keys are stored by the system for automatic locking/unlocking
        on import/export of encrypted datasets. If that is not desired, dataset should be created
        with a passphrase as a key.

        .. examples(websocket)::

          Create a dataset within tank pool.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.dataset.create,
                "params": [{
                    "name": "tank/myuser",
                    "comments": "Dataset for myuser"
                }]
            }
        """
        verrors = ValidationErrors()
        acl_to_set = None

        # --- Name validation + share_type-driven presets -------------------
        if '/' not in data['name']:
            verrors.add('pool_dataset_create.name', 'You need a full name, e.g. pool/newdataset')
        elif not validate_dataset_name(data['name']):
            verrors.add('pool_dataset_create.name', 'Invalid dataset name')
        elif len(data['name']) > ZFS_MAX_DATASET_NAME_LEN:
            verrors.add(
                'pool_dataset_create.name',
                f'Dataset name length should be less than or equal to {ZFS_MAX_DATASET_NAME_LEN}',
            )
        elif data['name'][-1] == ' ':
            verrors.add(
                'pool_dataset_create.name',
                'Trailing spaces are not permitted in dataset names'
            )
        else:
            parent_name = data['name'].rsplit('/', 1)[0]
            if data['create_ancestors']:
                # If we want to create ancestors, let's just ensure that we have at least one parent which exists
                while not await self.middleware.call(
                    'pool.dataset.query',
                    [['id', '=', parent_name]], {
                        'extra': {'retrieve_children': False, 'properties': []}
                    }
                ):
                    if '/' not in parent_name:
                        # Root dataset / pool does not exist
                        break
                    parent_name = parent_name.rsplit('/', 1)[0]

            parent_ds = await self.middleware.call(
                'pool.dataset.query',
                [('id', '=', parent_name)],
                {'extra': {'retrieve_children': False}}
            )

            # share_type presets force the ZFS knobs each sharing protocol expects.
            match data['share_type']:
                case 'SMB':
                    data['casesensitivity'] = 'INSENSITIVE'
                    data['acltype'] = 'NFSV4'
                    data['aclmode'] = 'RESTRICTED'
                case 'APPS' | 'MULTIPROTOCOL' | 'NFS':
                    data['casesensitivity'] = 'SENSITIVE'
                    data['atime'] = 'OFF'
                    data['acltype'] = 'NFSV4'
                    data['aclmode'] = 'PASSTHROUGH'
                case _:
                    pass

            await self.__common_validation(verrors, 'pool_dataset_create', data, 'CREATE', parent_ds)

        # Raises on any error above; code below may assume parent_ds is populated.
        verrors.check()
        parent_ds = parent_ds[0]
        parent_mp = parent_ds['mountpoint']
        if parent_ds['locked']:
            # Locked parent: its path cannot be stat'ed, treat as ACL-less.
            parent_st = {'acl': False}
        else:
            parent_st = await self.middleware.call('filesystem.stat', parent_mp)
            parent_st['acltype'] = await self.middleware.call('filesystem.path_get_acltype', parent_mp)

        mountpoint = os.path.join('/mnt', data['name'])
        try:
            await self.middleware.call('filesystem.stat', mountpoint)
            verrors.add('pool_dataset_create.name', f'Path {mountpoint} already exists')
        except CallError as e:
            # ENOENT is the expected (good) case here.
            if e.errno != errno.ENOENT:
                raise

        # --- Determine ACL to apply after creation, per share_type ---------
        if data['share_type'] == 'SMB':
            if parent_st['acl'] and parent_st['acltype'] == 'NFS4':
                acl_to_set = await self.middleware.call('filesystem.get_inherited_acl', {
                    'path': os.path.join('/mnt', parent_name),
                })
            else:
                acl_to_set = (await self.middleware.call('filesystem.acltemplate.by_path', {
                    'query-filters': [('name', '=', 'NFS4_RESTRICTED')],
                    'format-options': {'canonicalize': True, 'ensure_builtins': True},
                }))[0]['acl']
        elif data['share_type'] == 'APPS':
            if parent_st['acl'] and parent_st['acltype'] == 'NFS4':
                acl_to_set = await self.middleware.call('filesystem.get_inherited_acl', {
                    'path': os.path.join('/mnt', parent_name),
                })
            else:
                acl_to_set = (await self.middleware.call('filesystem.acltemplate.by_path', {
                    'query-filters': [('name', '=', 'NFS4_RESTRICTED')],
                    'format-options': {'canonicalize': True, 'ensure_builtins': True},
                }))[0]['acl']

            # Grant the apps user (uid 568) inheritable MODIFY on top.
            acl_to_set.append({
                'tag': 'USER',
                'id': 568,
                'perms': {'BASIC': 'MODIFY'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            })
        elif data['share_type'] in ('MULTIPROTOCOL', 'NFS'):
            if parent_st['acl'] and parent_st['acltype'] == 'NFS4':
                acl_to_set = await self.middleware.call('filesystem.get_inherited_acl', {
                    'path': os.path.join('/mnt', parent_name),
                })

        if acl_to_set:
            try:
                await self.middleware.call(
                    'filesystem.check_acl_execute',
                    mountpoint, acl_to_set, -1, -1
                )
            except CallError as e:
                if e.errno != errno.EPERM:
                    raise

                verrors.add('pool_dataset_create.share_type', e.errmsg)

        if data['type'] == 'FILESYSTEM' and data.get('acltype', 'INHERIT') != 'INHERIT':
            data['aclinherit'] = 'PASSTHROUGH' if data['acltype'] == 'NFSV4' else 'DISCARD'

        if parent_ds['locked']:
            verrors.add(
                'pool_dataset_create.name',
                f'{data["name"].rsplit("/", 1)[0]} must be unlocked to create {data["name"]}.'
            )

        # --- Encryption sanity checks --------------------------------------
        encryption_dict = {}
        inherit_encryption_properties = data.pop('inherit_encryption')
        if not inherit_encryption_properties:
            encryption_dict = {'encryption': 'off'}

        # Walk the ancestors to catch encrypted-inside-unencrypted (and vice
        # versa) layouts that are not supported.
        unencrypted_parent = False
        for parent in get_dataset_parents(data['name']):
            try:
                check_ds = await self.middleware.call('pool.dataset.get_instance_quick', parent, {'encryption': True})
            except InstanceNotFound:
                continue
            if check_ds['encrypted']:
                if unencrypted_parent:
                    verrors.add(
                        'pool_dataset_create.name',
                        'Creating an encrypted dataset within an unencrypted dataset is not allowed. '
                        f'In this case, {unencrypted_parent!r} must be moved to an unencrypted dataset.'
                    )
                    break
                elif data['encryption'] is False and not inherit_encryption_properties:
                    # This was a design decision when native zfs encryption support was added to provide
                    # a simple straight workflow not allowing end users to create unencrypted datasets
                    # within an encrypted dataset.
                    verrors.add(
                        'pool_dataset_create.encryption',
                        f'Cannot create an unencrypted dataset within an encrypted dataset ({parent}).'
                    )
                    break
            else:
                # The unencrypted parent story is pool/encrypted/unencrypted/new_ds so in this case
                # we want to make sure user does not specify inherit encryption as it will lead to new_ds
                # not getting encryption props from pool/encrypted.
                unencrypted_parent = parent

        if data['encryption']:
            if inherit_encryption_properties:
                verrors.add('pool_dataset_create.inherit_encryption', 'Must be disabled when encryption is enabled.')

            if not data['encryption_options']['passphrase']:
                # We want to ensure that we don't have any parent for this dataset which is encrypted with PASSPHRASE
                # because we don't allow children to be unlocked while parent is locked
                parent_encryption_root = parent_ds['encryption_root']
                if (
                    parent_encryption_root and ZFSKeyFormat(
                        (await self.get_instance(parent_encryption_root))['key_format']['value']
                    ) == ZFSKeyFormat.PASSPHRASE
                ):
                    verrors.add(
                        'pool_dataset_create.encryption',
                        'Passphrase encrypted datasets cannot have children encrypted with a key.'
                    )

        # Pops 'encryption'/'encryption_options' from data unconditionally.
        encryption_dict = await self.middleware.call(
            'pool.dataset.validate_encryption_data', None, verrors,
            {'enabled': data.pop('encryption'), **data.pop('encryption_options'), 'key_file': False},
            'pool_dataset_create.encryption_options',
        ) or encryption_dict

        verrors.check()

        # --- Translate API fields to ZFS property names --------------------
        # Tuples are (api_field, zfs_property_or_None, value_transform_or_None,
        # honors_INHERIT). 'INHERIT' means: omit the property so ZFS inherits it.
        props = {}
        for i, real_name, transform, inheritable in (
            ('aclinherit', None, str.lower, True),
            ('aclmode', None, str.lower, True),
            ('acltype', None, str.lower, True),
            ('atime', None, str.lower, True),
            ('casesensitivity', None, str.lower, True),
            ('checksum', None, str.lower, True),
            ('comments', TNUserProp.DESCRIPTION.value, None, True),
            ('compression', None, str.lower, True),
            ('copies', None, str, True),
            ('deduplication', 'dedup', str.lower, True),
            ('exec', None, str.lower, True),
            ('managedby', TNUserProp.MANAGED_BY.value, None, True),
            ('quota', None, none_normalize, True),
            ('quota_warning', TNUserProp.QUOTA_WARN.value, str, True),
            ('quota_critical', TNUserProp.QUOTA_CRIT.value, str, True),
            ('readonly', None, str.lower, True),
            ('recordsize', None, None, True),
            ('refquota', None, none_normalize, True),
            ('refquota_warning', TNUserProp.REFQUOTA_WARN.value, str, True),
            ('refquota_critical', TNUserProp.REFQUOTA_CRIT.value, str, True),
            ('refreservation', None, none_normalize, False),
            ('reservation', None, none_normalize, False),
            ('snapdir', None, str.lower, True),
            ('snapdev', None, str.lower, True),
            ('sparse', None, None, False),
            ('sync', None, str.lower, True),
            ('volblocksize', None, None, False),
            ('volsize', None, lambda x: str(x), False),
            ('special_small_block_size', 'special_small_blocks', None, True),
        ):
            if i not in data or (inheritable and data[i] == 'INHERIT'):
                continue
            name = real_name or i
            props[name] = data[i] if not transform else transform(data[i])

        props.update(
            **encryption_dict,
            **(await self.get_create_update_user_props(data['user_properties']))
        )

        # --- Create, persist key material, mount, apply ACL ----------------
        await self.middleware.call('zfs.dataset.create', {
            'name': data['name'],
            'type': data['type'],
            'properties': props,
            'create_ancestors': data['create_ancestors'],
        })

        dataset_data = {
            'name': data['name'], 'encryption_key': encryption_dict.get('key'),
            'key_format': encryption_dict.get('keyformat')
        }
        await self.middleware.call('pool.dataset.insert_or_update_encrypted_record', dataset_data)
        await self.middleware.call_hook('dataset.post_create', {'encrypted': bool(encryption_dict), **dataset_data})

        data['id'] = data['name']

        await self.middleware.call('zfs.dataset.mount', data['name'])

        created_ds = await self.get_instance(data['id'])

        if acl_to_set:
            # We're potentially auto-inheriting an ACL containing nested
            # security groups and so we need to skip the ACL validation
            acl_job = await self.middleware.call('filesystem.setacl', {
                'path': mountpoint,
                'dacl': acl_to_set,
                'options': {'validate_effective_acl': False}
            })
            await acl_job.wait(raise_error=True)

        self.middleware.send_event('pool.dataset.query', 'ADDED', id=data['id'], fields=created_ds)
        return created_ds
    @accepts(Str('id', required=True), Patch(
        'pool_dataset_create', 'pool_dataset_update',
        ('rm', {'name': 'name'}),
        ('rm', {'name': 'type'}),
        ('rm', {'name': 'casesensitivity'}),  # It's a read-only attribute
        ('rm', {'name': 'share_type'}),  # This is something we should only do at create time
        ('rm', {'name': 'sparse'}),  # Create time only attribute
        ('rm', {'name': 'volblocksize'}),  # Create time only attribute
        ('rm', {'name': 'encryption'}),  # Create time only attribute
        ('rm', {'name': 'encryption_options'}),  # Create time only attribute
        ('rm', {'name': 'inherit_encryption'}),  # Create time only attribute
        ('add', List(
            'user_properties_update',
            items=[Dict(
                'user_property',
                Str('key', required=True, validators=[Match(r'.*:.*')]),
                Str('value'),
                Bool('remove'),
            )],
        )),
        ('attr', {'update': True}),
    ), audit='Pool dataset update', audit_callback=True)
    async def do_update(self, audit_callback, id_, data):
        """
        Updates a dataset/zvol `id`.

        .. examples(websocket)::

          Update the `comments` for "tank/myuser".

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.dataset.update",
                "params": ["tank/myuser", {
                    "comments": "Dataset for myuser, UPDATE #1"
                }]
            }
        """
        verrors = ValidationErrors()

        # Fetch the current dataset state (children not needed) so the update
        # payload can be validated against it.
        dataset = await self.middleware.call(
            'pool.dataset.query', [('id', '=', id_)], {'extra': {'retrieve_children': False}}
        )
        if not dataset:
            verrors.add('id', f'{id_} does not exist', errno.ENOENT)
        else:
            # Copy immutable attributes from the current state so common
            # validation runs against the complete picture.
            data['type'] = dataset[0]['type']
            data['name'] = dataset[0]['name']
            audit_callback(data['name'])
            if data['type'] == 'VOLUME':
                data['volblocksize'] = dataset[0]['volblocksize']['value']
            await self.__common_validation(verrors, 'pool_dataset_update', data, 'UPDATE', cur_dataset=dataset[0])
            if 'volsize' in data:
                if data['volsize'] < dataset[0]['volsize']['parsed']:
                    verrors.add('pool_dataset_update.volsize',
                                'You cannot shrink a zvol from GUI, this may lead to data loss.')
            if dataset[0]['type'] == 'VOLUME':
                # Hiding a zvol's snapshot devices must not strand attachments
                # that are currently using those read-only snapshot devices.
                existing_snapdev_prop = dataset[0]['snapdev']['parsed'].upper()
                snapdev_prop = data.get('snapdev') or existing_snapdev_prop
                if existing_snapdev_prop != snapdev_prop and snapdev_prop in ('INHERIT', 'HIDDEN'):
                    if await self.middleware.call(
                        'zfs.dataset.unlocked_zvols_fast',
                        [['attachment', '!=', None], ['ro', '=', True], ['name', '^', f'{id_}@']],
                        {}, ['RO', 'ATTACHMENT']
                    ):
                        verrors.add(
                            'pool_dataset_update.snapdev',
                            f'{id_!r} has snapshots which have attachments being used. Before marking it '
                            'as HIDDEN, remove attachment usages.'
                        )

        verrors.check()

        # (api field name, zfs property name (None when identical), value
        #  transform, whether the property may be reset to the inherited value)
        properties_definitions = (
            ('aclinherit', None, str.lower, True),
            ('aclmode', None, str.lower, True),
            ('acltype', None, str.lower, True),
            ('atime', None, str.lower, True),
            ('checksum', None, str.lower, True),
            ('comments', TNUserProp.DESCRIPTION.value, None, False),
            ('sync', None, str.lower, True),
            ('compression', None, str.lower, True),
            ('deduplication', 'dedup', str.lower, True),
            ('exec', None, str.lower, True),
            ('managedby', TNUserProp.MANAGED_BY.value, None, True),
            ('quota', None, none_normalize, False),
            ('quota_warning', TNUserProp.QUOTA_WARN.value, str, True),
            ('quota_critical', TNUserProp.QUOTA_CRIT.value, str, True),
            ('refquota', None, none_normalize, False),
            ('refquota_warning', TNUserProp.REFQUOTA_WARN.value, str, True),
            ('refquota_critical', TNUserProp.REFQUOTA_CRIT.value, str, True),
            ('reservation', None, none_normalize, False),
            ('refreservation', None, none_normalize, False),
            ('copies', None, str, True),
            ('snapdir', None, str.lower, True),
            ('snapdev', None, str.lower, True),
            ('readonly', None, str.lower, True),
            ('recordsize', None, None, True),
            ('volsize', None, lambda x: str(x), False),
            ('special_small_block_size', 'special_small_blocks', None, True),
        )

        # Build the zfs property payload; inheritable properties requested as
        # 'INHERIT' are reset to the parent's value instead of being assigned.
        props = {}
        for i, real_name, transform, inheritable in properties_definitions:
            if i not in data:
                continue
            name = real_name or i
            if inheritable and data[i] == 'INHERIT':
                props[name] = {'source': 'INHERIT'}
            else:
                props[name] = {'value': data[i] if not transform else transform(data[i])}

        if data.get('user_properties_update'):
            props.update(await self.get_create_update_user_props(data['user_properties_update'], True))

        # Keep aclmode/aclinherit coherent with the requested acltype.
        if 'acltype' in props and (acltype_value := props['acltype'].get('value')):
            if acltype_value == 'nfsv4':
                props.update({
                    'aclinherit': {'value': 'passthrough'}
                })
            elif acltype_value in ['posix', 'off']:
                props.update({
                    'aclmode': {'value': 'discard'},
                    'aclinherit': {'value': 'discard'}
                })
            elif props['acltype'].get('source') == 'INHERIT':
                # NOTE(review): when acltype is set via INHERIT, props['acltype']
                # has no 'value' key, so the walrus guard above is falsy and this
                # branch appears unreachable — confirm intended behavior.
                props.update({
                    'aclmode': {'source': 'INHERIT'},
                    'aclinherit': {'source': 'INHERIT'}
                })

        try:
            await self.middleware.call('zfs.dataset.update', id_, {'properties': props})
        except ZFSSetPropertyError as e:
            # Map the failing zfs property back to its API field name.
            verrors = ValidationErrors()
            verrors.add_child('pool_dataset_update', self.__handle_zfs_set_property_error(e, properties_definitions))
            raise verrors

        if data['type'] == 'VOLUME' and 'volsize' in data and data['volsize'] > dataset[0]['volsize']['parsed']:
            # means the zvol size has increased so we need to check if this zvol is shared via SCST (iscsi)
            # and if it is, resync it so the connected initiators can see the new size of the zvol
            await self.middleware.call('iscsi.global.resync_lun_size_for_zvol', id_)

        updated_ds = await self.get_instance(id_)
        self.middleware.send_event('pool.dataset.query', 'CHANGED', id=id_, fields=updated_ds)

        return updated_ds
@accepts(Str('id'), Dict(
'dataset_delete',
Bool('recursive', default=False),
Bool('force', default=False),
), audit='Pool dataset delete', audit_callback=True)
async def do_delete(self, audit_callback, id_, options):
"""
Delete dataset/zvol `id`.
`recursive` will also delete/destroy all children datasets.
`force` will force delete busy datasets.
When root dataset is specified as `id` with `recursive`, it will destroy all the children of the
root dataset present leaving root dataset intact.
.. examples(websocket)::
Delete "tank/myuser" dataset.
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "pool.dataset.delete",
"params": ["tank/myuser"]
}
"""
if not options['recursive'] and await self.middleware.call('zfs.dataset.query', [['id', '^', f'{id_}/']]):
raise CallError(
f'Failed to delete dataset: cannot destroy {id_!r}: filesystem has children', errno.ENOTEMPTY
)
dataset = await self.get_instance(id_)
audit_callback(dataset['name'])
if mountpoint := dataset_mountpoint(dataset):
for delegate in await self.middleware.call('pool.dataset.get_attachment_delegates'):
attachments = await delegate.query(mountpoint, True)
if attachments:
await delegate.delete(attachments)
if dataset['locked'] and mountpoint and os.path.exists(mountpoint):
# We would like to remove the immutable flag in this case so that it's mountpoint can be
# cleaned automatically when we delete the dataset
await self.middleware.call('filesystem.set_immutable', False, mountpoint)
result = await self.middleware.call('zfs.dataset.delete', id_, {
'force': options['force'],
'recursive': options['recursive'],
})
return result
def __handle_zfs_set_property_error(self, e, properties_definitions):
zfs_name_to_api_name = {i[1]: i[0] for i in properties_definitions}
api_name = zfs_name_to_api_name.get(e.property) or e.property
verrors = ValidationErrors()
verrors.add(api_name, e.error)
return verrors
@item_method
@accepts(Str('id'))
@returns()
async def promote(self, id_):
"""
Promote the cloned dataset `id`.
"""
dataset = await self.middleware.call('zfs.dataset.query', [('id', '=', id_)])
if not dataset:
raise CallError(f'Dataset "{id_}" does not exist.', errno.ENOENT)
if not dataset[0]['properties']['origin']['value']:
raise CallError('Only cloned datasets can be promoted.', errno.EBADMSG)
return await self.middleware.call('zfs.dataset.promote', id_)
| 47,223 | Python | .py | 887 | 39.525366 | 120 | 0.55979 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,883 | dataset_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_attachments.py | from middlewared.schema import accepts, Ref, returns, Str
from middlewared.service import item_method, private, Service
from .utils import dataset_mountpoint
class PoolDatasetService(Service):

    # Registered at runtime by each service plugin (NFS, SMB, iSCSI, ...);
    # deliberately shared across instances at class level.
    attachment_delegates = []

    class Config:
        namespace = 'pool.dataset'

    @item_method
    @accepts(Str('id', required=True), roles=['DATASET_READ'])
    @returns(Ref('attachments'))
    async def attachments(self, oid):
        """
        Return a list of services dependent of this dataset.

        Responsible for telling the user whether there is a related
        share, asking for confirmation.

        Example return value:
        [
          {
            "type": "NFS Share",
            "service": "nfs",
            "attachments": ["/mnt/tank/work"]
          }
        ]
        """
        dataset = await self.middleware.call('pool.dataset.get_instance_quick', oid)
        mountpoint = dataset_mountpoint(dataset)
        if not mountpoint:
            # e.g. a locked/unmounted dataset has no path to match against.
            return []
        return await self.attachments_with_path(mountpoint)

    @private
    async def attachments_with_path(self, path, check_parent=False, exact_match=False):
        """Return per-service attachment summaries for an arbitrary filesystem path."""
        if isinstance(path, str) and not path.startswith('/mnt/'):
            self.logger.warning('%s: unexpected path not located within pool mountpoint', path)

        if not path:
            return []

        options = {'check_parent': check_parent, 'exact_match': exact_match}
        summaries = []
        for delegate in self.attachment_delegates:
            names = [
                await delegate.get_attachment_name(attachment)
                for attachment in await delegate.query(path, True, options)
            ]
            if names:
                summaries.append({'type': delegate.title, 'service': delegate.service, 'attachments': names})
        return summaries

    @private
    def register_attachment_delegate(self, delegate):
        """Register a service delegate used to discover/toggle dataset attachments."""
        self.attachment_delegates.append(delegate)

    @private
    async def query_attachment_delegate(self, name, path, enabled):
        """Query a single registered delegate by its `name`."""
        delegate = next((d for d in self.attachment_delegates if d.name == name), None)
        if delegate is None:
            raise RuntimeError(f'Unknown attachment delegate {name!r}')
        return await delegate.query(path, enabled)

    @private
    async def get_attachment_delegates(self):
        """Return all registered attachment delegates."""
        return self.attachment_delegates
| 2,388 | Python | .py | 54 | 34.888889 | 102 | 0.64569 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,884 | pool_operations.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/pool_operations.py | from datetime import datetime
from middlewared.schema import accepts, Bool, Int, returns, Str
from middlewared.service import item_method, job, private, Service
class PoolService(Service):

    class Config:
        cli_namespace = 'storage.pool'
        event_send = False

    @private
    def configure_resilver_priority(self):
        """
        Configure resilver priority based on user selected off-peak hours.

        Reads the `storage.resilver` configuration and, when the current time
        falls inside the configured window, raises the ZFS module tunables so
        resilver/scrub work is prioritized; otherwise applies the lower
        defaults.
        """
        resilver = self.middleware.call_sync('datastore.config', 'storage.resilver')

        if not resilver['enabled'] or not resilver['weekday']:
            return

        higher_prio = False
        # Must be a concrete container (NOT a `map` iterator): membership may be
        # tested twice below and an iterator would be (partially) exhausted by
        # the first `in` check, making the second test unreliable.
        weekdays = {int(x) for x in resilver['weekday'].split(',')}
        now = datetime.now()
        now_t = now.time()
        # end overlaps the day
        if resilver['begin'] > resilver['end']:
            if now.isoweekday() in weekdays and now_t >= resilver['begin']:
                higher_prio = True
            else:
                # Window started yesterday and runs past midnight; map Monday(1)
                # back to Sunday(7).
                lastweekday = now.isoweekday() - 1
                if lastweekday == 0:
                    lastweekday = 7
                if lastweekday in weekdays and now_t < resilver['end']:
                    higher_prio = True
        # end does not overlap the day
        else:
            if now.isoweekday() in weekdays and now_t >= resilver['begin'] and now_t < resilver['end']:
                higher_prio = True

        if higher_prio:
            resilver_min_time_ms = 9000
            nia_credit = 10
            nia_delay = 2
            scrub_max_active = 8
        else:
            resilver_min_time_ms = 3000
            nia_credit = 5
            nia_delay = 5
            scrub_max_active = 3

        # Apply the tunables to the running zfs kernel module.
        for tunable, value in (
            ('zfs_resilver_min_time_ms', resilver_min_time_ms),
            ('zfs_vdev_nia_credit', nia_credit),
            ('zfs_vdev_nia_delay', nia_delay),
            ('zfs_vdev_scrub_max_active', scrub_max_active),
        ):
            with open(f'/sys/module/zfs/parameters/{tunable}', 'w') as f:
                f.write(str(value))

    @item_method
    @accepts(
        Int('id', required=True),
        Str('action', enum=['START', 'STOP', 'PAUSE'], required=True)
    )
    @job(transient=True)
    async def scrub(self, job, oid, action):
        """
        Performs a scrub action to pool of `id`.

        `action` can be either of "START", "STOP" or "PAUSE".

        .. examples(websocket)::

          Start scrub on pool of id 1.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.scrub",
                "params": [1, "START"]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        # Delegate to the scrub service and forward its job progress/result.
        return await job.wrap(await self.middleware.call('pool.scrub.scrub', pool['name'], action))

    @accepts(Int('id'))
    @returns(Bool('upgraded'))
    @item_method
    async def upgrade(self, oid):
        """
        Upgrade pool of `id` to latest version with all feature flags.

        .. examples(websocket)::

          Upgrade pool of id 1.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.upgrade",
                "params": [1]
            }
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        # Should we check first if upgrade is required ?
        await self.middleware.call('zfs.pool.upgrade', pool['name'])
        await self.middleware.call('alert.oneshot_delete', 'PoolUpgraded', pool['name'])
        return True
| 3,789 | Python | .py | 94 | 29.62766 | 103 | 0.559151 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,885 | topology.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/topology.py | from collections import deque
from middlewared.service import private, Service
from .utils import RE_DRAID_SPARE_DISKS, RE_DRAID_DATA_DISKS, RE_DRAID_NAME
class PoolService(Service):

    class Config:
        cli_namespace = 'storage.pool'
        event_send = False

    @private
    def flatten_topology(self, topology):
        """
        Flatten a pool topology mapping into a single list of vdev dicts,
        breadth-first, including every nested child vdev.
        """
        d = deque(sum(topology.values(), []))
        result = []
        while d:
            vdev = d.popleft()
            result.append(vdev)
            d.extend(vdev['children'])
        return result

    @private
    async def transform_topology_lightweight(self, x):
        # Same transformation as `transform_topology` but with the (expensive)
        # per-disk device/unavail lookups disabled.
        return await self.middleware.call('pool.transform_topology', x, {'device_disk': False, 'unavail_disk': False})

    @private
    def transform_topology(self, x, options=None):
        """
        Transform topology output from libzfs to add `device` and make `type` uppercase.

        Mutates `x` in place (and also returns it) recursing into nested dicts
        and lists. When `options['device_disk']` is true, `/dev/...` paths are
        resolved to device/disk names; when `options['unavail_disk']` is true,
        non-ONLINE members are matched to a disk by their zfs guid.
        """
        options = options or {}
        if isinstance(x, dict):
            if options.get('device_disk', True):
                path = x.get('path')
                if path is not None:
                    device = disk = None
                    if path.startswith('/dev/'):
                        # strip the '/dev/' prefix before label lookups
                        args = [path[5:]]
                        device = self.middleware.call_sync('disk.label_to_dev', *args)
                        disk = self.middleware.call_sync('disk.label_to_disk', *args)
                    x['device'] = device
                    x['disk'] = disk

            if options.get('unavail_disk', True):
                guid = x.get('guid')
                if guid is not None:
                    unavail_disk = None
                    if x.get('status') != 'ONLINE':
                        unavail_disk = self.middleware.call_sync('disk.disk_by_zfs_guid', guid)
                    x['unavail_disk'] = unavail_disk

            for key in x:
                if key == 'type' and isinstance(x[key], str):
                    x[key] = x[key].upper()
                elif key == 'name' and RE_DRAID_NAME.match(x[key]) and isinstance(x.get('stats'), dict):
                    # Extract spare/data/parity counts from a dRAID vdev name
                    # (e.g. 'draid2:4d:...:1s'); the regex matches include one
                    # surrounding character on each side, hence the [1:-1] slice.
                    x['stats'].update({
                        'draid_spare_disks': int(RE_DRAID_SPARE_DISKS.findall(x['name'])[0][1:-1]),
                        'draid_data_disks': int(RE_DRAID_DATA_DISKS.findall(x['name'])[0][1:-1]),
                        'draid_parity': int(x['name'][len('draid'):len('draid') + 1]),
                    })
                else:
                    # Recurse into nested containers; scalars are returned
                    # unchanged by the isinstance checks above.
                    # NOTE(review): 'geom_scan' is forwarded but never read in
                    # this method — presumably vestigial; confirm before removal.
                    x[key] = self.transform_topology(x[key], dict(options, geom_scan=False))
        elif isinstance(x, list):
            for i, entry in enumerate(x):
                x[i] = self.transform_topology(x[i], dict(options, geom_scan=False))
        return x
| 2,715 | Python | .py | 58 | 32.896552 | 118 | 0.521526 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,886 | info.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/info.py | import errno
from middlewared.schema import accepts, Bool, Dict, Int, List, returns, Str
from middlewared.service import CallError, item_method, no_authz_required, private, Service, ValidationError
class PoolService(Service):

    @private
    def find_disk_from_topology(self, label, pool, options=None):
        """
        Locate a disk (or, optionally, a top-level vdev) in `pool['topology']`
        by its device label or zfs guid.

        Returns a tuple `(topology_root, vdev_dict)` — with the sibling list
        appended as a third element when `options['include_siblings']` is set —
        or None when nothing matches.
        """
        options = options or {}
        include_top_level_vdev = options.get('include_top_level_vdev', False)
        include_siblings = options.get('include_siblings', False)

        # Iterative depth-first walk over every topology root's vdev tree.
        check = []
        found = None
        for root, children in pool['topology'].items():
            check.append((root, children))

        while check and not found:
            root, children = check.pop()
            for c in children:
                if c['type'] == 'DISK':
                    # Match either by device name (path minus '/dev/') or guid.
                    if label in (c['path'].replace('/dev/', ''), c['guid']):
                        found = (root, c)
                        break
                elif include_top_level_vdev and c['guid'] == label:
                    found = (root, c)
                    break

                if c['children']:
                    check.append((root, c['children']))

        if found is not None and include_siblings:
            # `children` still refers to the list containing the found vdev.
            found = (found[0], found[1], children)

        return found

    @item_method
    @accepts(Int('id'), roles=['READONLY_ADMIN'])
    @returns(List(items=[Dict(
        'attachment',
        Str('type', required=True),
        Str('service', required=True, null=True),
        List('attachments', items=[Str('attachment_name')]),
    )], register=True))
    async def attachments(self, oid):
        """
        Return a list of services dependent of this pool.

        Responsible for telling the user whether there is a related
        share, asking for confirmation.
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        return await self.middleware.call('pool.dataset.attachments_with_path', pool['path'])

    @item_method
    @accepts(Int('id'), roles=['READONLY_ADMIN'])
    @returns(List(items=[Dict(
        'process',
        Int('pid', required=True),
        Str('name', required=True),
        Str('service'),
        Str('cmdline', max_length=None),
    )], register=True))
    async def processes(self, oid):
        """
        Returns a list of running processes using this pool.
        """
        pool = await self.middleware.call('pool.get_instance', oid)
        processes = []
        try:
            processes = await self.middleware.call('pool.dataset.processes', pool['name'])
        except ValidationError as e:
            if e.errno == errno.ENOENT:
                # Dataset might not exist (e.g. not online), this is not an error
                pass
            else:
                raise

        return processes

    @item_method
    @accepts(Int('id', required=False, default=None, null=True), roles=['READONLY_ADMIN'])
    @returns(List('pool_disks', items=[Str('disk')]))
    async def get_disks(self, oid):
        """
        Get all disks in use by pools.
        If `id` is provided only the disks from the given pool `id` will be returned.
        """
        disks = []
        for pool in await self.middleware.call('pool.query', [] if not oid else [('id', '=', oid)]):
            if pool['status'] != 'OFFLINE':
                disks.extend(await self.middleware.call('zfs.pool.get_disks', pool['name']))
        return disks

    @accepts(
        List('types', items=[Str('type', enum=['FILESYSTEM', 'VOLUME'])], default=['FILESYSTEM', 'VOLUME']),
        roles=['DATASET_READ']
    )
    @returns(List(items=[Str('filesystem_name')]))
    async def filesystem_choices(self, types):
        """
        Returns all available datasets, except the following:
            1. system datasets
            2. application(s) internal datasets

        .. examples(websocket)::

          Get all datasets.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.filesystem_choices",
                "params": []
            }

          Get only filesystems (exclude volumes).

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.filesystem_choices",
                "params": [["FILESYSTEM"]]
            }
        """
        vol_names = [vol['name'] for vol in await self.middleware.call('pool.query')]
        return [
            y['name'] for y in await self.middleware.call(
                'zfs.dataset.query',
                [
                    ('pool', 'in', vol_names),
                    ('type', 'in', types),
                ] + await self.middleware.call('pool.dataset.internal_datasets_filters'),
                {'extra': {'retrieve_properties': False}, 'order_by': ['name']},
            )
        ]

    @accepts(Int('id', required=True), roles=['READONLY_ADMIN'])
    @returns(Bool('pool_is_upgraded'))
    @item_method
    async def is_upgraded(self, oid):
        """
        Returns whether or not the pool of `id` is on the latest version and with all feature
        flags enabled.

        .. examples(websocket)::

          Check if pool of id 1 is upgraded.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.is_upgraded",
                "params": [1]
            }
        """
        return await self.is_upgraded_by_name((await self.middleware.call('pool.get_instance', oid))['name'])

    @private
    async def is_upgraded_by_name(self, name):
        """Like `is_upgraded` but takes a pool name; returns False on any zfs error."""
        try:
            return await self.middleware.call('zfs.pool.is_upgraded', name)
        except CallError:
            return False
| 5,863 | Python | .py | 144 | 29.75 | 109 | 0.550931 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,887 | expand.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/expand.py | import logging
import os
from middlewared.service import item_method, job, private, Service
from middlewared.schema import accepts, Int, returns
from middlewared.utils import run
logger = logging.getLogger(__name__)
class PoolService(Service):

    @item_method
    @accepts(
        Int('id'),
    )
    @returns()
    @job(lock='pool_expand')
    async def expand(self, job, id_):
        """
        Expand pool to fit all available disk space.

        For every healthy vdev whose member partitions can be validated, the
        data partition of each member disk is grown to the end of the disk and
        the vdev member is onlined with expansion enabled.
        """
        pool = await self.middleware.call('pool.get_instance', id_)
        all_partitions = {p['name']: p for p in await self.middleware.call('disk.list_all_partitions')}

        vdevs = []
        for vdev in sum(pool['topology'].values(), []):
            if vdev['status'] != 'ONLINE':
                logger.debug('Not expanding vdev(%r) that is %r', vdev['guid'], vdev['status'])
                continue

            c_vdevs = []
            # A top-level DISK vdev is its own single member.
            disks = vdev['children'] if vdev['type'] != 'DISK' else [vdev]
            skip_vdev = None
            for child in disks:
                # Any unhealthy/unverifiable member disqualifies the whole vdev:
                # we only expand vdevs where every member can be grown.
                if child['status'] != 'ONLINE':
                    skip_vdev = f'Device "{child["device"]}" status is not ONLINE ' \
                                f'(Reported status is {child["status"]})'
                    break

                part_data = all_partitions.get(child['device'])
                if not part_data:
                    skip_vdev = f'Unable to find partition data for {child["device"]}'
                elif not part_data['partition_number']:
                    skip_vdev = f'Could not parse partition number from {child["device"]}'
                elif part_data['disk'] != child['disk']:
                    skip_vdev = f'Retrieved partition data for device {child["device"]} ' \
                                f'({part_data["disk"]}) does not match with disk ' \
                                f'reported by ZFS ({child["disk"]})'
                if skip_vdev:
                    break
                else:
                    c_vdevs.append((child['guid'], part_data))

            if skip_vdev:
                logger.debug('Not expanding vdev(%r): %r', vdev['guid'], skip_vdev)
                continue

            for guid, part_data in c_vdevs:
                await self.expand_partition(part_data)
                vdevs.append(guid)

        # spare/cache devices cannot be expanded
        # We resize them anyway, for cache devices, whenever we are going to import the pool
        # next, it will register the new capacity. For spares, whenever that spare is going to
        # be used, it will register the new capacity as desired.
        for topology_type in filter(
            lambda t: t not in ('spare', 'cache') and pool['topology'][t], pool['topology']
        ):
            for vdev in pool['topology'][topology_type]:
                for c_vd in filter(
                    lambda v: v['guid'] in vdevs, vdev['children'] if vdev['type'] != 'DISK' else [vdev]
                ):
                    # online with expand=True so zfs picks up the larger partition
                    await self.middleware.call('zfs.pool.online', pool['name'], c_vd['guid'], True)

    @private
    async def expand_partition(self, part_data):
        """
        Grow the given partition to the end of its disk: delete and re-create
        it at the same start sector with end sector 0 (= last usable sector),
        preserving its partition uuid and the BF01 (zfs) type, then re-read
        the partition table.
        """
        partition_number = part_data['partition_number']
        start = part_data['start_sector']
        await run(
            'sgdisk', '-d', str(partition_number), '-n', f'{partition_number}:{start}:0', '-t',
            f'{partition_number}:BF01', '-u', f'{partition_number}:{part_data["partition_uuid"]}',
            os.path.join('/dev', part_data['disk'])
        )
        await run('partprobe', os.path.join('/dev', part_data['disk']))
| 3,590 | Python | .py | 73 | 36.452055 | 104 | 0.545506 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,888 | import_pool.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/import_pool.py | import contextlib
import errno
import os
import subprocess
from middlewared.schema import accepts, Bool, Dict, List, returns, Str
from middlewared.service import CallError, InstanceNotFound, job, private, Service
from .utils import ZPOOL_CACHE_FILE
class PoolService(Service):
class Config:
cli_namespace = 'storage.pool'
event_send = False
@accepts()
@returns(List(
'pools_available_for_import',
title='Pools Available For Import',
items=[Dict(
'pool_info',
Str('name', required=True),
Str('guid', required=True),
Str('status', required=True),
Str('hostname', required=True),
)]
))
@job()
async def import_find(self, job):
"""
Returns a job id which can be used to retrieve a list of pools available for
import with the following details as a result of the job:
name, guid, status, hostname.
"""
existing_guids = [i['guid'] for i in await self.middleware.call('pool.query')]
result = []
for pool in await self.middleware.call('zfs.pool.find_import'):
if pool['status'] == 'UNAVAIL':
continue
# Exclude pools with same guid as existing pools (in database)
# It could be the pool is in the database but was exported/detached for some reason
# See #6808
if pool['guid'] in existing_guids:
continue
entry = {}
for i in ('name', 'guid', 'status', 'hostname'):
entry[i] = pool[i]
result.append(entry)
return result
@private
async def disable_shares(self, ds):
await self.middleware.call('zfs.dataset.update', ds, {
'properties': {
'sharenfs': {'value': "off"},
'sharesmb': {'value': "off"},
}
})
@accepts(Dict(
'pool_import',
Str('guid', required=True),
Str('name'),
Bool('enable_attachments'),
))
@returns(Bool('successful_import'))
@job(lock='import_pool')
async def import_pool(self, job, data):
"""
Import a pool found with `pool.import_find`.
If a `name` is specified the pool will be imported using that new name.
If `enable_attachments` is set to true, attachments that were disabled during pool export will be
re-enabled.
Errors:
ENOENT - Pool not found
.. examples(websocket)::
Import pool of guid 5571830764813710860.
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "pool.import_pool,
"params": [{
"guid": "5571830764813710860"
}]
}
"""
guid = data['guid']
new_name = data.get('name')
# validate
imported_pools = await self.middleware.call('zfs.pool.query_imported_fast')
if guid in imported_pools:
raise CallError(f'Pool with guid: "{guid}" already imported', errno.EEXIST)
elif new_name and new_name in imported_pools.values():
err = f'Cannot import pool using new name: "{new_name}" because a pool is already imported with that name'
raise CallError(err, errno.EEXIST)
# import zpool
opts = {'altroot': '/mnt', 'cachefile': ZPOOL_CACHE_FILE}
any_host = True
use_cachefile = None
await self.middleware.call('zfs.pool.import_pool', guid, opts, any_host, use_cachefile, new_name)
# get the zpool name
if not new_name:
pool_name = (await self.middleware.call('zfs.pool.query_imported_fast'))[guid]['name']
else:
pool_name = new_name
# Let's umount any datasets if root dataset of the new pool is locked, and it has unencrypted datasets
# beneath it. This is to prevent the scenario where the root dataset is locked and the child datasets
# get mounted
await self.handle_unencrypted_datasets_on_import(pool_name)
# set acl properties correctly for given top-level dataset's acltype
await self.middleware.call('pool.normalize_root_dataset_properties', pool_name, guid)
# Recursively reset dataset mountpoints for the zpool.
recursive = True
for child in await self.middleware.call('zfs.dataset.child_dataset_names', pool_name):
if child in (os.path.join(pool_name, k) for k in ('ix-applications', 'ix-apps')):
# We exclude `ix-applications` dataset since resetting it will
# cause PVC's to not mount because "mountpoint=legacy" is expected.
# We exclude `ix-apps` dataset since it has a custom mountpoint in place
continue
try:
# Reset all mountpoints
await self.middleware.call('zfs.dataset.inherit', child, 'mountpoint', recursive)
except CallError as e:
if e.errno != errno.EPROTONOSUPPORT:
self.logger.warning('Failed to inherit mountpoints recursively for %r dataset: %r', child, e)
continue
try:
await self.disable_shares(child)
self.logger.warning('%s: disabling ZFS dataset property-based shares', child)
except Exception:
self.logger.warning('%s: failed to disable share: %s.', child, str(e), exc_info=True)
except Exception as e:
# Let's not make this fatal
self.logger.warning('Failed to inherit mountpoints recursively for %r dataset: %r', child, e)
# We want to set immutable flag on all of locked datasets
for encrypted_ds in await self.middleware.call(
'pool.dataset.query_encrypted_datasets', pool_name, {'key_loaded': False}
):
encrypted_mountpoint = os.path.join('/mnt', encrypted_ds)
if os.path.exists(encrypted_mountpoint):
try:
await self.middleware.call('filesystem.set_immutable', True, encrypted_mountpoint)
except Exception as e:
self.logger.warning('Failed to set immutable flag at %r: %r', encrypted_mountpoint, e)
# update db
for pool in await self.middleware.call('datastore.query', 'storage.volume', [['vol_name', '=', pool_name]]):
await self.middleware.call('datastore.delete', 'storage.volume', pool['id'])
pool_id = await self.middleware.call('datastore.insert', 'storage.volume', {
'vol_name': pool_name,
'vol_guid': guid,
})
await self.middleware.call('pool.scrub.create', {'pool': pool_id})
# re-enable/restart any services dependent on this pool
pool = await self.middleware.call('pool.query', [('id', '=', pool_id)], {'get': True})
key = f'pool:{pool["name"]}:enable_on_import'
if await self.middleware.call('keyvalue.has_key', key):
for name, ids in (await self.middleware.call('keyvalue.get', key)).items():
for delegate in await self.middleware.call('pool.dataset.get_attachment_delegates'):
if delegate.name == name:
attachments = await delegate.query(pool['path'], False)
attachments = [attachment for attachment in attachments if attachment['id'] in ids]
if attachments:
await delegate.toggle(attachments, True)
await self.middleware.call('keyvalue.delete', key)
await self.middleware.call_hook('pool.post_import', pool)
await self.middleware.call('pool.dataset.sync_db_keys', pool['name'])
self.middleware.send_event('pool.query', 'ADDED', id=pool_id, fields=pool)
return True
@private
def recursive_mount(self, name):
cmd = [
'zfs', 'mount',
'-R', # recursive flag
name, # name of the zpool / root dataset
]
try:
self.logger.debug('Going to mount root dataset recusively: %r', name)
cp = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if cp.returncode != 0:
self.logger.error(
'Failed to mount datasets for pool: %r with error: %r',
name, cp.stdout.decode()
)
return False
return True
except Exception:
self.logger.error(
'Unhandled exception while mounting datasets for pool: %r',
name, exc_info=True
)
return False
@private
def encryption_is_active(self, name):
cmd = [
'zfs', 'get',
'-H', # use in script
'-o', 'value', # retrieve the value
'encryption', # property to retrieve
name, # name of the zpool
]
try:
self.logger.debug('Checking if root dataset is encrypted: %r', name)
cp = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if cp.returncode != 0:
self.logger.error(
'Failed to see if root dataset is encrypted for pool: %r with error: %r',
name, cp.stdout.decode()
)
return False
if cp.stdout.decode().strip() == 'off':
return False
else:
return True
except Exception:
self.logger.error(
'Unhandled exception while checking on feature@encryption for pool: %r',
name, exc_info=True
)
return False
@private
def normalize_root_dataset_properties(self, vol_name, vol_guid):
try:
self.logger.debug('Calling zfs.dataset.query on %r with guid %r', vol_name, vol_guid)
ds = self.middleware.call_sync(
'zfs.dataset.query',
[['id', '=', vol_name]],
{'get': True, 'extra': {'retrieve_children': False}}
)['properties']
except Exception:
self.logger.warning('Unexpected failure querying root-level properties for %r', vol_name, exc_info=True)
return True
else:
self.logger.debug('Done calling zfs.dataset.query on %r with guid %r', vol_name, vol_guid)
opts = {'properties': dict()}
if ds['acltype']['value'] == 'nfsv4':
if ds['aclinherit']['value'] != 'passthrough':
opts['properties'].update({'aclinherit': {'value': 'passthrough'}})
if ds['aclmode']['value'] != 'passthrough':
opts['properties'].update({'aclmode': {'value': 'passthrough'}})
else:
if ds['aclinherit']['value'] != 'discard':
opts['properties'].update({'aclinherit': {'value': 'discard'}})
if ds['aclmode']['value'] != 'discard':
opts['properties'].update({'aclmode': {'value': 'discard'}})
if ds['sharenfs']['value'] != 'off':
opts['properties'].update({'sharenfs': {'value': 'off'}})
if ds['sharesmb']['value'] != 'off':
opts['properties'].update({'sharesmb': {'value': 'off'}})
if opts['properties']:
try:
self.logger.debug('Calling zfs.dateset.update on %r with opts %r', vol_name, opts['properties'])
self.middleware.call_sync('zfs.dataset.update', vol_name, opts)
except Exception:
self.logger.warning('%r: failed to normalize properties of root-level dataset', vol_name, exc_info=True)
else:
self.logger.debug('Done calling zfs.dateset.update on %r', vol_name)
@private
def import_on_boot_impl(self, vol_name, vol_guid, set_cachefile=False):
cmd = [
'zpool', 'import',
vol_guid, # the GUID of the zpool
'-R', '/mnt', # altroot
'-m', # import pool with missing log device(s)
'-N', # do not mount the datasets
'-f', # force import since hostid can change (upgrade from CORE to SCALE changes it, for example)
'-o', f'cachefile={ZPOOL_CACHE_FILE}' if set_cachefile else 'cachefile=none',
]
try:
self.logger.debug('Importing %r with guid: %r', vol_name, vol_guid)
cp = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if cp.returncode != 0:
self.logger.error(
'Failed to import %r with guid: %r with error: %r',
vol_name, vol_guid, cp.stdout.decode()
)
return False
except Exception:
self.logger.error('Unhandled exception importing %r', vol_name, exc_info=True)
return False
else:
self.logger.debug('Done importing %r with guid %r', vol_name, vol_guid)
# normalize ZFS dataset properties on boot. Pool may be foreign to SCALE
# (including those created on CORE)
self.normalize_root_dataset_properties(vol_name, vol_guid)
return True
@private
    def unlock_on_boot_impl(self, vol_name):
        """Attempt to unlock encrypted datasets of `vol_name` at boot.

        Sequence:
          1. force-umount any unencrypted children mounted under a locked
             encrypted root (via pool.handle_unencrypted_datasets_on_import)
          2. recursively unlock key-based encrypted datasets (passphrase
             encrypted roots are skipped -- the passphrase is not on disk)
          3. if the root dataset remains locked, umount it and mark its
             mountpoint immutable to prevent writes into a bare directory

        All failures are logged; nothing is raised to the caller.
        """
        zpool_info = self.middleware.call_sync('pool.handle_unencrypted_datasets_on_import', vol_name)
        if not zpool_info:
            self.logger.error(
                'Unable to retrieve %r root dataset information required for unlocking any relevant encrypted datasets',
                vol_name
            )
            return

        umount_root_short_circuit = False
        if zpool_info['key_format']['parsed'] == 'passphrase':
            # passphrase encrypted zpools will _always_ fail to be unlocked at
            # boot time because we don't store the users passphrase on disk
            # anywhere.
            #
            # NOTE: To have a passphrase encrypted zpool (the root dataset is passphrase encrypted)
            # is considered an edge-case (or is someone upgrading from an old version of SCALE where
            # we mistakenly allowed this capability). There is also possibility to update existing
            # root dataset encryption from key based to passphrase based. Again, an edge-case but
            # documenting it here for posterity sake.
            self.logger.debug(
                'Passphrase encrypted zpool detected %r, passphrase required before unlock', vol_name
            )
            umount_root_short_circuit = True

        if not umount_root_short_circuit:
            # the top-level dataset could be unencrypted but there could be any number
            # of child datasets that are encrypted. This will try to recursively unlock
            # those datasets (including the parent if necessary).
            # If we fail to unlock the parent, then the method short-circuits and exits
            # early.
            opts = {'recursive': True, 'toggle_attachments': False}
            uj = self.middleware.call_sync('pool.dataset.unlock', vol_name, opts)
            uj.wait_sync()
            if uj.error:
                self.logger.error('FAILED unlocking encrypted dataset(s) for %r with error %r', vol_name, uj.error)
            elif uj.result['failed']:
                self.logger.error(
                    'FAILED unlocking the following datasets: %r for pool %r',
                    ', '.join(uj.result['failed']), vol_name
                )
            else:
                self.logger.debug('SUCCESS unlocking encrypted dataset(s) (if any) for %r', vol_name)

        if any((
            umount_root_short_circuit,
            self.middleware.call_sync(
                'pool.dataset.get_instance_quick', vol_name, {'encryption': True}
            )['locked']
        )):
            # We umount the zpool in the following scenarios:
            # 1. we came across a passphrase encrypted root dataset (i.e. /mnt/tank)
            # 2. we failed to unlock the key based encrypted root dataset
            #
            # It's important to understand how this operates at zfs level since this
            # can be painfully confusing.
            # 1. when system boots, we call zpool import
            # 2. zpool impot has no notion of encryption and will simply mount
            #   the datasets as necessary (INCLUDING ALL CHILDREN)
            # 3. if the root dataset is passphrase encrypted OR we fail to unlock
            #   the root dataset that is using key based encryption, then the child
            #   datasets ARE STILL MOUNTED DURING IMPORT PHASE (this includes
            #   encrypted children or unencrypted children)
            #
            # In the above scenario, the root dataset wouldn't be mounted but any number
            # of children would be. If the end-user is sharing one of the unencrypted children
            # via a sharing service, then what happens is that a parent DIRECTORY is created
            # in place of the root dataset and all files get written OUTSIDE of the zfs
            # mountpoint. That's an unpleasant experience because it is perceived as data loss
            # since mounting the dataset will just mount over-top of said directory.
            # (i.e. /mnt/tank/datasetA/datasetB/childds/, The "datasetA", "datasetB", "childds"
            # path components would be created as directories and I/O would continue without
            # any problems but the data is not going to that zfs dataset.
            #
            # To account for this edge-case (we now no longer allow the creation of unencrypted child
            # datasets where any upper path component is encrypted) (i.e. no more /mnt/zz/unencrypted/encrypted).
            # However, we still need to take into consideration the other users that manged to get themselves
            # into this scenario.
            if not umount_root_short_circuit:
                with contextlib.suppress(CallError):
                    self.logger.debug('Forcefully umounting %r', vol_name)
                    self.middleware.call_sync('zfs.dataset.umount', vol_name, {'force': True})
                    self.logger.debug('Successfully umounted %r', vol_name)

            pool_mount = f'/mnt/{vol_name}'
            if os.path.exists(pool_mount):
                try:
                    # setting the root path as immutable, in a perfect world, will prevent
                    # the scenario that is described above
                    self.logger.debug('Setting immutable flag at %r', pool_mount)
                    self.middleware.call_sync('filesystem.set_immutable', True, pool_mount)
                except CallError as e:
                    self.logger.error('Unable to set immutable flag at %r: %s', pool_mount, e)
@private
@job()
def import_on_boot(self, job):
if self.middleware.call_sync('failover.licensed'):
# HA systems pools are imported using the failover
# event logic
return
if self.middleware.call_sync('truenas.is_ix_hardware'):
# Attach NVMe/RoCE - wait up to 10 seconds
self.logger.info('Start bring up of NVMe/RoCE')
try:
jbof_job = self.middleware.call_sync('jbof.configure_job')
jbof_job.wait_sync(timeout=10)
if jbof_job.error:
self.logger.error(f'Error attaching JBOFs: {jbof_job.error}')
elif jbof_job.result['failed']:
self.logger.error(f'Failed to attach JBOFs:{jbof_job.result["message"]}')
else:
self.logger.info(jbof_job.result['message'])
except TimeoutError:
self.logger.error('Timed out attaching JBOFs - will continue in background')
except Exception:
self.logger.error('Unexpected error', exc_info=True)
set_cachefile_property = True
dir_name = os.path.dirname(ZPOOL_CACHE_FILE)
try:
self.logger.debug('Creating %r (if it doesnt already exist)', dir_name)
os.makedirs(dir_name, exist_ok=True)
except Exception:
self.logger.warning('FAILED unhandled exception creating %r', dir_name, exc_info=True)
set_cachefile_property = False
else:
try:
self.logger.debug('Creating %r (if it doesnt already exist)', ZPOOL_CACHE_FILE)
with open(ZPOOL_CACHE_FILE, 'x'):
pass
except FileExistsError:
# cachefile already exists on disk which is fine
pass
except Exception:
self.logger.warning('FAILED unhandled exception creating %r', ZPOOL_CACHE_FILE, exc_info=True)
set_cachefile_property = False
# We need to do as little zfs I/O as possible since this method
# is being called by a systemd service at boot-up. First step of
# doing this is to simply try to import all zpools that are in our
# database. Handle each error accordingly instead of trying to be
# fancy and determine which ones are "offline" since...in theory...
# all zpools should be offline at this point.
for i in self.middleware.call_sync('datastore.query', 'storage.volume'):
name, guid = i['vol_name'], i['vol_guid']
if not self.import_on_boot_impl(name, guid, set_cachefile_property):
continue
if not self.encryption_is_active(name):
self.recursive_mount(name)
self.unlock_on_boot_impl(name)
# TODO: we need to fix this. There is 0 reason to do all this stuff
# and block the entire boot-up process.
self.logger.debug('Calling pool.post_import')
self.middleware.call_hook_sync('pool.post_import', None)
self.logger.debug('Finished calling pool.post_import')
@private
async def handle_unencrypted_datasets_on_import(self, pool_name):
try:
root_ds = await self.middleware.call('pool.dataset.get_instance_quick', pool_name, {
'encryption': True,
})
except InstanceNotFound:
# We don't really care about this case, it means that pool did not get imported for some reason
return
if not root_ds['encrypted']:
return root_ds
# If root ds is encrypted, at this point we know that root dataset has not been mounted yet and neither
# unlocked, so if there are any children it has which were unencrypted - we force umount them
try:
await self.middleware.call('zfs.dataset.umount', pool_name, {'force': True})
self.logger.debug('Successfully umounted any unencrypted datasets under %r dataset', pool_name)
except Exception:
self.logger.error('Failed to umount any unencrypted datasets under %r dataset', pool_name, exc_info=True)
return root_ds
| 23,251 | Python | .py | 449 | 39.200445 | 120 | 0.589177 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,889 | krb5.py | truenas_middleware/src/middlewared/middlewared/plugins/activedirectory_/krb5.py | from middlewared.plugins.smb import SMBCmd
from middlewared.plugins.kerberos import krb5ccache
from middlewared.plugins.activedirectory_.dns import SRV
from middlewared.service import private, job, Service
from middlewared.service_exception import CallError
from middlewared.plugins.directoryservices import DSStatus
from middlewared.utils import run
class ActiveDirectoryService(Service):

    class Config:
        service = "activedirectory"

    @private
    async def net_keytab_add_update_ads(self, service_class):
        """Register/update an SPN entry of `service_class` for our AD computer account.

        Returns False without doing anything unless kerberized NFS (v4_krb) is
        enabled; returns True on success. Raises CallError when `net ads` fails.
        """
        if not (await self.middleware.call('nfs.config'))['v4_krb']:
            return False

        cmd = [
            SMBCmd.NET.value,
            '--use-kerberos', 'required',
            '--use-krb5-ccache', krb5ccache.SYSTEM.value,
            'ads', 'keytab',
            'add_update_ads', service_class
        ]

        netads = await run(cmd, check=False)
        if netads.returncode != 0:
            raise CallError('failed to set spn entry '
                            f'[{service_class}]: {netads.stdout.decode().strip()}')

        return True

    @private
    async def get_spn_list(self):
        """
        Return list of kerberos SPN entries registered for the server's Active
        Directory computer account. This may not reflect the state of the
        server's current kerberos keytab.
        """
        await self.middleware.call("kerberos.check_ticket")

        spnlist = []
        cmd = [
            SMBCmd.NET.value,
            '--use-kerberos', 'required',
            '--use-krb5-ccache', krb5ccache.SYSTEM.value,
            'ads', 'setspn', 'list'
        ]

        netads = await run(cmd, check=False)
        if netads.returncode != 0:
            raise CallError(
                f"Failed to generate SPN list: [{netads.stderr.decode().strip()}]"
            )

        for spn in netads.stdout.decode().splitlines():
            # valid SPNs have the form <service>/<host>; skip anything else
            if len(spn.split('/')) != 2:
                continue
            spnlist.append(spn.strip())

        return spnlist

    @private
    async def get_kerberos_servers(self, ad=None):
        """
        This returns at most 3 kerberos servers located in our AD site. This is to optimize
        kerberos configuration for locations where kerberos servers may span the globe and
        have equal DNS weighting. Since a single kerberos server may represent an unacceptable
        single point of failure, fall back to relying on normal DNS queries in this case.
        """
        if ad is None:
            ad = await self.middleware.call('activedirectory.config')

        res = await self.middleware.call(
            'activedirectory.get_n_working_servers',
            ad['domainname'],
            SRV.KERBEROSDOMAINCONTROLLER.name,
            ad['site'],
            3,
            ad['dns_timeout'],
            ad['verbose_logging'],
        )
        if len(res) != 3:
            return None

        return [i['host'] for i in res]

    @private
    async def set_kerberos_servers(self, ad=None):
        """Pin the kerberos realm's KDC list to site-local servers, when 3 are reachable."""
        if not ad:
            # BUG FIX: this previously did `ad = await self.middleware.call_sync(...)`.
            # call_sync() must not be used from an async method (it is not awaitable
            # and may not be invoked from the event loop); use call() like the
            # sibling methods in this class do.
            ad = await self.middleware.call('activedirectory.config')

        site_indexed_kerberos_servers = await self.get_kerberos_servers(ad)
        if site_indexed_kerberos_servers:
            await self.middleware.call(
                'kerberos.realm.update',
                ad['kerberos_realm'],
                {'kdc': site_indexed_kerberos_servers}
            )
| 3,415 | Python | .py | 84 | 30.678571 | 94 | 0.610793 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,890 | dns.py | truenas_middleware/src/middlewared/middlewared/plugins/activedirectory_/dns.py | import dns
import enum
import errno
import ipaddress
import socket
from middlewared.service import CallError, private, Service
from middlewared.utils import filter_list
class SRV(enum.Enum):
    """DNS SRV record prefixes used to locate Active Directory services.

    Each value is prepended to the AD domain name (optionally with a
    "<site>._sites." component inserted -- see get_n_working_servers /
    check_nameservers below) to form the SRV lookup name.
    """
    DOMAINCONTROLLER = '_ldap._tcp.dc._msdcs.'
    FORESTGLOBALCATALOG = '_ldap._tcp.gc._msdcs.'
    GLOBALCATALOG = '_gc._tcp.'
    KERBEROS = '_kerberos._tcp.'
    KERBEROSDOMAINCONTROLLER = '_kerberos._tcp.dc._msdcs.'
    KPASSWD = '_kpasswd._tcp.'
    LDAP = '_ldap._tcp.'
    PDC = '_ldap._tcp.pdc._msdcs.'
class ActiveDirectoryService(Service):

    class Config:
        service = "activedirectory"

    @private
    async def unregister_dns(self, ad):
        """Delete this host's A/AAAA records from AD DNS (e.g. when leaving the domain).

        No-op when dynamic DNS updates are disabled in the AD config. Raises
        CallError if the forward lookup resolves only to addresses we do not
        own, to avoid deleting records belonging to a different computer.
        """
        if not ad['allow_dns_updates']:
            return

        netbiosname = (await self.middleware.call('smb.config'))['netbiosname_local']
        domain = ad['domainname']
        hostname = f'{netbiosname}.{domain}'
        try:
            dns_addresses = set([x['address'] for x in await self.middleware.call('dnsclient.forward_lookup', {
                'names': [hostname]
            })])
        except dns.resolver.NXDOMAIN:
            self.logger.warning(
                f'DNS lookup of {hostname}. failed with NXDOMAIN. '
                'This may indicate that DNS entries for the computer account have already been deleted; '
                'however, it may also indicate the presence of larger underlying DNS configuration issues.'
            )
            return

        ips_in_use = set([x['address'] for x in await self.middleware.call('interface.ip_in_use')])
        if not dns_addresses & ips_in_use:
            # raise a CallError here because we don't want someone fat-fingering
            # input and removing an unrelated computer in the domain.
            raise CallError(
                f'DNS records indicate that {hostname} may be associated '
                'with a different computer in the domain. Forward lookup returned the '
                f'following results: {", ".join(dns_addresses)}.'
            )

        payload = []
        for ip in dns_addresses:
            addr = ipaddress.ip_address(ip)
            payload.append({
                'command': 'DELETE',
                'name': hostname,
                'address': str(addr),
                'type': 'A' if addr.version == 4 else 'AAAA'
            })

        try:
            await self.middleware.call('dns.nsupdate', {'ops': payload})
        except CallError as e:
            self.logger.warning(f'Failed to update DNS with payload [{payload}]: {e.errmsg}')

    @private
    async def ipaddresses_to_register(self, data, valid_only=True):
        """Return the subset of our in-use IPs suitable for AD DNS registration.

        `data` is a dict with `bindip` (optional list restricting candidates)
        and `hostname` (expected reverse-lookup target). An IP is accepted if
        its reverse lookup is absent (NXDOMAIN) or points at `hostname`;
        with valid_only=False, IPs with a mismatched PTR are kept anyway.
        """
        validated_ips = []
        ips = [i['address'] for i in (await self.middleware.call('interface.ip_in_use'))]
        if data['bindip']:
            to_check = set(data['bindip']) & set(ips)
        else:
            to_check = set(ips)

        for ip in to_check:
            try:
                result = await self.middleware.call('dnsclient.reverse_lookup', {
                    'addresses': [ip]
                })
            except dns.resolver.NXDOMAIN:
                # This may simply mean entry was not found
                validated_ips.append(ip)
            except dns.resolver.LifetimeTimeout:
                # no explicit `continue` needed here: this is the end of the loop body,
                # so the IP is simply omitted from the validated list
                self.logger.warning(
                    '%s: DNS operation timed out while trying to resolve reverse pointer '
                    'for IP address.',
                    ip
                )
            except dns.resolver.NoNameservers:
                self.logger.warning(
                    'No nameservers configured to handle reverse pointer for %s. '
                    'Omitting from list of addresses to use for Active Directory purposes.',
                    ip
                )
                continue
            except Exception:
                # DNS for this IP may be simply wildly misconfigured and time out
                self.logger.warning(
                    'Reverse lookup of %s failed, omitting from list '
                    'of addresses to use for Active Directory purposes.',
                    ip, exc_info=True
                )
                continue
            else:
                if result[0]['target'].casefold() != data['hostname'].casefold():
                    errmsg = f'Reverse lookup of {ip} points to {result[0]["target"]}, expected {data["hostname"]}.'
                    self.logger.warning(errmsg)
                    if valid_only:
                        continue

                validated_ips.append(ip)

        return validated_ips

    @private
    async def get_ipaddresses(self, ad, smb, is_ha):
        """Return validated IPs to register in AD DNS, or None when updates are disabled."""
        if not ad['allow_dns_updates']:
            return None

        if is_ha and not smb['bindip']:
            bindip = await self.middleware.call('smb.bindip_choices')
        else:
            bindip = smb['bindip']

        hostname = f'{smb["netbiosname_local"]}.{ad["domainname"]}.'
        return await self.ipaddresses_to_register({
            'bindip': bindip,
            'hostname': hostname,
        })

    @private
    async def register_dns(self):
        """Add A/AAAA records for this host to AD DNS via nsupdate.

        No-op when dynamic DNS updates are disabled. Raises CallError when no
        local IP address passes DNS validation.
        """
        ad = await self.middleware.call('activedirectory.config')
        smb = await self.middleware.call('smb.config')
        is_ha = await self.middleware.call('failover.licensed')
        if not ad['allow_dns_updates']:
            return None

        if not (to_register := await self.get_ipaddresses(ad, smb, is_ha)):
            raise CallError(
                'No server IP addresses passed DNS validation. '
                'This may indicate an improperly configured reverse zone. '
                'Review middleware log files for details regarding errors encountered.',
                errno.EINVAL
            )

        payload = []
        hostname = f'{smb["netbiosname_local"]}.{ad["domainname"]}.'
        for ip in to_register:
            addr = ipaddress.ip_address(ip)
            payload.append({
                'command': 'ADD',
                'name': hostname,
                'address': str(addr),
                'type': 'A' if addr.version == 4 else 'AAAA'
            })

        try:
            await self.middleware.call('dns.nsupdate', {'ops': payload})
        except CallError as e:
            self.logger.warning(f'Failed to update DNS with payload [{payload}]: {e.errmsg}')

    @private
    def port_is_listening(self, host, port, timeout=1):
        """Return True when a TCP connection to host:port succeeds within `timeout` seconds."""
        ret = False

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if timeout:
            s.settimeout(timeout)

        try:
            s.connect((host, port))
            ret = True
        except Exception as e:
            self.logger.debug("connection to %s failed with error: %s",
                              host, e)
            ret = False

        finally:
            s.close()

        return ret

    @private
    async def check_nameservers(self, domain, site=None, lifetime=10):
        """Verify every configured nameserver can resolve the domain's kerberos/LDAP SRV records.

        Raises CallError (EINVAL/ENOENT) when a nameserver fails to resolve
        the expected SRV names, which usually indicates the configured
        nameservers are not the AD domain's nameservers.
        """
        def get_host(srv_prefix):
            # build the SRV lookup name, inserting "<site>._sites." when a
            # non-default site is specified (placement differs for _msdcs names)
            if site and site != 'Default-First-Site-Name':
                if 'msdcs' in srv_prefix.value:
                    parts = srv_prefix.value.split('.')
                    srv = '.'.join([parts[0], parts[1]])
                    msdcs = '.'.join([parts[2], parts[3]])
                    return f"{srv}.{site}._sites.{msdcs}.{domain}"
                else:
                    return f"{srv_prefix.value}{site}._sites.{domain}."

            return f"{srv_prefix.value}{domain}."

        targets = [get_host(srv_record) for srv_record in [SRV.KERBEROS, SRV.LDAP]]

        for entry in await self.middleware.call('dns.query'):
            servers = []
            for name in targets:
                try:
                    resp = await self.middleware.call('dnsclient.forward_lookup', {
                        'names': [name],
                        'record_types': ['SRV'],
                        'dns_client_options': {
                            'nameservers': [entry['nameserver']],
                            'lifetime': lifetime,
                        }
                    })
                except dns.resolver.NXDOMAIN:
                    raise CallError(
                        f'{name}: Nameserver {entry["nameserver"]} failed to resolve SRV '
                        f'record for domain {domain}. This may indicate a DNS misconfiguration '
                        'on the TrueNAS server. NOTE: When configuring with Active Directory, all '
                        'registered nameservers must be nameservers for the Active Directory domain.',
                        errno.EINVAL
                    )
                except Exception as e:
                    raise CallError(
                        f'{name}: Nameserver {entry["nameserver"]} failed to resolve SRV '
                        f'record for domain {domain} : {e}',
                        errno.EINVAL
                    )
                else:
                    servers.extend(resp)

            for name in targets:
                if not filter_list(servers, [['name', 'C=', name]]):
                    raise CallError(
                        f'Forward lookup of "{name}" failed with nameserver {entry["nameserver"]}. '
                        'This may indicate a DNS misconfiguration on the remote nameserver.',
                        errno.ENOENT
                    )

    @private
    def get_n_working_servers(self, domain, srv=SRV.DOMAINCONTROLLER.name, site=None, cnt=1, timeout=10, verbose=False):
        """Return up to `cnt` {host, port} dicts for `srv`-type servers that accept TCP connections.

        SRV results are ordered by priority/weight; each candidate is probed
        with port_is_listening (using `timeout`) before being included.
        """
        srv_prefix = SRV[srv]
        if site and site != 'Default-First-Site-Name':
            if 'msdcs' in srv_prefix.value:
                parts = srv_prefix.value.split('.')
                srv = '.'.join([parts[0], parts[1]])
                msdcs = '.'.join([parts[2], parts[3]])
                host = f"{srv}.{site}._sites.{msdcs}.{domain}"
            else:
                host = f"{srv_prefix.value}{site}._sites.{domain}."
        else:
            host = f"{srv_prefix.value}{domain}."

        servers = self.middleware.call_sync('dnsclient.forward_lookup', {
            'names': [host],
            'record_types': ['SRV'],
            'query-options': {'order_by': ['priority', 'weight']},
            'dns_client_options': {'lifetime': timeout},
        })

        output = []
        for server in servers:
            if len(output) == cnt:
                break

            if self.port_is_listening(server['target'], server['port'], timeout=timeout):
                output.append({'host': server['target'], 'port': server['port']})

        if verbose:
            self.logger.debug('Request for %d of server type [%s] returned: %s',
                              cnt, srv, output)
        return output

    @private
    async def netbiosname_is_ours(self, netbios_name, domain_name, lifetime=10):
        """Return True when the forward lookup of <netbios_name>.<domain_name> resolves to one of our bind IPs."""
        try:
            dns_addresses = set([x['address'] for x in await self.middleware.call('dnsclient.forward_lookup', {
                'names': [f'{netbios_name}.{domain_name}'],
                'dns_client_options': {'lifetime': lifetime},
            })])
        except dns.resolver.NXDOMAIN:
            raise CallError(f'DNS forward lookup of [{netbios_name}] failed.', errno.ENOENT)
        except dns.resolver.NoNameservers as e:
            raise CallError(f'DNS forward lookup of netbios name failed: {e}', errno.EFAULT)

        ips_in_use = set((await self.middleware.call('smb.bindip_choices')).keys())

        return bool(dns_addresses & ips_in_use)
| 11,459 | Python | .py | 251 | 31.912351 | 120 | 0.537296 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,891 | trigger_migration.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/trigger_migration.py | import middlewared.sqlalchemy as sa
from middlewared.service import private, Service
from middlewared.service_exception import MatchNotFound
from .utils import get_sorted_backups
class KubernetesModel(sa.Model):
    """Legacy kubernetes service table.

    Only `pool` is consulted here: a non-NULL value signals that a
    kubernetes -> docker migration is still pending for that pool (it is
    nulled out once migration completes -- see unset_kubernetes_pool below).
    """
    __tablename__ = 'services_kubernetes'

    id = sa.Column(sa.Integer(), primary_key=True)
    pool = sa.Column(sa.String(255), default=None, nullable=True)
class K8stoDockerMigrationService(Service):

    class Config:
        namespace = 'k8s_to_docker'
        cli_namespace = 'k8s_to_docker'

    @private
    async def trigger_migration(self):
        """Automatically migrate the latest kubernetes backup to docker at system ready.

        Runs only when the database still records a kubernetes pool. Picks the
        most recent backup that contains migratable releases and runs the
        migrate job for it; on success (or when there is nothing to migrate)
        the kubernetes pool is unset so this never runs again.
        """
        try:
            k8s_pool = (await self.middleware.call('datastore.config', 'services.kubernetes'))['pool']
        except MatchNotFound:
            return

        if not k8s_pool:
            return

        # We would like to wait for interfaces like bridge to come up before we proceed with migration
        # because they are notorious and can take some time to actually come up and if they are the default
        # interface, then migration is bound to fail as catalog won't sync because of no network
        # connectivity and us not able to see if an app is available in newer catalog. If the default interface
        # is not up, then we will fail the migration here and early
        await self.middleware.call('docker.setup.validate_interfaces')

        list_backup_job = await self.middleware.call('k8s_to_docker.list_backups', k8s_pool)
        await list_backup_job.wait()
        if list_backup_job.error or list_backup_job.result['error']:
            self.logger.error(
                'Failed to list backups for %r pool: %s', k8s_pool,
                list_backup_job.error or list_backup_job.result['error']
            )
            return

        if not list_backup_job.result['backups']:
            self.logger.debug('No backups found for %r pool', k8s_pool)
            await self.unset_kubernetes_pool()
            return

        # We will get latest backup now and execute it
        backups = get_sorted_backups(list_backup_job.result)
        if not backups:
            self.logger.debug('No backups found with releases which can be migrated for %r pool', k8s_pool)
            await self.unset_kubernetes_pool()
            return

        latest_backup = backups[-1]
        migrate_job = await self.middleware.call(
            'k8s_to_docker.migrate', k8s_pool, {'backup_name': latest_backup['name']}
        )
        await migrate_job.wait()
        if migrate_job.error:
            # NOTE: the pool is deliberately left set here so migration can be retried
            self.logger.error(
                'Failed to migrate %r backup for %r pool: %s', latest_backup['name'], k8s_pool, migrate_job.error
            )
            return

        await self.unset_kubernetes_pool()
        self.logger.debug('Successfully migrated %r backup for %r pool', latest_backup['name'], k8s_pool)

    @private
    async def unset_kubernetes_pool(self):
        """Null out the recorded kubernetes pool so migration is not attempted again."""
        config = await self.middleware.call('datastore.config', 'services.kubernetes')
        self.logger.debug('Unsetting kubernetes pool for %r', config['pool'])
        await self.middleware.call('datastore.update', 'services.kubernetes', config['id'], {'pool': None})
async def _event_system_ready(middleware, event_type, args):
    """Kick off the kubernetes -> docker migration when the system becomes ready."""
    # we ignore the 'ready' event on an HA system since the failover event plugin
    # is responsible for starting this service
    if not await middleware.call('failover.licensed'):
        middleware.create_task(middleware.call('k8s_to_docker.trigger_migration'))
async def setup(middleware):
    # Arrange for the kubernetes -> docker migration to run once the system
    # reports ready (non-HA systems only; see _event_system_ready).
    middleware.event_subscribe('system.ready', _event_system_ready)
| 3,589 | Python | .py | 69 | 43.115942 | 113 | 0.670763 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,892 | migrate_config_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate_config_utils.py | import tempfile
import yaml
from middlewared.plugins.apps.utils import run
def migrate_chart_release_config(release_data: dict) -> dict | str:
    """Run the app's kubernetes -> docker migration script against `release_data`.

    The release data is serialized to YAML in a temporary file whose path is
    passed to the migration script (`release_data['migrate_file_path']`). On
    success the parsed migrated config (a dict) is returned; on any failure a
    human-readable error string is returned instead.
    """
    with tempfile.NamedTemporaryFile(mode='w') as config_file:
        config_file.write(yaml.dump(release_data))
        config_file.flush()
        result = run([release_data['migrate_file_path'], config_file.name])

    if result.returncode:
        return f'Failed to migrate config: {result.stderr}'

    if not result.stdout:
        error = 'No output from migration script'
    else:
        try:
            migrated_config = yaml.safe_load(result.stdout)
        except yaml.YAMLError:
            error = 'Failed to parse migrated config'
        else:
            if migrated_config:
                return migrated_config
            error = 'No migrated config found'

    return f'Failed to migrate config: {error}'
| 885 | Python | .py | 23 | 27.434783 | 67 | 0.577778 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,893 | list_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py | import os
import yaml
from catalog_reader.train_utils import get_train_path
from middlewared.plugins.docker.state_utils import catalog_ds_path
from .secrets_utils import list_secrets
from .yaml import SerializedDatesFullLoader
HELM_SECRET_PREFIX = 'sh.helm.release'
K8s_BACKUP_NAME_PREFIX = 'ix-applications-backup-'
def get_backup_dir(k8s_ds: str) -> str:
    """Return the absolute path of the backups directory on the given ix-applications dataset."""
    dataset_mountpoint = os.path.join('/mnt', k8s_ds)
    return os.path.join(dataset_mountpoint, 'backups')
def get_release_metadata(release_path: str) -> dict:
    """Load a release backup's `namespace.yaml`; return {} if it is missing or invalid."""
    try:
        with open(os.path.join(release_path, 'namespace.yaml')) as f:
            return yaml.load(f.read(), Loader=SerializedDatesFullLoader)
    except (FileNotFoundError, yaml.YAMLError):
        return {}
def get_default_release_details(release_name: str) -> dict:
    """Return a skeleton release-details dict for `release_name` (all fields empty/None)."""
    details = dict.fromkeys(('error', 'train', 'app_name', 'app_version', 'migrate_file_path'))
    details.update({
        'helm_secret': {},
        'release_secrets': {},
        'release_name': release_name,
    })
    return details
def release_details(
    release_name: str, release_path: str, catalog_path: str, apps_mapping: dict, installed_apps: dict,
) -> dict:
    """Build the migration details dict for one chart release backup.

    Validates the release's backup metadata, catalog origin, train, helm
    secrets and the availability of the new app's migration script. On any
    validation failure the returned dict has a non-None 'error' describing
    why the release cannot be migrated; otherwise 'migrate_file_path' points
    at the script to run.
    """
    config = get_default_release_details(release_name)
    if not (release_metadata := get_release_metadata(release_path)) or not all(
        k in release_metadata.get('metadata', {}).get('labels', {})
        for k in ('catalog', 'catalog_branch', 'catalog_train')
    ):
        return config | {'error': 'Unable to read release metadata'}

    metadata_labels = release_metadata['metadata']['labels']
    if metadata_labels['catalog'] != 'TRUENAS' or metadata_labels['catalog_branch'] != 'master':
        return config | {'error': 'Release is not from TrueNAS catalog'}

    if release_name in installed_apps:
        return config | {'error': 'App with same name is already installed'}

    # the old 'charts' train maps to 'stable' in the new catalog layout
    release_train = metadata_labels['catalog_train'] if metadata_labels['catalog_train'] != 'charts' else 'stable'
    config['train'] = release_train
    if release_train not in apps_mapping:
        return config | {'error': 'Unable to locate release\'s train'}

    secrets_dir = os.path.join(release_path, 'secrets')
    try:
        secrets = list_secrets(secrets_dir)
    except FileNotFoundError:
        return config | {'error': 'Unable to list release secrets'}

    if secrets['helm_secret']['name'] is None:
        return config | {'error': 'Unable to read helm secret details'}

    config.update({
        'app_name': secrets['helm_secret']['name'],
        **secrets,
    })
    # ix-chart was renamed to ix-app in the stable train of the new catalog
    if config['app_name'] == 'ix-chart' and release_train == 'stable':
        config['app_name'] = 'ix-app'

    if config['app_name'] not in apps_mapping[release_train]:
        return config | {'error': 'Unable to locate release\'s app'}

    config['app_version'] = apps_mapping[release_train][config['app_name']]['version']
    migrate_tail_file_path = os.path.join(
        release_train, config['app_name'], config['app_version'], 'migrations/migrate_from_kubernetes'
    )
    # existence is checked against `catalog_path`, but the recorded path is
    # rooted at the catalog dataset mountpoint used at migration time
    to_test_migrate_file_path = os.path.join(get_train_path(catalog_path), migrate_tail_file_path)
    if os.path.exists(to_test_migrate_file_path):
        config['migrate_file_path'] = os.path.join(get_train_path(catalog_ds_path()), migrate_tail_file_path)
    else:
        config['error'] = 'Unable to locate release\'s app\'s migration file'

    return config
| 3,370 | Python | .py | 70 | 41.957143 | 114 | 0.668904 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,894 | secrets_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/secrets_utils.py | import binascii
import contextlib
import gzip
import json
import os
from base64 import b64decode
import yaml
from .yaml import SerializedDatesFullLoader
HELM_SECRET_PREFIX = 'sh.helm.release'
def list_secrets(secrets_dir: str) -> dict[str, dict[str, dict]]:
    """Scan a release's secrets directory and split helm vs. plain release secrets.

    Helm release secrets (names starting with `sh.helm.release`) are versioned;
    the lexically greatest file name wins and its decoded contents (appVersion,
    config, name, ...) are merged into 'helm_secret'. Everything else is
    decoded into 'release_secrets' keyed by file name. Raises FileNotFoundError
    when `secrets_dir` does not exist.
    """
    secrets = {
        'helm_secret': {
            'secret_name': None,
            'name': None,
        },
        'release_secrets': {},
    }
    with os.scandir(secrets_dir) as it:
        for entry in it:
            if not entry.is_file():
                continue

            if entry.name.startswith(HELM_SECRET_PREFIX):
                # keep only the lexically-latest helm secret seen so far
                if secrets['helm_secret']['secret_name'] is None or entry.name > secrets['helm_secret']['secret_name']:
                    secret_contents = get_secret_contents(entry.path, True).get('release', {})
                    # NOTE(review): `k in secret_contents and k` only tests key presence
                    # (the literal key string is always truthy) -- possibly intended to be
                    # `secret_contents[k]`; confirm before changing.
                    secrets['helm_secret'].update({
                        'secret_name': entry.name,
                        **(secret_contents if all(
                            k in secret_contents and k for k in ('appVersion', 'config', 'name')
                        ) else {}),
                    })
            else:
                secrets['release_secrets'][entry.name] = get_secret_contents(entry.path)

    return secrets
def get_secret_contents(secret_path: str, helm_secret: bool = False) -> dict:
    """Decode the `data` section of a kubernetes secret YAML file.

    Plain secrets: each value is single base64-decoded to text. Helm release
    secrets: each value is double base64-decoded, gunzipped and JSON-parsed,
    then trimmed to the fields the migration needs (manifest/info/version/
    namespace are dropped; appVersion and name are lifted out of the chart
    metadata). Undecodable values are silently skipped.
    """
    with open(secret_path, 'r') as f:
        secret = yaml.load(f.read(), Loader=SerializedDatesFullLoader)

    if isinstance(secret.get('data'), dict) is False:
        return {}

    contents = {}
    for k, v in secret['data'].items():
        with contextlib.suppress(binascii.Error, gzip.BadGzipFile, KeyError):
            if helm_secret:
                v = json.loads(gzip.decompress(b64decode(b64decode(v))).decode())
                for pop_k in ('manifest', 'info', 'version', 'namespace'):
                    v.pop(pop_k)
                chart = v.pop('chart')['metadata']
                for add_k in ('appVersion', 'name'):
                    v[add_k] = chart[add_k]
            else:
                v = b64decode(v).decode()
            contents[k] = v

    return contents
| 2,124 | Python | .py | 52 | 29.653846 | 119 | 0.55345 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,895 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/utils.py | import os
def get_sorted_backups(backups_config: dict) -> list:
    """
    Returns a list of backups sorted by their creation date with latest backups at the end of the list.

    Backups without any releases are excluded; [] is returned when the config
    reports an error or contains no backups at all.
    """
    if backups_config['error'] or not backups_config['backups']:
        return []

    with_releases = [
        backup for backup in backups_config['backups'].values() if backup['releases']
    ]
    with_releases.sort(key=lambda backup: backup['created_on'])
    return with_releases
def get_k8s_ds(pool_name: str) -> str:
    """Return the name of the legacy kubernetes (ix-applications) dataset on `pool_name`."""
    dataset_name = os.path.join(pool_name, 'ix-applications')
    return dataset_name
| 528 | Python | .py | 13 | 35.230769 | 103 | 0.672549 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,896 | yaml.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/yaml.py | import yaml
class SerializedDatesFullLoader(yaml.FullLoader):
    """FullLoader variant used to load backup YAML with timestamps kept as strings.

    (The timestamp resolver is stripped at module level right after this class
    definition.)
    """
    @classmethod
    def remove_implicit_resolver(cls, tag_to_remove):
        """
        Remove implicit resolvers for a particular tag

        Takes care not to modify resolvers in super classes.

        We want to load datetimes as strings, not dates, because we
        go on to serialise as json which doesn't have the advanced types
        of yaml, and leads to incompatibilities down the track.
        """
        # copy-on-write: give this subclass its own resolver mapping so the
        # base yaml.FullLoader is left untouched
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()

        for first_letter, mappings in cls.yaml_implicit_resolvers.items():
            cls.yaml_implicit_resolvers[first_letter] = [
                (tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
            ]
class SafeDumper(yaml.SafeDumper):
    """Local SafeDumper subclass: the quoted-str representer registered at module
    level below is attached here so it does not affect yaml.SafeDumper globally."""
    pass
SerializedDatesFullLoader.remove_implicit_resolver('tag:yaml.org,2002:timestamp')

# Customize the safe dumper so string values are always emitted with double
# quotes. Without explicit quoting, strings such as 'y' would be interpreted
# as boolean true by YAML consumers (e.g. helm), producing inconsistent
# behaviour between what we dump and what gets parsed back.
yaml.add_representer(
    str, lambda dumper, data: dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"'), SafeDumper
)
| 1,409 | Python | .py | 27 | 45.62963 | 107 | 0.712828 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,897 | migrate.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py | import logging
import os.path
import shutil
from middlewared.plugins.apps.ix_apps.path import get_app_parent_volume_ds_name, get_installed_app_path
from middlewared.plugins.docker.state_utils import DatasetDefaults
from middlewared.schema import accepts, Bool, Dict, List, returns, Str
from middlewared.service import CallError, job, Service
from .migrate_config_utils import migrate_chart_release_config
from .utils import get_k8s_ds, get_sorted_backups
logger = logging.getLogger('app_migrations')
class K8stoDockerMigrationService(Service):
    """Migrates legacy kubernetes (ix-applications) app backups to the docker based apps framework."""

    class Config:
        namespace = 'k8s_to_docker'
        cli_namespace = 'k8s_to_docker'

    @accepts(
        Str('kubernetes_pool'),
        Dict(
            'options',
            # When null, the most recent backup containing migratable releases is used.
            Str('backup_name', null=True, default=None),
        ),
        roles=['DOCKER_WRITE']
    )
    @returns(List(
        'app_migration_details',
        items=[Dict(
            'app_migration_detail',
            Str('name'),
            Bool('successfully_migrated'),
            Str('error', null=True),
        )]
    ))
    @job(lock='k8s_to_docker_migrate')
    def migrate(self, job, kubernetes_pool, options):
        """
        Migrate kubernetes backups to docker.

        Returns one entry per release found in the selected backup with its
        migration outcome (`successfully_migrated` / `error`).  Raises
        `CallError` on failures that abort the whole migration (backup not
        found, docker (un)configuration failures, redeploy bulk-job failure).
        """
        # The workflow for the migration would be
        # 1) Ensuring the specified backup exists
        # 2) Map apps which are supported atm and will actually reflect in the UI
        # 3) Setup filesystem appropriately for docker
        # 4) Migrate the config of apps
        # 5) Create relevant filesystem bits for apps and handle cases like ix-volumes
        # 6) Redeploy apps
        backup_config_job = self.middleware.call_sync('k8s_to_docker.list_backups', kubernetes_pool)
        backup_config_job.wait_sync()
        if backup_config_job.error:
            raise CallError(f'Failed to list backups: {backup_config_job.error}')

        backups = backup_config_job.result
        if backups['error']:
            raise CallError(f'Failed to list backups for {kubernetes_pool!r}: {backups["error"]}')

        if options['backup_name'] is None:
            # No explicit backup requested - pick the most recent backup which
            # has at least one migratable release.
            if not backups['backups']:
                raise CallError(f'No backups found for {kubernetes_pool!r}')

            sorted_backups = get_sorted_backups(backups)
            if not sorted_backups:
                raise CallError(
                    f'Latest backup for {kubernetes_pool!r} does not have any releases which can be migrated'
                )

            options['backup_name'] = sorted_backups[-1]['name']

        if options['backup_name'] not in backups['backups']:
            raise CallError(f'Backup {options["backup_name"]} not found')

        backup_config = backups['backups'][options['backup_name']]
        job.set_progress(10, f'Located {options["backup_name"]} backup')

        if not backup_config['releases']:
            raise CallError(f'No old apps found in {options["backup_name"]!r} backup which can be migrated')

        # Encrypted ix-volumes cannot be cloned/promoted below, so remember the
        # encryption state of the legacy kubernetes dataset up front.
        k8s_ds_encrypted = bool(self.middleware.call_sync(
            'zfs.dataset.get_instance',
            get_k8s_ds(kubernetes_pool),
            {'extra': {'properties': ['encryption'], 'retrieve_children': False}}
        )['encrypted'])

        docker_config = self.middleware.call_sync('docker.config')
        if docker_config['pool'] and docker_config['pool'] != kubernetes_pool:
            # For good measure we stop docker service and unset docker pool if any configured
            self.middleware.call_sync('service.stop', 'docker')
            job.set_progress(15, 'Un-configuring docker service if configured')
            docker_job = self.middleware.call_sync('docker.update', {'pool': None})
            docker_job.wait_sync()
            if docker_job.error:
                raise CallError(f'Failed to un-configure docker: {docker_job.error}')

        if docker_config['pool'] is None or docker_config['pool'] != kubernetes_pool:
            # Point the docker service at the pool being migrated.
            docker_job = self.middleware.call_sync('docker.update', {'pool': kubernetes_pool})
            docker_job.wait_sync()
            if docker_job.error:
                raise CallError(f'Failed to configure docker: {docker_job.error}')

        self.middleware.call_sync('catalog.sync').wait_sync()

        # Snapshot of currently installed apps, used to detect name collisions below.
        installed_apps = {app['id']: app for app in self.middleware.call_sync('app.query')}

        job.set_progress(25, f'Rolling back to {backup_config["snapshot_name"]!r} snapshot')
        self.middleware.call_sync(
            'zfs.snapshot.rollback', backup_config['snapshot_name'], {
                'force': True,
                'recursive': True,
                'recursive_clones': True,
                'recursive_rollback': True,
            }
        )

        job.set_progress(30, 'Starting migrating old apps to new apps')

        # We will now iterate over each chart release which can be migrated and try to migrate its config.
        # If we are able to migrate its config, we will proceed with setting up relevant filesystem bits
        # for the app and finally redeploy it.
        total_releases = len(backup_config['releases'])
        # Progress from 30% to 70% is divided evenly across releases.
        app_percentage = ((70 - 30) / total_releases)
        percentage = 30
        release_details = []
        migrate_context = {'gpu_choices': self.middleware.call_sync('app.gpu_choices_internal')}
        # Throwaway job-like object for app.create_internal, which expects a
        # job but whose progress we intentionally discard here.
        dummy_job = type('dummy_job', (object,), {'set_progress': lambda *args: None})()
        for chart_release in backup_config['releases']:
            percentage += app_percentage
            job.set_progress(percentage, f'Migrating {chart_release["release_name"]!r} app')

            # Pessimistic default; overwritten on success at the end of the iteration.
            release_config = {
                'name': chart_release['release_name'],
                'error': 'Unable to complete migration',
                'successfully_migrated': False,
            }
            release_details.append(release_config)
            if release_config['name'] in installed_apps:
                # Ideally we won't come to this case at all, but this case will only be true in the following case
                # User configured docker pool
                # Installed X app with same name
                # Unset docker pool
                # Tried restoring backup on the same pool
                # We will run into this case because when we were listing out chart releases which can be migrated
                # we were not able to deduce installed apps at all as pool was unset atm and docker wasn't running
                release_config['error'] = 'App with same name is already installed'
                continue

            # migrate_chart_release_config returns the new config dict on
            # success, or an error string / falsy value on failure.
            new_config = migrate_chart_release_config(chart_release | migrate_context)
            if isinstance(new_config, str) or not new_config:
                release_config['error'] = f'Failed to migrate config: {new_config}'
                continue

            complete_app_details = self.middleware.call_sync('catalog.get_app_details', chart_release['app_name'], {
                'train': chart_release['train'],
            })

            try:
                self.middleware.call_sync(
                    'app.create_internal', dummy_job, chart_release['release_name'],
                    chart_release['app_version'], new_config, complete_app_details, True, True,
                )
            except Exception as e:
                release_config['error'] = f'Failed to create app: {e}'
                continue

            # At this point we have just not instructed docker to start the app and ix volumes normalization is left
            release_user_config = chart_release['helm_secret']['config']
            snapshot = backup_config['snapshot_name'].split('@')[-1]
            # Collect the ix-volume dataset snapshots which actually exist for this release.
            available_snapshots = set()
            for ix_volume in release_user_config.get('ixVolumes', []):
                ds_name = ix_volume.get('hostPath', '')[5:]  # remove /mnt/
                ds_snap = f'{ds_name}@{snapshot}'
                if not self.middleware.call_sync('zfs.snapshot.query', [['id', '=', ds_snap]]):
                    continue

                available_snapshots.add(ds_snap)

            if available_snapshots:
                # Clear the app's freshly created ix-volumes; the legacy data
                # will be cloned in their place below.
                self.middleware.call_sync('app.schema.action.update_volumes', chart_release['release_name'], [])

            try:
                if k8s_ds_encrypted:
                    raise CallError('Encrypted ix-volumes are not supported for migration')
                app_volume_ds = get_app_parent_volume_ds_name(
                    os.path.join(kubernetes_pool, 'ix-apps'), chart_release['release_name']
                )
                for snapshot in available_snapshots:
                    # Clone each legacy ix-volume snapshot into the new app's
                    # volume dataset and promote it so the data becomes independent.
                    destination_ds = os.path.join(app_volume_ds, snapshot.split('@')[0].split('/')[-1])
                    self.middleware.call_sync('zfs.snapshot.clone', {
                        'snapshot': snapshot,
                        'dataset_dst': destination_ds,
                        'dataset_properties': DatasetDefaults.update_only(os.path.basename(destination_ds)),
                    })
                    self.middleware.call_sync('zfs.dataset.promote', destination_ds)
                    self.middleware.call_sync('zfs.dataset.mount', destination_ds)
            except CallError as e:
                if k8s_ds_encrypted:
                    release_config['error'] = 'App is using encrypted ix-volumes which are not supported for migration'
                else:
                    release_config['error'] = f'Failed to clone and promote ix-volumes: {e}'
                # We do this to make sure it does not show up as installed in the UI
                shutil.rmtree(get_installed_app_path(chart_release['release_name']), ignore_errors=True)
            else:
                release_config.update({
                    'error': None,
                    'successfully_migrated': True,
                })

        self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
        job.set_progress(75, 'Deploying migrated apps')

        # Redeploy only the releases whose migration succeeded.
        bulk_job = self.middleware.call_sync(
            'core.bulk', 'app.redeploy', [
                [r['name']] for r in filter(lambda r: r['error'] is None, release_details)
            ]
        )
        bulk_job.wait_sync()
        if bulk_job.error:
            raise CallError(f'Failed to redeploy apps: {bulk_job.error}')

        # We won't check here if the apps are working or not, as the idea of this endpoint is to migrate
        # apps from k8s to docker which is complete at this point. If the app is not running at this point,
        # that does not mean the migration didn't work - it's an app problem and we need to fix/investigate
        # it accordingly. User will see the app is not working in the UI and can raise a ticket accordingly
        # or consult app lifecycle logs.
        job.set_progress(100, 'Migration completed')

        failures = False
        # Log the per-release results to the dedicated app-migrations log file.
        logger.debug('Migration details for %r backup on %r pool', options['backup_name'], kubernetes_pool)
        for release in release_details:
            if release['successfully_migrated']:
                logger.debug('%r app migrated successfully', release['name'])
            else:
                failures = True
                logger.debug('%r app failed to migrate successfully: %r', release['name'], release['error'])

        # Raise (or clear) the one-shot alert summarizing migration failures.
        if failures:
            self.middleware.call_sync('alert.oneshot_create', 'FailuresInAppMigration', None)
        else:
            self.middleware.call_sync('alert.oneshot_delete', 'FailuresInAppMigration')

        return release_details
| 11,731 | Python | .py | 211 | 42.914692 | 119 | 0.606026 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,898 | list_k8s_backups.py | truenas_middleware/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_k8s_backups.py | import os
from middlewared.schema import accepts, Dict, returns, Str
from middlewared.service import job, Service
from .list_utils import get_backup_dir, get_default_release_details, K8s_BACKUP_NAME_PREFIX, release_details
from .utils import get_k8s_ds
class K8stoDockerMigrationService(Service):
    """Enumerates legacy kubernetes (ix-applications) backups eligible for docker migration."""

    class Config:
        namespace = 'k8s_to_docker'
        cli_namespace = 'k8s_to_docker'

    @accepts(Str('kubernetes_pool'), roles=['DOCKER_READ'])
    @returns(Dict(
        'backups',
        Str('error', null=True),
        Dict('backups', additional_attrs=True),
    ))
    @job(lock=lambda args: f'k8s_to_docker_list_backups_{args[0]}')
    def list_backups(self, job, kubernetes_pool):
        """
        List existing kubernetes backups.

        Returns a dict with a top-level `error` (null on success) and a
        `backups` mapping of backup name -> details, where each backup lists
        its migratable `releases` and `skipped_releases` (with per-release
        error reasons).
        """
        backup_config = {
            'error': None,
            'backups': {},
        }
        k8s_ds = get_k8s_ds(kubernetes_pool)
        if not self.middleware.call_sync('pool.dataset.query', [['id', '=', k8s_ds]]):
            return backup_config | {'error': f'Unable to locate {k8s_ds!r} dataset'}

        backup_base_dir = get_backup_dir(k8s_ds)
        if not os.path.exists(backup_base_dir):
            return backup_config | {'error': f'Unable to locate {backup_base_dir!r} backups directory'}

        self.middleware.call_sync('catalog.sync').wait_sync()

        backups = backup_config['backups']
        # Kubernetes backups are identified by snapshots on the legacy dataset
        # whose names carry the backup prefix.
        snapshots = self.middleware.call_sync(
            'zfs.snapshot.query', [['name', '^', f'{k8s_ds}@{K8s_BACKUP_NAME_PREFIX}']], {'select': ['name']}
        )
        # Names of the immediate child datasets under <k8s_ds>/releases,
        # i.e. the releases which actually have data on disk.
        releases_datasets = set(
            ds['id'].split('/', 3)[-1].split('/', 1)[0]
            for ds in self.middleware.call_sync('zfs.dataset.get_instance', f'{k8s_ds}/releases')['children']
        )

        apps_mapping = self.middleware.call_sync('catalog.train_to_apps_version_mapping')
        catalog_path = self.middleware.call_sync('catalog.config')['location']
        docker_config = self.middleware.call_sync('docker.config')
        if docker_config['pool'] and docker_config['pool'] != kubernetes_pool:
            return backup_config | {
                'error': f'Docker pool if configured must be set only to {kubernetes_pool!r} or unset'
            }

        # Only when docker already runs on this pool can we know which apps
        # are installed (used to flag name collisions per release).
        installed_apps = {}
        if docker_config['pool'] == kubernetes_pool:
            installed_apps = {app['id']: app for app in self.middleware.call_sync('app.query')}

        for snapshot in snapshots:
            backup_name = snapshot['name'].split('@', 1)[-1].split(K8s_BACKUP_NAME_PREFIX, 1)[-1]
            backup_path = os.path.join(backup_base_dir, backup_name)
            # Skip snapshots whose on-disk backup directory is missing.
            if not os.path.exists(backup_path):
                continue

            backup_data = {
                'name': backup_name,
                'releases': [],
                'skipped_releases': [],
                'snapshot_name': snapshot['name'],
                'created_on': self.middleware.call_sync(
                    'zfs.snapshot.get_instance', snapshot['name']
                )['properties']['creation']['parsed'],
                'backup_path': backup_path,
            }

            # Each entry in the backup directory corresponds to one chart release.
            with os.scandir(backup_path) as entries:
                for release in entries:
                    if release.name not in releases_datasets:
                        backup_data['skipped_releases'].append(get_default_release_details(release.name) | {
                            'error': 'Release dataset not found in releases dataset',
                        })
                        continue

                    config = release_details(
                        release.name, release.path, catalog_path, apps_mapping, installed_apps,
                    )
                    if config['error']:
                        backup_data['skipped_releases'].append(config)
                    else:
                        backup_data['releases'].append(config)

            backups[backup_name] = backup_data

        job.set_progress(100, 'Retrieved backup config')
        return backup_config
| 4,018 | Python | .py | 81 | 37.135802 | 109 | 0.571684 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
def get_host_key_file_contents_from_ssh_credentials(credentials: dict) -> str:
    """
    Build the body of a `known_hosts`-style file from SSH `credentials`.

    Each non-empty, non-comment line of `credentials['remote_host_key']`
    becomes one entry, prefixed with the plain hostname for the default SSH
    port (22) or with the `[host]:port` form otherwise.
    """
    host = credentials['host']
    port = credentials['port']
    # known_hosts requires the bracketed [host]:port form for non-default ports.
    entry_prefix = host if port == 22 else f'[{host}]:{port}'
    entries = []
    for host_key in credentials['remote_host_key'].split('\n'):
        stripped_key = host_key.strip()
        # Skip blank lines and comment lines; keep the key text unstripped,
        # matching how it was supplied.
        if not stripped_key or stripped_key.startswith('#'):
            continue
        entries.append(f'{entry_prefix} {host_key}')
    return '\n'.join(entries)
| 419 | Python | .py | 9 | 38.444444 | 78 | 0.585366 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |