id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,200 | sharing_tasks.py | truenas_middleware/src/middlewared/middlewared/alert/source/sharing_tasks.py | from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, OneShotAlertClass
class ShareLockedAlertClass(AlertClass, OneShotAlertClass):
deleted_automatically = False
level = AlertLevel.WARNING
category = AlertCategory.SHARING
title = 'Share Is Unavailable Because It Uses A Locked Dataset'
text = '%(type)s share "%(identifier)s" is unavailable because it uses a locked dataset.'
async def create(self, args):
return Alert(ShareLockedAlertClass, args, key=f'{args["type"]}_{args["id"]}')
async def delete(self, alerts, query):
return list(filter(lambda alert: alert.key != query, alerts))
class TaskLockedAlertClass(AlertClass, OneShotAlertClass):
deleted_automatically = False
level = AlertLevel.WARNING
category = AlertCategory.TASKS
title = 'Task Is Unavailable Because It Uses A Locked Dataset'
text = '%(type)s task "%(identifier)s" will not be executed because it uses a locked dataset.'
async def create(self, args):
return Alert(TaskLockedAlertClass, args, key=f'{args["type"]}_{args["id"]}')
async def delete(self, alerts, query):
return list(filter(lambda alert: alert.key != query, alerts))
| 1,221 | Python | .py | 21 | 52.571429 | 98 | 0.730705 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,201 | origin.py | truenas_middleware/src/middlewared/middlewared/utils/origin.py | from dataclasses import dataclass
from socket import AF_INET, AF_INET6, AF_UNIX, SO_PEERCRED, SOL_SOCKET
from struct import calcsize, unpack
from pyroute2 import DiagSocket
__all__ = ('ConnectionOrigin',)
HA_HEARTBEAT_IPS = ('169.254.10.1', '169.254.10.2')
UIDS_TO_CHECK = (33, 0)
@dataclass(slots=True, frozen=True, kw_only=True)
class ConnectionOrigin:
family: AF_INET | AF_INET6 | AF_UNIX
"""The address family associated to the API connection"""
loc_addr: str | None = None
"""If `family` is not of type AF_UNIX, this represents
the local IP address associated to the TCP/IP connection"""
loc_port: int | None = None
"""If `family` is not of type AF_UNIX, this represents
the local port associated to the TCP/IP connection"""
rem_addr: str | None = None
"""If `family` is not of type AF_UNIX, this represents
the remote IP address associated to the TCP/IP connection"""
rem_port: int | None = None
"""If `family` is not of type AF_UNIX, this represents
the remote port associated to the TCP/IP connection"""
pid: int | None = None
"""If `family` is of type AF_UNIX, this represents
the process id associated to the unix datagram connection"""
uid: int | None = None
"""If `family` is of type AF_UNIX, this represents
the user id associated to the unix datagram connection"""
gid: int | None = None
"""If `family` is of type AF_UNIX, this represents
the group id associated to the unix datagram connection"""
@classmethod
def create(cls, request):
try:
sock = request.transport.get_extra_info("socket")
if sock.family == AF_UNIX:
pid, uid, gid = unpack("3i", sock.getsockopt(SOL_SOCKET, SO_PEERCRED, calcsize("3i")))
return cls(
family=sock.family,
pid=pid,
uid=uid,
gid=gid
)
elif sock.family in (AF_INET, AF_INET6):
la, lp, ra, rp = get_tcp_ip_info(sock, request)
return cls(
family=sock.family,
loc_addr=la,
loc_port=lp,
rem_addr=ra,
rem_port=rp,
)
except AttributeError:
# request.transport can be None by the time this is
# called on HA systems because remote node could
# have been rebooted
return
def __str__(self) -> str:
if self.is_unix_family:
return f"UNIX socket (pid={self.pid} uid={self.uid} gid={self.gid})"
elif self.family == AF_INET:
return f"{self.rem_addr}:{self.rem_port}"
elif self.family == AF_INET6:
return f"[{self.rem_addr}]:{self.rem_port}"
def match(self, origin) -> bool:
if self.is_unix_family:
return self.uid == origin.uid and self.gid == origin.gid
else:
return self.rem_addr == origin.rem_addr
@property
def repr(self) -> str:
return f"pid:{self.pid}" if self.is_unix_family else self.rem_addr
@property
def is_tcp_ip_family(self) -> bool:
return self.family in (AF_INET, AF_INET6)
@property
def is_unix_family(self) -> bool:
return self.family == AF_UNIX
@property
def is_ha_connection(self) -> bool:
return (
self.family in (AF_INET, AF_INET6) and
self.rem_port and self.rem_port <= 1024 and
self.rem_addr and self.rem_addr in HA_HEARTBEAT_IPS
)
def get_tcp_ip_info(sock, request) -> tuple:
# All API connections are terminated by nginx reverse
# proxy so the remote address is always 127.0.0.1. The
# only exceptions to this are:
# 1. Someone connects directly to 127.0.0.1 via a local
# shell session
# 2. Someone connects directly to heartbeat IP port 6000
# via a local shell session on a TrueNAS HA system
# 3. We connect directly to the other controller on an HA
# machine via heartbeat IP for intra-node communication.
# (this is done by us)
try:
# These headers are set by nginx or a user trying to do
# (potentially) nefarious things. If these are set then
# we need to check if the UID of the socket is owned by
# 0 (root) or 33 (www-data (nginx forks workers))
ra = request.headers["X-Real-Remote-Addr"]
rp = int(request.headers["X-Real-Remote-Port"])
check_uids = True
except (KeyError, ValueError):
ra, rp = sock.getpeername()
check_uids = False
with DiagSocket() as ds:
ds.bind()
for i in ds.get_sock_stats(family=sock.family):
if i['idiag_dst'] == ra and i['idiag_dport'] == rp:
if check_uids:
if i['idiag_uid'] in UIDS_TO_CHECK:
return i['idiag_src'], i['idiag_sport'], i['idiag_dst'], i['idiag_dport']
else:
return i['idiag_src'], i['idiag_sport'], i['idiag_dst'], i['idiag_dport']
return (None, None, None, None)
| 5,159 | Python | .py | 118 | 34.59322 | 102 | 0.598289 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,202 | db.py | truenas_middleware/src/middlewared/middlewared/utils/db.py | import sqlite3
FREENAS_DATABASE = '/data/freenas-v1.db'
FREENAS_DATABASE_MODE = 0o600
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def query_config_table(table, database_path=None, prefix=None):
return query_table(table, database_path, prefix)[0]
def query_table(table, database_path=None, prefix=None):
database_path = database_path or FREENAS_DATABASE
conn = sqlite3.connect(database_path)
result = []
try:
conn.row_factory = dict_factory
c = conn.cursor()
try:
for row in c.execute(f"SELECT * FROM {table}").fetchall():
row = dict(row)
if prefix:
row = {k.removeprefix(prefix): v for k, v in row.items()}
result.append(row)
finally:
c.close()
finally:
conn.close()
return result
def update_table(query, params, database_path=None):
database_path = database_path or FREENAS_DATABASE
conn = sqlite3.connect(database_path)
try:
c = conn.cursor()
try:
c.execute(query, params)
finally:
c.close()
conn.commit()
finally:
conn.close()
| 1,270 | Python | .py | 40 | 24.025 | 77 | 0.604423 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,203 | profile.py | truenas_middleware/src/middlewared/middlewared/utils/profile.py | import asyncio
import cProfile
import io
import pstats
from pstats import SortKey
def profile_wrap(func):
if asyncio.iscoroutinefunction(func):
async def wrapper(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
rv = await func(*args, **kwargs)
pr.disable()
s = io.StringIO()
pstats.Stats(pr, stream=s).sort_stats(SortKey.CUMULATIVE).print_stats()
return s.getvalue() + '\n' + str(rv)
else:
def wrapper(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
rv = func(*args, **kwargs)
pr.disable()
s = io.StringIO()
pstats.Stats(pr, stream=s).sort_stats(SortKey.CUMULATIVE).print_stats()
return s.getvalue() + '\n' + str(rv)
return wrapper
| 841 | Python | .py | 25 | 24.72 | 83 | 0.568796 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,204 | license.py | truenas_middleware/src/middlewared/middlewared/utils/license.py | LICENSE_ADDHW_MAPPING = {
1: "E16",
2: "E24",
3: "E60",
4: "ES60",
5: "ES12",
6: "ES24",
7: "ES24F",
8: "ES60S",
9: "ES102",
10: "ES102G2",
11: "ES60G2",
12: "ES24N",
13: "ES60G3",
}
| 235 | Python | .py | 15 | 11.2 | 25 | 0.436364 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,205 | security_descriptor.py | truenas_middleware/src/middlewared/middlewared/utils/security_descriptor.py | # Utilites related to SMB security descriptors
#
# These are primarily used for manipulating the SMB share ACL, which
# is stored in samba's share_info.tdb file as a packed security descriptor
# In principle, this code can also be used to decode SDDL strings and to
# parse security descriptors from remote servers.
#
# Tests are provided in pytest/unit/utils/test_security_descriptor.py
import enum
from samba.ndr import ndr_pack, ndr_unpack
from samba.dcerpc import security
class SDDLAceType(enum.Enum):
""" defined in MS-DTYP """
ALLOWED = 'A'
DENIED = 'D'
class SDDLAccessMaskStandard(enum.IntEnum):
""" defined in source3/lib/util_sd.c """
FULL = security.SEC_RIGHTS_DIR_ALL
CHANGE = 0x1301ff # DIR_READ | STD_DELETE | DELETE_CHILD | WRITE | TRAVERSE
READ = security.SEC_RIGHTS_DIR_READ | security.SEC_RIGHTS_DIR_EXECUTE
def security_descriptor_from_bytes(sd_buf: bytes) -> security.descriptor:
"""
method to convert bytes to security descriptor. This is particularly
relevant for security descriptor we store in share config containing
a backup copy of the share ACL.
"""
return ndr_unpack(security.descriptor, sd_buf)
def security_descriptor_to_bytes(sd: security.descriptor) -> bytes:
"""
method to convert security descriptor to bytes for insertion into
share_info.tdb and SMB share configuration
"""
return ndr_pack(sd)
def share_acl_to_sd_bytes(share_acl: list[dict]) -> bytes:
""" Convert share_acl list to SDDL string and then to security descriptor bytes """
sddl_str = 'D:'
for ace in share_acl:
sddl_ace_type = SDDLAceType[ace['ae_type']].value
sddl_access = hex(SDDLAccessMaskStandard[ace['ae_perm']].value)
sddl_sid = ace['ae_who_sid']
sddl_ace = f'({sddl_ace_type};;{sddl_access};;;{sddl_sid})'
sddl_str += sddl_ace
sd_obj = security.descriptor().from_sddl(sddl_str, security.dom_sid())
if sd_obj.dacl is None:
raise ValueError(f'{sddl_str}, malformed sddl string')
return security_descriptor_to_bytes(sd_obj)
def sd_bytes_to_share_acl(sd_bytes: bytes) -> list[dict]:
""" Convert security descriptor bytes to share middleware SMB share ACL """
sd = security_descriptor_from_bytes(sd_bytes)
share_acl = []
for ace in sd.dacl.aces:
dom_sid = str(ace.trustee)
perm = SDDLAccessMaskStandard(ace.access_mask).name
match ace.type:
case security.SEC_ACE_TYPE_ACCESS_ALLOWED:
ace_type = 'ALLOWED'
case security.SEC_ACE_TYPE_ACCESS_DENIED:
ace_type = 'DENIED'
case _:
raise ValueError(f'{ace.type}: unexpected ACE type')
share_acl.append({
'ae_who_sid': dom_sid,
'ae_perm': perm,
'ae_type': ace_type
})
return share_acl
| 2,875 | Python | .py | 66 | 37.363636 | 87 | 0.680287 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,206 | time_utils.py | truenas_middleware/src/middlewared/middlewared/utils/time_utils.py | from datetime import datetime, UTC
def utc_now(naive=True):
"""Wrapper for `datetime.now(UTC)`. Exclude timezone if `naive=True`."""
dt = datetime.now(UTC)
return dt.replace(tzinfo=None) if naive else dt
| 218 | Python | .py | 5 | 39.8 | 76 | 0.71564 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,207 | pci.py | truenas_middleware/src/middlewared/middlewared/utils/pci.py | import contextlib
import os
# get capability classes for relevant pci devices from
# https://github.com/pciutils/pciutils/blob/3d2d69cbc55016c4850ab7333de8e3884ec9d498/lib/header.h#L1429
SENSITIVE_PCI_DEVICE_TYPES = {
'0x0604': 'PCI Bridge',
'0x0601': 'ISA Bridge',
'0x0500': 'RAM memory',
'0x0c05': 'SMBus',
}
def get_pci_device_class(pci_path: str) -> str:
with contextlib.suppress(FileNotFoundError):
with open(os.path.join(pci_path, 'class'), 'r') as r:
return r.read().strip()
return ''
| 541 | Python | .py | 15 | 31.8 | 103 | 0.700576 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,208 | io.py | truenas_middleware/src/middlewared/middlewared/utils/io.py | # NOTE: tests are provided in src/middlewared/middlewared/pytest/unit/utils/test_write_if_changed.py
# Any updates to this file should have corresponding updates to tests
import fcntl
import os
import enum
import stat
from tempfile import NamedTemporaryFile
ID_MAX = 2 ** 32 - 2
class FileChanges(enum.IntFlag):
CONTENTS = enum.auto()
UID = enum.auto()
GID = enum.auto()
PERMS = enum.auto()
def dump(mask):
if unmapped := mask & ~int(FileChanges.CONTENTS | FileChanges.UID | FileChanges.GID | FileChanges.PERMS):
raise ValueError(f'{unmapped}: unsupported flags in mask')
return [
change.name for change in FileChanges if mask & change
]
class UnexpectedFileChange(Exception):
def __init__(self, path, changes):
self.changes = changes
self.path = path
self.changes_str = ', '.join(FileChanges.dump(self.changes))
def __str__(self):
return f'{self.path}: unexpected change in the following file attributes: {self.changes_str}'
def write_if_changed(path, data, uid=0, gid=0, perms=0o755, dirfd=None, raise_error=False):
"""
Commit changes to a configuration file.
`path` - path to configuration file. May be relative to a specified `dirfd`
`data` - expected file contents. May be bytes or string
`uid` - expected numeric UID for file
`gid` - expected numeric GID for file
`perms` - numeric permissions that file should have
`dirfd` - optional open file descriptor (may be O_PATH or O_DIRECTORY) if `path` is
relative.
`raise_error` - raise an UnexpectedFileChange exception if file ownership or
permissions have unexpectedly changed.
"""
if isinstance(data, str):
data = data.encode()
if not isinstance(perms, int):
raise ValueError('perms must be an integer')
if perms & ~(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO):
raise ValueError(f'{perms}: invalid mode. Supported bits are RWX for UGO.')
for xid in ((uid, 'uid'), (gid, 'gid')):
value, name = xid
if not isinstance(value, int):
raise ValueError(f'{name} must be an integer')
if value < 0 or value > ID_MAX:
raise ValueError(f'{name} must be between 0 and {ID_MAX}')
if dirfd is not None:
if not isinstance(dirfd, int):
raise ValueError('dirfd must be a valid file descriptor')
if not os.path.exists(f'/proc/self/fd/{dirfd}'):
raise ValueError(f'{dirfd}: file descriptor not found')
if os.path.isabs(path):
raise ValueError(f'{path}: absolute paths may not be used with a `dirfd`')
if fcntl.fcntl(dirfd, fcntl.F_GETFL) & (os.O_DIRECTORY | os.O_PATH) == 0:
raise ValueError('dirfd must be opened via O_DIRECTORY or O_PATH')
# tempfile API does not permit using a file descriptor
# so we'll get the underlying directory name from procfs
parent_dir = os.readlink(f'/proc/self/fd/{dirfd}')
else:
if not os.path.isabs(path):
raise ValueError(f'{path}: relative paths may not be used without a `dirfd`')
parent_dir = os.path.dirname(path)
changes = 0
try:
with open(os.open(path, os.O_RDONLY, dir_fd=dirfd), 'rb+') as f:
current = f.read()
if current != data:
changes |= FileChanges.CONTENTS
# The following cannot be skipped if we're changing file contents
# because we want accurate list of what has changed in file.
st = os.fstat(f.fileno())
if stat.S_IMODE(st.st_mode) != perms:
changes |= FileChanges.PERMS
if st.st_uid != uid:
changes |= FileChanges.UID
if st.st_gid != gid:
changes |= FileChanges.GID
if changes & (FileChanges.UID | FileChanges.GID):
os.fchown(f.fileno(), uid, gid)
if changes & FileChanges.PERMS:
os.fchmod(f.fileno(), perms)
except FileNotFoundError:
# Do not specify we're changing permissions on file
# because we're creating it new
changes = FileChanges.CONTENTS
if changes & FileChanges.CONTENTS:
with NamedTemporaryFile(mode='wb+', dir=parent_dir, delete=False) as tmp:
tmp.file.write(data)
tmp.file.flush()
os.fsync(tmp.file.fileno())
os.fchmod(tmp.file.fileno(), perms)
os.fchown(tmp.file.fileno(), uid, gid)
source_path = tmp.name
if dirfd is not None:
os.rename(source_path, path, src_dir_fd=dirfd, dst_dir_fd=dirfd)
else:
os.rename(source_path, path)
elif changes != 0 and raise_error:
raise UnexpectedFileChange(path, changes)
return changes
| 4,854 | Python | .py | 105 | 37.352381 | 113 | 0.633659 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,209 | limits.py | truenas_middleware/src/middlewared/middlewared/utils/limits.py | # This file provides constants and methods related to general middleware limits.
# Currently tests are provided in ./src/middlewared/middlewared/pytest/unit/utils/test_limits.py
import enum
from aiohttp.http_websocket import WSCloseCode
from truenas_api_client import json as ejson
# WARNING: below methods must _not_ be audited. c.f. comment in parse_message() below
MSG_SIZE_EXTENDED_METHODS = set((
'filesystem.file_receive',
))
class MsgSizeLimit(enum.IntEnum):
UNAUTHENTICATED = 8192 # maximum size of message processed from unauthenticated session
AUTHENTICATED = 65536 # maximum size of message processed from authentication session
EXTENDED = 2097152 # maximum size of message that sends a file
class MsgSizeError(Exception):
def __init__(self, limit, datalen, method_name=None):
self.limit = limit
self.datalen = datalen
self.errmsg = f'Message length [{self.datalen}] exceeded maximum size of {self.limit}'
self.method_name = method_name or ''
if limit is MsgSizeLimit.UNAUTHENTICATED:
# This preserves legacy server behavior
self.ws_close_code = WSCloseCode.INVALID_TEXT
self.ws_errmsg = 'Anonymous connection max message length is 8 kB'
else:
self.ws_close_code = WSCloseCode.MESSAGE_TOO_BIG
self.ws_errmsg = 'Max message length is 64 kB'
def __str__(self):
return self.errmsg
def parse_message(authenticated: bool, msg_data: str) -> dict:
"""
Check given message to determine whether it exceeds size limits
WARNING: RFC5424 (syslog) specifies that SDATA of message should never
exceed 64 KiB. The default syslog-ng configuration will not parse messages
larger than this, hence, going above this value can potentially break
auditing (either locally or sending to remote syslog server).
The exception to this is for particular whitelisted methods (for example
filesystem.file_receive) that must process very large amounts of data and
are not audited
parameters:
authenticated - whether session is authenticated
msg_data - data sent by client
returns:
JSON loads output of msg_data (dictionary)
raises:
JSONDecodeError (subclass of ValueError)
MsgSizeError
"""
datalen = len(msg_data)
if not authenticated and datalen > MsgSizeLimit.UNAUTHENTICATED.value:
raise MsgSizeError(MsgSizeLimit.UNAUTHENTICATED, datalen)
if datalen > MsgSizeLimit.EXTENDED.value:
raise MsgSizeError(MsgSizeLimit.EXTENDED, datalen)
message = ejson.loads(msg_data)
if (method := message.get('method')) in MSG_SIZE_EXTENDED_METHODS:
return message
if datalen > MsgSizeLimit.AUTHENTICATED:
raise MsgSizeError(MsgSizeLimit.AUTHENTICATED, datalen, method)
return message
| 2,868 | Python | .py | 58 | 43.034483 | 96 | 0.730631 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,210 | prctl.py | truenas_middleware/src/middlewared/middlewared/utils/prctl.py | import contextlib
import ctypes
import enum
import signal
__all__ = ['set_name', 'set_pdeath_sig']
class Prctl(enum.IntEnum):
# from linux/prctl.h
SET_PDEATHSIG = 1
SET_NAME = 15
@contextlib.contextmanager
def load_libc():
libc = None
try:
libc = ctypes.CDLL('libc.so.6')
yield libc
finally:
# probably not needed but rather be safe than sorry
del libc
def set_name(name):
if isinstance(name, str):
name = name.encode()
with load_libc() as libc:
return libc.prctl(Prctl.SET_NAME.value, ctypes.c_char_p(name), 0, 0, 0)
def set_pdeath_sig(sig=signal.SIGKILL):
with load_libc() as libc:
libc.prctl(Prctl.SET_PDEATHSIG.value, signal.Signals(sig).value, 0, 0, 0)
def die_with_parent():
set_pdeath_sig()
| 806 | Python | .py | 28 | 23.928571 | 81 | 0.664491 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,211 | lock.py | truenas_middleware/src/middlewared/middlewared/utils/lock.py | import asyncio
class SoftHardSemaphoreLimit(Exception):
pass
class SoftHardSemaphore(object):
def __init__(self, softlimit, hardlimit):
self.softlimit = softlimit
self.hardlimit = hardlimit
self.softsemaphore = asyncio.Semaphore(value=softlimit)
self.counter = 0
async def __aenter__(self):
if self.counter >= self.hardlimit:
raise SoftHardSemaphoreLimit(self.hardlimit)
self.counter += 1
await self.softsemaphore.acquire()
async def __aexit__(self, exc_type, exc, tb):
self.counter -= 1
self.softsemaphore.release()
| 625 | Python | .py | 17 | 29.411765 | 63 | 0.671667 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,212 | path.py | truenas_middleware/src/middlewared/middlewared/utils/path.py | # -*- coding=utf-8 -*-
import errno
import enum
import fcntl
import logging
import os
import stat
from contextlib import contextmanager
from pathlib import Path
logger = logging.getLogger(__name__)
__all__ = ["pathref_open", "pathref_reopen", "is_child", "is_child_realpath", "path_location", "strip_location_prefix"]
EXTERNAL_PATH_PREFIX = 'EXTERNAL:'
CLUSTER_PATH_PREFIX = 'CLUSTER:'
class FSLocation(enum.Enum):
CLUSTER = enum.auto()
EXTERNAL = enum.auto()
LOCAL = enum.auto()
def path_location(path):
if path.startswith(CLUSTER_PATH_PREFIX):
return FSLocation.CLUSTER
if path.startswith(EXTERNAL_PATH_PREFIX):
return FSLocation.EXTERNAL
return FSLocation.LOCAL
def strip_location_prefix(path):
return path.lstrip(f'{path_location(path).name}:')
def pathref_reopen(fd_in: int, flags: int, **kwargs) -> int:
# use procfs to reopen a file with new flags
if fcntl.fcntl(fd_in, fcntl.F_GETFL) & os.O_PATH == 0:
raise ValueError('Not an O_PATH open')
close = kwargs.get('close_fd', False)
mode = kwargs.get('mode', 0o777)
pathref = f'/proc/self/fd/{fd_in}'
fd_out = os.open(pathref, flags, mode)
if close:
os.close(fd_in)
return fd_out
@contextmanager
def pathref_open(path: str, **kwargs) -> int:
"""
Get O_PATH open for specified `path`. Supports following kwargs:
`dir_fd` - fileno for open of to use as dir_fd for open of path.
`expected_mode` - expected permissions on open of `path`. If
the `force` kwarg is also set then the file's permissions
will be changed to the specified value. Otherwise ValueError
will be raised on permissions mismatch.
`force` - If path is a symbolic link, then the symlink will be
removed and replaced with a directory. In case of mismatch
between expected_mode and stat() output, chmod(2) will be called.
`mkdir` - if `path` does not exist, then it will be created.
"""
dir_fd = kwargs.get('dir_fd')
flags = os.O_PATH | os.O_NOFOLLOW | kwargs.get('additional_flags', 0)
expected_mode = kwargs.get('mode')
force = kwargs.get('force')
mkdir = kwargs.get('mkdir', False)
fd = -1
try:
fd = os.open(path, flags, dir_fd=dir_fd)
except FileNotFoundError:
if not mkdir:
raise
os.mkdir(path, expected_mode or 0o755, dir_fd=dir_fd)
fd = os.open(path, flags, dir_fd=dir_fd)
except OSError as e:
# open will fail with ELOOP if last component is symlink
# due to O_NOFOLLOW
if e.errno != errno.ELOOP or not force:
raise
os.unlink(path, dir_fd=dir_fd)
os.mkdir(path, expected_mode or 0o755, dir_fd=dir_fd)
fd = os.open(path, flags, dir_fd=dir_fd)
st = os.fstat(fd)
if not stat.S_ISDIR(st.st_mode):
os.close(fd)
raise NotADirectoryError(path)
if expected_mode and stat.S_IMODE(st.st_mode) != expected_mode:
if not force:
raise ValueError(
f'{stat.S_IMODE(st.st_mode)} does not match expected mode: '
f'{expected_mode}, and "force" was not specified.'
)
try:
tmp_fd = pathref_reopen(fd, os.O_DIRECTORY, dir_fd=dir_fd)
try:
os.fchmod(tmp_fd, expected_mode)
finally:
os.close(tmp_fd)
except Exception:
os.close(fd)
raise
try:
yield fd
finally:
os.close(fd)
def is_child_realpath(child: str, parent: str):
"""
This method blocks, but uses realpath to determine
whether the specified path is a child of another.
Python realpath checks each path component for whether
it's a symlink, but may not do so in a race-free way.
For internal purposes though, this is sufficient for
how we use it (primarily to determine whether a share
path is locked, etc).
"""
c = Path(child)
p = Path(parent)
if c == p:
return True
return c.resolve().is_relative_to(p.resolve())
def is_child(child: str, parent: str):
"""
This method is asyncio safe, but should not be used
to check whether one local path is a child of another.
An example where it may be useful is determining whether
a dataset name is a child of another.
"""
if os.path.isabs(child) or os.path.isabs(parent):
raise ValueError(f'Symlink-unsafe method called with absolute path(s): {child}, {parent}')
rel = os.path.relpath(child, parent)
return rel == "." or not rel.startswith("..")
def should_exclude(dataset: str, exclude: [str]):
return any(is_child(dataset, excl) for excl in exclude)
| 4,703 | Python | .py | 122 | 32.188525 | 119 | 0.654109 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,213 | size.py | truenas_middleware/src/middlewared/middlewared/utils/size.py | import humanfriendly
MB = 1048576
def format_size(size):
return humanfriendly.format_size(size, binary=True)
| 116 | Python | .py | 4 | 26.25 | 55 | 0.807339 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,214 | cron.py | truenas_middleware/src/middlewared/middlewared/utils/cron.py | # -*- coding=utf-8 -*-
import re
from croniter import croniter
CRON_FIELDS = ["minute", "hour", "dom", "month", "dow"]
def croniter_for_schedule(schedule, *args, **kwargs):
cron_expression = ''
for field in CRON_FIELDS:
value = schedule.get(field) or '*'
if '/' in value and not re.match(r'^(\*|[0-9]+-[0-9]+)/([0-9]+)$', value):
raise ValueError("Only range or `*` are allowed before `/`")
cron_expression += f'{value} '
return croniter(cron_expression, *args, **kwargs)
| 526 | Python | .py | 12 | 38.333333 | 82 | 0.602362 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,215 | disk_stats.py | truenas_middleware/src/middlewared/middlewared/utils/disk_stats.py | import contextlib
import logging
import os
from .disks import get_disk_names, get_disks_with_identifiers
logger = logging.getLogger(__name__)
def get_disk_stats(disk_identifier_mapping: dict | None = None) -> dict[str, dict]:
disk_identifier_mapping = disk_identifier_mapping or get_disks_with_identifiers()
available_disks = get_disk_names()
stats = {}
with contextlib.suppress(IOError):
with open('/proc/diskstats', 'r') as disk_stats_fd:
for entry in disk_stats_fd:
parts = entry.strip().split()
if len(parts) < 14:
continue # skip lines that don't have all the fields
disk_name = parts[2]
if disk_name not in available_disks:
continue
sector_size = 512 # default sector size if we are not able to find it keeping in line with netdata
with contextlib.suppress(FileNotFoundError, ValueError):
with open(os.path.join('/sys/block', disk_name, 'queue/hw_sector_size'), 'r') as f:
sector_size = int(f.read().strip())
try:
read_ops = int(parts[3])
read_sectors = int(parts[5])
write_ops = int(parts[7])
write_sectors = int(parts[9])
busy_time = int(parts[12])
except (IndexError, ValueError) as e:
logger.error('Failed to parse disk stats for %r: %r', disk_name, e)
continue
stats[disk_identifier_mapping.get(disk_name, disk_name)] = {
'reads': (read_sectors * sector_size) / 1024, # convert to kb
'writes': (write_sectors * sector_size) / 1024, # convert to kb
'read_ops': read_ops,
'write_ops': write_ops,
'busy': busy_time,
}
return stats
| 1,977 | Python | .py | 39 | 36 | 115 | 0.540975 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,216 | plugins.py | truenas_middleware/src/middlewared/middlewared/utils/plugins.py | import functools
import importlib
import inspect
import itertools
import logging
import os
import sys
from middlewared.schema import Schemas
logger = logging.getLogger(__name__)
def load_modules(directory, base=None, depth=0):
directory = os.path.normpath(directory)
if base is None:
middlewared_root = os.path.dirname(os.path.dirname(__file__))
if os.path.commonprefix((f'{directory}/', f'{middlewared_root}/')) == f'{middlewared_root}/':
base = '.'.join(
['middlewared'] +
os.path.relpath(directory, middlewared_root).split('/')
)
else:
for new_module_path in sys.path:
if os.path.commonprefix((f'{directory}/', f'{new_module_path}/')) == f'{new_module_path}/':
break
else:
new_module_path = os.path.dirname(directory)
logger.debug("Registering new module path %r", new_module_path)
sys.path.insert(0, new_module_path)
base = '.'.join(os.path.relpath(directory, new_module_path).split('/'))
_, dirs, files = next(os.walk(directory))
for f in filter(lambda x: x[-3:] == '.py' and x.find('_freebsd') == -1, files):
yield importlib.import_module(base if f == '__init__.py' else f'{base}.{f[:-3]}')
for f in filter(lambda x: x.find('_freebsd') == -1, dirs):
if depth > 0:
path = os.path.join(directory, f)
yield from load_modules(path, f'{base}.{f}', depth - 1)
def load_classes(module, base, blacklist):
classes = []
for attr in dir(module):
attr = getattr(module, attr)
if inspect.isclass(attr):
if issubclass(attr, base):
if attr is not base and attr not in blacklist:
classes.append(attr)
return classes
class SchemasMixin:
def __init__(self):
self._schemas = Schemas()
def _resolve_methods(self, services, events):
from middlewared.schema import resolve_methods # Lazy import so namespace match
to_resolve = []
for service in services:
for attr in dir(service):
method = getattr(service, attr)
if not callable(method):
continue
to_resolve.append({
'name': attr,
'type': 'method',
'keys': ['accepts', 'returns'],
'has_key': functools.partial(hasattr, method),
'get_attr': functools.partial(getattr, method),
})
for name, attrs in events:
to_resolve.append({
'name': name,
'type': 'event',
'keys': ['accepts', 'returns'],
'has_key': lambda k: k in attrs,
'get_attr': functools.partial(dict.get, attrs),
})
resolve_methods(self._schemas, to_resolve)
class LoadPluginsMixin(SchemasMixin):
    """Discovers Service subclasses in the plugins directory and maintains
    the namespace -> service instance registries used for dispatch."""

    def __init__(self):
        # namespace -> service instance; alias namespace -> same instance
        self._services = {}
        self._services_aliases = {}
        super().__init__()

    def _load_plugins(self, on_module_begin=None, on_module_end=None, on_modules_loaded=None):
        """Import every plugin module, instantiate its services and register
        them. The optional callbacks fire around each module import and once
        after all modules are loaded."""
        # Lazy import to avoid a circular dependency at module import time
        from middlewared.service import Service, CompoundService, ABSTRACT_SERVICES

        services = []
        plugins_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'plugins'))
        if not os.path.exists(plugins_dir):
            raise ValueError(f'plugins dir not found: {plugins_dir}')

        for mod in load_modules(plugins_dir, depth=1):
            if on_module_begin:
                on_module_begin(mod)
            services.extend(load_classes(mod, Service, ABSTRACT_SERVICES))
            if on_module_end:
                on_module_end(mod)

        def key(service):
            return service._config.namespace

        # Several classes may share one namespace; they are merged into a
        # single CompoundService so callers see one service per namespace.
        for name, parts in itertools.groupby(sorted(set(services), key=key), key=key):
            parts = list(parts)
            if len(parts) == 1:
                service = parts[0](self)
            else:
                service = CompoundService(self, [part(self) for part in parts])
            if not service._config.private and not service._config.cli_private and not service._config.cli_namespace:
                raise RuntimeError(f'Service {service!r} does not have CLI namespace set')
            self.add_service(service)

        if on_modules_loaded:
            on_modules_loaded()

        # Now that all plugins have been loaded we can resolve all method params
        # to make sure every schema is patched and references match
        self._resolve_methods(list(self._services.values()), self.get_events())

    def add_service(self, service):
        # Register under the primary namespace and, when present, the alias
        self._services[service._config.namespace] = service
        if service._config.namespace_alias:
            self._services_aliases[service._config.namespace_alias] = service

    def get_service(self, name):
        """Look up a service by namespace, falling back to aliases.
        Raises KeyError if neither matches."""
        service = self._services.get(name)
        if service:
            return service
        return self._services_aliases[name]

    def get_services(self):
        return self._services
| 5,165 | Python | .py | 114 | 34.298246 | 117 | 0.585293 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,217 | itertools.py | truenas_middleware/src/middlewared/middlewared/utils/itertools.py | import itertools
def grouper(iterable, n, *, incomplete='fill', fillvalue=None):
"Collect data into non-overlapping fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, fillvalue='x') --> ABC DEF Gxx
# grouper('ABCDEFG', 3, incomplete='strict') --> ABC DEF ValueError
# grouper('ABCDEFG', 3, incomplete='ignore') --> ABC DEF
args = [iter(iterable)] * n
if incomplete == 'fill':
return itertools.zip_longest(*args, fillvalue=fillvalue)
if incomplete == 'strict':
return zip(*args, strict=True)
if incomplete == 'ignore':
return zip(*args)
else:
raise ValueError('Expected fill, strict, or ignore')
def infinite_multiplier_generator(multiplier, max_value, initial_value):
    """Yield initial_value, then keep multiplying by `multiplier`; once the
    next step would exceed `max_value`, yield the last value forever
    (useful e.g. as a capped backoff sequence)."""
    value = initial_value
    while True:
        yield value
        candidate = value * multiplier
        # cap: keep yielding the current value once the limit is reached
        value = candidate if candidate <= max_value else value
def batched(iterable, n):
    """
    Batch data from the `iterable` into tuples of length `n`. The
    last batch may be shorter than `n`.

    batched iter recipe from the Python 3.11 documentation. Python 3.12
    ships a C implementation in `itertools`, so this helper should be
    replaced once TrueNAS moves to Python 3.12.
    """
    if n < 1:
        raise ValueError('n must be at least one')
    iterator = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(iterator, n))
        if not chunk:
            return
        yield chunk
| 1,417 | Python | .py | 35 | 34.457143 | 74 | 0.661572 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,218 | interface.py | truenas_middleware/src/middlewared/middlewared/utils/interface.py | import contextlib
import os
import time
IFACE_LINK_STATE_MAX_WAIT: int = 60
RTF_GATEWAY: int = 0x0002
RTF_UP: int = 0x0001
def get_default_interface() -> str | None:
    """Return the name of the interface carrying the IPv4 default route,
    or None if there is no default route (or /proc/net/route is absent).

    /proc/net/route columns (11 per row after the header) are:
    Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT,
    with Destination/Flags as hex strings.
    """
    required = RTF_UP | RTF_GATEWAY
    with contextlib.suppress(FileNotFoundError):
        with open('/proc/net/route', 'r') as f:
            for entry in filter(lambda i: len(i) == 11, map(str.split, f.readlines()[1:])):
                with contextlib.suppress(ValueError):
                    # The default route has an all-zero destination. Test the
                    # flags as a bitmask rather than with `==` so extra flags
                    # (e.g. RTF_DYNAMIC) don't hide the default route, and
                    # require the zero destination so a static gateway route
                    # to a specific subnet is not misreported as the default.
                    if int(entry[1], 16) == 0 and (int(entry[3], 16) & required) == required:
                        return entry[0].strip()
def wait_on_interface_link_state_up(interface: str) -> bool:
    """Poll sysfs until `interface` reports an "up" operational state.

    Checks /sys/class/net/<interface>/operstate once per second for up to
    IFACE_LINK_STATE_MAX_WAIT seconds. Returns True as soon as the link is
    up, False when the timeout expires. FileNotFoundError is suppressed, so
    an interface that does not (yet) exist simply keeps being polled until
    the timeout.
    """
    sleep_interval = 1
    time_waited = 0
    while time_waited < IFACE_LINK_STATE_MAX_WAIT:
        with contextlib.suppress(FileNotFoundError):
            with open(os.path.join('/sys/class/net', interface, 'operstate'), 'r') as f:
                if f.read().strip().lower() == 'up':
                    return True
        time.sleep(sleep_interval)
        time_waited += sleep_interval
    return False
def wait_for_default_interface_link_state_up() -> tuple[str | None, bool]:
    """Resolve the default-route interface and wait for its link to come up.

    Returns an (interface, link_is_up) tuple; (None, False) when no default
    route exists.
    """
    iface = get_default_interface()
    link_up = False if iface is None else wait_on_interface_link_state_up(iface)
    return iface, link_up
| 1,286 | Python | .py | 29 | 36.344828 | 91 | 0.639647 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,219 | serial.py | truenas_middleware/src/middlewared/middlewared/utils/serial.py | import re
import os
import glob
import subprocess
RE_PORT_UART = re.compile(r'at\s*(\w*).*is a\s*(\w+)')
def serial_port_choices():
    """Enumerate UART serial ports usable for a serial console.

    Walks /dev/ttyS*, skipping ports without a backing device or whose
    device sits on the platform bus, then uses `setserial -b` to obtain the
    UART type and I/O start address and sysfs for the ACPI firmware-node
    handle. Returns a list of dicts with keys: name, location, drivername,
    description, start.
    """
    devices = []
    for tty in map(lambda t: os.path.basename(t), glob.glob('/dev/ttyS*')):
        serial_dev = {
            'name': None,
            'location': None,
            'drivername': 'uart',
            'description': None,
            'start': None,
        }
        tty_sys_path = os.path.join('/sys/class/tty', tty)
        dev_path = os.path.join(tty_sys_path, 'device')
        # Skip ports with no backing device, and ports whose device's
        # subsystem resolves to the platform bus.
        if (
            os.path.exists(dev_path) and os.path.basename(
                os.path.realpath(os.path.join(dev_path, 'subsystem'))
            ) == 'platform'
        ) or not os.path.exists(dev_path):
            continue

        # `setserial -b` output is matched by RE_PORT_UART to extract the
        # I/O start address and UART type (e.g. "... at 03f8 ... is a 16550A")
        cp = subprocess.Popen(
            ['setserial', '-b', os.path.join('/dev', tty)], stderr=subprocess.DEVNULL, stdout=subprocess.PIPE
        )
        stdout, stderr = cp.communicate()
        if cp.returncode or not stdout:
            continue

        entry = RE_PORT_UART.findall(stdout.decode(errors='ignore'))
        if not entry:
            continue

        serial_dev.update({
            'start': hex(int(entry[0][0], 16)),  # normalize to 0x-prefixed hex
            'description': entry[0][1],
        })
        # ACPI firmware-node handle identifies the port's hardware location;
        # ports without one are skipped entirely.
        path_file = os.path.join(tty_sys_path, 'device/firmware_node/path')
        if not os.path.exists(path_file):
            continue
        with open(path_file, 'r') as f:
            serial_dev['location'] = f'handle={f.read().strip()}'

        serial_dev['name'] = tty
        devices.append(serial_dev)

    return devices
| 1,590 | Python | .py | 44 | 26.772727 | 109 | 0.550715 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,220 | user_context.py | truenas_middleware/src/middlewared/middlewared/utils/user_context.py | import concurrent.futures
import functools
import logging
import os
import subprocess
from typing import Any, Callable, Optional
logger = logging.getLogger(__name__)
__all__ = ["run_command_with_user_context", "run_with_user_context", "set_user_context"]
def set_user_context(user_details: dict) -> None:
    """Switch this process's credentials to the given user.

    `user_details` must contain pw_uid, pw_gid, pw_dir, pw_name and
    grouplist. Real and effective ids are switched to the user while the
    saved ids stay 0, so the context can be switched again later in the
    same process. The cwd and HOME/PATH environment are adjusted too.

    Raises Exception if the resulting uids/gids do not match what was
    requested.
    """
    if os.geteuid() != 0:
        # We need to reset to UID 0 before setgroups is called
        os.seteuid(0)

    os.setgroups(user_details['grouplist'])

    # We must preserve the saved uid of zero so that we can call this multiple times
    # in same child process.
    gids = (user_details['pw_gid'], user_details['pw_gid'], 0)
    uids = (user_details['pw_uid'], user_details['pw_uid'], 0)

    # group ids must be changed while we are still privileged
    os.setresgid(*gids)
    os.setresuid(*uids)

    new_gids = os.getresgid()
    new_uids = os.getresuid()

    if new_gids != gids:
        raise Exception(f'{user_details["pw_name"]}: Unable to set gids for user context received {new_gids}, expected {gids}')

    if new_uids != uids:
        raise Exception(f'{user_details["pw_name"]}: Unable to set uids for user context received {new_uids}, expected {uids}')

    try:
        os.chdir(user_details['pw_dir'])
    except Exception:
        # home directory may be missing or inaccessible; fall back
        os.chdir('/var/empty')

    os.environ.update({
        'HOME': user_details['pw_dir'],
        'PATH': '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:/root/bin',
    })
def run_with_user_context(func: Callable, user_details: dict, func_args: Optional[list] = None) -> Any:
    """Execute `func(*func_args)` in a fresh single-worker subprocess whose
    credentials are switched to `user_details` (see set_user_context) and
    return its result. Exceptions raised by `func` propagate from .result().
    """
    assert {'pw_uid', 'pw_gid', 'pw_dir', 'pw_name', 'grouplist'} - set(user_details) == set()
    with concurrent.futures.ProcessPoolExecutor(
        max_workers=1, initializer=functools.partial(set_user_context, user_details)
    ) as exc:
        return exc.submit(func, *(func_args or [])).result()
def run_command_with_user_context(
    commandline: str, user: str, *, output: bool = True, callback: Optional[Callable] = None,
    timeout: Optional[int] = None,
) -> subprocess.CompletedProcess:
    """Run a shell `commandline` as `user` via sudo.

    output: accumulate combined stdout/stderr into the returned result.
    callback: invoked with each raw output line (bytes) as it is read.
    timeout: when set, the command is wrapped in timeout(1) with the same
    value used for the kill-after grace period.
    """
    if output or callback:
        # merge stderr into stdout so a single pipe sees all output
        kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.STDOUT}
    else:
        kwargs = {"stdout": subprocess.DEVNULL, "stderr": subprocess.DEVNULL}
    timeout_args = ["timeout", "-k", str(timeout), str(timeout)] if timeout else []
    p = subprocess.Popen(timeout_args + ["sudo", "-H", "-u", user, "sh", "-c", commandline], **kwargs)
    stdout = b""
    if output or callback:
        while True:
            line = p.stdout.readline()
            if not line:
                break

            if output:
                stdout += line
            if callback:
                callback(line)

    # reap the child and populate returncode
    p.communicate()

    return subprocess.CompletedProcess(commandline, stdout=stdout, returncode=p.returncode)
| 2,753 | Python | .py | 61 | 38.606557 | 127 | 0.648072 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,221 | tdb.py | truenas_middleware/src/middlewared/middlewared/utils/tdb.py | import os
import tdb
import enum
import json
from base64 import b64encode, b64decode
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from contextlib import contextmanager
from dataclasses import dataclass
from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
from middlewared.service_exception import MatchNotFound
from threading import RLock
FD_CLOSED = -1
# Robust mutex support was added to libtdb after py-tdb was written and flags
# weren't updated. See lib/tdb/include/tdb.h
MUTEX_LOCKING = 4096
TDB_LOCKS = defaultdict(RLock)
TDBOptions = namedtuple('TdbFileOptions', ['backend', 'data_type'])
TDB_HANDLES = {}
class TDBPathType(enum.Enum):
    """
    Type of path for TDB file

    VOLATILE - cleared on reboot (tmpfs)

    PERSISTENT - persist across reboots

    CUSTOM - arbitrary filesystem path (used for interacting with service TDB files);
    with this type the name given to TDBHandle must itself be an absolute path
    """
    VOLATILE = '/var/run/tdb/volatile'
    PERSISTENT = '/root/tdb/persistent'
    CUSTOM = ''
class TDBDataType(enum.Enum):
    """
    Types of data to encode in TDB file. See TDBHandle.get() / store() for
    how each type is serialized and deserialized.

    BYTES - binary data. API consumer submits as b64encoded string that is decoded before insertion.
    This is particularly relevant when interacting with TDB files used by 3rd party applications.

    JSON - submit as python dictionary and converted into JSON before insertion

    STRING - submit as python string and inserted as-is
    """
    BYTES = enum.auto()
    JSON = enum.auto()
    STRING = enum.auto()
class TDBBatchAction(enum.Enum):
    """ Types of actions to use for batch operations (see TDBHandle.batch_op) """
    GET = enum.auto()  # read a key; value is returned in batch_op() output
    SET = enum.auto()  # store key/value
    DEL = enum.auto()  # delete a key
class TDBError(enum.IntEnum):
    """ TDB errors are included in RuntimeError raised by TDB library

    NOTE(review): ordering presumably mirrors the TDB_ERR_* sequence in
    lib/tdb/include/tdb.h (as with MUTEX_LOCKING above) -- keep in sync
    with libtdb and verify before reordering.
    """
    SUCCESS = 0
    CORRUPT = enum.auto()
    IO = enum.auto()
    LOCK = enum.auto()
    OOM = enum.auto()
    EXISTS = enum.auto()
    NOLOCK = enum.auto()
    TIMEOUT = enum.auto()
    NOEXIST = enum.auto()
    EINVAL = enum.auto()
    RDONLY = enum.auto()
    NESTING = enum.auto()
@dataclass
class TDBBatchOperation:
    """
    Dataclass for batch operation on TDB file

    key - target TDB key

    value - value to set. This is required for SET operations,
    but not evaluated for GET and DEL operations.
    """
    action: TDBBatchAction
    key: str
    value: str | dict | None = None
class TDBHandle:
hdl = None
name = None
data_type = None
path_type = None
full_path = None
opath_fd = FD_CLOSED
keys_null_terminated = False
def __enter__(self):
return self
def __exit__(self, tp, val, traceback):
self.close()
def close(self):
""" Close the TDB handle and O_PATH open for the file """
if self.opath_fd == FD_CLOSED and self.hdl is None:
return
if self.hdl is not None:
self.hdl.close()
self.hdl = None
if self.opath_fd != FD_CLOSED:
os.close(self.opath_fd)
self.opath_fd = FD_CLOSED
def validate_handle(self) -> bool:
"""
Check whether the TDB handle is still valid
If it is invalid, then a new handle object should be created.
"""
if self.opath_fd == FD_CLOSED:
return False
if not os.path.exists(f'/proc/self/fd/{self.opath_fd}'):
return False
# if file has been renamed or deleted from under us, readlink will show different path
return os.readlink(f'/proc/self/fd/{self.opath_fd}') == self.full_path
def get(self, key: str) -> dict | str:
"""
Retrieve the specified key
Returns:
dict if TDBDatatype is JSON
str if TDBDatatype is BYTES or STRING
Raises:
MatchNotFound
RuntimeError
"""
tdb_key = key.encode()
if self.keys_null_terminated:
tdb_key += b"\x00"
if (tdb_val := self.hdl.get(tdb_key)) is None:
raise MatchNotFound(key)
match self.data_type:
case TDBDataType.BYTES:
out = b64encode(tdb_val).decode()
case TDBDataType.JSON:
out = json.loads(tdb_val.decode())
case TDBDataType.STRING:
out = tdb_val.decode()
case _:
raise ValueError(f'{self.data_type}: unknown data type')
return out
def store(self, key: str, value: str | dict) -> None:
"""
Set the specified `key` to the specified `value`.
Raises:
RuntimeError
ValueError
"""
tdb_key = key.encode()
if self.keys_null_terminated:
tdb_key += b'\x00'
match self.data_type:
case TDBDataType.BYTES:
tdb_val = b64decode(value)
case TDBDataType.JSON:
tdb_val = json.dumps(value).encode()
case TDBDataType.STRING:
tdb_val = value.encode()
case _:
raise ValueError(f'{self.data_type}: unknown data type')
self.hdl.store(tdb_key, tdb_val)
def delete(self, key: str) -> None:
"""
Delete the specified `key`
Raises:
RuntimeError
"""
tdb_key = key.encode()
if self.keys_null_terminated:
tdb_key += b"\x00"
self.hdl.delete(tdb_key)
def clear(self) -> None:
"""
Clear all entries from the specified TDB file
Raises:
RuntimeError
"""
self.hdl.clear()
def entries(self, include_keys: bool = True, key_prefix: str = None) -> Iterable[dict]:
"""
Iterate entries in TDB file:
include_keys - yield entries as dictionary containing `key` and `value`
otherwise only value will be yielded.
value - may be str or dict
Raises:
RuntimeError
"""
for key in self.hdl.keys():
tdb_key = key.decode()
if self.keys_null_terminated:
tdb_key = tdb_key[:-1]
if key_prefix and not tdb_key.startswith(key_prefix):
continue
tdb_val = self.get(tdb_key)
if include_keys:
yield {
'key': tdb_key,
'value': tdb_val
}
else:
yield tdb_val
def batch_op(self, ops: list[TDBBatchOperation]) -> dict:
"""
Perform a list of operations under a transaction lock so that
they are automatically rolled back if any one of operations fails.
Returns:
dictionary containing results of all `GET` operations.
Raises:
RuntimeError
MatchNotFound
ValueError
"""
output = {}
try:
self.hdl.transaction_start()
except RuntimeError:
self.close()
raise
try:
for op in ops:
match op.action:
case TDBBatchAction.SET:
self.store(op.key, op.value)
case TDBBatchAction.DEL:
self.delete(op.key)
case TDBBatchAction.GET:
output[op.key] = self.get(op.key)
case _:
raise ValueError(f'{op.action}: unknown batch operation type')
self.hdl.transaction_commit()
except Exception:
self.hdl.transaction_cancel()
raise
return output
def __init__(
self,
name: str,
options: TDBOptions
):
self.name = name
self.path_type = TDBPathType(options.backend)
self.data_type = TDBDataType(options.data_type)
match os.path.basename(name):
case 'gencache.tdb':
# See gencache_init() in source3/lib/gencache.c in Samba
tdb_flags = tdb.INCOMPATIBLE_HASH | tdb.NOSYNC | MUTEX_LOCKING
self.keys_null_terminated = True
open_flags = os.O_CREAT | os.O_RDWR
open_mode = 0o644
case 'secrets.tdb':
tdb_flags = tdb.DEFAULT
open_flags = os.O_RDWR
open_mode = 0o600
case 'group_mapping.tdb' | 'group_mapping_rejects.tdb' | 'passdb.tdb':
tdb_flags = tdb.DEFAULT
open_flags = os.O_RDWR
self.keys_null_terminated = True
open_flags = os.O_CREAT | os.O_RDWR
open_mode = 0o600
case _:
tdb_flags = tdb.DEFAULT
# Typically tdb files will have NULL-terminated keys
self.keys_null_terminated = options.data_type is TDBDataType.BYTES
open_flags = os.O_CREAT | os.O_RDWR
open_mode = 0o600
match self.path_type:
case TDBPathType.CUSTOM:
if not os.path.isabs(name):
raise ValueError(
f'{name}: must be an absolute path when using custom TDB path'
)
self.full_path = name
case _:
if not os.path.exists(self.path_type.value):
os.makedirs(self.path_type.value, mode=0o700, exist_ok=True)
self.full_path = f'{self.path_type.value}/{name}.tdb'
self.hdl = tdb.Tdb(self.full_path, 0, tdb_flags, open_flags, open_mode)
self.opath_fd = os.open(self.full_path, os.O_PATH)
self.options = options
@contextmanager
def get_tdb_handle(name, tdb_options: TDBOptions):
    """ Open handle on TDB file under a threading lock

    Handles are cached in TDB_HANDLES and reused across calls; a cached
    handle is recreated when the underlying file was renamed/deleted
    (validate_handle() fails). Raises ValueError if the caller's options
    disagree with those the cached handle was created with.
    """
    lock = TDB_LOCKS[name]
    with lock:
        if (entry := TDB_HANDLES.get(name)) is None:
            entry = TDB_HANDLES.setdefault(name, TDBHandle(name, tdb_options))

        if entry.options != tdb_options:
            raise ValueError('Inconsistent options')

        if not entry.validate_handle():
            # file changed out from under us; reopen and replace cache entry
            entry.close()
            entry = TDBHandle(name, tdb_options)
            TDB_HANDLES[name] = entry

        yield entry
def close_sysdataset_tdb_handles():
    """
    Some samba / winbind-related TDB files are located in the system dataset.
    This method provides a mechanism to close them when moving the system
    dataset path. The closed-but-still-cached entries fail validate_handle()
    and are reopened by get_tdb_handle() on next use.
    """
    for tdb_file in list(TDB_HANDLES.keys()):
        if not tdb_file.startswith(SYSDATASET_PATH):
            continue

        with TDB_LOCKS[tdb_file]:
            entry = TDB_HANDLES[tdb_file]
            entry.close()
| 10,621 | Python | .py | 291 | 26.594502 | 100 | 0.582708 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,222 | git.py | truenas_middleware/src/middlewared/middlewared/utils/git.py | import subprocess
import shutil
import textwrap
import typing
from middlewared.service import CallError
def clone_repository(
    repository_uri: str, destination: str, branch: typing.Optional[str] = None, depth: typing.Optional[int] = None
) -> None:
    """Clone `repository_uri` into `destination`, wiping any existing copy.

    `branch` and `depth` map to the corresponding `git clone` options.
    Raises CallError (with a shortened git error message) on failure.
    """
    shutil.rmtree(destination, ignore_errors=True)
    extra_args = []
    if branch is not None:
        extra_args += ['--branch', branch]
    if depth is not None:
        extra_args += ['--depth', str(depth)]
    proc = subprocess.run(['git', 'clone'] + extra_args + [repository_uri, destination], capture_output=True)
    if proc.returncode != 0:
        reason = textwrap.shorten(proc.stderr.decode(), width=50, placeholder='...')
        raise CallError(
            f'Failed to clone {repository_uri!r} repository at {destination!r} destination: {reason}'
        )
def checkout_repository(destination: str, branch: str) -> None:
    """Check out `branch` in the git repository at `destination`.

    Raises CallError (with a shortened git error message) on failure.
    """
    proc = subprocess.run(['git', '-C', destination, 'checkout', branch], capture_output=True)
    if proc.returncode != 0:
        reason = textwrap.shorten(proc.stderr.decode(), width=50, placeholder='...')
        raise CallError(
            f'Failed to checkout {branch!r} branch for {destination!r} repository: {reason}'
        )
def update_repo(destination: str, branch: str) -> None:
    """Run `git pull origin <branch>` in the repository at `destination`.

    Raises CallError (with a shortened git error message) on failure.
    """
    proc = subprocess.run(['git', '-C', destination, 'pull', 'origin', branch], capture_output=True)
    if proc.returncode != 0:
        reason = textwrap.shorten(proc.stderr.decode(), width=50, placeholder='...')
        raise CallError(
            f'Failed to update {destination!r} repository: {reason}'
        )
def validate_git_repo(destination: str) -> bool:
    """Return True when `destination` is a working git repository
    (i.e. `git status` succeeds there)."""
    result = subprocess.run(['git', '-C', destination, 'status'], capture_output=True)
    return result.returncode == 0
| 1,827 | Python | .py | 40 | 39.225 | 114 | 0.654474 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,223 | cgroups.py | truenas_middleware/src/middlewared/middlewared/utils/cgroups.py | # -*- coding=utf-8 -*-
def move_to_root_cgroups(pid):
    """Move `pid` from middlewared's cgroup into the root cgroup.

    Scans /proc/<pid>/cgroup and, if the process belongs to
    /system.slice/middlewared.service, writes its pid into
    /sys/fs/cgroup/cgroup.procs so it is no longer part of the
    middlewared service cgroup.
    """
    with open(f"/proc/{pid}/cgroup") as f:
        for line in f.readlines():
            # lines are "<hierarchy>:<controllers>:<path>"
            _, _, value = line.strip().split(":")
            if value == "/system.slice/middlewared.service":
                with open("/sys/fs/cgroup/cgroup.procs", "w") as f2:
                    f2.write(f"{pid}\n")
                break
| 380 | Python | .py | 9 | 30.888889 | 68 | 0.497297 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,224 | vendor.py | truenas_middleware/src/middlewared/middlewared/utils/vendor.py | from enum import StrEnum
class Vendors(StrEnum):
    """The set of possible vendor names stored in /data/.vendor"""
    TRUENAS_SCALE = "TrueNAS Scale"
    HEXOS = "HexOS"
| 167 | Python | .py | 5 | 29.6 | 59 | 0.70625 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,225 | sid.py | truenas_middleware/src/middlewared/middlewared/utils/sid.py | import enum
from secrets import randbits
from middlewared.plugins.idmap_.idmap_constants import BASE_SYNTHETIC_DATASTORE_ID, IDType
DOM_SID_PREFIX = 'S-1-5-21-'
DOM_SID_SUBAUTHS = 3
MAX_VALUE_SUBAUTH = 2 ** 32
BASE_RID_USER = 20000
BASE_RID_GROUP = 200000
class DomainRid(enum.IntEnum):
    """ Defined in MS-DTYP Section 2.4.2.4

    This is a subset of well-known RID values defined in the above document
    focused on ones that are of particular significance to permissions and
    SMB server behavior
    """
    ADMINISTRATOR = 500  # local administrator account
    GUEST = 501  # guest account
    ADMINS = 512  # domain admins account (local or joined)
    USERS = 513
    GUESTS = 514
    COMPUTERS = 515
class WellKnownSid(enum.Enum):
    """ Well-known SID string values, defined in MS-DTYP Section 2.4.2.4 """
    WORLD = 'S-1-1-0'
    CREATOR_OWNER = 'S-1-3-0'
    CREATOR_GROUP = 'S-1-3-1'
    OWNER_RIGHTS = 'S-1-3-4'
    AUTHENTICATED_USERS = 'S-1-5-11'
    SYSTEM = 'S-1-5-18'
    NT_AUTHORITY = 'S-1-5-19'
    BUILTIN_ADMINISTRATORS = 'S-1-5-32-544'
    BUILTIN_USERS = 'S-1-5-32-545'
    BUILTIN_GUESTS = 'S-1-5-32-546'

    @property
    def sid(self):
        # convenience alias so callers can write member.sid rather than member.value
        return self.value
class lsa_sidtype(enum.IntEnum):
    """ librpc/idl/lsa.idl

    used for passdb and group mapping databases; names/values mirror the
    Samba IDL definition, hence the non-PEP8 class name
    """
    USE_NONE = 0  # NOTUSED
    USER = 1  # user
    DOM_GRP = 2  # domain group
    DOMAIN = 3
    ALIAS = 4  # local group
    WKN_GRP = 5  # well-known group
    DELETED = 6  # deleted account
    INVALID = 7  # invalid account
    UNKNOWN = 8
    COMPUTER = 9
    LABEL = 10  # mandatory label
def random_sid() -> str:
    """Generate a random machine/domain SID: the S-1-5-21 prefix followed
    by three random 32-bit subauthorities. See MS-DTYP 2.4.2 SID."""
    subauths = '-'.join(str(randbits(32)) for _ in range(3))
    return f'S-1-5-21-{subauths}'
def sid_is_valid(sid: str) -> bool:
    """
    Check whether `sid` looks like a SID we reasonably expect to be used
    in SMB ACLs or for local user / group accounts. Use with some caution:
    only a handful of well-known SIDs plus S-1-5-21 domain SIDs (with an
    optional RID) are accepted.
    """
    if not isinstance(sid, str):
        return False

    # Whitelist some well-known SIDs user may have
    if sid in (
        WellKnownSid.WORLD.sid,
        WellKnownSid.OWNER_RIGHTS.sid,
        WellKnownSid.BUILTIN_ADMINISTRATORS.sid,
        WellKnownSid.BUILTIN_USERS.sid,
        WellKnownSid.BUILTIN_GUESTS.sid,
    ):
        return True

    if not sid.startswith(DOM_SID_PREFIX):
        # not a domain sid
        return False

    subauths = sid[len(DOM_SID_PREFIX):].split('-')

    # SID may have a RID component appended
    if len(subauths) < DOM_SID_SUBAUTHS or len(subauths) > DOM_SID_SUBAUTHS + 1:
        return False

    for subauth in subauths:
        if not subauth.isdigit():
            return False

        # Subauthorities are unsigned 32-bit values, i.e. 0 .. 2**32 - 1.
        # The previous `< 1` lower bound wrongly rejected a zero
        # subauthority (which random_sid() can legally generate via
        # randbits), and `> MAX_VALUE_SUBAUTH` accepted 2**32 exactly.
        if int(subauth) >= MAX_VALUE_SUBAUTH:
            return False

    return True
def get_domain_rid(sid: str) -> int:
    """Return the RID component (final subauthority) of the specified SID.

    Raises ValueError when `sid` is invalid, is not a domain SID, or has
    no RID appended.
    """
    if not sid_is_valid(sid):
        raise ValueError(f'{sid}: not a valid SID')

    if not sid.startswith(DOM_SID_PREFIX):
        raise ValueError(f'{sid}: not a domain SID')

    components = sid[len(DOM_SID_PREFIX):].split('-')
    if len(components) == DOM_SID_SUBAUTHS:
        raise ValueError(f'{sid}: does not contain a RID component')

    return int(components[-1])
def db_id_to_rid(id_type: IDType, db_id: int) -> int:
    """
    Simple algorithm to convert a datastore ID into RID value. Has been
    in use since TrueNAS 12. May not be changed because it will break
    SMB share ACLs
    """
    if not isinstance(db_id, int):
        raise ValueError(f'{db_id}: Not an int')

    if db_id >= BASE_SYNTHETIC_DATASTORE_ID:
        raise ValueError('Not valid for users and groups from directory services')

    if id_type == IDType.USER:
        return db_id + BASE_RID_USER
    elif id_type == IDType.GROUP:
        return db_id + BASE_RID_GROUP

    raise ValueError(f'{id_type}: unknown ID type')
| 4,047 | Python | .py | 114 | 29.631579 | 90 | 0.650692 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,226 | auth.py | truenas_middleware/src/middlewared/middlewared/utils/auth.py | import enum
from dataclasses import dataclass
LEGACY_API_KEY_USERNAME = 'LEGACY_API_KEY'
MAX_OTP_ATTEMPTS = 3
class AuthMech(enum.StrEnum):
    """Mechanisms by which a session may authenticate."""
    API_KEY_PLAIN = 'API_KEY_PLAIN'  # plaintext API key
    PASSWORD_PLAIN = 'PASSWORD_PLAIN'  # plaintext username + password
    TOKEN_PLAIN = 'TOKEN_PLAIN'  # plaintext token
    OTP_TOKEN = 'OTP_TOKEN'  # one-time password token
class AuthResp(enum.StrEnum):
    """Possible responses to an authentication attempt."""
    SUCCESS = 'SUCCESS'
    AUTH_ERR = 'AUTH_ERR'
    EXPIRED = 'EXPIRED'
    OTP_REQUIRED = 'OTP_REQUIRED'
# NIST SP 800-63B provides documentation Authenticator Assurance Levels (AAL)
# https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-63b.pdf
#
# NIST SP 800-63-3 Section 6.2 provides guidance on how to select an AAL
# https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-63-3.pdf
@dataclass(frozen=True, slots=True)
class AuthenticatorAssuranceLevel:
    """Session constraints for a NIST SP 800-63B authenticator assurance
    level (see the AA_LEVEL* instances below)."""
    # maximum session lifetime in seconds before reauthentication
    max_session_age: int
    # maximum inactivity in seconds; None means no inactivity limit
    max_inactivity: int | None
    # mechanisms permitted at this level (see aal_auth_mechanism_check)
    mechanisms: tuple[AuthMech, ...] | None
    # whether a one-time password is additionally required
    otp_mandatory: bool
@dataclass(slots=True)
class ServerAAL:
    """Mutable holder for the server's currently-enforced assurance level
    (see CURRENT_AAL below)."""
    level: AuthenticatorAssuranceLevel
def aal_auth_mechanism_check(mechanism_str: str, aal: AuthenticatorAssuranceLevel) -> bool:
    """ This method checks whether the specified mechanism is permitted under the
    specified authenticator assurance level. OTP tokens are always permitted. """
    mech = AuthMech[mechanism_str]
    return mech is AuthMech.OTP_TOKEN or mech in aal.mechanisms
# NIST SP 800-63B Section 4.1 Authenticator Assurance Level 1
# Reauthentication should be performed every 30 days after which session should be
# logged out.
#
# NOTE: this is baseline for TrueNAS authentication
AA_LEVEL1 = AuthenticatorAssuranceLevel(
max_session_age=86400 * 30,
max_inactivity=None,
mechanisms=(AuthMech.API_KEY_PLAIN, AuthMech.TOKEN_PLAIN, AuthMech.PASSWORD_PLAIN),
otp_mandatory=False
)
# NIST SP 800-63B Section 4.2 Authenticator Assurance Level 2
# Reauthentication shall be performed at least once per 12 hours.
# Reauthentication shall be repeated after any period of inactivity lasting 30 minutes or longer.
#
# This level can be provided by using two single-factor authenticators. In this case a
# memorized secret (password) and OTP token. At least one factor _must_ be replay resistant,
# which is fulfilled by the OTP token.
#
# Per these guidelines, our TOKEN_PLAIN and API_KEY_PLAIN provide insufficient replay resistance,
# which is in addition to the replay-resistant nature of encrypted transport, and are therefore
# unsuitable for this authentication level.
AA_LEVEL2 = AuthenticatorAssuranceLevel(
max_session_age=12 * 60 * 60,
max_inactivity=30 * 60,
mechanisms=(AuthMech.PASSWORD_PLAIN,),
otp_mandatory=True
)
# NIST SP 800-63B Section 4.3 Authenticator Assurance Level 3
# Reauthentication shall be performed at least once per 12 hours and shall be
# repeated after any period of inactivity lasting 15 minutes or longer.
# NOTE(review): max_session_age below is 13 * 60 (13 minutes), which does not
# match the 12-hour reauthentication interval described here and is shorter
# than max_inactivity -- confirm whether 12 * 60 * 60 was intended.
AA_LEVEL3 = AuthenticatorAssuranceLevel(
max_session_age=13 * 60,
max_inactivity=15 * 60,
mechanisms=(),
otp_mandatory=True
)
CURRENT_AAL = ServerAAL(AA_LEVEL1)
| 3,092 | Python | .py | 74 | 38.702703 | 97 | 0.773 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,227 | zfs.py | truenas_middleware/src/middlewared/middlewared/utils/zfs.py | import os
def query_imported_fast_impl(name_filters=None):
    """Lockless equivalent of `zpool list -H -o guid,name` using
    /proc/spl/kstat/zfs. `name_filters` optionally restricts the result to
    the given pool names. Returns {guid: {'name': ..., 'state': ...}}."""
    wanted = name_filters or []
    pools = {}
    with os.scandir('/proc/spl/kstat/zfs') as entries:
        for entry in entries:
            if wanted and entry.name not in wanted:
                continue
            # '$import' is a pseudo-entry, and plain files are not pools
            if entry.name == '$import' or not entry.is_dir():
                continue
            pools[guid_fast_impl(entry.name)] = {
                'name': entry.name,
                'state': state_fast_impl(entry.name),
            }
    return pools
def guid_fast_impl(pool):
    """
    Lockless read of zpool guid. Raises FileNotFoundError
    if pool not imported.
    """
    with open(f'/proc/spl/kstat/zfs/{pool}/guid') as fp:
        contents = fp.read()
    return contents.strip()
def state_fast_impl(pool):
    """
    Lockless read of zpool state. Raises FileNotFoundError
    if pool not imported.
    """
    with open(f'/proc/spl/kstat/zfs/{pool}/state') as fp:
        contents = fp.read()
    return contents.strip()
| 1,081 | Python | .py | 28 | 31.892857 | 94 | 0.635407 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,228 | __init__.py | truenas_middleware/src/middlewared/middlewared/utils/__init__.py | import asyncio
import errno
import functools
import logging
import operator
import re
import subprocess
import time
import json
from collections import namedtuple
from dataclasses import dataclass
from datetime import datetime
from middlewared.service_exception import MatchNotFound
from .lang import undefined
from .prctl import die_with_parent
from .threading import io_thread_pool_executor
# Define Product Strings
@dataclass(slots=True, frozen=True)
class ProductTypes:
    """Product type identifier strings (see the ProductType instance below)."""
    SCALE: str = 'SCALE'
    SCALE_ENTERPRISE: str = 'SCALE_ENTERPRISE'
@dataclass(slots=True, frozen=True)
class ProductNames:
    """Product name strings (see the ProductName instance below)."""
    PRODUCT_NAME: str = 'TrueNAS'
ProductType = ProductTypes()
ProductName = ProductNames()
MID_PID = None
MIDDLEWARE_RUN_DIR = '/var/run/middleware'
BOOTREADY = f'{MIDDLEWARE_RUN_DIR}/.bootready'
MANIFEST_FILE = '/data/manifest.json'
BRAND = ProductName.PRODUCT_NAME
PRODUCT = ProductType.SCALE
BRAND_PRODUCT = f'{BRAND}-{PRODUCT}'
NULLS_FIRST = 'nulls_first:'
NULLS_LAST = 'nulls_last:'
REVERSE_CHAR = '-'
MAX_FILTERS_DEPTH = 3
TIMESTAMP_DESIGNATOR = '.$date'
logger = logging.getLogger(__name__)
class UnexpectedFailure(Exception):
    """Generic exception for failures the caller did not anticipate."""
    pass
FilterGetResult = namedtuple('FilterGetResult', 'result,key,done', defaults=(None, True))
def bisect(condition, iterable):
    """Split `iterable` into two lists: first the items for which
    `condition` is truthy, then the items for which it is falsy."""
    matched, unmatched = [], []
    for item in iterable:
        (matched if condition(item) else unmatched).append(item)
    return matched, unmatched
def Popen(args, **kwargs):
    """asyncio analogue of subprocess.Popen: returns the coroutine from
    create_subprocess_shell when shell=True is passed, otherwise from
    create_subprocess_exec (in which case `args` is an argv sequence)."""
    shell = kwargs.pop('shell', None)
    if shell:
        return asyncio.create_subprocess_shell(args, **kwargs)
    else:
        return asyncio.create_subprocess_exec(*args, **kwargs)
currently_running_subprocesses = set()
async def run(*args, **kwargs):
    """Run subprocess.run in the I/O thread pool without blocking the loop.

    Accepts either run('cmd', 'arg') or run(['cmd', 'arg']). Defaults:
    stdout/stderr captured via pipes, check=True, close_fds=True; the
    child's preexec_fn is set to die_with_parent so it does not outlive
    middlewared.
    """
    if isinstance(args[0], list):
        args = tuple(args[0])
    kwargs.setdefault('stdout', subprocess.PIPE)
    kwargs.setdefault('stderr', subprocess.PIPE)
    kwargs.setdefault('check', True)
    if 'encoding' in kwargs:
        kwargs.setdefault('errors', 'strict')
    kwargs.setdefault('close_fds', True)
    kwargs['preexec_fn'] = die_with_parent
    loop = asyncio.get_event_loop()
    subprocess_identifier = f"{time.monotonic()}: {args!r}"
    try:
        # Track in-flight subprocesses so they can be reported when we run
        # out of file descriptors (EMFILE) below.
        currently_running_subprocesses.add(subprocess_identifier)
        try:
            return await loop.run_in_executor(io_thread_pool_executor, functools.partial(subprocess.run, args, **kwargs))
        finally:
            currently_running_subprocesses.discard(subprocess_identifier)
    except OSError as e:
        if e.errno == errno.EMFILE:
            logger.warning("Currently running async subprocesses: %r", currently_running_subprocesses)

        raise
def partition(s):
    """Split `s` on the first unescaped '.' into a (head, tail) pair.

    A backslash escapes a dot, allowing keys that themselves contain dots:
    partition('foo\\.bar') -> ('foo.bar', ''). When there is no unescaped
    dot, the whole string is returned as head with an empty tail.
    """
    head = ''
    while True:
        left, sep, right = s.partition('.')
        # str.endswith is safe on an empty `left` (e.g. a leading '.'),
        # where the previous `left[-1]` indexing raised IndexError.
        if sep and left.endswith('\\'):
            # escaped dot: keep it literally and continue scanning the rest
            head += left[:-1] + sep
            s = right
        else:
            return head + left, right
def get_impl(obj, path):
    """Resolve dot-notation `path` against nested dicts/lists in `obj`.

    Returns a FilterGetResult. For a list traversed with the '*' wildcard
    the result is the list itself with done=False and key holding the
    unresolved remainder of the path. A missing dict key resolves to the
    `undefined` sentinel; an out-of-range list index resolves to None.
    Raises ValueError for a non-numeric, non-'*' list accessor.
    """
    right = path
    cur = obj
    while right:
        left, right = partition(right)
        if isinstance(cur, dict):
            cur = cur.get(left, undefined)
        elif isinstance(cur, (list, tuple)):
            if not left.isdigit():
                # return all members and the remaining portion of path
                if left == '*':
                    return FilterGetResult(result=cur, key=right, done=False)

                raise ValueError(f'{left}: must be array index or wildcard character')

            left = int(left)
            cur = cur[left] if left < len(cur) else None

    return FilterGetResult(cur)
def get_attr(obj, path):
    """
    Simple wrapper around getattr to ensure that internal filtering methods return consistent
    types.

    Returns a FilterGetResult wrapping getattr(obj, path); a missing
    attribute propagates as AttributeError.
    """
    return FilterGetResult(getattr(obj, path))
def get(obj, path):
    """
    Get a path in obj using dot notation. In case of nested list or tuple, item may be specified by
    numeric index, otherwise the contents of the array are returned along with the unresolved path
    component. Returns None when the path does not resolve.

    e.g.
        obj = {'foo': {'bar': '1'}, 'foo.bar': '2', 'foobar': ['first', 'second', 'third']}

        path = 'foo.bar' returns '1'
        path = 'foo\\.bar' returns '2'
        path = 'foobar.0' returns 'first'
    """
    data = get_impl(obj, path)
    # normalize the internal `undefined` sentinel to None for callers
    return data.result if data.result is not undefined else None
def select_path(obj, path):
    """Walk dot-notation `path` through nested dicts in `obj`.

    Returns (keys_traversed, value); the value is the MatchNotFound class
    when a key is absent. Lists/tuples cannot be traversed and raise
    ValueError."""
    traversed = []
    node = obj
    remainder = path
    while remainder:
        component, remainder = partition(remainder)
        if isinstance(node, (list, tuple)):
            raise ValueError('Selecting by list index is not supported')
        if isinstance(node, dict):
            node = node.get(component, MatchNotFound)
            traversed.append(component)

    return (traversed, node)
def casefold(obj):
if obj is None:
return None
if isinstance(obj, str):
return obj.casefold()
if isinstance(obj, (list, tuple)):
return [x.casefold() for x in obj]
raise ValueError(f'{type(obj)}: support for casefolding object type not implemented.')
class filters(object):
    def op_in(x, y):
        # 'in': true when the field value x is a member of y
        return operator.contains(y, x)
    def op_rin(x, y):
        # 'rin' (reverse in): true when y is contained in x; a None field
        # value never matches
        if x is None:
            return False
        return operator.contains(x, y)
    def op_nin(x, y):
        # 'nin': true when x is NOT a member of y; a None field value never matches
        if x is None:
            return False
        return not operator.contains(y, x)
    def op_rnin(x, y):
        # 'rnin': true when y is NOT contained in x; a None field value never matches
        if x is None:
            return False
        return not operator.contains(x, y)
def op_re(x, y):
return re.match(y, x)
    def op_startswith(x, y):
        # '^': true when x starts with y; a None field value never matches
        if x is None:
            return False
        return x.startswith(y)
    def op_notstartswith(x, y):
        # '!^': true when x does NOT start with y; a None field value never matches
        if x is None:
            return False
        return not x.startswith(y)
    def op_endswith(x, y):
        # '$': true when x ends with y; a None field value never matches
        if x is None:
            return False
        return x.endswith(y)
    def op_notendswith(x, y):
        # '!$': true when x does NOT end with y; a None field value never matches
        if x is None:
            return False
        return not x.endswith(y)
opmap = {
'=': operator.eq,
'!=': operator.ne,
'>': operator.gt,
'>=': operator.ge,
'<': operator.lt,
'<=': operator.le,
'~': op_re,
'in': op_in,
'nin': op_nin,
'rin': op_rin,
'rnin': op_rnin,
'^': op_startswith,
'!^': op_notstartswith,
'$': op_endswith,
'!$': op_notendswith,
}
def validate_filters(self, filters, recursion_depth=0, value_maps=None):
"""
This method gets called when `query-filters` gets validated in
the accepts() decorator of public API endpoints. It is generally
a good idea to improve validation here, but not at significant
expense of performance as this is called every time `filter_list`
is called.
"""
if recursion_depth > MAX_FILTERS_DEPTH:
raise ValueError('query-filters max recursion depth exceeded')
for f in filters:
if len(f) == 2:
op, value = f
if op != 'OR':
raise ValueError(f'Invalid operation: {op}')
if not value:
raise ValueError('OR filter requires at least one branch.')
for branch in value:
if isinstance(branch[0], list):
self.validate_filters(branch, recursion_depth + 1, value_maps)
else:
self.validate_filters([branch], recursion_depth + 1, value_maps)
continue
elif len(f) != 3:
raise ValueError(f'Invalid filter {f}')
op = f[1]
if op[0] == 'C':
op = op[1:]
if op == '~':
raise ValueError('Invalid case-insensitive operation: {}'.format(f[1]))
if op not in self.opmap:
raise ValueError('Invalid operation: {}'.format(f[1]))
# special handling for datetime objects
for operand in (f[0], f[2]):
if not isinstance(operand, str) or not operand.endswith(TIMESTAMP_DESIGNATOR):
continue
if op not in ['=', '!=', '>', '>=', '<', '<=']:
raise ValueError(f'{op}: invalid timestamp operation.')
other = f[2] if operand == f[0] else f[0]
# At this point we're just validating that it's an ISO8601 string.
try:
ts = datetime.fromisoformat(other)
except (TypeError, ValueError):
raise ValueError(f'{other}: must be an ISO-8601 formatted timestamp string')
if value_maps is not None:
value_maps[other] = ts
def validate_select(self, select):
for s in select:
if isinstance(s, str):
continue
if isinstance(s, list):
if len(s) != 2:
raise ValueError(
f'{s}: A select as list may only contain two parameters: the name '
'of the parameter being selected, and the name to which to assign it '
'in resulting data.'
)
for idx, selector in enumerate(s):
if isinstance(selector, str):
continue
raise ValueError(
f'{s}: {"first" if idx == 0 else "second"} item must be a string.'
)
continue
raise ValueError(
f'{s}: selectors must be either a parameter name as a string or '
'a list containing two items [<parameter name>, <as name>] to emulate '
'SELECT <parameter name> AS <as name>.'
)
def validate_order_by(self, order_by):
for idx, o in enumerate(order_by):
if isinstance(o, str):
continue
raise ValueError(
f'{order_by}: parameter at index {idx} [{o}] is not a string.'
)
def validate_options(self, options):
if options is None:
return ({}, [], [])
if options.get('get') and options.get('limit', 0) > 1:
raise ValueError(
'Invalid options combination. `get` implies a single result.'
)
if options.get('get') and options.get('offset'):
raise ValueError(
'Invalid options combination. `get` implies a single result.'
)
select = options.get('select', [])
self.validate_select(select)
order_by = options.get('order_by', [])
self.validate_order_by(order_by)
return (options, select, order_by)
def filterop(self, i, f, source_getter):
name, op, value = f
data = source_getter(i, name)
if data.result is undefined:
# Key / attribute doesn't exist in value
return False
if not data.done:
new_filter = [data.key, op, value]
for entry in data.result:
if self.filterop(entry, new_filter, source_getter):
return True
return False
source = data.result
if op[0] == 'C':
fn = self.opmap[op[1:]]
source = casefold(source)
value = casefold(value)
else:
fn = self.opmap[op]
if fn(source, value):
return True
return False
def getter_fn(self, entry):
"""
Evaluate the type of objects returned by iterable and return an
appropriate function to retrieve attributes so that we can apply filters
This allows us to filter objects that are not dictionaries.
"""
if not entry:
return None
if isinstance(entry, dict):
return get_impl
return get_attr
def eval_filter(self, list_item, the_filter, getter, value_maps):
"""
`the_filter` in this case will be a single condition of either the form
[<a>, <opcode>, <b>] or ["OR", [<condition>, <condition>, ...]
This allows us to do a simple check of list length to determine whether
we have a conjunction or disjunction.
value_maps is dict supplied in which to store operands that need to
be converted into a different type.
Recursion depth is checked when validate_filters is called above.
"""
if len(the_filter) == 2:
# OR check
op, value = the_filter
for branch in value:
if isinstance(branch[0], list):
# This branch of OR is a conjunction of
# multiple conditions. All of them must be
# True in order for branch to be True.
hit = True
for i in branch:
if not self.eval_filter(list_item, i, getter, value_maps):
hit = False
break
else:
hit = self.eval_filter(list_item, branch, getter, value_maps)
if hit is True:
return True
# None of conditions in disjunction are True.
return False
# Normal condition check
if not value_maps:
return self.filterop(list_item, the_filter, getter)
if (operand_1 := value_maps.get(the_filter[0])):
operand_2.rstrip(TIMESTAMP_DESIGNATOR)
else:
operand_1 = the_filter[0]
if (operand_2 := value_maps.get(the_filter[2])):
operand_1.rstrip(TIMESTAMP_DESIGNATOR)
else:
operand_2 = the_filter[2]
return self.filterop(list_item, (operand_1, the_filter[1], operand_2), getter)
def do_filters(self, _list, filters, select, shortcircuit, value_maps):
rv = []
# we may be filtering output from a generator and so delay
# evaluation of what "getter" to use until we begin iteration
getter = None
for i in _list:
if getter is None:
getter = self.getter_fn(i)
valid = True
for f in filters:
if not self.eval_filter(i, f, getter, value_maps):
valid = False
break
if not valid:
continue
if select:
entry = self.do_select([i], select)[0]
else:
entry = i
rv.append(entry)
if shortcircuit:
break
return rv
def do_select(self, _list, select):
rv = []
for i in _list:
entry = {}
for s in select:
if isinstance(s, list):
target, new_name = s
else:
target = s
new_name = None
keys, value = select_path(i, target)
if value is MatchNotFound:
continue
if new_name is not None:
entry[new_name] = value
continue
last = keys.pop(-1)
obj = entry
for k in keys:
obj = obj.setdefault(k, {})
obj[last] = value
rv.append(entry)
return rv
def do_count(self, rv):
return len(rv)
def order_nulls(self, _list, order):
if order.startswith(REVERSE_CHAR):
order = order[1:]
reverse = True
else:
reverse = False
nulls = []
non_nulls = []
for entry in _list:
if entry.get(order) is None:
nulls.append(entry)
else:
non_nulls.append(entry)
non_nulls = sorted(non_nulls, key=lambda x: get(x, order), reverse=reverse)
return (nulls, non_nulls)
def order_no_null(self, _list, order):
if order.startswith(REVERSE_CHAR):
order = order[1:]
reverse = True
else:
reverse = False
return sorted(_list, key=lambda x: get(x, order), reverse=reverse)
def do_order(self, rv, order_by):
for o in order_by:
if o.startswith(NULLS_FIRST):
nulls, non_nulls = self.order_nulls(rv, o[len(NULLS_FIRST):])
rv = nulls + non_nulls
elif o.startswith(NULLS_LAST):
nulls, non_nulls = self.order_nulls(rv, o[len(NULLS_LAST):])
rv = non_nulls + nulls
else:
rv = self.order_no_null(rv, o)
return rv
def do_get(self, rv):
try:
return rv[0]
except IndexError:
raise MatchNotFound() from None
def filter_list(self, _list, filters=None, options=None):
options, select, order_by = self.validate_options(options)
do_shortcircuit = options.get('get') and not order_by
if filters:
maps = {}
self.validate_filters(filters, value_maps=maps)
rv = self.do_filters(_list, filters, select, do_shortcircuit, value_maps=maps)
if do_shortcircuit:
return self.do_get(rv)
elif select:
rv = self.do_select(_list, select)
else:
# Normalize the output to a list. Caller may have passed
# a generator into this method.
rv = list(_list)
if options.get('count') is True:
return self.do_count(rv)
rv = self.do_order(rv, order_by)
if options.get('get') is True:
return self.do_get(rv)
if options.get('offset'):
rv = rv[options['offset']:]
if options.get('limit'):
return rv[:options['limit']]
return rv
filter_list = filters().filter_list
def filter_getattrs(filters):
    """
    Get the set of attribute names referenced in a query-filters list.

    Handles plain conditions ([name, op, value]) and 'OR' entries
    (['OR', [branch, ...]]) whose branches may themselves be single
    conditions or conjunctions (lists of conditions).

    Raises ValueError for a malformed filter.
    """
    attrs = set()
    if not filters:
        return attrs

    f = list(filters)
    while f:
        filter_ = f.pop()
        if len(filter_) == 2:
            # 'OR' entry: queue every branch individually. Previously the
            # whole branch list was appended as a single filter, which lost
            # all but one branch's attribute (or failed outright).
            for branch in filter_[1]:
                if branch and isinstance(branch[0], list):
                    # branch is a conjunction: queue each condition
                    f.extend(branch)
                else:
                    f.append(branch)
        elif len(filter_) == 3:
            attrs.add(filter_[0])
        else:
            raise ValueError('Invalid filter.')
    return attrs
@functools.cache
def sw_info():
    """Return software information parsed from the manifest file (cached for the process lifetime)."""
    with open(MANIFEST_FILE) as f:
        manifest = json.load(f)

    version = manifest['version']
    return {
        'stable': 'MASTER' not in version,
        'codename': manifest['codename'],
        'version': version,
        'fullname': f'{BRAND_PRODUCT}-{version}',
        'buildtime': manifest['buildtime'],
    }
def sw_codename():
    """Return the release codename from the manifest."""
    return sw_info()['codename']
def sw_buildtime():
    """Return the build timestamp from the manifest."""
    return sw_info()['buildtime']
def sw_version():
    """Return the full product version string (e.g. '<product>-<version>')."""
    return sw_info()['fullname']
def sw_version_is_stable():
    """Return True when the manifest version is not a MASTER build."""
    return sw_info()['stable']
def is_empty(val):
    """
    A small utility that checks whether the provided string is None, '',
    or a string containing only whitespace.
    """
    if val is None or val == '':
        return True
    return val.isspace()
class Nid(object):
    """Callable generating monotonically increasing ids, starting at `_id`."""

    def __init__(self, _id):
        self._id = _id

    def __call__(self):
        current = self._id
        self._id += 1
        return current
| 19,453 | Python | .py | 525 | 26.607619 | 121 | 0.558273 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,229 | mdns.py | truenas_middleware/src/middlewared/middlewared/utils/mdns.py | # NOTE: tests are provided in src/middlewared/middlewared/pytest/unit/utils/test_mdns.py
# Any updates to this file should have corresponding updates to tests
import enum
import socket
import xml.etree.ElementTree as xml
from io import StringIO
from . import filter_list
AVAHI_SERVICE_PATH = '/etc/avahi/services'
SVC_HDR = '<?xml version="1.0" standalone="no"?><!DOCTYPE service-group SYSTEM "avahi-service.dtd">'
DISCARD = 9
class DevType(enum.Enum):
    """Apple device model strings advertised through mDNS device-info records."""
    AIRPORT = 'AirPort'
    APPLETV = 'AppleTv1,1'
    MACPRO = 'MacPro'
    MACPRORACK = 'MacPro7,1@ECOLOR=226,226,224'
    RACKMAC = 'RackMac'
    TIMECAPSULE = 'TimeCapsule6,106'
    XSERVE = 'Xserve'

    def __str__(self):
        # str(DevType.X) yields the raw model string for use in txt records
        return self.value
class ServiceType(enum.Enum):
    """(mDNS service type string, default port) pairs; CUSTOM requires explicit values."""
    ADISK = ('_adisk._tcp.', DISCARD)
    DEV_INFO = ('_device-info._tcp.', DISCARD)
    HTTP = ('_http._tcp.', 80)
    SMB = ('_smb._tcp.', 445)
    NUT = ('_nut._tcp.', 3493)
    CUSTOM = (None, None)
class AvahiConst(enum.Enum):
    """Constants mirrored from avahi-common (AVAHI_IF_UNSPEC = advertise on all interfaces)."""
    AVAHI_IF_UNSPEC = -1
def ip_addresses_to_interface_indexes(ifaces, ip_addresses):
    """
    Avahi can bind services to particular physical interfaces using the
    interface index; this is used to avoid advertising service availability
    on all networks. Used by etc_files for services.

    `ifaces` - results of interface.query
    `ip_addresses` - list of ip addresses the service is supposed to be
    bound to.

    Returns the list of interface indexes for interfaces carrying (or
    failing over) any of the given addresses.
    """
    iface_filter = [['OR', [
        ['state.aliases.*.address', 'in', ip_addresses],
        ['state.failover_virtual_aliases.*.address', 'in', ip_addresses]
    ]]]
    matched = {iface['id'] for iface in filter_list(ifaces, iface_filter)}
    return [socket.if_nametoindex(name) for name in matched]
def parse_srv_record_data(data_in):
    """
    Parse avahi service-record XML (passed as a string) and return a list of
    dicts with the basic information of every <service> element. Primarily
    exists for the benefit of CI tests.
    """
    parsed = []
    current = None
    with StringIO(data_in) as xmlbuf:
        for node in xml.parse(xmlbuf).getroot().iter():
            if node.tag == 'service':
                current = {
                    'srv': None,
                    'port': None,
                    'interface': None,
                    'txt_records': []
                }
                parsed.append(current)
            elif node.tag == 'type':
                current['srv'] = node.text
            elif node.tag == 'port':
                current['port'] = int(node.text)
            elif node.tag == 'interface':
                current['interface'] = int(node.text)
            elif node.tag == 'txt-record':
                current['txt_records'].append(node.text)

    return parsed
def generate_avahi_srv_record(
    service_type,
    interface_indexes=None,
    txt_records=None,
    custom_service_type=None,
    custom_port=None,
):
    """
    Generate XML string for service data for an avahi service. Takes
    the following parameters:

    `service_type`: See ServiceType enum above. If for some reason we are
    not generating one of our default record types in the enum and cannot
    expand it, then `CUSTOM` may be specified. In this case, the record
    type _must_ be specified via the kwarg `custom_service_type` and the
    port _must_ be specified via the kwarg `custom_port`. If port is
    indeterminate, then `9` (DISCARD protocol) should be used.

    `interface_indexes`: list of interface indexes to which to bind this
    service. Should be left as None to advertise on all interfaces.
    NOTE: this will restrict advertisements beyond what is specified in
    global avahi configuration.

    `txt_records`: list of txt records to publish through the service
    entry.

    WARNING: avahi daemon sets an inotify watch on its services directory,
    the generated service record should be written to a path outside the
    directory and renamed over the existing file.
    """
    svc_type = ServiceType[service_type]
    if svc_type == ServiceType.CUSTOM:
        if custom_service_type is None:
            raise ValueError('custom_service_type must be specified')

        if custom_port is None:
            raise ValueError('custom_port must be specified')

        srv = custom_service_type
        port = custom_port
    else:
        srv, port = svc_type.value
        if custom_port:
            port = custom_port

    txt_records = txt_records or []
    iface_indexes = interface_indexes or [AvahiConst.AVAHI_IF_UNSPEC]

    root = xml.Element("service-group")
    # We want to use replace-wildcards with %h here, rather than the hostname
    # because on hostname conflict:
    # 1. avahi will have to iterate thru names again
    # 2. avahi currently seems to generate a different postfix for host & service ('-23' vs ' #23')
    srv_name = xml.Element('name', {'replace-wildcards': 'yes'})
    srv_name.text = '%h'
    root.append(srv_name)

    # One <service> element per requested interface index
    for idx in iface_indexes:
        service = xml.Element('service')
        root.append(service)
        regtype = xml.SubElement(service, 'type')
        regtype.text = srv
        srvport = xml.SubElement(service, 'port')
        srvport.text = str(port)
        if idx != AvahiConst.AVAHI_IF_UNSPEC:
            iindex = xml.SubElement(service, 'interface')
            iindex.text = str(idx)

        for entry in txt_records:
            if not isinstance(entry, str):
                raise TypeError(f'{entry}: txt records must be string.')

            txt = xml.SubElement(service, 'txt-record')
            txt.text = entry

    xml_service_config = xml.ElementTree(root)
    # NOTE(review): writing starts at position 0, which overwrites the
    # SVC_HDR contents the buffer was seeded with — the returned record does
    # not actually contain the DOCTYPE header. Confirm whether the header
    # was intended to be prepended before "fixing" this.
    with StringIO(SVC_HDR) as buf:
        xml_service_config.write(buf, 'unicode')
        buf.write('\n')
        buf.seek(0)
        record = buf.read()

    return record
| 5,973 | Python | .py | 149 | 32.194631 | 100 | 0.635782 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,230 | gpu.py | truenas_middleware/src/middlewared/middlewared/utils/gpu.py | import collections
import os
import re
import subprocess
from typing import TextIO
import pyudev
from middlewared.service_exception import CallError
from .iommu import get_iommu_groups_info
from .pci import SENSITIVE_PCI_DEVICE_TYPES
RE_PCI_ADDR = re.compile(r'(?P<domain>.*):(?P<bus>.*):(?P<slot>.*)\.')
def parse_nvidia_info_file(file_obj: TextIO) -> tuple[dict, str]:
gpu, bus_loc = dict(), None
for line in file_obj:
k, v = line.split(':', 1)
k, v = k.strip().lower().replace(' ', '_'), v.strip()
gpu[k] = v
if k == 'bus_location':
bus_loc = v
return gpu, bus_loc
def get_nvidia_gpus() -> dict[str, dict]:
    """
    Return basic information about any connected NVIDIA devices, keyed by
    bus location (falling back to the procfs directory name, which is
    unique per gpu, when the bus location line is missing).
    """
    found = dict()
    try:
        with os.scandir('/proc/driver/nvidia/gpus') as entries:
            for entry in entries:
                if not entry.is_dir():
                    continue
                with open(os.path.join(entry.path, 'information'), 'r') as f:
                    info, bus_location = parse_nvidia_info_file(f)
                if bus_location is not None:
                    found[bus_location] = info
                elif info:
                    # maybe a line in the file changed but we still got some
                    # information; key by the unique procfs dirname instead
                    found[entry.name] = info
    except (FileNotFoundError, ValueError):
        pass
    return found
def get_critical_devices_in_iommu_group_mapping(iommu_groups: dict) -> dict[str, set[str]]:
    """Map IOMMU group number -> set of pci slots of critical devices in that group."""
    mapping = collections.defaultdict(set)
    for slot, details in iommu_groups.items():
        if details['critical']:
            mapping[details['number']].add(slot)
    return mapping
def get_gpus() -> list:
    """
    Enumerate display-class PCI devices (via ``lspci -D``) and return a list
    of dicts describing each gpu: parsed pci address, child devices, a
    normalised vendor (NVIDIA/INTEL/AMD when recognised) and whether using
    the gpu for passthrough would detach system-critical devices.

    Raises CallError when ``lspci`` exits non-zero.
    """
    cp = subprocess.Popen(['lspci', '-D'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = cp.communicate()
    if cp.returncode:
        raise CallError(f'Unable to list available gpus: {stderr.decode()}')

    gpus = []
    gpu_slots = []
    # Collect every lspci line describing a display-class controller
    for line in stdout.decode().splitlines():
        for k in (
            'VGA compatible controller',
            'Display controller',
            '3D controller',
        ):
            if k in line:
                gpu_slots.append((line.strip(), k))
                break

    iommu_groups = get_iommu_groups_info(get_critical_info=True)
    critical_iommu_mapping = get_critical_devices_in_iommu_group_mapping(iommu_groups)
    for gpu_line, key in gpu_slots:
        # first whitespace-separated token of the lspci line is the pci address
        addr = gpu_line.split()[0]
        # NOTE(review): RE_PCI_ADDR.match() may return None for an unexpected
        # address format, which would raise below when .group() is called —
        # confirm lspci -D always emits domain:bus:slot.function here.
        addr_re = RE_PCI_ADDR.match(addr)

        gpu_dev = pyudev.Devices.from_name(pyudev.Context(), 'pci', addr)
        # Let's normalise vendor for consistency
        vendor = None
        vendor_id_from_db = gpu_dev.get('ID_VENDOR_FROM_DATABASE', '').lower()
        if 'nvidia' in vendor_id_from_db:
            vendor = 'NVIDIA'
        elif 'intel' in vendor_id_from_db:
            vendor = 'INTEL'
        elif 'amd' in vendor_id_from_db:
            vendor = 'AMD'

        devices = []
        critical_reason = None
        critical_devices = set()
        # So we will try to mark those gpu's as critical which meet following criteria:
        # 1) Have a device which belongs to sensitive pci devices group
        # 2) Have a device which is in same iommu group as a device which belongs to sensitive pci devices group
        if critical_iommu_mapping[iommu_groups.get(addr, {}).get('number')]:
            critical_devices_based_on_iommu = {addr}
        else:
            critical_devices_based_on_iommu = set()

        # Walk sibling devices under the same parent (i.e. same physical card)
        for child in filter(lambda c: all(k in c for k in ('PCI_SLOT_NAME', 'PCI_ID')), gpu_dev.parent.children):
            devices.append({
                'pci_id': child['PCI_ID'],
                'pci_slot': child['PCI_SLOT_NAME'],
                'vm_pci_slot': f'pci_{child["PCI_SLOT_NAME"].replace(".", "_").replace(":", "_")}',
            })
            for k in SENSITIVE_PCI_DEVICE_TYPES.values():
                if k.lower() in child.get('ID_PCI_SUBCLASS_FROM_DATABASE', '').lower():
                    critical_devices.add(child['PCI_SLOT_NAME'])
                    break

            if critical_iommu_mapping[iommu_groups.get(child['PCI_SLOT_NAME'], {}).get('number')]:
                critical_devices_based_on_iommu.add(child['PCI_SLOT_NAME'])

        if critical_devices:
            critical_reason = f'Critical devices found: {", ".join(critical_devices)}'

        if critical_devices_based_on_iommu:
            critical_reason = f'{critical_reason}\n' if critical_reason else ''
            critical_reason += ('Critical devices found in same IOMMU group: '
                                f'{", ".join(critical_devices_based_on_iommu)}')

        gpus.append({
            'addr': {
                'pci_slot': addr,
                **{k: addr_re.group(k) for k in ('domain', 'bus', 'slot')},
            },
            'description': gpu_line.split(f'{key}:')[-1].split('(rev')[0].strip(),
            'devices': devices,
            'vendor': vendor,
            'uses_system_critical_devices': bool(critical_reason),
            'critical_reason': critical_reason,
        })
    return gpus
| 5,419 | Python | .py | 116 | 36.086207 | 113 | 0.581091 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,231 | shutil.py | truenas_middleware/src/middlewared/middlewared/utils/shutil.py | # -*- coding=utf-8 -*-
import subprocess
__all__ = ["rmtree_one_filesystem"]
def rmtree_one_filesystem(path):
    """
    Recursively remove `path` without crossing filesystem boundaries
    (delegates to ``rm --one-file-system -rf``). Raises OSError carrying
    rm's stderr on failure.
    """
    result = subprocess.run(
        ['rm', '--one-file-system', '-rf', path],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        encoding="utf-8", errors="ignore",
    )
    if result.returncode != 0:
        raise OSError(result.stderr.rstrip())
| 393 | Python | .py | 9 | 37.111111 | 112 | 0.645669 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,232 | functools_.py | truenas_middleware/src/middlewared/middlewared/utils/functools_.py | import asyncio
import copy
import functools
from middlewared.utils.lang import undefined
def cache(func):
    """
    Decorator memoizing a method taking only ``self`` (sync or async) for
    the lifetime of the process. A deep copy of the cached value is returned
    on every call so callers cannot mutate the cached object.
    """
    value = undefined

    if asyncio.iscoroutinefunction(func):
        @functools.wraps(func)
        async def wrapped(self):
            nonlocal value
            # `is` identity check: `undefined` is a sentinel, and `==` could
            # invoke an arbitrary (or erroring) __eq__ on the cached value
            if value is undefined:
                value = await func(self)

            return copy.deepcopy(value)
    else:
        @functools.wraps(func)
        def wrapped(self):
            nonlocal value
            if value is undefined:
                value = func(self)

            return copy.deepcopy(value)

    return wrapped
| 610 | Python | .py | 21 | 20.380952 | 44 | 0.613793 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,233 | scsi_generic.py | truenas_middleware/src/middlewared/middlewared/utils/scsi_generic.py | import ctypes
import fcntl
import os
# SG_IO ioctl command constant
SG_IO = 0x2285
# SCSI sense buffer size constant
SG_MAX_SENSE = 32
# Other necessary constants
SG_DXFER_FROM_DEV = -3
SG_DXFER_NONE = 0
SG_FLAG_DIRECT_IO = 1
SG_INFO_OK = 0
# SCSI Enclosure Services command
SES_RECEIVE_DIAGNOSTIC = 0x1C
SES_ENCLOSURE_STATUS_PAGE_CODE = 0x02
INQUIRY = 0x12
class sg_io_hdr_v3(ctypes.Structure):
    """Python mirror of the Linux ``struct sg_io_hdr`` (version 3) passed to the SG_IO ioctl."""
    _fields_ = [
        ("interface_id", ctypes.c_int),
        ("dxfer_direction", ctypes.c_int),
        ("cmd_len", ctypes.c_ubyte),
        ("mx_sb_len", ctypes.c_ubyte),
        ("iovec_count", ctypes.c_ushort),
        ("dxfer_len", ctypes.c_uint),
        ("dxferp", ctypes.c_void_p),
        ("cmdp", ctypes.c_void_p),
        ("sbp", ctypes.c_void_p),
        ("timeout", ctypes.c_uint),
        ("flags", ctypes.c_uint),
        ("pack_id", ctypes.c_int),
        ("usr_ptr", ctypes.c_void_p),
        ("status", ctypes.c_ubyte),
        ("masked_status", ctypes.c_ubyte),
        ("msg_status", ctypes.c_ubyte),
        ("sb_len_wr", ctypes.c_ubyte),
        ("host_status", ctypes.c_ushort),
        ("driver_status", ctypes.c_ushort),
        ("resid", ctypes.c_int),
        ("duration", ctypes.c_uint),
        ("info", ctypes.c_uint),
    ]
def get_sgio_hdr_structure(cdb, dxfer_len, timeout=60000):
    """
    Build an sg_io_hdr_v3 for a read-from-device SCSI command.

    Returns (header, data-in buffer, sense buffer); the caller must keep
    references to both buffers alive while the ioctl runs, since the header
    only holds raw pointers to them.
    """
    sense = (ctypes.c_ubyte * SG_MAX_SENSE)()
    data_in = (ctypes.c_ubyte * dxfer_len)()

    header = sg_io_hdr_v3()
    header.interface_id = ord('S')
    header.cmd_len = len(cdb)
    header.cmdp = ctypes.cast(cdb, ctypes.c_void_p)
    header.dxfer_direction = SG_DXFER_FROM_DEV
    header.dxfer_len = dxfer_len
    header.dxferp = ctypes.cast(data_in, ctypes.c_void_p)
    header.sbp = ctypes.cast(sense, ctypes.c_void_p)
    header.mx_sb_len = len(sense)
    header.timeout = timeout
    return header, data_in, sense
def do_io(device, hdr):
    """
    Open `device` read-only/non-blocking, issue the SG_IO ioctl with `hdr`,
    and raise OSError on failure. The fd is always closed.
    """
    fd = os.open(device, os.O_RDONLY | os.O_NONBLOCK)
    try:
        rc = fcntl.ioctl(fd, SG_IO, hdr)
        if rc != 0:
            raise OSError("SG_IO ioctl failed")
        # NOTE(review): SG_INFO_OK is 0, so this mask can never be non-zero;
        # preserved as-is to avoid changing behavior.
        if (hdr.info & SG_INFO_OK) != 0:
            raise OSError("SG_IO ioctl indicated failure")
    finally:
        os.close(fd)
def inquiry(device):
    """
    Issue a standard SCSI INQUIRY to `device` and return the T10 vendor,
    product identification, product revision and serial fields as ASCII
    strings with space and NUL padding removed.

    Field offsets follow table 148 in SPC-5.
    """
    def _ascii_field(buf, start, end):
        # Collapse a fixed-width field, dropping space and NUL padding.
        # (This was previously four copy-pasted loops.)
        return ''.join(chr(c) for c in buf[start:end] if chr(c) not in (' ', '\x00'))

    dxfer_len = 0x38
    cdb = (ctypes.c_ubyte * 6)(INQUIRY, 0x00, 0x00, 0x00, dxfer_len, 0x00)
    hdr, results_buffer, sense_buffer = get_sgio_hdr_structure(cdb, dxfer_len)
    do_io(device, hdr)

    # Offsets (inclusive start, exclusive end) per table 148 in SPC-5
    return {
        'vendor': _ascii_field(results_buffer, 8, 16),
        'product': _ascii_field(results_buffer, 16, 32),
        'revision': _ascii_field(results_buffer, 32, 36),
        'serial': _ascii_field(results_buffer, 36, 56),
    }
| 3,660 | Python | .py | 94 | 32.574468 | 119 | 0.614777 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,234 | contextlib.py | truenas_middleware/src/middlewared/middlewared/utils/contextlib.py | # -*- coding=utf-8 -*-
import contextlib
import logging
logger = logging.getLogger(__name__)
__all__ = ["asyncnullcontext"]
@contextlib.asynccontextmanager
async def asyncnullcontext(enter_result=None):
    """Async analogue of ``contextlib.nullcontext``: yield *enter_result* with no setup/teardown."""
    yield enter_result
| 230 | Python | .py | 8 | 26.75 | 46 | 0.770642 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,235 | disk_temperatures.py | truenas_middleware/src/middlewared/middlewared/utils/disk_temperatures.py | from dataclasses import dataclass
from typing import Dict, Optional
from middlewared.utils.db import query_table
DISKS_TO_IGNORE: tuple = ('sr', 'md', 'dm-', 'loop', 'zd')
NVME_TYPE: str = 'nvme'
HDD_TYPE: str = 'hdd'
@dataclass
class Disk:
    """Subset of a ``storage_disk`` row needed for temperature reporting."""
    # `id` and `name` both carry the kernel device name (see
    # get_disks_for_temperature_reading, which assigns disk['name'] to both)
    id: str
    identifier: str
    name: str
    serial: str | None = None
    model: str | None = None
    type: str | None = None
def parse_smartctl_for_temperature_output(json) -> Optional[int]:
    """Extract the current temperature reading from parsed ``smartctl -j`` output."""
    temperature = json['temperature']
    return temperature['current']
def get_disks_for_temperature_reading() -> Dict[str, Disk]:
    """
    Return Disk entries keyed by serial for every storage_disk row that has
    a non-empty serial number and SMART monitoring enabled.
    """
    result = {}
    for row in query_table('storage_disk', prefix='disk_'):
        if row['serial'] == '' or not row['togglesmart']:
            continue
        result[row['serial']] = Disk(
            id=row['name'], identifier=row['identifier'], serial=row['serial'],
            model=row['model'], type=row['type'], name=row['name'],
        )
    return result
def get_disks_temperatures(netdata_metrics) -> Dict[str, Optional[int]]:
    """
    Map disk name -> latest temperature value taken from netdata 'smart_log'
    charts. nvme devices are keyed by their kernel name directly; other
    disks are translated through the storage_disk table.

    Disks not present in the table are skipped: previously `disks[disk_name]`
    raised KeyError for any non-nvme disk missing from the database, losing
    the readings for every other disk as well.

    NOTE(review): `disks` is keyed by serial number while `disk_name` is a
    kernel device name — confirm the intended lookup key with the caller.
    """
    disks = get_disks_for_temperature_reading()
    temperatures = {}
    for chart in filter(lambda k: 'smart_log' in k, netdata_metrics):
        disk_name = chart.rsplit('.', 1)[-1]
        value = netdata_metrics[chart]['dimensions'][disk_name]['value']
        if disk_name.startswith('nvme'):
            temperatures[disk_name] = value
        elif (disk := disks.get(disk_name)) is not None:
            temperatures[disk.id] = value
    return temperatures
| 1,470 | Python | .py | 36 | 34.583333 | 86 | 0.643209 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,236 | iommu.py | truenas_middleware/src/middlewared/middlewared/utils/iommu.py | import collections
import contextlib
import os.path
import pathlib
import re
from .pci import get_pci_device_class, SENSITIVE_PCI_DEVICE_TYPES
RE_DEVICE_NAME = re.compile(r'(\w+):(\w+):(\w+).(\w+)')
def get_iommu_groups_info(get_critical_info: bool = False) -> dict[str, dict]:
    """
    Return a mapping of pci device name (e.g. '0000:00:01.0') to its IOMMU
    group details: {'number': <group number>, 'addresses': [<address dicts
    for every device in the group>]} plus, when `get_critical_info` is set,
    a 'critical' flag derived from the device's pci class.

    Returns an empty dict when /sys/kernel/iommu_groups does not exist.
    """
    addresses = collections.defaultdict(list)
    final = dict()
    with contextlib.suppress(FileNotFoundError):
        # path layout: /sys/kernel/iommu_groups/<group>/devices/<dom:bus:slot.func>
        for i in pathlib.Path('/sys/kernel/iommu_groups').glob('*/devices/*'):
            if not i.is_dir() or not i.parent.parent.name.isdigit() or not RE_DEVICE_NAME.fullmatch(i.name):
                continue

            iommu_group = int(i.parent.parent.name)
            dbs, func = i.name.split('.')
            dom, bus, slot = dbs.split(':')
            addresses[iommu_group].append({
                'domain': f'0x{dom}',
                'bus': f'0x{bus}',
                'slot': f'0x{slot}',
                'function': f'0x{func}',
            })
            # NOTE: every device in the same group stores a reference to the
            # SAME `addresses[iommu_group]` list, so entries appended while
            # processing later devices are visible to earlier ones too.
            final[i.name] = {
                'number': iommu_group,
                'addresses': addresses[iommu_group],
            }
            if get_critical_info:
                # critical == pci class matches one of the sensitive device types
                final[i.name]['critical'] = any(
                    k.lower() in get_pci_device_class(os.path.join('/sys/bus/pci/devices', i.name))
                    for k in SENSITIVE_PCI_DEVICE_TYPES.keys()
                )
    return final
| 1,377 | Python | .py | 33 | 30.939394 | 108 | 0.553478 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,237 | type.py | truenas_middleware/src/middlewared/middlewared/utils/type.py | def copy_function_metadata(f, nf):
nf.__name__ = f.__name__
nf.__doc__ = f.__doc__
# Copy private attrs to new function so decorators can work on top of it
# e.g. _pass_app
for i in dir(f):
if i.startswith('__'):
continue
if i.startswith('_'):
setattr(nf, i, getattr(f, i))
for i in ["accepts", "returns", "audit", "audit_callback", "audit_extended", "roles", "new_style_accepts",
"new_style_returns"]:
if hasattr(f, i):
setattr(nf, i, getattr(f, i))
| 549 | Python | .py | 14 | 31.214286 | 110 | 0.543925 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,238 | socket.py | truenas_middleware/src/middlewared/middlewared/utils/socket.py | import socket
def is_socket_available(socket_path: str) -> bool:
"""Check if a Unix socket is available."""
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.connect(socket_path)
return True
except (socket.error, FileNotFoundError):
return False
finally:
s.close()
| 330 | Python | .py | 11 | 24.090909 | 57 | 0.656151 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,239 | debug.py | truenas_middleware/src/middlewared/middlewared/utils/debug.py | import inspect
import linecache
import sys
import traceback
import types
def get_frame_details(frame, logger):
    """
    Build a serializable summary of a stack frame: filename, line number,
    method name, source line, plus (best-effort) argument spec and repr()'d
    locals. Values passed via *args are masked with '***' before capture.

    Returns {} when `frame` is not an actual frame object.
    """
    if not isinstance(frame, types.FrameType):
        return {}

    cur_frame = {
        'filename': frame.f_code.co_filename,
        'lineno': frame.f_lineno,
        'method': frame.f_code.co_name,
        'line': linecache.getline(frame.f_code.co_filename, frame.f_lineno),
    }

    argspec = None
    varargspec = None
    keywordspec = None
    _locals = {}

    try:
        arginfo = inspect.getargvalues(frame)
        argspec = arginfo.args
        if arginfo.varargs is not None:
            varargspec = arginfo.varargs
            # Mask *args values so their (possibly sensitive) contents do not
            # end up in the captured locals below
            temp_varargs = list(arginfo.locals[varargspec])
            for i, arg in enumerate(temp_varargs):
                temp_varargs[i] = '***'

            arginfo.locals[varargspec] = tuple(temp_varargs)

        if arginfo.keywords is not None:
            keywordspec = arginfo.keywords

        _locals.update(list(arginfo.locals.items()))
    except Exception:
        logger.critical('Error while extracting arguments from frames.', exc_info=True)

    if argspec:
        cur_frame['argspec'] = argspec
    if varargspec:
        cur_frame['varargspec'] = varargspec
    if keywordspec:
        cur_frame['keywordspec'] = keywordspec
    if _locals:
        try:
            cur_frame['locals'] = {k: repr(v) for k, v in _locals.items()}
        except Exception:
            # repr() may fail since it may be one of the reasons
            # of the exception
            cur_frame['locals'] = {}

    return cur_frame
def get_threads_stacks():
    """Return {thread_id: formatted stack lines} for every thread in this process."""
    stacks = {}
    for thread_id, frame in sys._current_frames().items():
        stacks[thread_id] = traceback.format_stack(frame)
    return stacks
| 1,743 | Python | .py | 51 | 26.411765 | 87 | 0.622394 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,240 | allowlist.py | truenas_middleware/src/middlewared/middlewared/utils/allowlist.py | import fnmatch
import re
from middlewared.api.current import HttpVerb
ALLOW_LIST_FULL_ADMIN = {'method': '*', 'resource': '*'}
class Allowlist:
    """
    Pre-compiled privilege allow list.

    Entries whose resource contains '*' are compiled into fnmatch-derived
    regular expressions per method; literal resources go into per-method
    exact-match sets. `full_admin` is True when the wildcard-everything
    entry is present.
    """

    def __init__(self, allowlist: list[dict]):
        self.exact: dict[HttpVerb, set[str]] = {}
        self.patterns: dict[HttpVerb, list[re.Pattern]] = {}
        self.full_admin = ALLOW_LIST_FULL_ADMIN in allowlist
        for item in allowlist:
            verb, resource = item["method"], item["resource"]
            if "*" in resource:
                self.patterns.setdefault(verb, []).append(re.compile(fnmatch.translate(resource)))
            else:
                self.exact.setdefault(verb, set()).add(resource)

    def authorize(self, method: HttpVerb, resource: str):
        """True when a wildcard-method rule or a method-specific rule matches."""
        return self._authorize_internal("*", resource) or self._authorize_internal(method, resource)

    def _authorize_internal(self, method: HttpVerb, resource: str):
        if resource in self.exact.get(method, ()):
            return True

        for pattern in self.patterns.get(method, ()):
            if pattern.match(resource):
                return True

        return False
| 1,259 | Python | .py | 27 | 36.888889 | 100 | 0.624183 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,241 | privilege.py | truenas_middleware/src/middlewared/middlewared/utils/privilege.py | import enum
from middlewared.auth import TrueNasNodeSessionManagerCredentials
from middlewared.role import ROLES
class LocalAdminGroups(enum.IntEnum):
    """Well-known local administrative group GIDs."""
    BUILTIN_ADMINISTRATORS = 544
def privilege_has_webui_access(privilege: dict) -> bool:
    """
    This method determines whether the specified privilege is sufficient
    to grant WebUI access. Current check is whether any of the roles for
    the privilege entry are not builtin, where "builtin" means an
    internal role that is used for defining access to particular methods
    (as opposed to non-builtin ones that were developed explicitly for
    assignment by administrators).

    The actual check performed here may change at a future time if we
    decide to add explicit `webui_access` flag to privilege.

    Returns True if privilege grants webui access and False if it does not.
    """
    for role in privilege['roles']:
        if ROLES[role].builtin is False:
            return True
    return False
def credential_has_full_admin(credential: object) -> bool:
if credential.is_user_session and 'FULL_ADMIN' in credential.user['privilege']['roles']:
return True
if isinstance(credential, TrueNasNodeSessionManagerCredentials):
return True
if credential.allowlist is None:
return False
return credential.allowlist.full_admin
def credential_full_admin_or_user(
credential: object,
username: str
) -> bool:
if credential is None:
return False
elif credential_has_full_admin(credential):
return True
return credential.user['username'] == username
def app_credential_full_admin_or_user(
app: object,
username: str
) -> bool:
"""
Privilege check for whether credential has full admin privileges
or matches the specified username
Returns True on success and False on failure
Success:
* app is None - internal middleware call
* credential is a user session and has FULL_ADMIN role
* credential has a wildcard entry in allow list
* credential username matches `username` passed into this method
"""
if app is None:
return True
return credential_full_admin_or_user(app.authenticated_credentials, username)
def privileges_group_mapping(
privileges: list,
group_ids: list,
groups_key: str,
) -> dict:
allowlist = []
roles = set()
privileges_out = []
group_ids = set(group_ids)
for privilege in privileges:
if set(privilege[groups_key]) & group_ids:
allowlist.extend(privilege['allowlist'])
roles |= set(privilege['roles'])
privileges_out.append(privilege)
return {
'allowlist': allowlist,
'roles': list(roles),
'privileges': privileges_out
}
def credential_is_limited_to_own_jobs(credential: object | None) -> bool:
if credential is None or not credential.is_user_session:
return False
return not credential_has_full_admin(credential)
def credential_is_root_or_equivalent(credential: object | None) -> bool:
if credential is None or not credential.is_user_session:
return False
# SYS_ADMIN is set when user UID is 0 (root) or 950 (truenas_admin).
return 'SYS_ADMIN' in credential.user['account_attributes']
| 3,238 | Python | .py | 80 | 34.9 | 92 | 0.721547 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,242 | shell.py | truenas_middleware/src/middlewared/middlewared/utils/shell.py | # -*- coding=utf-8 -*-
import shlex
def join_commandline(args):
return " ".join(map(shlex.quote, args))
| 110 | Python | .py | 4 | 25 | 43 | 0.673077 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,243 | user_api_key.py | truenas_middleware/src/middlewared/middlewared/utils/user_api_key.py | import os
from base64 import b64encode
from dataclasses import dataclass
from struct import pack
from uuid import uuid4
from .tdb import (
TDBDataType,
TDBHandle,
TDBOptions,
TDBPathType,
)
PAM_TDB_DIR = '/var/run/pam_tdb'
PAM_TDB_FILE = os.path.join(PAM_TDB_DIR, 'pam_tdb.tdb')
PAM_TDB_DIR_MODE = 0o700
PAM_TDB_VERSION = 1
PAM_TDB_MAX_KEYS = 10 # Max number of keys per user. Also defined in pam_tdb.c
PAM_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES)
@dataclass(frozen=True)
class UserApiKey:
expiry: int
dbid: int
userhash: str
@dataclass(frozen=True)
class PamTdbEntry:
keys: list[UserApiKey]
username: str
def _setup_pam_tdb_dir() -> None:
os.makedirs(PAM_TDB_DIR, mode=PAM_TDB_DIR_MODE, exist_ok=True)
os.chmod(PAM_TDB_DIR, PAM_TDB_DIR_MODE)
def _pack_user_api_key(api_key: UserApiKey) -> bytes:
"""
Convert UserApiKey object to bytes for TDB insertion.
This is packed struct with expiry converted into signed 64 bit
integer, the database id (32-bit unsigned), and the userhash (pascal string)
"""
if not isinstance(api_key, UserApiKey):
raise TypeError(f'{type(api_key)}: not a UserApiKey')
userhash = api_key.userhash.encode() + b'\x00'
return pack(f'<qI{len(userhash)}p', api_key.expiry, api_key.dbid, userhash)
def write_entry(hdl: TDBHandle, entry: PamTdbEntry) -> None:
"""
Convert PamTdbEntry object into a packed struct and insert
into tdb file.
key: username
value: uint32_t (version) + uint32_t (cnt of keys)
"""
if not isinstance(entry, PamTdbEntry):
raise TypeError(f'{type(entry)}: expected PamTdbEntry')
key_cnt = len(entry.keys)
if key_cnt > PAM_TDB_MAX_KEYS:
raise ValueError(f'{key_cnt}: count of entries exceeds maximum')
entry_bytes = pack('<II', PAM_TDB_VERSION, len(entry.keys))
parsed_cnt = 0
for key in entry.keys:
entry_bytes += _pack_user_api_key(key)
parsed_cnt += 1
# since we've already packed struct with array length
# we need to rigidly ensure we don't exceed it.
assert parsed_cnt == key_cnt
hdl.store(entry.username, b64encode(entry_bytes))
def flush_user_api_keys(pam_entries: list[PamTdbEntry]) -> None:
"""
Write a PamTdbEntry object to the pam_tdb file for user
authentication. This method first writes to temporary file
and then renames over pam_tdb file to ensure flush is atomic
and reduce risk of lock contention while under a transaction
lock.
raises:
TypeError - not PamTdbEntry
AssertionError - count of entries changed while generating
tdb payload
RuntimeError - TDB library error
"""
_setup_pam_tdb_dir()
if not isinstance(pam_entries, list):
raise TypeError('Expected list of PamTdbEntry objects')
tmp_path = os.path.join(PAM_TDB_DIR, f'tmp_{uuid4()}.tdb')
with TDBHandle(tmp_path, PAM_TDB_OPTIONS) as hdl:
hdl.keys_null_terminated = False
try:
for entry in pam_entries:
write_entry(hdl, entry)
except Exception:
os.remove(tmp_path)
raise
os.rename(tmp_path, PAM_TDB_FILE)
| 3,224 | Python | .py | 86 | 32.081395 | 80 | 0.690132 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,244 | threading.py | truenas_middleware/src/middlewared/middlewared/utils/threading.py | from concurrent.futures import Executor, Future, ThreadPoolExecutor
from itertools import count
import logging
import os
import threading
from .prctl import set_name
logger = logging.getLogger(__name__)
counter = count(1)
__all__ = ["set_thread_name", "start_daemon_thread", "IoThreadPoolExecutor", "io_thread_pool_executor"]
def set_thread_name(name):
set_name(name)
def start_daemon_thread(*args, **kwargs):
kwargs.setdefault("daemon", True)
if not kwargs["daemon"]:
raise ValueError("`start_daemon_thread` called with `daemon=False`")
t = threading.Thread(*args, **kwargs)
t.start()
return t
class IoThreadPoolExecutor(Executor):
def __init__(self):
self.thread_count = (20 if ((os.cpu_count() or 1) + 4) < 32 else 32) + 1
self.executor = ThreadPoolExecutor(
self.thread_count,
"IoThread",
initializer=lambda: set_thread_name("IoThread"),
)
def submit(self, fn, *args, **kwargs):
if len(self.executor._threads) == self.thread_count and self.executor._idle_semaphore._value - 1 <= 1:
fut = Future()
logger.trace("Calling %r in a single-use thread", fn)
start_daemon_thread(name=f"ExtraIoThread_{next(counter)}", target=worker, args=(fut, fn, args, kwargs))
return fut
return self.executor.submit(fn, *args, **kwargs)
def worker(fut, fn, args, kwargs):
set_thread_name("ExtraIoThread")
try:
fut.set_result(fn(*args, **kwargs))
except Exception as e:
fut.set_exception(e)
io_thread_pool_executor = IoThreadPoolExecutor()
| 1,628 | Python | .py | 40 | 34.525 | 115 | 0.664971 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,245 | rootfs.py | truenas_middleware/src/middlewared/middlewared/utils/rootfs.py | from dataclasses import dataclass
import contextlib
from functools import cached_property
import json
import os
import subprocess
from middlewared.utils.filesystem import stat_x
from middlewared.utils.mount import getmntinfo
@dataclass
class ReadonlyState:
initial: bool
current: bool
@dataclass
class Dataset:
name: str
mountpoint: str
readonly: ReadonlyState
@cached_property
def readonly_source(self):
return subprocess.run(
["zfs", "get", "-H", "-o", "source", "readonly", self.name],
capture_output=True,
check=True,
text=True,
).stdout.strip()
class ReadonlyRootfsManager:
def __init__(self, root="/"):
self.root = root
self.initialized = False
self.datasets: dict[str, Dataset] = {}
def __enter__(self):
return self
def make_writeable(self):
self._initialize()
self._set_state({
name: False
for name, dataset in self.datasets.items()
if dataset.readonly.current
})
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.initialized:
return
self._set_state({
name: dataset.readonly.initial
for name, dataset in self.datasets.items()
if dataset.readonly.current != dataset.readonly.initial
})
def _initialize(self):
if self.initialized:
return
with open(os.path.join(self.root, "conf/truenas_root_ds.json"), "r") as f:
conf = json.loads(f.read())
usr_ds = next((i for i in conf if i["fhs_entry"]["name"] == "usr"))["ds"]
for dataset, name in [
("", usr_ds.rsplit("/", 1)[0]),
("usr", usr_ds),
]:
mountpoint = "/".join(filter(None, (self.root, dataset)))
st_mnt_id = stat_x.statx(mountpoint).stx_mnt_id
readonly = "RO" in getmntinfo(mnt_id=st_mnt_id)[st_mnt_id]["super_opts"]
self.datasets[dataset] = Dataset(name, mountpoint, ReadonlyState(readonly, readonly))
self.initialized = True
def _set_state(self, state: dict[str, bool]):
if state.get("usr") is True:
self._handle_usr(True)
for name, readonly in state.items():
# Do not change `readonly` property when we're running in the installer, and it was not set yet
if self.datasets[name].readonly_source != "local":
continue
subprocess.run(
["zfs", "set", f"readonly={'on' if readonly else 'off'}", self.datasets[name].name],
capture_output=True,
check=True,
text=True,
)
self.datasets[name].readonly.current = readonly
if state.get("usr") is False:
self._handle_usr(False)
def _handle_usr(self, readonly):
binaries = (
# Used in `nvidia.install`
"apt",
"apt-config",
"apt-key",
# Some initramfs scripts use `dpkg --print-architecture` or similar calls
"dpkg",
)
if readonly:
for binary in binaries:
os.chmod(os.path.join(self.root, f"usr/bin/{binary}"), 0o644)
with contextlib.suppress(FileNotFoundError):
os.rename(os.path.join(self.root, f"usr/local/bin/{binary}.bak"),
os.path.join(self.root, f"usr/local/bin/{binary}"))
else:
for binary in binaries:
os.chmod(os.path.join(self.root, f"usr/bin/{binary}"), 0o755)
with contextlib.suppress(FileNotFoundError):
os.rename(os.path.join(self.root, f"usr/local/bin/{binary}"),
os.path.join(self.root, f"usr/local/bin/{binary}.bak"))
| 3,873 | Python | .py | 99 | 28.69697 | 107 | 0.571086 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,246 | os.py | truenas_middleware/src/middlewared/middlewared/utils/os.py | import logging
import os
import resource
logger = logging.getLogger(__name__)
__all__ = ['close_fds']
def close_fds(low_fd, max_fd=None):
if max_fd is None:
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
# Avoid infinity as thats not practical
if max_fd == resource.RLIM_INFINITY:
max_fd = 8192
os.closerange(low_fd, max_fd)
| 382 | Python | .py | 12 | 26.75 | 62 | 0.665753 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,247 | mount.py | truenas_middleware/src/middlewared/middlewared/utils/mount.py | import os
import logging
logger = logging.getLogger(__name__)
__all__ = ["getmntinfo", "getmnttree"]
def __mntent_dict(line):
mnt_id, parent_id, maj_min, root, mp, opts, extra = line.split(" ", 6)
fstype, mnt_src, super_opts = extra.split(' - ')[1].split()
major, minor = maj_min.split(':')
devid = os.makedev(int(major), int(minor))
return {
'mount_id': int(mnt_id),
'parent_id': int(parent_id),
'device_id': {
'major': int(major),
'minor': int(minor),
'dev_t': devid,
},
'root': root.replace('\\040', ' '),
'mountpoint': mp.replace('\\040', ' '),
'mount_opts': opts.upper().split(','),
'fs_type': fstype,
'mount_source': mnt_src.replace('\\040', ' '),
'super_opts': super_opts.upper().split(','),
}
def __parse_to_dev(line, out_dict):
entry = __mntent_dict(line)
out_dict.update({entry['device_id']['dev_t']: entry})
def __parse_to_mnt_id(line, out_dict):
entry = __mntent_dict(line)
out_dict.update({entry['mount_id']: entry})
def __create_tree(info, mount_id):
root_id = None
for entry in info.values():
if not entry.get('children'):
entry['children'] = []
if entry['parent_id'] == 1:
root_id = entry['mount_id']
continue
parent = info[entry['parent_id']]
if not parent.get('children'):
parent['children'] = [entry]
else:
parent['children'].append(entry)
return info[mount_id or root_id]
def __iter_mountinfo(dev_id=None, mnt_id=None, callback=None, private_data=None):
if dev_id:
maj_min = f'{os.major(dev_id)}:{os.minor(dev_id)}'
else:
maj_min = None
if mnt_id:
mount_id = f'{mnt_id} '
with open('/proc/self/mountinfo') as f:
for line in f:
if maj_min:
if line.find(maj_min) == -1:
continue
callback(line, private_data)
break
elif mnt_id is not None:
if not line.startswith(mount_id):
continue
callback(line, private_data)
break
callback(line, private_data)
def getmntinfo(dev_id=None, mnt_id=None):
"""
Get mount information. Takes the following arguments for faster lookup of
information for a mounted filesystem.
`dev_id` - the device ID of the mounted filesystem of interest. This will
uniquely identify the filesystem, but not uniquely identify the mount point.
If specified results are a dictionary indexed by dev_t.
`mnt_id` - specify the unique ID for the mount. This is unique only for the
lifetime of the mount. statx() may be used to retrieve the mnt_id for a given
path or open file. If specified results are a dictionary indexed by mnt_id.
Each result entry contains the following keys (from proc(5)):
`mount_id` - unique id for a mount (may be reused after umount(2))
`parent_id` - mount_id of the parent mount. A parent_id of `1` indicates the
root of the mount tree.
`device_id` - dictionary containing the value of `st_dev` for files in this
filesystem.
`root` - the pathname of the directory in the filesystem which forms the
root of this mount.
`mountpoint` - the pathname of the mountpoint relative to the root directory.
`mount_opts` - per-mount options (see mount(2)).
`fstype` - the filesystem type.
`mount_source` - filesystem-specific information or "none". In case of ZFS
this contains dataset name.
`super_opts` - per-superblock options (see mount(2)).
"""
info = {}
if mnt_id:
__iter_mountinfo(mnt_id=mnt_id, callback=__parse_to_mnt_id, private_data=info)
else:
__iter_mountinfo(dev_id=dev_id, callback=__parse_to_dev, private_data=info)
return info
def getmnttree(mount_id=None):
"""
Generate a mount info tree of either the root filesystem or a given
filesystem specified by mnt_id. cf. documentation for getmntinfo().
"""
info = {}
__iter_mountinfo(callback=__parse_to_mnt_id, private_data=info)
return __create_tree(info, mount_id)
| 4,254 | Python | .py | 103 | 33.660194 | 86 | 0.614692 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,248 | syslog.py | truenas_middleware/src/middlewared/middlewared/utils/syslog.py | import os
import syslog
import uuid
from middlewared.utils.time_utils import utc_now
def syslog_message(message):
data = f'<{syslog.LOG_USER | syslog.LOG_INFO}>'
data += f'{utc_now().strftime("%b %d %H:%M:%S")} '
data += 'TNAUDIT_MIDDLEWARE: '
data += message
data = data.encode('ascii', 'ignore')
return data
| 342 | Python | .py | 11 | 27.181818 | 54 | 0.668731 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,249 | mako.py | truenas_middleware/src/middlewared/middlewared/utils/mako.py | # -*- coding=utf-8 -*-
import logging
import os
from mako.lookup import TemplateLookup
logger = logging.getLogger(__name__)
__all__ = ["get_template"]
lookup = TemplateLookup(
directories=[os.path.dirname(os.path.dirname(__file__))],
module_directory="/run/mako",
imports=["from middlewared.utils.mako_filters import indent, json, markdown"]
)
def get_template(name):
return lookup.get_template(name)
| 423 | Python | .py | 13 | 29.846154 | 81 | 0.730198 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,250 | audit.py | truenas_middleware/src/middlewared/middlewared/utils/audit.py | from middlewared.auth import (
TokenSessionManagerCredentials,
TrueNasNodeSessionManagerCredentials
)
# Special values start with dot to ensure they cannot collide with local usernames
# created via APIs
API_KEY_PREFIX = '.API_KEY:'
NODE_SESSION = '.TRUENAS_NODE'
UNAUTHENTICATED = '.UNAUTHENTICATED'
UNKNOWN_SESSION = '.UNKNOWN'
def audit_username_from_session(cred) -> str:
if cred is None:
return UNAUTHENTICATED
# This works for regular user session and tokens formed on them
if cred.is_user_session:
return cred.user['username']
# Track back to root credential if necessary (token session)
if isinstance(cred, TokenSessionManagerCredentials):
cred = cred.root_credentials
elif isinstance(cred, TrueNasNodeSessionManagerCredentials):
return NODE_SESSION
return UNKNOWN_SESSION
| 856 | Python | .py | 22 | 34.5 | 82 | 0.762999 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,251 | string.py | truenas_middleware/src/middlewared/middlewared/utils/string.py | def make_sentence(s):
if not s:
return s
if any(s.endswith(c) for c in (".", "!", "?")):
return s
return f"{s}."
| 143 | Python | .py | 6 | 17.833333 | 51 | 0.481481 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,252 | crypto.py | truenas_middleware/src/middlewared/middlewared/utils/crypto.py | from base64 import b64encode
from hashlib import pbkdf2_hmac
from secrets import choice, compare_digest, token_urlsafe, token_hex
from string import ascii_letters, digits, punctuation
from cryptit import cryptit
from samba.crypto import md4_hash_blob
def generate_string(string_size=8, punctuation_chars=False, extra_chars=None):
"""
Generate a cryptographically secure random string of size `string_size`.
If `punctuation_chars` is True, then punctuation characters will be added to the string.
Otherwise, only ASCII (upper and lower) and digits (0-9) are used to generate the string.
"""
initial_string = ascii_letters + digits
if punctuation_chars:
initial_string += punctuation
if extra_chars is not None and isinstance(extra_chars, str):
initial_string += extra_chars
# remove any duplicates since extra_chars is user-provided
initial_string = ''.join(set(initial_string))
return ''.join(choice(initial_string) for i in range(string_size))
def generate_token(size, url_safe=False):
"""
Generate a cryptographically secure token of `size` in bytes returned in hex format.
`url_safe` when True, returns the token using url safe characters only.
"""
if url_safe:
return token_urlsafe(size)
else:
return token_hex(size)
def sha512_crypt(word):
"""Generate a hash using the modular crypt format of `word`
using SHA512 algorithm with rounds set to 656,000 with a
16-char pseudo-random cryptographically secure salt.
"""
sha512_prefix = '$6'
rounds = 656_000
salt_length = 16
salt = generate_string(string_size=salt_length, extra_chars='./')
settings = f'{sha512_prefix}$rounds={rounds}${salt}'
# note this is thread-safe and releases GIL
return cryptit(word, settings)
def check_unixhash(passwd, unixhash):
"""Verify that the hash produced by `passwd` matches the
given `unixhash`.
"""
return compare_digest(cryptit(passwd, unixhash), unixhash)
def generate_nt_hash(passwd):
"""
Generate an NT hash for SMB user password. This is required for
NTLM authentication for local users.
NOTE: the library generating the NT hash ignores the system
FIPS mode.
WARNING: This is a weak algorithm and must be treated as
plain-text equivalent.
"""
md4_hash_bytes = md4_hash_blob(passwd.encode('utf-16le'))
return md4_hash_bytes.hex().upper()
def generate_pbkdf2_512(passwd):
"""
Generate a pbkdf2_sha512 hash for password. This is used for
verification of API keys.
"""
prefix = 'pbkdf2-sha512'
rounds = 500000
salt_length = 16
salt = generate_string(string_size=salt_length, extra_chars='./').encode()
hash = pbkdf2_hmac('sha512', passwd.encode(), salt, rounds)
return f'${prefix}${rounds}${b64encode(salt).decode()}${b64encode(hash).decode()}'
| 2,890 | Python | .py | 68 | 37.705882 | 93 | 0.716476 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,253 | mako_filters.py | truenas_middleware/src/middlewared/middlewared/utils/mako_filters.py | import json as _json
import textwrap
from markdown import markdown as _markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.tables import TableExtension
def indent(value):
return textwrap.indent(value, " " * 8)
def json(value):
return _json.dumps(value, indent=True)
def markdown(value):
if not value:
return value
return _markdown(value, extensions=[CodeHiliteExtension(noclasses=True),
TableExtension()])
| 522 | Python | .py | 14 | 31.214286 | 76 | 0.728543 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,254 | cpu.py | truenas_middleware/src/middlewared/middlewared/utils/cpu.py | import functools
import psutil
import re
from collections import defaultdict
AMD_PREFER_TDIE = (
# https://github.com/torvalds/linux/blob/master/drivers/hwmon/k10temp.c#L121
# static const struct tctl_offset tctl_offset_table[] = {
'AMD Ryzen 5 1600X',
'AMD Ryzen 7 1700X',
'AMD Ryzen 7 1800X',
'AMD Ryzen 7 2700X',
'AMD Ryzen Threadripper 19',
'AMD Ryzen Threadripper 29',
)
RE_CORE = re.compile(r'^Core ([0-9]+)$')
RE_CPU_MODEL = re.compile(r'^model name\s*:\s*(.*)', flags=re.M)
@functools.cache
def cpu_info() -> dict:
return {
'cpu_model': get_cpu_model(),
'core_count': psutil.cpu_count(logical=True),
'physical_core_count': psutil.cpu_count(logical=False),
}
def get_cpu_model():
with open('/proc/cpuinfo', 'r') as f:
model = RE_CPU_MODEL.search(f.read())
return model.group(1) if model else None
def generic_cpu_temperatures(cpu_metrics: dict) -> dict:
temperatures = defaultdict(dict)
for chip_name in filter(lambda sen: sen.startswith('coretemp-isa'), cpu_metrics):
for temp in cpu_metrics[chip_name].values():
if not (m := RE_CORE.match(temp['name'])):
continue
temperatures[chip_name][int(m.group(1))] = temp['value']
return dict(enumerate(sum(
[
[temperatures[chip][core] for core in sorted(temperatures[chip].keys())]
for chip in sorted(temperatures.keys())
],
[],
)))
def amd_cpu_temperatures(amd_metrics: dict) -> dict:
cpu_model = cpu_info()['cpu_model']
core_count = cpu_info()['physical_core_count']
amd_sensors = {}
for amd_sensor in amd_metrics.values():
amd_sensors[amd_sensor['name']] = amd_sensor['value']
ccds = []
for k, v in amd_sensors.items():
if k.startswith('Tccd') and v:
if isinstance(v, (int, float)):
ccds.append(v)
has_tdie = (
'Tdie' in amd_sensors and amd_sensors['Tdie'] and isinstance(amd_sensors['Tdie'], (int, float))
)
if cpu_model.startswith(AMD_PREFER_TDIE) and has_tdie:
return dict(enumerate([amd_sensors['Tdie']] * core_count))
elif ccds and core_count % len(ccds) == 0:
return dict(enumerate(sum([[t] * (core_count // len(ccds)) for t in ccds], [])))
elif has_tdie:
return dict(enumerate([amd_sensors['Tdie']] * core_count))
elif (
'Tctl' in amd_sensors and amd_sensors['Tctl'] and isinstance(amd_sensors['Tctl'], (int, float))
):
return dict(enumerate([amd_sensors['Tctl']] * core_count))
elif 'temp1' in amd_sensors:
if isinstance(amd_sensors['temp1'], float):
return dict(enumerate([amd_sensors['temp1']] * core_count))
elif 'temp1_input' in amd_sensors['temp1']:
return dict(enumerate([amd_sensors['temp1']['temp1_input']] * core_count))
| 2,888 | Python | .py | 70 | 34.585714 | 103 | 0.623529 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,255 | disks.py | truenas_middleware/src/middlewared/middlewared/utils/disks.py | import os
import re
import pyudev
DISKS_TO_IGNORE = ('sr', 'md', 'dm-', 'loop', 'zd')
RE_IS_PART = re.compile(r'p\d{1,3}$')
# sda, vda, nvme0n1 but not sda1/vda1/nvme0n1p1
VALID_WHOLE_DISK = re.compile(r'^sd[a-z]+$|^vd[a-z]+$|^nvme\d+n\d+$')
def safe_retrieval(prop, key, default, as_int=False):
value = prop.get(key)
if value is not None:
if isinstance(value, bytes):
value = value.strip().decode()
else:
value = value.strip()
return value if not as_int else int(value)
return default
def get_disk_serial_from_block_device(block_device: pyudev.Device) -> str:
return (
safe_retrieval(block_device.properties, 'ID_SCSI_SERIAL', '') or
safe_retrieval(block_device.properties, 'ID_SERIAL_SHORT', '') or
safe_retrieval(block_device.properties, 'ID_SERIAL', '')
)
def valid_zfs_partition_uuids():
# https://salsa.debian.org/debian/gdisk/blob/master/parttypes.cc for valid zfs types
# 516e7cba was being used by freebsd and 6a898cc3 is being used by linux
return (
'6a898cc3-1dd2-11b2-99a6-080020736631',
'516e7cba-6ecf-11d6-8ff8-00022d09712b',
)
def dev_to_ident(name, sys_disks):
"""Map a disk device (i.e. sda5) to its respective "identifier"
(i.e. "{serial_lunid}AAAA_012345")"""
try:
dev = sys_disks[name]
except KeyError:
return ''
else:
if dev['serial_lunid']:
return f'{{serial_lunid}}{dev["serial_lunid"]}'
elif dev['serial']:
return f'{{serial}}{dev["serial"]}'
elif dev.get('parts'):
for part in filter(lambda x: x['partition_type'] in valid_zfs_partition_uuids(), dev['parts']):
return f'{{uuid}}{part["partition_uuid"]}'
return f'{{devicename}}{name}'
def get_disk_names() -> list[str]:
"""
NOTE: The return of this method should match the keys retrieve when running `self.get_disks`.
"""
disks = []
with os.scandir('/dev') as sdir:
for i in filter(lambda x: VALID_WHOLE_DISK.match(x.name), sdir):
disks.append(i.name)
return disks
def get_disks_with_identifiers(
disks_identifier_required: list[str] | None = None, block_devices_data: dict[str, dict] | None = None,
) -> dict[str, str]:
disks = {}
available_disks = get_disk_names()
disks_identifier_required = disks_identifier_required or available_disks
block_devices_data = block_devices_data or {}
context = pyudev.Context()
for disk_name in disks_identifier_required:
if disk_name not in available_disks:
continue
if block_device_data := block_devices_data.get(disk_name, {}):
identifier = dev_to_ident(disk_name, block_devices_data)
if not identifier.startswith('{devicename}'):
disks[disk_name] = identifier
continue
# If we had cached data but we still end up here, it means we still need to try the partitions check
# and see if we can use that as an identifier
try:
# Retrieve the device directly by name
block_device = pyudev.Devices.from_name(context, 'block', disk_name)
if block_device_data:
serial, lunid = block_device_data['serial'], block_device_data['lunid']
else:
serial = get_disk_serial_from_block_device(block_device)
lunid = safe_retrieval(block_device.properties, 'ID_WWN', '').removeprefix('0x').removeprefix('eui.')
parts = []
for partition in filter(
lambda p: all(p.get(k) for k in ('ID_PART_ENTRY_TYPE', 'ID_PART_ENTRY_UUID')), block_device.children
):
parts.append({
'partition_type': partition['ID_PART_ENTRY_TYPE'],
'partition_uuid': partition['ID_PART_ENTRY_UUID'],
})
except pyudev.DeviceNotFoundError:
block_device_data = {
'serial': '',
'lunid': '',
'serial_lunid': '',
'parts': [],
} | block_device_data
else:
block_device_data = {
'serial': serial,
'lunid': lunid or None,
'serial_lunid': f'{serial}_{lunid}' if serial and lunid else None,
'parts': parts,
}
disks[disk_name] = dev_to_ident(disk_name, {disk_name: block_device_data})
return disks
| 4,510 | Python | .py | 104 | 34.038462 | 117 | 0.594434 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,256 | asyncio_.py | truenas_middleware/src/middlewared/middlewared/utils/asyncio_.py | import asyncio
async def asyncio_map(func, arguments, limit=None, *, semaphore=None):
if limit is not None and semaphore is not None:
raise ValueError("`limit` and `semaphore` can not be specified simultaneously")
if limit is not None or semaphore is not None:
if semaphore is None:
semaphore = asyncio.BoundedSemaphore(limit)
real_func = func
async def func(arg):
async with semaphore:
return await real_func(arg)
futures = [func(arg) for arg in arguments]
return await asyncio.gather(*futures)
| 590 | Python | .py | 13 | 37.153846 | 87 | 0.674256 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,257 | procfs.py | truenas_middleware/src/middlewared/middlewared/utils/network_/procfs.py | from codecs import decode
from dataclasses import dataclass
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack, unpack
__all__ = ('read_proc_net',)
@dataclass(slots=True, frozen=True)
class InetInfoEntry:
local_ip: str
local_port: int
remote_ip: str
remote_port: int
protocol: str
def hex_to_ipv6(hex_addr):
""" hex address to standard IPv6 format
Process:
Convert hex to binary
Unpack into 4 32-bit integers in network byte order
Pack as 4 32-bit integers in native byte order
Use inet_ntop (standard network API) to format the address
"""
addr = decode(hex_addr, "hex")
addr = unpack('!LLLL', addr)
addr = pack('@IIII', *addr)
addr = inet_ntop(AF_INET6, addr)
return addr
def hex_to_ipv4(hex_addr):
""" hex address to standard IPv4 format
Process:
Convert hex to binary (decode and unpack)
Pack 32-bit integer in native byte order
Use inet_ntop (standard network API) to format address
"""
addr = int(hex_addr, 16)
addr = pack("=L", addr)
addr = inet_ntop(AF_INET, addr)
return addr
def parse_address(hex_address, ipversion):
ip_hex, port_hex = hex_address.split(':')
if ipversion == '4':
ip = hex_to_ipv4(ip_hex)
else:
ip = hex_to_ipv6(ip_hex)
port = int(port_hex, 16)
return ip, port
def read_proc_net(local_port=None, remote_port=None) -> InetInfoEntry | list[InetInfoEntry]:
"""Parse the /proc/net/{tcp/udp(6)} directories from
procfs and gather the local and remote ip/ports connected
to the system.
"""
info = list()
port_specified = any((local_port is not None, remote_port is not None))
for prot in ('tcp', 'tcp6', 'udp', 'udp6'):
with open(f'/proc/net/{prot}', 'r') as f:
ipversion = '6' if prot[-1] == '6' else '4'
for _, line in filter(lambda x: x[0] > 1, enumerate(f, start=1)):
columns = line.split()
lip, lp = parse_address(columns[1], ipversion)
rip, rp = parse_address(columns[2], ipversion)
if port_specified:
if any((
local_port is not None and local_port == lp,
remote_port is not None and remote_port == rp,
)):
return InetInfoEntry(lip, lp, rip, rp, prot)
else:
info.append(InetInfoEntry(lip, lp, rip, rp, prot))
return info
| 2,528 | Python | .py | 67 | 29.880597 | 92 | 0.602449 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,258 | attrs.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/attrs.py | # Get and set ZFS file attributes
#
# ZFS supports various file-level attributes that are not accessible
# through normal linux filesystem APIs (for example DOS-related attributes).
#
# These utility functions allow getting and setting them
#
# NOTE: tests for parsers are in src/middlewared/middlewared/pytest/unit/utils/test_filesystem_misc.py
# Additional testing for ZFS is covered in tests/api2
import enum
import fcntl
import os
import struct
ZFS_IOC_GETATTRS = 0x80088301
ZFS_IOC_SETATTRS = 0x40088302
class ZFSAttr(enum.IntFlag):
"""
Additional file-level attributes that are stored in upper-half of zfs
z_pflags. See include/sys/fs/zfs.h
NOTE: these are only available on ZFS filesystems
"""
READONLY = 0x0000000100000000
HIDDEN = 0x0000000200000000
SYSTEM = 0x0000000400000000
ARCHIVE = 0x0000000800000000
IMMUTABLE = 0x0000001000000000
NOUNLINK = 0x0000002000000000
APPENDONLY = 0x0000004000000000
NODUMP = 0x0000008000000000
OPAQUE = 0x0000010000000000
AV_QUARANTINED = 0x0000020000000000
AV_MODIFIED = 0x0000040000000000
REPARSE = 0x0000080000000000
OFFLINE = 0x0000100000000000
SPARSE = 0x0000200000000000
SUPPORTED_ATTRS = (
ZFSAttr.READONLY |
ZFSAttr.HIDDEN |
ZFSAttr.SYSTEM |
ZFSAttr.ARCHIVE |
ZFSAttr.IMMUTABLE |
ZFSAttr.NOUNLINK |
ZFSAttr.APPENDONLY |
ZFSAttr.OFFLINE |
ZFSAttr.SPARSE
)
def zfs_attributes_dump(attr_mask: int) -> list:
"""
Convert bitmask of supported ZFS attributes to list
"""
attr_mask = attr_mask & int(SUPPORTED_ATTRS)
out = []
for attr in ZFSAttr:
if attr_mask & int(attr):
out.append(attr.name)
return out
def zfs_attributes_to_dict(attr_mask: int) -> dict:
"""
Convert bitmask of supported ZFS attributes to dict.
"""
attr_mask = attr_mask & int(SUPPORTED_ATTRS)
out = {}
for attr in SUPPORTED_ATTRS:
out[attr.name.lower()] = bool(attr_mask & int(attr))
return out
def dict_to_zfs_attributes_mask(attr_dict: dict) -> int:
"""
Convert dictionary specification of ZFS attributes to bitmask
for setting on file.
"""
attr_mask = 0
for attr, value in attr_dict.items():
zfs_attr = ZFSAttr[attr.upper()]
if SUPPORTED_ATTRS & zfs_attr == 0:
raise ValueError(f'{attr}: invalid ZFS file attribute')
if not isinstance(value, bool):
raise TypeError(f'{attr}: value [{value}] must be boolean')
if value is not True:
continue
attr_mask |= zfs_attr
return int(attr_mask)
def zfs_attributes_to_mask(attr_list: list) -> int:
"""
Convert ZFS attribute list to bitmask for setting
"""
attr_mask = 0
for attr in attr_list:
zfs_attr = ZFSAttr[attr]
if SUPPORTED_ATTRS & zfs_attr == 0:
raise ValueError(f'{attr}: invalid ZFS file attribute')
attr_mask |= ZFSAttr[attr]
return int(attr_mask)
def fget_zfs_file_attributes(fd: int) -> int:
"""
Get bitmask of zfs atttributes on open file.
Note: `fd` may not be an O_PATH open (READ access will be checked).
"""
fl = struct.unpack('L', fcntl.ioctl(fd, ZFS_IOC_GETATTRS, struct.pack('L', 0)))
if not fl:
raise RuntimeError('Unable to retrieve zfs file attributes')
return fl[0]
def fset_zfs_file_attributes(fd: int, attr_mask: int) -> int:
"""
Set zfs attributes on open file using mask of ZFSAttrs above.
`fd` must writeable
NOTE: zfs attributes will be set _precisely_ as specified in the attr_mask
If desire is to simply toggle one attribute it is simpler to use
`set_zfs_file_attributes` below.
"""
fcntl.ioctl(fd, ZFS_IOC_SETATTRS, struct.pack('L', attr_mask))
return fget_zfs_file_attributes(fd)
def set_zfs_file_attributes_dict(path: str, attrs: dict) -> dict:
"""
Set zfs file attributes on a given `path` by using the dictionary `attrs`
Supported keys are lower-case names of SUPPORTED_ATTRS. If a supported
key is omitted from the `attrs` payload then its current value is preserved.
dictionary entries are of form "<attribute>" = <boolean value>
When operation succeeds a dictionary will be returned with current values
of attributes on the file.
NOTE: if caller is concerned about TOCTOU issues with path lookups, then a
procfd path ("/proc/self/fd/<fd>") with an already-open fd may be used in lieu
of a regular filesystem path.
"""
open_flags = os.O_DIRECTORY if os.path.isdir(path) else os.O_RDWR
fd = os.open(path, open_flags)
try:
current = zfs_attributes_to_dict(fget_zfs_file_attributes(fd))
to_set = current | attrs
# avoid issuing ioctl to set new attrs if we aren't changing anything
if to_set == current:
new_attrs = None
else:
new_attrs = fset_zfs_file_attributes(fd, dict_to_zfs_attributes_mask(to_set))
finally:
os.close(fd)
if new_attrs is None:
return current
return zfs_attributes_to_dict(new_attrs)
| 5,122 | Python | .py | 138 | 31.586957 | 102 | 0.690828 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,259 | constants.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/constants.py | import enum
AT_FDCWD = -100 # special fd value meaning current working directory
class FileType(enum.Enum):
DIRECTORY = enum.auto()
FILE = enum.auto()
SYMLINK = enum.auto()
OTHER = enum.auto()
class ZFSCTL(enum.IntEnum):
# from include/os/linux/zfs/sys/zfs_ctldir.h in ZFS repo
INO_ROOT = 0x0000FFFFFFFFFFFF
INO_SNAPDIR = 0x0000FFFFFFFFFFFD
| 375 | Python | .py | 11 | 30.090909 | 69 | 0.724234 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,260 | utils.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/utils.py | # This file provides various utilities that don't fit cleanly
# into specific categories of filesystem areas.
#
# timespec_convert_float() has test coverage via stat_x util tests
# timespec_convert_int() has test coverage via copytree util tests
# path_in_ctldir() has test coverage via api tests for filesystem.stat
# and filesystem.listdir methods since it requires access to zpool.
from .constants import ZFSCTL
from pathlib import Path
def path_in_ctldir(path_in):
"""
Determine whether the given path is located within the ZFS
ctldir. The intention for this is to determine whether a given
path is inside a ZFS snapshot so that we can raise meaningful
validation errors in situations like the user trying to set
permissions on a file in a snapshot directory.
"""
path = Path(path_in)
if not path.is_absolute():
raise ValueError(f'{path_in}: not an absolute path')
is_in_ctldir = False
while path.as_posix() != '/':
if not path.name == '.zfs':
path = path.parent
continue
if path.stat().st_ino == ZFSCTL.INO_ROOT:
is_in_ctldir = True
break
path = path.parent
return is_in_ctldir
def timespec_convert_float(timespec):
"""
Convert a timespec struct into float. This is for use where
ctype function returns timespec (for example statx())
"""
return timespec.tv_sec + timespec.tv_nsec / 1000000000
def timespec_convert_int(timespec):
"""
Convert a timespec struct into int. This is suitable for
when a timespec needs to be passed to os.utime()
"""
return timespec.tv_sec * 1000000000 + timespec.tv_nsec
| 1,682 | Python | .py | 42 | 34.785714 | 70 | 0.704113 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,261 | copy.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/copy.py | # Various utilities related to copying / cloning files and file tree
# test coverage provided by pytest/unit/utils/test_copytree.py
import enum
import os
from dataclasses import dataclass
from errno import EXDEV
from middlewared.job import Job
from os import open as posix_open
from os import (
close,
copy_file_range,
fchmod,
fchown,
fstat,
getxattr,
listxattr,
lseek,
makedev,
mkdir,
path,
readlink,
sendfile,
setxattr,
stat_result,
symlink,
utime,
O_CREAT,
O_DIRECTORY,
O_EXCL,
O_NOFOLLOW,
O_RDONLY,
O_RDWR,
O_TRUNC,
SEEK_CUR,
)
from shutil import copyfileobj
from stat import S_IMODE
from .acl import ACCESS_ACL_XATTRS, ACL_XATTRS
from .directory import (
dirent_struct,
DirectoryIterator,
DirectoryRequestMask,
)
from .stat_x import StatxEtype
from .utils import path_in_ctldir, timespec_convert_int
CLONETREE_ROOT_DEPTH = 0
MAX_RW_SZ = 2147483647 & ~4096 # maximum size of read/write in kernel
class CopyFlags(enum.IntFlag):
""" Flags specifying which metadata to copy from source to destination """
XATTRS = 0x0001 # copy user, trusted, security namespace xattrs
PERMISSIONS = 0x0002 # copy ACL xattrs
TIMESTAMPS = 0x0004 # copy ACL timestamps
OWNER = 0x0008
class CopyTreeOp(enum.Enum):
"""
Available options for customizing the method by which files are copied. DEFAULT
is generally the best option (prefer to do a block clone and use zero-copy method
otherwise).
USERSPACE should be used for certain types of special filesystems such as procfs
or sysfs that may not properly support copy_file_range or sendfile.
"""
DEFAULT = enum.auto() # try clone and fallthrough eventually to userspace
CLONE = enum.auto() # attempt to block clone and if that fails, fail operation
SENDFILE = enum.auto() # attempt sendfile (with fallthrough to copyfileobj)
USERSPACE = enum.auto() # same as shutil.copyfileobj
DEF_CP_FLAGS = CopyFlags.XATTRS | CopyFlags.PERMISSIONS | CopyFlags.OWNER | CopyFlags.TIMESTAMPS
@dataclass(frozen=True, slots=True)
class CopyTreeConfig:
"""
Configuration for copytree() operation.
job: middleware Job object. This is optional and may be passed if the API user
wants to report via job.set_progress
job_msg_prefix: prefix for progress messages
job_msg_inc: call set_progress every N files + dirs copied
raise_error: raise exceptions on metadata copy failures
exist_ok: do not raise an exception if a file or directory already exists
traverse: recurse into child datasets
op: copy tree operation that will be performed (see CopyTreeOp class)
flags: bitmask of metadata to preserve as part of copy
"""
job: Job | None = None
job_msg_prefix: str = ''
job_msg_inc: int = 1000
raise_error: bool = True
exist_ok: bool = True
traverse: bool = False
op: CopyTreeOp = CopyTreeOp.DEFAULT
flags: CopyFlags = DEF_CP_FLAGS # flags specifying which metadata to copy
@dataclass(slots=True)
class CopyTreeStats:
dirs: int = 0
files: int = 0
symlinks: int = 0
bytes: int = 0
def _copytree_conf_to_dir_request_mask(config: CopyTreeConfig) -> DirectoryRequestMask:
""" internal method to convert CopyTreeConfig to a DirectoryRequestMask """
mask_out = 0
if config.flags.value & CopyFlags.XATTRS.value:
mask_out |= DirectoryRequestMask.XATTRS
if config.flags.value & CopyFlags.PERMISSIONS.value:
# XATTR list is required for getting preserving ACLs
mask_out |= DirectoryRequestMask.ACL | DirectoryRequestMask.XATTRS
return mask_out
def copy_permissions(src_fd: int, dst_fd: int, xattr_list: list[str], mode: int) -> None:
""" Copy permissions from one file to another.
Params:
src_fd: source file
dst_fd: destination file
xattr_list: list of all xattrs on src_fd
mode: POSIX mode of src_fd
Returns:
None
Raises:
PermissionError: was forced to try to fchmod to set permissions, but destination already
inherited an ACL and has a RESTRICTED ZFS aclmode.
OSError - EOPNOTSUPP: ACL type mismatch between src_fd and dst_fd
OSError: various errnos for reasons specified in syscall manpages for fgetxattr,
fsetxattr, and fchmod
NOTE: If source file has an ACL containing permissions then fchmod will not be attempted.
"""
if not (access_xattrs := set(xattr_list) & ACCESS_ACL_XATTRS):
# There are no ACLs that encode permissions for _this_ file and so we must use mode
# NOTE: fchmod will raise PermissionError if ZFS dataset aclmode is RESTRICTED
# and if the dst_fd inherited an ACL from parent.
fchmod(dst_fd, S_IMODE(mode))
return
for xat_name in access_xattrs:
xat_buf = getxattr(src_fd, xat_name)
setxattr(dst_fd, xat_name, xat_buf)
def copy_xattrs(src_fd: int, dst_fd: int, xattr_list: list[str]) -> None:
""" copy xattrs that aren't for ACLs
Params:
src_fd: source file
dst_fd: destination file
xattr_list: list of all xattrs on src_fd
Returns:
None
Raises:
OSError - EOPNOTSUPP: xattr support disabled on the destination filesystem.
OSError: various errnos for reasons specified in xattr syscall manpages
"""
for xat_name in set(xattr_list) - ACL_XATTRS:
if xat_name.startswith('system'):
# system xattrs typically denote filesystem-specific xattr handlers that
# may not be applicable to file copies. For now we will skip them silently.
continue
xat_buf = getxattr(src_fd, xat_name)
setxattr(dst_fd, xat_name, xat_buf)
def copy_file_userspace(src_fd: int, dst_fd: int) -> None:
""" wrapper around copyfilobj that uses file descriptors
params:
src_fd: source file
dst_fd: destination file
Returns:
int: bytes written
Raises:
Same exceptions as shutil.copyfileobj
OSError: errno will be set to one of the values specified in
the manpage for ile_range()
"""
src = open(src_fd, 'rb', closefd=False)
dst = open(dst_fd, 'wb', closefd=False)
copyfileobj(src, dst)
# TODO: have better method of getting bytes written than fstat on destination.
return fstat(dst_fd).st_size
def copy_sendfile(src_fd: int, dst_fd: int) -> None:
""" Optimized copy of file. First try sendfile and if that fails
perform userspace copy of file.
params:
src_fd: source file
dst_fd: destination file
Returns:
int: bytes written
Raises:
OSError: errno will be set to one of the values specified in
the manpage for sendfile()
"""
offset = 0
while (sent := sendfile(dst_fd, src_fd, offset, MAX_RW_SZ)) > 0:
offset += sent
if offset == 0 and lseek(dst_fd, 0, SEEK_CUR) == 0:
# maintain fallback code from _fastcopy_sendfile
return copy_file_userspace(src_fd, dst_fd)
return offset
def clone_file(src_fd: int, dst_fd: int) -> None:
""" block cloning is implemented via copy_file_range
params:
src_fd: source file
dst_fd: destination file
Returns:
int: bytes written
Raises:
OSError: EXDEV (zfs) source and destination are on different pools.
OSError: EXDEV (non-zfs) source and destination are on filesystems.
OSError: errno will be set to one of the values specified in
the manpage for copy_file_range()
"""
offset = 0
# loop until copy_file_range returns 0 catch any possible TOCTOU issues
# that may arrive if data added after initial statx call.
while (copied := copy_file_range(
src_fd, dst_fd,
MAX_RW_SZ,
offset_src=offset,
offset_dst=offset
)) > 0:
offset += copied
return offset
def clone_or_copy_file(src_fd: int, dst_fd: int) -> None:
""" try to clone file via copy_file_range and if fails fall back to
shutil.copyfileobj
params:
src_fd: source file
dst_fd: destination file
Returns:
int: bytes written
Raises:
OSError
"""
try:
return clone_file(src_fd, dst_fd)
except OSError as err:
if err.errno == EXDEV:
# different pool / non-zfs
return copy_sendfile(src_fd, dst_fd)
# Other error
raise
def _do_mkfile(
src: dirent_struct,
src_fd: int,
dst_fd: int,
config: CopyTreeConfig,
stats: CopyTreeStats,
c_fn: callable
) -> None:
""" Perform copy / clone of file, possibly preserving metadata.
Params:
src: direct_struct of parent directory of the src_fd
src_fd: handle of file being copied
dst_fd: handle of target file
config: configuration of the copy operation
stats: counters to be update with bytes written
c_fn: the copy / clone function to use for writing data to the destination
Returns:
None
Raises:
OSError
PermissionError
NOTE: this is an internal method that should only be called from within copytree.
"""
if config.flags.value & CopyFlags.PERMISSIONS.value:
try:
copy_permissions(src_fd, dst_fd, src.xattrs, src.stat.stx_mode)
except Exception:
if config.raise_error:
raise
if config.flags.value & CopyFlags.XATTRS.value:
try:
copy_xattrs(src_fd, dst_fd, src.xattrs)
except Exception:
if config.raise_error:
raise
if config.flags.value & CopyFlags.OWNER.value:
fchown(dst_fd, src.stat.stx_uid, src.stat.stx_gid)
stats.bytes += c_fn(src_fd, dst_fd)
# We need to write timestamps after file data to ensure reset atime / mtime
if config.flags.value & CopyFlags.TIMESTAMPS.value:
ns_ts = (
timespec_convert_int(src.stat.stx_atime),
timespec_convert_int(src.stat.stx_mtime)
)
try:
utime(dst_fd, ns=ns_ts)
except Exception:
if config.raise_error:
raise
def _do_mkdir(
src: dirent_struct,
src_fd: int,
dst_dir_fd: int,
config: CopyTreeConfig
) -> int:
""" Internal method to mkdir and set its permissions and xattrs
Params:
src: direct_struct of parent directory of the src_fd
src_fd: handle of file being copied
dst_fd: handle of target file
config: configuration of the copy operation
c_fn: the copy / clone function to use for writing data to the destination
Returns:
file descriptor
Raises:
OSError
NOTE: this is an internal method that should only be called from within copytree.
"""
try:
mkdir(src.name, dir_fd=dst_dir_fd)
except FileExistsError:
if not config.exist_ok:
raise
new_dir_hdl = posix_open(src.name, O_DIRECTORY, dir_fd=dst_dir_fd)
try:
if config.flags.value & CopyFlags.PERMISSIONS.value:
copy_permissions(src_fd, new_dir_hdl, src.xattrs, src.stat.stx_mode)
if config.flags.value & CopyFlags.XATTRS.value:
copy_xattrs(src_fd, new_dir_hdl, src.xattrs)
if config.flags.value & CopyFlags.OWNER.value:
fchown(new_dir_hdl, src.stat.stx_uid, src.stat.stx_gid)
except Exception:
if config.raise_error:
close(new_dir_hdl)
raise
return new_dir_hdl
def _copytree_impl(
d_iter: DirectoryIterator,
dst_str: str,
dst_fd: int,
depth: int,
config: CopyTreeConfig,
target_st: stat_result,
stats: CopyTreeStats
):
""" internal implementation of our copytree method
NOTE: this method is called recursively for each directory to walk down tree.
This means additional O_DIRECTORY open for duration of life of each DirectoryIterator
object (closed when DirectoryIterator context manager exits).
Params:
d_iter: directory iterator for current directory
dst_str: target directory of copy
dst_fd: open file handle for target directory
depth: current depth in src directory tree
config: CopyTreeConfig - used to determine what to copy
target_st: stat_result of target directory for initial copy. This is used
to provide device + inode number so that we can avoid copying destination into
itself.
Returns:
None
Raises:
OSError
PermissionError
"""
match config.op:
case CopyTreeOp.DEFAULT:
c_fn = clone_or_copy_file
case CopyTreeOp.CLONE:
c_fn = clone_file
case CopyTreeOp.SENDFILE:
c_fn = copy_sendfile
case CopyTreeOp.USERSPACE:
c_fn = copy_file_userspace
case _:
raise ValueError(f'{config.op}: unexpected copy operation')
for entry in d_iter:
# We match on `etype` key because our statx wrapper will initially lstat a file
# and if it's a symlink, perform a stat call to get information from symlink target
# This means that S_ISLNK on mode will fail to detect whether it's a symlink.
match entry.etype:
case StatxEtype.DIRECTORY.name:
if not config.traverse:
if entry.stat.stx_mnt_id != d_iter.stat.stx_mnt_id:
# traversal is disabled and entry is in different filesystem
# continue here prevents entering the directory / filesystem
continue
if entry.name == '.zfs':
# User may have visible snapdir. We definitely don't want to try to copy this
# path_in_ctldir checks inode number to verify it's not reserved number for
# these special paths (definitive indication it's ctldir as opposed to random
# dir user named '.zfs')
if path_in_ctldir(entry.path):
continue
if entry.stat.stx_ino == target_st.st_ino:
# We use makedev / dev_t in this case to catch potential edge cases where bind mount
# in path (since bind mounts of same filesystem will have same st_dev, but different
# stx_mnt_id.
if makedev(entry.stat.stx_dev_major, entry.stat.stx_dev_minor) == target_st.st_dev:
continue
# This can fail with OSError and errno set to ELOOP if target was maliciously
# replaced with symlink between our first stat and the open call
entry_fd = posix_open(entry.name, O_DIRECTORY | O_NOFOLLOW, dir_fd=d_iter.dir_fd)
try:
new_dst_fd = _do_mkdir(entry, entry_fd, dst_fd, config)
except Exception:
close(entry_fd)
raise
# We made directory on destination and copied metadata for it, and so we're safe
# to recurse into it in source and continue our operation.
try:
with DirectoryIterator(
entry.name,
request_mask=d_iter.request_mask,
dir_fd=d_iter.dir_fd,
as_dict=False
) as c_iter:
_copytree_impl(
c_iter,
path.join(dst_str, entry.name),
new_dst_fd,
depth + 1,
config,
target_st,
stats
)
if config.flags.value & CopyFlags.TIMESTAMPS.value:
ns_ts = (
timespec_convert_int(entry.stat.stx_atime),
timespec_convert_int(entry.stat.stx_mtime)
)
try:
utime(new_dst_fd, ns=ns_ts)
except Exception:
if config.raise_error:
raise
finally:
close(new_dst_fd)
close(entry_fd)
stats.dirs += 1
case StatxEtype.FILE.name:
entry_fd = posix_open(entry.name, O_RDONLY | O_NOFOLLOW, dir_fd=d_iter.dir_fd)
try:
flags = O_RDWR | O_NOFOLLOW | O_CREAT | O_TRUNC
if not config.exist_ok:
flags |= O_EXCL
dst = posix_open(entry.name, flags, dir_fd=dst_fd)
try:
_do_mkfile(entry, entry_fd, dst, config, stats, c_fn)
finally:
close(dst)
finally:
close(entry_fd)
stats.files += 1
case StatxEtype.SYMLINK.name:
stats.symlinks += 1
dst = readlink(entry.name, dir_fd=d_iter.dir_fd)
try:
symlink(dst, entry.name, dir_fd=dst_fd)
except FileExistsError:
if not config.exist_ok:
raise
continue
case _:
continue
if config.job and ((stats.dirs + stats.files) % config.job_msg_inc) == 0:
config.job.set_progress(100, (
f'{config.job_msg_prefix}'
f'Copied {entry.path} -> {os.path.join(dst_str, entry.name)}.'
))
def copytree(
src: str,
dst: str,
config: CopyTreeConfig
) -> CopyTreeStats:
"""
Copy all files, directories, and symlinks from src to dst. CopyTreeConfig allows
controlling whether we recurse into child datasets on src side as well as specific
metadata to preserve in the copy. This method also has protection against copying
the zfs snapshot directory if for some reason the user has set it to visible.
Params:
src: the source directory
dst: the destination directory
config: configuration parameters for the copy
Returns:
CopyStats
Raises:
OSError: ELOOP: path was replaced with symbolic link while recursing
this should never happen during normal operations and may indicate
an attempted symlink attack
OSError: EOPNOTSUPP: ACL type mismatch between src and dst
OSError: EOPNOTSUPP: xattrs are disabled on dst
OSError: <generic>: various reasons listed in syscall manpages
PermissionError:
Attempt to chmod on destination failed due to RESTRICTED aclmode on dataset.
"""
for p in (src, dst):
if not path.isabs(p):
raise ValueError(f'{p}: absolute path is required')
dir_request_mask = _copytree_conf_to_dir_request_mask(config)
try:
os.mkdir(dst)
except FileExistsError:
if not config.exist_ok:
raise
dst_fd = posix_open(dst, O_DIRECTORY)
stats = CopyTreeStats()
try:
with DirectoryIterator(src, request_mask=int(dir_request_mask), as_dict=False) as d_iter:
_copytree_impl(d_iter, dst, dst_fd, CLONETREE_ROOT_DEPTH, config, fstat(dst_fd), stats)
# Ensure that root level directory also gets metadata copied
try:
xattrs = listxattr(d_iter.dir_fd)
if config.flags.value & CopyFlags.PERMISSIONS.value:
copy_permissions(d_iter.dir_fd, dst_fd, xattrs, d_iter.stat.stx_mode)
if config.flags.value & CopyFlags.XATTRS.value:
copy_xattrs(d_iter.dir_fd, dst_fd, xattrs)
if config.flags.value & CopyFlags.OWNER.value:
fchown(dst_fd, d_iter.stat.stx_uid, d_iter.stat.stx_gid)
if config.flags.value & CopyFlags.TIMESTAMPS.value:
ns_ts = (
timespec_convert_int(d_iter.stat.stx_atime),
timespec_convert_int(d_iter.stat.stx_mtime)
)
utime(dst_fd, ns=ns_ts)
except Exception:
if config.raise_error:
raise
finally:
close(dst_fd)
if config.job:
config.job.set_progress(100, (
f'{config.job_msg_prefix}'
f'Successfully copied {stats.dirs} directories, {stats.files} files, '
f'{stats.symlinks} symlinks for a total of {stats.bytes} bytes of data.'
))
return stats
| 20,873 | Python | .py | 513 | 30.775828 | 104 | 0.616153 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,262 | stat_x.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/stat_x.py | # This utility provides a basic wrapper for statx(2).
#
# We need statx(2) for gathering birth time, mount id, and
# file attributes for the middleware filesystem plugin
#
# NOTE: tests for these utils are in src/middlewared/middlewared/pytest/unit/utils/test_statx.py
import os
import ctypes
import stat as statlib
from enum import auto, Enum, IntFlag
from .constants import AT_FDCWD
from .utils import path_in_ctldir
class StatxEtype(Enum):
DIRECTORY = auto()
FILE = auto()
SYMLINK = auto()
OTHER = auto()
class ATFlags(IntFlag):
# fcntl.h
STATX_SYNC_AS_STAT = 0x0000
SYMLINK_NOFOLLOW = 0x0100
EMPTY_PATH = 0x1000
VALID_FLAGS = 0x1100
class StatxAttr(IntFlag):
# uapi/linux/stat.h
COMPRESSED = 0x00000004
IMMUTABLE = 0x00000010
APPEND = 0x00000020
NODUMP = 0x00000040
ENCRYPTED = 0x00000800
AUTOMOUNT = 0x00001000
MOUNT_ROOT = 0x00002000
VERIFY = 0x00100000
DAX = 0x00200000
class Mask(ctypes.c_uint):
TYPE = 0x00000001 # stx_mode & S_IFMT
MODE = 0x00000002 # stx_mode & ~S_IFMT
NLINK = 0x00000004 # stx_nlink
UID = 0x00000008 # stx_uid
GID = 0x00000010 # stx_gid
ATIME = 0x00000020 # stx_atime
MTIME = 0x00000040 # stx_mtime
CTIME = 0x00000080 # stx_ctime
INO = 0x00000100 # stx_ino
SIZE = 0x00000200 # stx_size
BLOCKS = 0x00000400 # stx_blocks
BASIC_STATS = 0x000007FF # info in normal stat struct
# Extensions
BTIME = 0x00000800 # stx_btime
MNT_ID = 0x00001000 # stx_mnt_id
ALL = 0x00000FFF # All supported flags
_RESERVED = 0x80000000 # Reserved for future struct statx expansion
class StructStatxTimestamp(ctypes.Structure):
_fields_ = [
("tv_sec", ctypes.c_uint64),
("tv_nsec", ctypes.c_uint32),
("__reserved", ctypes.c_uint32),
]
class StructStatx(ctypes.Structure):
_fields_ = [
# 0x00
("stx_mask", Mask),
("stx_blksize", ctypes.c_uint32),
("stx_attributes", ctypes.c_uint64),
# 0x10
("stx_nlink", ctypes.c_uint32),
("stx_uid", ctypes.c_uint32),
("stx_gid", ctypes.c_uint32),
("stx_mode", ctypes.c_uint16),
("__spare0", ctypes.c_uint16 * 1),
# 0x20
("stx_ino", ctypes.c_uint64),
("stx_size", ctypes.c_uint64),
("stx_blocks", ctypes.c_uint64),
("stx_attributes_mask", ctypes.c_uint64),
# 0x40
("stx_atime", StructStatxTimestamp),
("stx_btime", StructStatxTimestamp),
("stx_ctime", StructStatxTimestamp),
("stx_mtime", StructStatxTimestamp),
# 0x80
("stx_rdev_major", ctypes.c_uint32),
("stx_rdev_minor", ctypes.c_uint32),
("stx_dev_major", ctypes.c_uint32),
("stx_dev_minor", ctypes.c_uint32),
# 0x90
("stx_mnt_id", ctypes.c_uint64),
("__spare2", ctypes.c_uint64),
# 0xa0 (Spare space)
("__spare3", ctypes.c_uint64 * 12),
]
def __get_statx_fn():
libc = ctypes.CDLL('libc.so.6', use_errno=True)
func = libc.statx
func.argtypes = (
ctypes.c_int,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_uint,
ctypes.POINTER(StructStatx)
)
return func
__statx_fn = __get_statx_fn()
__statx_default_mask = int(Mask.BASIC_STATS | Mask.BTIME)
__statx_lstat_flags = int(ATFlags.STATX_SYNC_AS_STAT | ATFlags.SYMLINK_NOFOLLOW)
def statx(path, dir_fd=None, flags=ATFlags.STATX_SYNC_AS_STAT.value):
path = path.encode() if isinstance(path, str) else path
dir_fd = dir_fd or AT_FDCWD
if dir_fd == AT_FDCWD and flags & ATFlags.EMPTY_PATH.value:
raise ValueError('dir_fd is required when using AT_EMPTY_PATH')
invalid_flags = flags & ~ATFlags.VALID_FLAGS.value
if invalid_flags:
raise ValueError(f'{hex(invalid_flags)}: unsupported statx flags')
data = StructStatx()
result = __statx_fn(dir_fd, path, flags, __statx_default_mask, ctypes.byref(data))
if result < 0:
err = ctypes.get_errno()
raise OSError(err, os.strerror(err))
else:
return data
def statx_entry_impl(entry, dir_fd=None, get_ctldir=True):
"""
This is a convenience wrapper around stat_x that was originally
located within the filesystem plugin
`entry` - pathlib.Path for target of statx
returns a dictionary with the following keys:
`st` - StructStatx object for entry
`attributes` - statx attributes
`etype` - file type (matches names in FileType enum)
`is_ctldir` - boolean value indicating whether path is in the
ZFS ctldir. NOTE: is_ctldir is omitted when using a relative path
Warning: this method is blocking and includes data that is not JSON
serializable
"""
out = {'st': None, 'etype': None, 'attributes': []}
path = entry.as_posix()
try:
# This is equivalent to lstat() call
out['st'] = statx(
path,
dir_fd = dir_fd,
flags = __statx_lstat_flags
)
except FileNotFoundError:
return None
for attr in StatxAttr:
if out['st'].stx_attributes & attr.value:
out['attributes'].append(attr.name)
if statlib.S_ISDIR(out['st'].stx_mode):
out['etype'] = StatxEtype.DIRECTORY.name
elif statlib.S_ISLNK(out['st'].stx_mode):
out['etype'] = StatxEtype.SYMLINK.name
try:
out['st'] = statx(path, dir_fd=dir_fd)
except FileNotFoundError:
return None
elif statlib.S_ISREG(out['st'].stx_mode):
out['etype'] = StatxEtype.FILE.name
else:
out['etype'] = StatxEtype.OTHER.name
if entry.is_absolute():
out['is_ctldir'] = path_in_ctldir(entry)
return out
| 5,766 | Python | .py | 163 | 29.03681 | 96 | 0.638474 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,263 | directory.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/directory.py | # This provides a middleware backend oriented directory generator that
# is primarily consumed by filesystem.listdir, but may be used in other
# places. It is primarily a thin wrapper around os.scandir, but also
# provides statx output and other optional file information in the
# returned dictionaries.
#
# NOTE: tests for these utils are in src/middlewared/middlewared/pytest/unit/utils/test_directory.py
import enum
import errno
import os
import pathlib
from collections import namedtuple
from .acl import acl_is_present
from .attrs import fget_zfs_file_attributes, zfs_attributes_dump
from .constants import FileType
from .stat_x import statx, statx_entry_impl, ATFlags, StructStatx
from .utils import path_in_ctldir
class DirectoryRequestMask(enum.IntFlag):
"""
Allow users to specify what information they want with the returned
directory object. Removing unnecessary information may be useful to
improve performance of the DirectoryIterator.
ACL - include boolean whether ACL is present (requires listxattr call)
CTLDIR - include boolean whether path is in ZFS ctldir (requires multiple
stat() calls per file
REALPATH - include output of `realpath` call
XATTR - list of extended attributes (requires listxattr call)
ZFS_ATTRS - include ZFS attributes (requires fcntl call per file)
NOTE: this changes to this should also be reflected in API test
`test_listdir_request_mask.py`
"""
ACL = enum.auto()
CTLDIR = enum.auto()
REALPATH = enum.auto()
XATTRS = enum.auto()
ZFS_ATTRS = enum.auto()
ALL_ATTRS = (
DirectoryRequestMask.ACL |
DirectoryRequestMask.CTLDIR |
DirectoryRequestMask.REALPATH |
DirectoryRequestMask.XATTRS |
DirectoryRequestMask.ZFS_ATTRS
)
dirent_struct = namedtuple('struct_dirent', [
'name', 'path', 'realpath', 'stat', 'etype', 'acl', 'xattrs', 'zfs_attrs', 'is_in_ctldir'
])
class DirectoryFd():
"""
Wrapper for O_DIRECTORY open of a file that allows for automatic closing
when object is garbage collected.
"""
def __init__(self, path, dir_fd=None):
self.__path = path
self.__dir_fd = None
self.__dir_fd = os.open(path, os.O_DIRECTORY, dir_fd=dir_fd)
def __del__(self):
self.close()
def __repr__(self):
return f"<DirectoryFd path='{self.__path}' fileno={self.fileno}>"
def close(self):
if self.__dir_fd is None:
return
os.close(self.__dir_fd)
self.__dir_fd = None
@property
def fileno(self) -> int:
return self.__dir_fd
class DirectoryIterator():
"""
A simple wrapper around os.scandir that provides additional features
such as statx output, xattr, and acl presence.
`path` - directory to iterate. `dir_fd` must be specified if relative
path is used.
`file_type` - optimization to only yield results of the specified file
type. Defaults to all file types.
`request_mask` - bitmask of additional data to include with yielded
entries. See DirectoryRequestMask. Defaults to _all_ possible attributes.
`dir_fd` - optional argument to specify an open file descriptor for case
where we are opening a relative path.
`as_dict` - yield entries in dictionary expected by `filesystem.listdir`.
When set to False, then struct_direct (see above) is returned. Default is True
Context manager protocol is supported and preferred for most cases as it
will more aggressively free resources.
```
with DirectoryIterator('/mnt') as d_iter:
for entry in d_iter:
print(entry)
```
NOTE: this iterator maintains two open files:
1. the file underlying os.scandir object.
2. the O_DIRECTORY open of the `path` that was used to create os.scandir
object. This is required to allow peforming *_at syscalls on directory
entries.
"""
def __init__(self, path, file_type=None, request_mask=None, dir_fd=None, as_dict=True):
self.__dir_fd = None
self.__path_iter = None
self.__path = path
self.__dir_fd = DirectoryFd(path, dir_fd)
self.__file_type = FileType(file_type).name if file_type else None
self.__path_iter = os.scandir(self.__dir_fd.fileno)
self.__stat = statx('', dir_fd=self.__dir_fd.fileno, flags=ATFlags.EMPTY_PATH.value)
# Explicitly allow zero for request_mask
self.__request_mask = request_mask if request_mask is not None else ALL_ATTRS
self.__return_fn = self.__return_dict if as_dict else self.__return_dirent
def __repr__(self):
return (
f"<DirectoryIterator path='{self.__path}' "
f"file_type='{'ALL' if self.__file_type is None else self.__file_type}' "
f"request_mask={self.__request_mask}>"
)
def __iter__(self):
return self
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, tp, value, traceback):
# Since we know we're leaving scope of context manager
# we can more aggressively close resources
self.close(force=True)
def __check_dir_entry(self, dirent):
stat_info = statx_entry_impl(pathlib.Path(dirent.name), dir_fd=self.dir_fd)
if stat_info is None:
# path doesn't exist anymore
return None
if self.__file_type and stat_info['etype'] != self.__file_type:
# pre-filtering optimization to only select single type of
# file. This is used by webui to only return directories
# and reduces cost of any subsequent filtering.
return None
return stat_info
def __return_dirent(self, dirent, st, realpath, xattrs, acl, zfs_attrs, is_in_ctldir):
"""
More memory-efficient objects for case where dictionary isn't needed or desired.
"""
return dirent_struct(
dirent.name,
os.path.join(self.__path, dirent.name),
realpath,
st['st'],
st['etype'],
acl,
xattrs,
zfs_attrs,
is_in_ctldir
)
def __return_dict(self, dirent, st, realpath, xattrs, acl, zfs_attrs, is_in_ctldir):
stat = st['st']
return {
'name': dirent.name,
'path': os.path.join(self.__path, dirent.name),
'realpath': realpath,
'type': st['etype'],
'size': stat.stx_size,
'allocation_size': stat.stx_blocks * 512,
'mode': stat.stx_mode,
'acl': acl,
'uid': stat.stx_uid,
'gid': stat.stx_gid,
'mount_id': stat.stx_mnt_id,
'is_mountpoint': 'MOUNT_ROOT' in st['attributes'],
'is_ctldir': is_in_ctldir,
'attributes': st['attributes'],
'xattrs': xattrs,
'zfs_attrs': zfs_attrs
}
def __next__(self):
# dirent here is os.DirEntry yielded from os.scandir()
dirent = next(self.__path_iter)
while (st := self.__check_dir_entry(dirent)) is None:
dirent = next(self.__path_iter)
if self.__request_mask == 0:
# Skip an unnecessary file open/close if we only need stat info
return self.__return_fn(dirent, st, None, None, None, None, None)
try:
fd = os.open(dirent.name, os.O_RDONLY, dir_fd=self.dir_fd)
except FileNotFoundError:
# `dirent` was most likely deleted while we were generating listing
# There's not point in logging an error. Just keep moving on.
return self.__next__()
except OSError as err:
if err.errno in (errno.ENXIO, errno.ENODEV):
# this can happen for broken symlinks
return self.__next__()
raise
try:
if self.__request_mask & int(DirectoryRequestMask.REALPATH):
realpath = os.path.realpath(f'/proc/self/fd/{fd}')
else:
realpath = None
if self.__request_mask & int(DirectoryRequestMask.XATTRS):
xattrs = os.listxattr(fd)
else:
xattrs = None
if self.__request_mask & int(DirectoryRequestMask.ACL):
# try to avoid listing xattrs twice
acl = acl_is_present(os.listxattr(fd) if xattrs is None else xattrs)
else:
acl = None
if self.__request_mask & int(DirectoryRequestMask.ZFS_ATTRS):
try:
attr_mask = fget_zfs_file_attributes(fd)
zfs_attrs = zfs_attributes_dump(attr_mask)
except OSError as e:
# non-ZFS filesystems will fail with ENOTTY or EINVAL
# In this case we set `None` to indicate non-ZFS
if e.errno not in (errno.ENOTTY, errno.EINVAL):
raise e from None
zfs_attrs = None
else:
zfs_attrs = None
if self.__request_mask & int(DirectoryRequestMask.CTLDIR):
is_in_ctldir = path_in_ctldir(os.path.join(self.__path, dirent.name))
else:
is_in_ctldir = None
finally:
os.close(fd)
return self.__return_fn(dirent, st, realpath, xattrs, acl, zfs_attrs, is_in_ctldir)
@property
def dir_fd(self) -> int:
"""
File descriptor for O_DIRECTORY open for target directory.
"""
if self.__dir_fd is None:
return None
return self.__dir_fd.fileno
@property
def request_mask(self) -> DirectoryRequestMask:
return self.__request_mask
@property
def stat(self) -> StructStatx:
return self.__stat
def close(self, force=False) -> None:
try:
if self.__path_iter is not None:
self.__path_iter.close()
self.__path_iter = None
except Exception:
pass
if self.__dir_fd is not None:
# decrement reference to __dir_fd and allow
# garbage collecter to do cleanup. This behavior
# can be overriden by passing a force parameter
if force:
self.__dir_fd.close()
self.__dir_fd = None
def directory_is_empty(path):
"""
This is a more memory-efficient way of determining whether a directory is empty
than looking at os.listdir results.
"""
with DirectoryIterator(path, request_mask=0, as_dict=False) as d_iter:
return not any(d_iter)
| 10,665 | Python | .py | 254 | 32.846457 | 100 | 0.615466 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,264 | acl.py | truenas_middleware/src/middlewared/middlewared/utils/filesystem/acl.py | import enum
class ACLXattr(enum.Enum):
POSIX_ACCESS = "system.posix_acl_access"
POSIX_DEFAULT = "system.posix_acl_default"
ZFS_NATIVE = "system.nfs4_acl_xdr"
ACL_XATTRS = set([xat.value for xat in ACLXattr])
# ACCESS_ACL_XATTRS is set of ACLs that control access to the file itself.
ACCESS_ACL_XATTRS = set([ACLXattr.POSIX_ACCESS.value, ACLXattr.ZFS_NATIVE.value])
def acl_is_present(xat_list: list) -> bool:
"""
This method returns boolean value if ACL is present in a list of extended
attribute names. Both POSIX1E and our NFSv4 ACL implementations omit the
xattr name from the list if it has no impact on permisssions (mode is
authoritative.
"""
return bool(set(xat_list) & ACL_XATTRS)
| 736 | Python | .py | 16 | 42.0625 | 81 | 0.737728 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,265 | crud.py | truenas_middleware/src/middlewared/middlewared/utils/service/crud.py | __all__ = ['real_crud_method']
def real_crud_method(method):
if method.__name__ in ['create', 'update', 'delete'] and hasattr(method, '__self__'):
child_method = getattr(method.__self__, f'do_{method.__name__}', None)
if child_method is not None:
return child_method
| 301 | Python | .py | 6 | 43.5 | 89 | 0.600683 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,266 | __init__.py | truenas_middleware/src/middlewared/middlewared/utils/service/__init__.py | # -*- coding=utf-8 -*-
import logging
logger = logging.getLogger(__name__)
__all__ = []
| 90 | Python | .py | 4 | 21 | 36 | 0.619048 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,267 | call.py | truenas_middleware/src/middlewared/middlewared/utils/service/call.py | # -*- coding=utf-8 -*-
import errno
import logging
from middlewared.service_exception import CallError
logger = logging.getLogger(__name__)
__all__ = ["MethodNotFoundError", "ServiceCallMixin"]
class MethodNotFoundError(CallError):
def __init__(self, method_name, service):
super().__init__(f'Method {method_name!r} not found in {service!r}', CallError.ENOMETHOD)
class ServiceCallMixin:
def get_method(self, name):
if '.' not in name:
raise CallError('Invalid method name', errno.EBADMSG)
service, method_name = name.rsplit('.', 1)
try:
serviceobj = self.get_service(service)
except KeyError:
raise CallError(f'Service {service!r} not found', CallError.ENOMETHOD)
try:
methodobj = getattr(serviceobj, method_name)
except AttributeError:
raise MethodNotFoundError(method_name, service)
return serviceobj, methodobj
| 958 | Python | .py | 23 | 34.434783 | 97 | 0.670996 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,268 | task_state.py | truenas_middleware/src/middlewared/middlewared/utils/service/task_state.py | from middlewared.service import private
class TaskStateMixin:
task_state_methods = NotImplemented
@private
async def get_task_state_context(self):
jobs = {}
for j in await self.middleware.call(
"core.get_jobs",
[("OR", [("method", "=", method) for method in self.task_state_methods])],
{"order_by": ["id"]}
):
try:
task_id = int(j["arguments"][0])
except (IndexError, TypeError, ValueError):
continue
if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
# Newer task with the same name waiting in the queue, discard it and show the running task
continue
jobs[task_id] = j
return {
"jobs": jobs,
}
@private
async def get_task_state_job(self, context, task_id):
return context["jobs"].get(task_id)
@private
async def persist_task_state_on_job_complete(self):
async def on_job_change(middleware, event_type, args):
if event_type == "CHANGED" and args["fields"]["state"] in ["SUCCESS", "FAILED", "ABORTED"]:
job = args["fields"]
if job["method"] in self.task_state_methods:
await self.middleware.call(
"datastore.update",
self._config.datastore,
job["arguments"][0],
{"job": dict(job, id=None, logs_path=None)},
{"prefix": self._config.datastore_prefix},
)
self.middleware.event_subscribe("core.get_jobs", on_job_change)
| 1,690 | Python | .py | 39 | 30.384615 | 106 | 0.533211 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,269 | pwd.py | truenas_middleware/src/middlewared/middlewared/utils/nss/pwd.py | import ctypes
import errno
from collections import namedtuple
from .nss_common import get_nss_func, NssError, NssModule, NssOperation, NssReturnCode
PASSWD_INIT_BUFLEN = 1024
class Passwd(ctypes.Structure):
_fields_ = [
("pw_name", ctypes.c_char_p),
("pw_passwd", ctypes.c_char_p),
("pw_uid", ctypes.c_int),
("pw_gid", ctypes.c_int),
("pw_gecos", ctypes.c_char_p),
("pw_dir", ctypes.c_char_p),
("pw_shell", ctypes.c_char_p)
]
pwd_struct = namedtuple('struct_passwd', [
'pw_name', 'pw_uid', 'pw_gid', 'pw_gecos', 'pw_dir', 'pw_shell', 'source'
])
def __parse_nss_result(result, as_dict, module_name):
try:
name = result.pw_name.decode()
gecos = result.pw_gecos.decode()
homedir = result.pw_dir.decode()
shell = result.pw_shell.decode()
except AttributeError:
return None
if as_dict:
return {
'pw_name': name,
'pw_uid': result.pw_uid,
'pw_gid': result.pw_gid,
'pw_gecos': gecos,
'pw_dir': homedir,
'pw_shell': shell,
'source': module_name
}
return pwd_struct(name, result.pw_uid, result.pw_gid, gecos, homedir, shell, module_name)
def __getpwnam_r(name, result_p, buffer_p, buflen, nss_module):
"""
enum nss_status _nss_#module#_getpwnam_r(const char *name,
struct passwd *result,
char *buffer,
size_t buflen,
int *errnop)
"""
func = get_nss_func(NssOperation.GETPWNAM, nss_module)
func.restype = ctypes.c_int
func.argtypes = [
ctypes.c_char_p,
ctypes.POINTER(Passwd),
ctypes.c_char_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_int)
]
err = ctypes.c_int()
name = name.encode('utf-8')
res = func(ctypes.c_char_p(name), result_p, buffer_p, buflen, ctypes.byref(err))
return (int(res), err.value, result_p)
def __getpwuid_r(uid, result_p, buffer_p, buflen, nss_module):
"""
enum nss_status _nss_#module#_getpwuid_r(uid_t uid,
struct passwd *result,
char *buffer,
size_t buflen,
int *errnop)
"""
func = get_nss_func(NssOperation.GETPWUID, nss_module)
func.restype = ctypes.c_int
func.argtypes = [
ctypes.c_ulong,
ctypes.POINTER(Passwd),
ctypes.c_char_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_int)
]
err = ctypes.c_int()
res = func(uid, result_p, buffer_p, buflen, ctypes.byref(err))
return (int(res), err.value, result_p)
def __getpwent_r(result_p, buffer_p, buflen, nss_module):
"""
enum nss_status _nss_#module#_getpwent_r(struct passwd *result,
char *buffer, size_t buflen,
int *errnop)
"""
func = get_nss_func(NssOperation.GETPWENT, nss_module)
func.restype = ctypes.c_int
func.argtypes = [
ctypes.POINTER(Passwd),
ctypes.c_char_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_int)
]
err = ctypes.c_int()
res = func(result_p, buffer_p, buflen, ctypes.byref(err))
return (int(res), err.value, result_p)
def __setpwent(nss_module):
"""
enum nss_status _nss_#module#_setpwent(void)
"""
func = get_nss_func(NssOperation.SETPWENT, nss_module)
func.argtypes = []
res = func()
if res != NssReturnCode.SUCCESS:
raise NssError(ctypes.get_errno(), NssOperation.SETPWENT, res, nss_module)
def __endpwent(nss_module):
"""
enum nss_status _nss_#module#_endpwent(void)
"""
func = get_nss_func(NssOperation.ENDPWENT, nss_module)
func.argtypes = []
res = func()
if res != NssReturnCode.SUCCESS:
raise NssError(ctypes.get_errno(), NssOperation.ENDPWENT, res, nss_module)
def __getpwent_impl(mod, as_dict, buffer_len=PASSWD_INIT_BUFLEN):
result = Passwd()
buf = ctypes.create_string_buffer(buffer_len)
res, error, result_p = __getpwent_r(ctypes.byref(result), buf,
buffer_len, mod)
match error:
case 0:
pass
case errno.ERANGE:
# Our buffer was too small, increment
return __getpwent_impl(mod, as_dict, buffer_len * 2)
case _:
raise NssError(error, NssOperation.GETPWENT, res, mod)
if res != NssReturnCode.SUCCESS:
return None
return __parse_nss_result(result, as_dict, mod.name)
def __getpwall_impl(module, as_dict):
mod = NssModule[module]
__setpwent(mod)
pwd_list = []
while user := __getpwent_impl(mod, as_dict):
pwd_list.append(user)
__endpwent(mod)
return pwd_list
def __getpwnam_impl(name, module, as_dict, buffer_len=PASSWD_INIT_BUFLEN):
mod = NssModule[module]
result = Passwd()
buf = ctypes.create_string_buffer(buffer_len)
res, error, result_p = __getpwnam_r(name, ctypes.byref(result),
buf, buffer_len, mod)
match error:
case 0:
pass
case errno.ERANGE:
# Our buffer was too small, increment
return __getpwnam_impl(name, module, as_dict, buffer_len * 2)
case _:
raise NssError(error, NssOperation.GETPWNAM, res, mod)
if res == NssReturnCode.NOTFOUND:
return None
return __parse_nss_result(result, as_dict, mod.name)
def __getpwuid_impl(uid, module, as_dict, buffer_len=PASSWD_INIT_BUFLEN):
mod = NssModule[module]
result = Passwd()
buf = ctypes.create_string_buffer(buffer_len)
res, error, result_p = __getpwuid_r(uid, ctypes.byref(result),
buf, buffer_len, mod)
match error:
case 0:
pass
case errno.ERANGE:
# Our buffer was too small, increment
return __getpwuid_impl(uid, module, as_dict, buffer_len * 2)
case _:
raise NssError(error, NssOperation.GETPWUID, res, mod)
if res == NssReturnCode.NOTFOUND:
return None
return __parse_nss_result(result, as_dict, mod.name)
def getpwuid(uid, module=NssModule.ALL.name, as_dict=False):
"""
Return the password database entry for the given user by uid.
`module` - NSS module from which to retrieve the user
`as_dict` - return output as a dictionary rather than `struct_passwd`.
"""
if module != NssModule.ALL.name:
if (result := __getpwuid_impl(uid, module, as_dict)):
return result
raise KeyError(f"getpwuid(): uid not found: '{uid}'")
# We're querying all modules
for mod in NssModule:
if mod == NssModule.ALL:
continue
try:
if (result := __getpwuid_impl(uid, mod.name, as_dict)):
return result
except NssError as e:
if e.return_code != NssReturnCode.UNAVAIL:
raise e from None
raise KeyError(f"getpwuid(): uid not found: '{uid}'")
def getpwnam(name, module=NssModule.ALL.name, as_dict=False):
"""
Return the password database entry for the given user by name.
`module` - NSS module from which to retrieve the user
`as_dict` - return output as a dictionary rather than `struct_passwd`.
"""
if module != NssModule.ALL.name:
if (result := __getpwnam_impl(name, module, as_dict)):
return result
raise KeyError(f"getpwnam(): name not found: '{name}'")
# We're querying all modules
for mod in NssModule:
if mod == NssModule.ALL:
continue
try:
if (result := __getpwnam_impl(name, mod.name, as_dict)):
return result
except NssError as e:
if e.return_code != NssReturnCode.UNAVAIL:
raise e from None
raise KeyError(f"getpwnam(): name not found: '{name}'")
def getpwall(module=NssModule.ALL.name, as_dict=False):
"""
Returns all password entries on server (similar to pwd.getpwall()).
`module` - NSS module from which to retrieve the entries
`as_dict` - return password database entries as dictionaries
This module returns a dictionary keyed by NSS module, e.g.
{'FILES': [<struct_passwd>, <struct_passwd>], 'WINBIND': [], 'SSS': []}
"""
if module != NssModule.ALL.name:
return {module: __getpwall_impl(module, as_dict)}
results = {}
for mod in NssModule:
if mod == NssModule.ALL:
continue
entries = []
try:
entries = __getpwall_impl(mod.name, as_dict)
except NssError as e:
if e.return_code != NssReturnCode.UNAVAIL:
raise e from None
results[mod.name] = entries
return results
def iterpw(module=NssModule.FILES.name, as_dict=False):
"""
Generator that yields password entries on server
`module` - NSS module from which to retrieve the entries
`as_dict` - yield password database entries as dictionaries
WARNING: users of this API should not create two generators for
same passwd database concurrently in the same thread due to NSS
modules storing the handle for the pwent in thread-local variable:
BAD:
iter1 = iterpw(NssModule.FILES.name, True)
iter2 = iterpw(NssModule.FILES.name, True)
for x in iter1:
for y in iter2
or call getpwall() during iteration
ALSO BAD:
iter1 = iterpw(NssModule.FILES.name, True)
for x in iter1:
pwd = getpwall()
"""
if module == NssModule.ALL.name:
raise ValueError('Please select one of: FILES, WINBIND, SSS')
mod = NssModule[module]
__setpwent(mod)
try:
while user := __getpwent_impl(mod, as_dict):
yield user
finally:
__endpwent(mod)
| 10,119 | Python | .py | 263 | 29.391635 | 93 | 0.593187 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,270 | nss_common.py | truenas_middleware/src/middlewared/middlewared/utils/nss/nss_common.py | import enum
import ctypes
import os
NSS_MODULES_DIR = '/usr/lib/x86_64-linux-gnu'
FILES_NSS_PATH = os.path.join(NSS_MODULES_DIR, 'libnss_files.so.2')
SSS_NSS_PATH = os.path.join(NSS_MODULES_DIR, 'libnss_sss.so.2')
WINBIND_NSS_PATH = os.path.join(NSS_MODULES_DIR, 'libnss_winbind.so.2')
class NssReturnCode(enum.IntEnum):
""" Possible NSS return codes, see /usr/include/nss.h """
TRYAGAIN = -2
UNAVAIL = -1
NOTFOUND = 0
SUCCESS = 1
RETURN = 2
class NssModule(enum.Enum):
""" Currently supported NSS modules """
ALL = enum.auto()
FILES = FILES_NSS_PATH
SSS = SSS_NSS_PATH
WINBIND = WINBIND_NSS_PATH
class NssOperation(enum.Enum):
""" Currently supported NSS operations """
GETGRNAM = 'getgrnam_r'
GETGRGID = 'getgrgid_r'
SETGRENT = 'setgrent'
ENDGRENT = 'endgrent'
GETGRENT = 'getgrent_r'
GETPWNAM = 'getpwnam_r'
GETPWUID = 'getpwuid_r'
GETPWENT = 'getpwent_r'
SETPWENT = 'setpwent'
ENDPWENT = 'endpwent'
class NssError(Exception):
def __init__(self, errno, nssop, return_code, module):
self.errno = errno
self.nssop = nssop.value
self.return_code = return_code
self.mod_name = module.name
def __str__(self):
errmsg = f'NSS operation {self.nssop} failed with errno {self.errno}: {self.return_code}'
if self.mod_name != 'ALL':
errmsg += f' on module [{self.mod_name.lower()}].'
return errmsg
def get_nss_func(nss_op, nss_module):
if nss_module == NssModule.ALL:
raise ValueError('ALL module may not be explicitly used')
lib = ctypes.CDLL(nss_module.value, use_errno=True)
return getattr(lib, f'_nss_{nss_module.name.lower()}_{nss_op.value}')
| 1,737 | Python | .py | 48 | 31.0625 | 97 | 0.660896 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,271 | grp.py | truenas_middleware/src/middlewared/middlewared/utils/nss/grp.py | import ctypes
import errno
from collections import namedtuple
from .nss_common import get_nss_func, NssError, NssModule, NssOperation, NssReturnCode
GROUP_INIT_BUFLEN = 1024
class Group(ctypes.Structure):
_fields_ = [
("gr_name", ctypes.c_char_p),
("gr_passwd", ctypes.c_char_p),
("gr_gid", ctypes.c_int),
("gr_mem", ctypes.POINTER(ctypes.c_char_p))
]
group_struct = namedtuple('struct_group', ['gr_name', 'gr_gid', 'gr_mem', 'source'])
def __parse_nss_result(result, as_dict, module_name):
if result.gr_name is None:
return None
name = result.gr_name.decode()
members = list()
i = 0
while result.gr_mem[i]:
members.append(result.gr_mem[i].decode())
i += 1
if as_dict:
return {
'gr_name': name,
'gr_gid': result.gr_gid,
'gr_mem': members,
'source': module_name
}
return group_struct(name, result.gr_gid, members, module_name)
def __getgrnam_r(name, result_p, buffer_p, buflen, nss_module):
"""
enum nss_status _nss_#module#_getgrnam_r(const char *name,
struct group *result,
char *buffer,
size_t buflen,
int *error)
"""
func = get_nss_func(NssOperation.GETGRNAM, nss_module)
func.restype = ctypes.c_int
func.argtypes = [
ctypes.c_char_p,
ctypes.POINTER(Group),
ctypes.c_char_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_int)
]
err = ctypes.c_int()
name = name.encode('utf-8')
res = func(ctypes.c_char_p(name), result_p, buffer_p, buflen, ctypes.byref(err))
return (int(res), err.value, result_p)
def __getgrgid_r(gid, result_p, buffer_p, buflen, nss_module):
"""
enum nss_status _nss_#module#_getgrgid_r(gid_t gid,
struct group *result,
char *buffer,
size_t buflen,
int *error)
"""
func = get_nss_func(NssOperation.GETGRGID, nss_module)
func.restype = ctypes.c_int
func.argtypes = [
ctypes.c_ulong,
ctypes.POINTER(Group),
ctypes.c_char_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_int)
]
err = ctypes.c_int()
res = func(gid, result_p, buffer_p, buflen, ctypes.byref(err))
return (int(res), err.value, result_p)
def __getgrent_r(result_p, buffer_p, buflen, nss_module):
"""
enum nss_status _nss_#module#_getgrent_r(struct group *result,
char *buffer,
size_t buflen,
int *error)
"""
func = get_nss_func(NssOperation.GETGRENT, nss_module)
func.restype = ctypes.c_int
func.argtypes = [
ctypes.POINTER(Group),
ctypes.c_char_p,
ctypes.c_ulong,
ctypes.POINTER(ctypes.c_int)
]
err = ctypes.c_int()
res = func(result_p, buffer_p, buflen, ctypes.byref(err))
return (int(res), err.value, result_p)
def __setgrent(nss_module):
"""
enum nss_status _nss_#module#_setgrent(void)
"""
func = get_nss_func(NssOperation.SETGRENT, nss_module)
func.argtypes = []
res = func()
if res != NssReturnCode.SUCCESS:
raise NssError(ctypes.get_errno(), NssOperation.SETGRENT, res, nss_module)
def __endgrent(nss_module):
"""
enum nss_status _nss_#module#_endgrent(void)
"""
func = get_nss_func(NssOperation.ENDGRENT, nss_module)
func.argtypes = []
res = func()
if res != NssReturnCode.SUCCESS:
raise NssError(ctypes.get_errno(), NssOperation.ENDGRENT, res, nss_module)
def __getgrent_impl(mod, as_dict, buffer_len=GROUP_INIT_BUFLEN):
result = Group()
buf = ctypes.create_string_buffer(buffer_len)
res, error, result_p = __getgrent_r(ctypes.byref(result), buf,
buffer_len, mod)
match error:
case 0:
pass
case errno.ERANGE:
# Our buffer was too small, increment
return __getgrent_impl(mod, as_dict, buffer_len * 2)
case _:
raise NssError(error, NssOperation.GETGRENT, res, mod)
if res != NssReturnCode.SUCCESS:
return None
return __parse_nss_result(result, as_dict, mod.name)
def __getgrall_impl(module, as_dict):
mod = NssModule[module]
__setgrent(mod)
group_list = []
while group := __getgrent_impl(mod, as_dict):
group_list.append(group)
__endgrent(mod)
return group_list
def __getgrnam_impl(name, module, as_dict, buffer_len=GROUP_INIT_BUFLEN):
mod = NssModule[module]
result = Group()
buf = ctypes.create_string_buffer(buffer_len)
res, error, result_p = __getgrnam_r(name, ctypes.byref(result),
buf, buffer_len, mod)
match error:
case 0:
pass
case errno.ERANGE:
# Our buffer was too small, increment
return __getgrnam_impl(name, module, as_dict, buffer_len * 2)
case _:
raise NssError(error, NssOperation.GETGRNAM, res, mod)
if res == NssReturnCode.NOTFOUND:
return None
return __parse_nss_result(result, as_dict, mod.name)
def __getgrgid_impl(gid, module, as_dict, buffer_len=GROUP_INIT_BUFLEN):
mod = NssModule[module]
result = Group()
buf = ctypes.create_string_buffer(buffer_len)
res, error, result_p = __getgrgid_r(gid, ctypes.byref(result),
buf, buffer_len, mod)
match error:
case 0:
pass
case errno.ERANGE:
# Our buffer was too small, increment
return __getgrgid_impl(gid, module, as_dict, buffer_len * 2)
case _:
raise NssError(error, NssOperation.GETGRGID, res, mod)
if res == NssReturnCode.NOTFOUND:
return None
return __parse_nss_result(result, as_dict, mod.name)
def getgrgid(gid, module=NssModule.ALL.name, as_dict=False):
"""
Return the group database entry for the given group by gid.
`module` - NSS module from which to retrieve the group
`as_dict` - return output as a dictionary rather than `struct_group`.
"""
if module != NssModule.ALL.name:
if (result := __getgrgid_impl(gid, module, as_dict)):
return result
raise KeyError(f"getgrgid(): gid not found: '{gid}'")
# We're querying all modules
for mod in NssModule:
if mod == NssModule.ALL:
continue
try:
if (result := __getgrgid_impl(gid, mod.name, as_dict)):
return result
except NssError as e:
if e.return_code != NssReturnCode.UNAVAIL:
raise e from None
raise KeyError(f"getgrgid(): gid not found: '{gid}'")
def getgrnam(name, module=NssModule.ALL.name, as_dict=False):
"""
Return the group database entry for the given group by name.
`module` - NSS module from which to retrieve the group
`as_dict` - return output as a dictionary rather than `struct_group`.
"""
if module != NssModule.ALL.name:
if (result := __getgrnam_impl(name, module, as_dict)):
return result
raise KeyError(f"getgrnam(): name not found: '{name}'")
# We're querying all modules
for mod in NssModule:
if mod == NssModule.ALL:
continue
try:
if (result := __getgrnam_impl(name, mod.name, as_dict)):
return result
except NssError as e:
if e.return_code != NssReturnCode.UNAVAIL:
raise e from None
raise KeyError(f"getgrnam(): name not found: '{name}'")
def getgrall(module=NssModule.ALL.name, as_dict=False):
"""
Returns all group entries on server (similar to grp.getgrall()).
`module` - NSS module from which to retrieve the entries
`as_dict` - return password database entries as dictionaries
This module returns a dictionary keyed by NSS module, e.g.
{'FILES': [<struct_group>, <struct_group>], 'WINBIND': [], 'SSS': []}
"""
if module != NssModule.ALL.name:
return {module: __getgrall_impl(module, as_dict)}
results = {}
for mod in NssModule:
if mod == NssModule.ALL:
continue
entries = []
try:
entries = __getgrall_impl(mod.name, as_dict)
except NssError as e:
if e.return_code != NssReturnCode.UNAVAIL:
raise e from None
results[mod.name] = entries
return results
def itergrp(module=NssModule.FILES.name, as_dict=False):
"""
Generator that yields group entries on server
`module` - NSS module from which to retrieve the entries
`as_dict` - yield password database entries as dictionaries
WARNING: users of this API should not create two generators for
same passwd database concurrently in the same thread due to NSS
modules storing the handle for the pwent in thread-local variable:
BAD:
iter1 = itergrp(NssModule.FILES.name, True)
iter2 = itergrp(NssModule.FILES.name, True)
for x in iter1:
for y in iter2
or call getgrall() during iteration
ALSO BAD:
iter1 = itergrp(NssModule.FILES.name, True)
for x in iter1:
grp = getgrall()
"""
if module == NssModule.ALL.name:
raise ValueError('Please select one of: FILES, WINBIND, SSS')
mod = NssModule[module]
__setgrent(mod)
try:
while group := __getgrent_impl(mod, as_dict):
yield group
finally:
__endgrent(mod)
| 9,883 | Python | .py | 257 | 29.2607 | 86 | 0.592581 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,272 | cache.py | truenas_middleware/src/middlewared/middlewared/utils/rate_limit/cache.py | from asyncio import sleep
from dataclasses import dataclass
from random import uniform
from time import monotonic
from typing import TypedDict
from middlewared.utils.origin import ConnectionOrigin
__all__ = ['RateLimitCache']
@dataclass(frozen=True)
class RateLimitConfig:
"""The maximum number of calls per unique consumer of the endpoint."""
max_calls: int = 20
"""The maximum time in seconds that a unique consumer may request an
endpoint that is being rate limited."""
max_period: int = 60
"""The maximum number of unique entries the cache supports"""
max_cache_entries: int = 100
"""The value used to separate the unique values when generating
a unique key to be used to store the cached information."""
separator: str = '_##_'
"""The starting decimal value for the time to be slept in the event
rate limit thresholds for a particular consumer has been met."""
sleep_start: float = 1.0
"""The ending decimal value for the time to be slept in the event
rate limit thresholds for a particular consumer has been met."""
sleep_end: float = 10.0
class RateLimitObject(TypedDict):
"""The number of times this method was called by the consumer."""
num_times_called: int
"""The monotonic time representing when this particular cache
entry was last reset."""
last_reset: float
RL_CACHE: dict[str, RateLimitObject] = dict()
class RateLimit:
def cache_key(self, method_name: str, ip: str) -> str:
"""Generate a unique key per endpoint/consumer"""
return f'{method_name}{RateLimitConfig.separator}{ip}'
def rate_limit_exceeded(self, method_name: str, ip: str) -> bool:
"""Return a boolean indicating if the total number of calls
per unique endpoint/consumer has been reached."""
key = self.cache_key(method_name, ip)
try:
now: float = monotonic()
if RateLimitConfig.max_period - (now - RL_CACHE[key]['last_reset']) <= 0:
# time window elapsed, so time to reset
RL_CACHE[key]['num_times_called'] = 0
RL_CACHE[key]['last_reset'] = now
# always increment
RL_CACHE[key]['num_times_called'] += 1
return RL_CACHE[key]['num_times_called'] > RateLimitConfig.max_calls
except KeyError:
pass
return False
async def add(self, method_name: str, origin: ConnectionOrigin) -> str | None:
"""Add an entry to the cache. Returns the IP address of
origin of the request if it has been cached, returns None otherwise"""
try:
if (
origin.is_ha_connection or origin.is_unix_family or
origin.rem_addr is None or origin.rem_port is None
):
return None
else:
key = self.cache_key(method_name, origin.rem_addr)
if key not in RL_CACHE:
RL_CACHE[key] = RateLimitObject(num_times_called=0, last_reset=monotonic())
return origin.rem_addr
except AttributeError:
# origin is NoneType
return None
async def cache_pop(self, method_name: str, ip: str) -> None:
"""Pop (remove) an entry from the cache."""
RL_CACHE.pop(self.cache_key(method_name, ip), None)
async def cache_clear(self) -> None:
"""Clear all entries from the cache."""
RL_CACHE.clear()
async def random_sleep(self) -> None:
"""Sleep a random amount of seconds."""
await sleep(round(uniform(RateLimitConfig.sleep_start, RateLimitConfig.sleep_end), 2))
async def cache_get(self) -> RL_CACHE:
"""Return the global cache."""
return RL_CACHE
@property
def max_entries_reached(self) -> bool:
"""Return a boolean indicating if the total number of entries
in the global cache has reached `self.max_cache_entries`."""
return len(RL_CACHE) == RateLimitConfig.max_cache_entries
RateLimitCache = RateLimit()
| 4,041 | Python | .py | 87 | 38.310345 | 95 | 0.650648 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,273 | ipa.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/ipa.py | import os
from configparser import RawConfigParser
from io import StringIO
from tempfile import NamedTemporaryFile
from .ipa_constants import IPAPath
def generate_ipa_default_config(
host: str,
basedn: str,
domain: str,
realm: str,
server: str
) -> bytes:
"""
Return bytes of freeipa configuration file.
IPA-related tools / ipa command relies on configuration file
generated via python's RawConfigParser.
For meaning of options see man (5) default.conf
sample config:
```
[global]
basedn = dc=walkerdom,dc=test
realm = WALKERDOM.TEST
domain = walkerdom.test
server = ipa.walkerdom.test
host = truenas.walkerdom.test
xmlrpc_uri = https://ipa.walkerdom.test/ipa/xml
enable_ra = True
```
"""
config = RawConfigParser()
config.add_section('global')
config.set('global', 'host', host)
config.set('global', 'basedn', basedn)
config.set('global', 'realm', realm)
config.set('global', 'domain', domain)
config.set('global', 'server', server)
config.set('global', 'xmlrpc_uri', f'https://{server}/ipa/xml')
config.set('global', 'enable_ra', 'False')
with StringIO() as buf:
config.write(buf)
buf.seek(0)
return buf.read().encode()
def _write_ipa_file(ipa_path: IPAPath, data: bytes) -> None:
with NamedTemporaryFile(dir=IPAPath.IPADIR.path, delete=False) as f:
f.write(data)
f.flush()
os.rename(f.name, ipa_path.path)
os.fchmod(f.fileno(), ipa_path.perm)
if not os.path.exists(ipa_path.path):
raise RuntimeError(f'{ipa_path.path}: failed to create file')
return ipa_path.path
def write_ipa_default_config(
host: str,
basedn: str,
domain: str,
realm: str,
server: str
) -> None:
"""
Write the freeipa default.conf file based on the specified arguments
"""
config = generate_ipa_default_config(host, basedn, domain, realm, server)
return _write_ipa_file(IPAPath.DEFAULTCONF, config)
def write_ipa_cacert(cacert_bytes: bytes) -> str:
    """Write the IPA domain CA certificate to its well-known path and return that path."""
    return _write_ipa_file(IPAPath.CACERT, cacert_bytes)
| 2,137 | Python | .py | 65 | 27.630769 | 77 | 0.674611 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,274 | constants.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/constants.py | import enum
class DSStatus(enum.Enum):
    """Lifecycle / health states reported for a configured directory service."""
    DISABLED = enum.auto()
    FAULTED = enum.auto()
    LEAVING = enum.auto()  # domain leave in progress
    JOINING = enum.auto()  # domain join in progress
    HEALTHY = enum.auto()
class DSType(enum.Enum):
    """Supported directory service types. Values match the datastore/API spelling."""
    AD = 'ACTIVEDIRECTORY'
    IPA = 'IPA'
    LDAP = 'LDAP'

    @property
    def etc_files(self):
        """Tuple of etc file groups that must be regenerated for this service type."""
        if self is DSType.AD:
            return ('pam', 'nss', 'smb', 'kerberos')
        if self is DSType.IPA:
            return ('ldap', 'ipa', 'pam', 'nss', 'smb', 'kerberos')
        if self is DSType.LDAP:
            return ('ldap', 'pam', 'nss', 'kerberos')
class SASL_Wrapping(enum.Enum):
    """SASL transport protection level used for LDAP binds."""
    PLAIN = 'PLAIN'
    SIGN = 'SIGN'
    SEAL = 'SEAL'
class SSL(enum.Enum):
    """Transport security options for the LDAP connection."""
    NOSSL = 'OFF'
    USESSL = 'ON'
    USESTARTTLS = 'START_TLS'
class NSS_Info(enum.Enum):
    """NSS info schemas paired with the directory service types each is valid for."""
    SFU = ('SFU', (DSType.AD,))
    SFU20 = ('SFU20', (DSType.AD,))
    RFC2307 = ('RFC2307', (DSType.AD, DSType.LDAP))
    RFC2307BIS = ('RFC2307BIS', (DSType.LDAP, DSType.IPA))
    TEMPLATE = ('TEMPLATE', (DSType.AD,))

    @property
    def nss_type(self):
        """Schema name string consumed by backend configuration."""
        schema_name, _services = self.value
        return schema_name

    @property
    def valid_services(self):
        """Tuple of DSType members this schema may be used with."""
        _schema_name, services = self.value
        return services
class DomainJoinResponse(enum.Enum):
    """Result of a domain join attempt: either a join was performed or we were already joined."""
    PERFORMED_JOIN = 'PERFORMED_JOIN'
    ALREADY_JOINED = 'ALREADY_JOINED'
| 1,281 | Python | .py | 43 | 23.418605 | 71 | 0.587899 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,275 | ldap_utils.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/ldap_utils.py | from urllib.parse import urlparse
def hostnames_to_uris(hostname_list: list, use_ldaps: bool) -> list:
    """
    Convert hostnames (optionally in "host:port" form) into LDAP URIs.

    Duplicate hostnames are dropped while preserving the caller's ordering.
    (Previously a set() was used for dedup, which made the output order
    nondeterministic.) Entries without an explicit, parseable port get the
    scheme default: 636 for ldaps, 389 for ldap.
    """
    scheme = 'ldaps' if use_ldaps else 'ldap'
    default_port = 636 if use_ldaps else 389

    out = []
    # dict.fromkeys is an order-preserving dedup
    for host in dict.fromkeys(hostname_list):
        parsed = urlparse(f'{scheme}://{host}')
        try:
            port = parsed.port
            host = parsed.hostname
        except ValueError:
            # Unparseable port specification. Fall back to the scheme
            # default and leave the host string as given (matches the
            # historical behavior).
            port = None

        if port is None:
            port = default_port

        out.append(f'{scheme}://{host}:{port}')

    return out
| 515 | Python | .py | 15 | 26 | 68 | 0.582996 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,276 | krb5.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/krb5.py | # This is a collection of utilities related to kerberos tickets
# and keytabs.
#
# Tests that do not require access to an actual KDC are provided
# in src/middlewared/middlewared/pytest/unit/utils/test_krb5.py
#
# Tests that require access to a KDC are provided as part of API
# test suite.
import errno
import functools
import gssapi
import os
import subprocess
import time
from .krb5_constants import krb_tkt_flag, krb5ccache, KRB_ETYPE, KRB_Keytab
from middlewared.service_exception import CallError
from middlewared.utils import filter_list
from tempfile import NamedTemporaryFile
from typing import Optional
# See lib/krb5/keytab/kt_file.c in MIT kerberos source
KRB5_KT_VNO = b'\x05\x02'  # KRB v5 keytab version 2, (last changed in 2009)
# The following schemas are used for validation of klist / ktutil_list output
# JSON-schema describing a single parsed ticket entry as produced by
# parse_klist_output() below.
KLIST_ENTRY_SCHEMA = {
    'type': 'object',
    'properties': {
        'issued': {'type': 'integer'},
        'expires': {'type': 'integer'},
        'renew_until': {'type': 'integer'},
        'client': {'type': 'string'},
        'server': {'type': 'string'},
        'etype': {'type': 'string'},
        'flags': {
            'type': 'array',
            'items': {
                'type': 'string',
                # flag names come from the klist single-character flag map
                'enum': [k.name for k in krb_tkt_flag],
                'uniqueItems': True
            }
        }
    },
    'required': [
        'issued', 'expires', 'renew_until',
        'client', 'server', 'etype', 'flags'
    ],
    'additionalProperties': False
}
# JSON-schema for the full dict returned by parse_klist_output() /
# klist_impl().
KLIST_OUTPUT_SCHEMA = {
    'type': 'object',
    'properties': {
        'default_principal': {'type': 'string'},
        'ticket_cache': {
            'type': 'object',
            'properties': {
                'type': {'type': 'string'},
                'name': {'type': 'string'}
            },
            'required': ['type', 'name']
        },
        'tickets': {
            'type': 'array',
            'items': KLIST_ENTRY_SCHEMA,
            'uniqueItems': True
        },
    },
    'required': ['default_principal', 'ticket_cache', 'tickets']
}
# JSON-schema for a single keytab entry as produced by parse_keytab() below.
KTUTIL_LIST_ENTRY_SCHEMA = {
    'type': 'object',
    'properties': {
        'slot': {'type': 'integer'},
        'kvno': {'type': 'integer'},
        'principal': {'type': 'string'},
        'etype': {
            'type': 'string',
            'enum': [k.value for k in KRB_ETYPE],
            'uniqueItems': True
        },
        'etype_deprecated': {'type': 'boolean'},
        'date': {'type': 'integer'},
    },
    'required': [
        'slot', 'kvno', 'etype', 'etype_deprecated', 'date'
    ],
    'additionalProperties': False
}
# JSON-schema for the list returned by ktutil_list_impl().
KTUTIL_LIST_OUTPUT_SCHEMA = {
    'type': 'array',
    'items': KTUTIL_LIST_ENTRY_SCHEMA,
    'uniqueItems': True
}
def __tmp_krb5_keytab() -> str:
    """
    Create a temporary file seeded with the two-byte keytab format header
    (KRB5_KT_VNO) and return its path. The caller is responsible for
    removing the file when done.
    """
    tmpfile = NamedTemporaryFile(delete=False)
    try:
        tmpfile.write(KRB5_KT_VNO)
        tmpfile.flush()
    finally:
        tmpfile.close()
    return tmpfile.name
def parse_klist_output(klistbuf: str) -> dict:
    """
    This is an internal method that parses the output of `klist -ef`.

    Returns a dict conforming to KLIST_OUTPUT_SCHEMA: the default
    principal, the ticket cache (type + name), and a list of parsed
    tickets. (The previous `-> list` annotation did not match the
    actual dict return value.)
    """
    tickets = klistbuf.splitlines()
    ticket_cache = None
    default_principal = None
    tlen = len(tickets)
    parsed_klist = []
    for idx, e in enumerate(tickets):
        if e.startswith('Ticket cache'):
            # e.g. "Ticket cache: KEYRING:persistent:0:0"
            # NOTE(review): str.strip('Ticket cache: ') strips a character
            # set, not the literal prefix; it appears to work for the cache
            # strings in use but removeprefix() would be more precise.
            cache_type, cache_name = e.strip('Ticket cache: ').split(':', 1)
            ticket_cache = {
                'type': cache_type,
                'name': cache_name.strip()
            }
        if e.startswith('Default'):
            # "Default principal: user@REALM"
            default_principal = (e.split(':')[1]).strip()
            continue
        if e and e[0].isdigit():
            # Ticket lines begin with the "Valid starting" timestamp.
            # NOTE(review): the strptime format implies d[0]/d[1] each hold
            # "date time"; that requires the field separator used here to
            # split on the gap between columns -- verify against actual
            # klist output (and locale-dependent date formatting).
            d = e.split("  ")
            issued = int(time.mktime(time.strptime(d[0], "%m/%d/%y %H:%M:%S")))
            expires = int(time.mktime(time.strptime(d[1], "%m/%d/%y %H:%M:%S")))
            client = default_principal
            server = d[2]
            renew_until = 0
            flags = ''
            etype = None
            # Up to two continuation lines follow a ticket line:
            # "\trenew until <ts>, Flags: <flags>" and/or "\tEtype ...".
            for i in range(idx + 1, idx + 3):
                if i >= tlen:
                    break
                if tickets[i][0].isdigit():
                    # next ticket entry; stop scanning continuations
                    break
                if tickets[i].startswith("\tEtype"):
                    etype = tickets[i].strip()
                    break
                if tickets[i].startswith("\trenew"):
                    # NOTE(review): strip('\trenew until ') strips a char
                    # set rather than the prefix -- confirm correctness for
                    # timestamps beginning with characters in that set.
                    ts, flags = tickets[i].split(",")
                    renew_until = int(time.mktime(time.strptime(
                        ts.strip('\trenew until '), "%m/%d/%y %H:%M:%S"
                    )))
                    flags = flags.split("Flags: ")[1]
                    continue
                # fallback: "Flags: <flags>, Etype ..." on a single line
                extra = tickets[i].split(", ", 1)
                flags = extra[0][7:].strip()
                etype = extra[1].strip()
            parsed_klist.append({
                'issued': issued,
                'expires': expires,
                'renew_until': renew_until,
                'client': client,
                'server': server,
                'etype': etype,
                # each single-character flag maps to a krb_tkt_flag name
                'flags': [krb_tkt_flag(f).name for f in flags],
            })
    return {
        'default_principal': default_principal,
        'ticket_cache': ticket_cache,
        'tickets': parsed_klist,
    }
def klist_impl(ccache_path: str) -> dict:
    """
    Run `klist -ef` against the specified credentials cache and return the
    parsed result (see parse_klist_output / KLIST_OUTPUT_SCHEMA). The
    previous `-> list` annotation did not match the dict return value.
    """
    kl = subprocess.run(['klist', '-ef', ccache_path], capture_output=True)
    return parse_klist_output(kl.stdout.decode())
def gss_acquire_cred_user(
    username: str,
    password: str,
    ccache_path: str | None = None,
    lifetime: int | None = None
) -> gssapi.Credentials:
    """
    Acquire GSSAPI credentials based on provided username + password combination
    This relies on krb5.conf being properly configured for the kerberos realm.
    If `ccache_path` is specified then the credentials are also written to the
    specified ccache.
    `lifetime` (seconds) may be used to override the defaults in krb5.conf.
    Returns gssapi.Credentials
    Raises:
        gssapi.exceptions.MissingCredentialsError -- may be converted to KRBError
        gssapi.exceptions.BadNameError -- user supplied invalid username
    """
    gss_name = gssapi.raw.import_name(username.encode(), gssapi.NameType.user)
    cr = gssapi.raw.acquire_cred_with_password(
        gss_name, password.encode(), lifetime=lifetime
    )
    if ccache_path is not None:
        # Persist the credential into the requested ccache and make it the
        # default credential there, replacing any existing entry.
        gssapi.raw.store_cred_into(
            {'ccache': ccache_path},
            cr.creds,
            usage='initiate',
            mech=gssapi.raw.MechType.kerberos,
            set_default=True, overwrite=True
        )
    # Wrap the raw credential object in the high-level Credentials API
    return gssapi.Credentials(cr.creds)
def gss_acquire_cred_principal(
    principal_name: str,
    ccache_path: str | None = None,
    lifetime: int | None = None,
) -> gssapi.Credentials:
    """
    Acquire GSSAPI credentials based on provided specified kerberos principal
    name. This relies on krb5.conf being properly configured for the kerberos realm,
    /etc/krb5.keytab existing and it having an entry that matches the principal name.
    If `ccache_path` is specified then the credentials are also written to the
    specified ccache.
    `lifetime` (seconds) may be used to override the defaults in krb5.conf.
    Returns gssapi.Credentials
    Raises:
        gssapi.exceptions.MissingCredentialsError -- may be converted to KRBError
        gssapi.exceptions.BadNameError -- user supplied invalid kerberos principal name
    """
    gss_name = gssapi.Name(principal_name, gssapi.NameType.kerberos_principal)
    # client_keytab directs credential acquisition at the system keytab
    store = {'client_keytab': KRB_Keytab.SYSTEM.value}
    if ccache_path is not None:
        store['ccache'] = ccache_path
    cr = gssapi.Credentials(
        name=gss_name,
        store=store,
        usage='initiate',
        lifetime=lifetime,
    )
    if ccache_path is not None:
        # write to the ccache and make it the default credential there
        cr.store(set_default=True, overwrite=True)
    return cr
def gss_get_current_cred(
    ccache_path: str,
    raise_error: Optional[bool] = True
) -> gssapi.Credentials | None:
    """
    Use gssapi library to inspect the ticket in the specified ccache
    Returns gssapi.Credentials and optionally (if raise_error is False)
    None.

    Raises:
        CallError(ENOENT) -- ccache does not exist
        CallError(ENOKEY) -- ticket in ccache is expired
        CallError -- any other GSSAPI failure
    """
    try:
        cred = gssapi.Credentials(store={'ccache': ccache_path}, usage='initiate')
    except gssapi.exceptions.MissingCredentialsError:
        # no credentials cache exists at the specified path
        if not raise_error:
            return None
        raise CallError(f'{ccache_path}: Credentials cache does not exist', errno.ENOENT)
    # inquire() forces evaluation of the credential so that invalid or
    # expired tickets surface here rather than on first later use.
    try:
        cred.inquire()
    except gssapi.exceptions.InvalidCredentialsError as e:
        if not raise_error:
            return None
        raise CallError(str(e))
    except gssapi.exceptions.ExpiredCredentialsError:
        if not raise_error:
            return None
        raise CallError('Kerberos ticket is expired', errno.ENOKEY)
    except Exception as e:
        # catch-all: surface any other GSSAPI failure as a CallError
        if not raise_error:
            return None
        raise CallError(str(e))
    return cred
def gss_dump_cred(cred: gssapi.Credentials) -> dict:
    """
    Return a JSON-serializable summary of `cred`: its name, name type
    (plus the name type OID in dotted form), and remaining lifetime.
    Raises TypeError if `cred` is not a gssapi.Credentials instance.
    """
    if not isinstance(cred, gssapi.Credentials):
        raise TypeError(f'{type(cred)}: not gssapi.Credentials type')
    match cred.name.name_type:
        case gssapi.NameType.user:
            name_type_str = 'USER'
        case gssapi.NameType.kerberos_principal:
            name_type_str = 'KERBEROS_PRINCIPAL'
        case _:
            # We only expect to have USER and KERBEROS principals
            # we'll dump the OID
            name_type_str = f'UNEXPECTED NAME TYPE: {cred.name.name_type}'
    return {
        'name': str(cred.name),
        'name_type': name_type_str,
        'name_type_oid': cred.name.name_type.dotted_form,
        'lifetime': cred.lifetime,
    }
def kerberos_ticket(fn):
    """
    Decorator to raise a CallError if no ccache exists or if the ticket in
    the system ccache is expired. The check runs on every invocation of the
    wrapped callable.
    """
    @functools.wraps(fn)  # preserve the wrapped function's name/docstring
    def check_ticket(*args, **kwargs):
        # raises CallError when the system ccache is missing or expired
        gss_get_current_cred(krb5ccache.SYSTEM.value)
        return fn(*args, **kwargs)
    return check_ticket
def parse_keytab(keytab_output: list) -> list:
    """
    Internal parser for output of `klist -ket` for a kerberos keytab.

    Each input line has the form:
        "<kvno> <date> <time> <principal> (<etype>)"
    where deprecated encryption types render as "(DEPRECATED:<etype>)".

    Returns a list of dicts conforming to KTUTIL_LIST_ENTRY_SCHEMA.
    """
    keytab_entries = []
    for idx, line in enumerate(keytab_output):
        fields = line.split()
        raw_etype = fields[4][1:-1]  # strip the surrounding parentheses
        keytab_entries.append({
            'slot': idx + 1,
            'kvno': int(fields[0]),
            'principal': fields[3],
            # Use removeprefix rather than str.strip('DEPRECATED:'):
            # strip() removes a *character set* from both ends and could
            # eat leading/trailing characters of an etype name that happen
            # to occur in "DEPRECATED:".
            'etype': raw_etype.removeprefix('DEPRECATED:'),
            'etype_deprecated': raw_etype.startswith('DEPRECATED'),
            'date': int(time.mktime(time.strptime(fields[1], '%m/%d/%y'))),
        })
    return keytab_entries
def ktutil_list_impl(keytab_file: str) -> list:
    """
    Thin wrapper around `klist -ket` that returns keytab entries as a list.

    `keytab_file` - path to kerberos keytab
    """
    proc = subprocess.run(
        ['klist', '-ket', keytab_file],
        capture_output=True
    )
    lines = proc.stdout.decode().splitlines()
    # The first three lines are the column header; fewer than four lines
    # means the keytab holds no entries.
    if len(lines) < 4:
        return []

    return parse_keytab(lines[3:])
def keytab_services(keytab_file: str) -> list:
    """
    Return list of service names provided by keytab.

    `keytab_file` - path to kerberos keytab
    """
    # Only principals of the form "service/host@REALM" identify a service,
    # hence the filter on a '/' within the principal name.
    entries = filter_list(
        ktutil_list_impl(keytab_file),
        [['principal', 'rin', '/']]
    )
    return [entry['principal'].split('/')[0] for entry in entries]
def extract_from_keytab(
    keytab_file: str,
    filters: list
) -> bytes:
    """
    Extract keytab entries matching filter and return as bytes.

    `keytab_file` - path to kerberos keytab

    `filters` - query-filters applied against parsed keytab entries

    Returns None when the filters would remove every entry (the resulting
    keytab would contain only the file header).
    """
    kt_list = ktutil_list_impl(keytab_file)
    to_keep = filter_list(kt_list, filters)
    to_remove = [entry['slot'] for entry in kt_list if entry not in to_keep]

    if len(kt_list) == len(to_remove):
        # Let caller know that keytab would be empty. If we were to follow
        # through with this, caller would receive keytab containing only
        # `b'\x05\x02' (KRB5_KT_VNO)`
        return None

    tmp_keytab = __tmp_krb5_keytab()
    try:
        rkt = f'rkt {keytab_file}'
        wkt = f'wkt {tmp_keytab}'
        # Delete slots in descending order so earlier deletions do not
        # renumber the slots still queued for deletion.
        delents = "\n".join(f'delent {slot}' for slot in reversed(to_remove))
        ktutil_op = subprocess.run(
            ['ktutil'],
            input=f'{rkt}\n{delents}\n{wkt}\n'.encode(),
            check=False, capture_output=True
        )
        # ktutil does not set returncode for malformed
        # commands or commands that otherwise fail
        if ktutil_op.returncode or ktutil_op.stderr:
            raise RuntimeError(ktutil_op.stderr.decode())

        if len(ktutil_list_impl(tmp_keytab)) != len(to_keep):
            raise RuntimeError('Temporary keytab did not contain correct number of entries')

        with open(tmp_keytab, 'rb') as f:
            kt_bytes = f.read()
    finally:
        # Previously the temporary keytab leaked when the entry-count check
        # above raised; always clean it up.
        os.remove(tmp_keytab)

    return kt_bytes
| 12,912 | Python | .py | 355 | 28.388732 | 89 | 0.601491 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,277 | ad_constants.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/ad_constants.py | import enum
# Maximum tolerated offset from the domain controller's clock
# (presumably seconds -- TODO confirm against the consumer of this value).
MAX_SERVER_TIME_OFFSET = 180
# Retry budget used when waiting for kerberos to start.
MAX_KERBEROS_START_TRIES = 60
# Name of the default Active Directory site.
DEFAULT_SITE_NAME = 'Default-First-Site-Name'
# Identifier under which the AD machine-account keytab is stored.
MACHINE_ACCOUNT_KT_NAME = 'AD_MACHINE_ACCOUNT'
class ADUserAccountControl(enum.IntFlag):
    """
    see MS-ADTS section 2.2.16

    This is currently used to parse machine account UAC flags
    In the future it will also be used to parse user accounts
    when authorizing user-based AD api keys.
    """
    ACCOUNTDISABLE = 0x0002  # the user account is disabled
    HOMEDIR_REQUIRED = 0x0008  # home folder is required
    # BUG FIX: LOCKOUT was previously 0x0008 (colliding with
    # HOMEDIR_REQUIRED, which made it a silent IntFlag alias that never
    # appeared in parse_flags output) and PASSWD_NOTREQD was 0x0010.
    # Per MS-ADTS / MS-SAMR the correct values are 0x0010 and 0x0020.
    LOCKOUT = 0x0010  # account is temporarily locked out
    PASSWD_NOTREQD = 0x0020  # password-length policy does not apply to user
    PASSWD_CANT_CHANGE = 0x0040  # user can't change password
    ENCRYPTED_TEXT_PWD_ALLOWED = 0x0080  # cleartext password is to be persisted
    TEMP_DUPLICATE_ACCOUNT = 0x0100  # account for users whose primary account is in another domain
    NORMAL_ACCOUNT = 0x0200  # default account type that represents a typical user
    INTERDOMAIN_TRUST_ACCOUNT = 0x0800  # permit to trust account for a system domain that trusts other domains
    WORKSTATION_TRUST_ACCOUNT = 0x1000  # computer account (domain member)
    SERVER_TRUST_ACCOUNT = 0x2000  # domain controller computer account
    DONT_EXPIRE_PASSWORD = 0x10000  # password should never expire
    SMARTCARD_REQUIRED = 0x40000  # user must logon by smartcard
    TRUSTED_FOR_DELEGATION = 0x80000  # used by kerberos protocol
    NOT_DELEGATED = 0x100000  # security context of user isn't delegated to a service
    USE_DES_KEY_ONLY = 0x200000  # used by kerberos protocol
    DONT_REQ_PREAUTH = 0x400000  # used by kerberos protocol
    PASSWORD_EXPIRED = 0x800000  # user password has expired
    TRUSTED_TO_AUTH_FOR_DELEGATION = 0x1000000  # used by kerberos protocol
    NO_AUTH_DATA_REQUIRED = 0x2000000  # used by kerberos protocol
    PARTIAL_SECRETS_ACCOUNT = 0x4000000  # account is a read-only domain controller computer account

    @classmethod
    def parse_flags(cls, flags_in: int) -> list:
        """Convert a raw userAccountControl value into a list of flag names."""
        flags_list = []
        for flag in cls:
            if flags_in & int(flag):
                flags_list.append(flag.name)

        return flags_list
class ADEncryptionTypes(enum.IntFlag):
    """
    See MS-KILE section 2.2.7

    This is a decoder ring for the msDS-SupportedEncryptionTypes attribute
    for the AD computer account.
    """
    DES_CBC_CRC = 0x01
    DES_CBC_MD5 = 0x02
    ARCFOUR_HMAC = 0x04
    AES128_CTS_HMAC_SHA1_96 = 0x08
    AES256_CTS_HMAC_SHA1_96 = 0x10
    AES256_CTS_HMAC_SHA1_96_SK = 0x20  # enforce AES session keys when legacy ciphers in use.

    @classmethod
    def parse_flags(cls, flags_in: int) -> list:
        """Convert a raw attribute value into a list of flag names."""
        if flags_in == 0:
            # It is technically possible for sysadmin to edit the supported
            # enctypes for our computer object and set SupportedEncTypes to `0`.
            # This is undefined, but according to some MS documentation defaults
            # to RC4_HMAC. This behavior has been observed in user bug ticket.
            return [cls.ARCFOUR_HMAC.name]

        return [flag.name for flag in cls if flags_in & int(flag)]
| 3,244 | Python | .py | 65 | 43.461538 | 111 | 0.705845 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,278 | krb5_constants.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/krb5_constants.py | import enum
from middlewared.utils import MIDDLEWARE_RUN_DIR
# Interval between periodic ticket checks (presumably seconds -- TODO
# confirm against the scheduler that consumes this value).
KRB_TKT_CHECK_INTERVAL = 1800
# Prefix for kernel-keyring-backed persistent credential caches.
PERSISTENT_KEYRING_PREFIX = 'KEYRING:persistent:'
class KRB_Keytab(enum.Enum):
    """Paths of kerberos keytabs used by the middleware."""
    SYSTEM = '/etc/krb5.keytab'
class krb5ccache(enum.Enum):
    """Kerberos credential cache identifiers used by the middleware."""
    SYSTEM = f'{PERSISTENT_KEYRING_PREFIX}0'  # persistent keyring ccache for uid 0
    TEMP = f'{MIDDLEWARE_RUN_DIR}/krb5cc_middleware_temp'  # scratch file-based ccache
    USER = PERSISTENT_KEYRING_PREFIX  # middleware appends UID number to this
class krb_tkt_flag(enum.Enum):
    """Single-character ticket flags as printed by `klist -f`, mapped to descriptive names."""
    FORWARDABLE = 'F'
    FORWARDED = 'f'
    PROXIABLE = 'P'
    PROXY = 'p'
    POSTDATEABLE = 'D'
    POSTDATED = 'd'
    RENEWABLE = 'R'
    INITIAL = 'I'
    INVALID = 'i'
    HARDWARE_AUTHENTICATED = 'H'
    PREAUTHENTICATED = 'A'
    TRANSIT_POLICY_CHECKED = 'T'
    OKAY_AS_DELEGATE = 'O'
    ANONYMOUS = 'a'
class KRB_AppDefaults(enum.Enum):
    """
    krb5.conf [appdefaults] parameters expressed as
    (parameter name, value type) tuples.
    """
    FORWARDABLE = ('forwardable', 'boolean')
    PROXIABLE = ('proxiable', 'boolean')
    NO_ADDRESSES = ('no-addresses', 'boolean')
    TICKET_LIFETIME = ('ticket_lifetime', 'time')
    RENEW_LIFETIME = ('renew_lifetime', 'time')
    ENCRYPT = ('encrypt', 'boolean')
    FORWARD = ('forward', 'boolean')

    def __str__(self):
        param_name, _value_type = self.value
        return param_name

    def parm(self):
        # Historical alias for str(); kept for API compatibility.
        return str(self)
class KRB_LibDefaults(enum.Enum):
    """
    krb5.conf [libdefaults] parameters expressed as
    (parameter name, value type) tuples.
    """
    DEFAULT_REALM = ('default_realm', 'realm')
    CANONICALIZE = ('canonicalize', 'boolean')
    CLOCKSKEW = ('clockskew', 'time')
    DEFAULT_CCACHE_NAME = ('default_ccache_name', 'ccname')
    DEFAULT_TGS_ENCTYPES = ('default_tgs_enctypes', 'etypes')
    DEFAULT_TKT_ENCTYPES = ('default_tkt_enctypes', 'etypes')
    DNS_CANONICALIZE_HOSTNAME = ('dns_canonicalize_hostname', 'string')
    DNS_LOOKUP_KDC = ('dns_lookup_kdc', 'boolean')
    DNS_LOOKUP_REALM = ('dns_lookup_realm', 'boolean')
    DNS_URI_LOOKUP = ('dns_uri_lookup', 'boolean')
    KDC_TIMESYNC = ('kdc_timesync', 'boolean')
    MAX_RETRIES = ('max_retries', 'number')
    TICKET_LIFETIME = ('ticket_lifetime', 'time')
    RENEW_LIFETIME = ('renew_lifetime', 'time')
    FORWARDABLE = ('forwardable', 'boolean')
    QUALIFY_SHORTNAME = ('qualify_shortname', 'string')
    PROXIABLE = ('proxiable', 'boolean')
    VERIFY_AP_REQ_NOFAIL = ('verify_ap_req_nofail', 'boolean')
    PERMITTED_ENCTYPES = ('permitted_enctypes', 'etypes')
    NOADDRESSES = ('noaddresses', 'boolean')  # typo fix: value type was 'boolan'
    EXTRA_ADDRESSES = ('extra_addresses', 'address')
    RDNS = ('rdns', 'boolean')
    UDP_PREFERENCE_LIMIT = ('udp_preference_limit', 'number')

    def __str__(self):
        return self.value[0]

    def parm(self):
        return self.value[0]
class KRB_RealmProperty(enum.Enum):
    """Per-realm krb5.conf [realms] properties as (name, value type) tuples."""
    ADMIN_SERVER = ('admin_server', 'string')
    KDC = ('kdc', 'string')
    KPASSWD_SERVER = ('kpasswd_server', 'string')
    PRIMARY_KDC = ('primary_kdc', 'string')
class KRB_ETYPE(enum.Enum):
    """Kerberos encryption type names as they appear in krb5 configuration and klist output."""
    DES_CBC_CRC = 'des-cbc-crc'  # weak
    DES_CBC_MD5 = 'des-cbc-md5'  # weak
    DES3_CBC_SHA1 = 'des3-cbc-sha1'  # deprecated
    ARCFOUR_HMAC = 'arcfour-hmac'  # weak
    ARCFOUR_HMAC_MD5 = 'arcfour-hmac-md5'  # deprecated
    AES128_CTS_HMAC_SHA1_96 = 'aes128-cts-hmac-sha1-96'
    AES256_CTS_HMAC_SHA1_96 = 'aes256-cts-hmac-sha1-96'
    # NOTE(review): this member name says AES256 but its value is the
    # aes128-cts-hmac-sha256-128 etype (RFC 8009). Renaming the member
    # would break external references, so the mismatch is only flagged here.
    AES256_CTS_HMAC_SHA256_128 = 'aes128-cts-hmac-sha256-128'
    AES256_CTS_HMAC_SHA384_192 = 'aes256-cts-hmac-sha384-192'
    CAMELLIA128_CTS_CMAC = 'camellia128-cts-cmac'
    CAMELLIA256_CTS_CMAC = 'camellia256-cts-cmac'
    AES = 'aes'  # Entire AES family
    CAMELLIA = 'camellia'  # Entire Camellia family
| 3,427 | Python | .py | 84 | 35.892857 | 77 | 0.66145 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,279 | ipa_constants.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/ipa_constants.py | import enum
from dataclasses import dataclass
class IpaConfigName(enum.StrEnum):
    """ Names for IPA-related entries we create in our databases """
    IPA_CACERT = 'IPA_DOMAIN_CACERT'  # CA certificate for the IPA domain
    IPA_HOST_KEYTAB = 'IPA_MACHINE_ACCOUNT'  # host (machine account) keytab
    IPA_SMB_KEYTAB = 'IPA_SMB_KEYTAB'  # SMB service keytab
    IPA_NFS_KEYTAB = 'IPA_NFS_KEYTAB'  # NFS service keytab
class IPAPath(enum.Enum):
    """ IPA related paths and their permissions """
    IPADIR = ('/etc/ipa', 0o755)
    DEFAULTCONF = ('/etc/ipa/default.conf', 0o644)
    CACERT = ('/etc/ipa/ca.crt', 0o644)

    @property
    def path(self):
        """Absolute filesystem path for this IPA file or directory."""
        file_path, _mode = self.value
        return file_path

    @property
    def perm(self):
        """Octal permission bits applied when the path is created."""
        _file_path, mode = self.value
        return mode
class IPACmd(enum.Enum):
    """ Scripts and commands that are relevant to an IPA domain """
    IPACTL = '/usr/local/libexec/ipa_ctl.py'  # local helper script for IPA operations
    IPA = '/bin/ipa'  # upstream freeipa command-line client
@dataclass(frozen=True)
class IPASmbDomain:
    """Immutable container for SMB domain parameters of an IPA domain."""
    netbios_name: str
    domain_sid: str
    domain_name: str
    range_id_min: int  # lower bound of the domain's ID range
    range_id_max: int  # upper bound of the domain's ID range
| 955 | Python | .py | 30 | 27.166667 | 68 | 0.67541 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,280 | health.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/health.py | import enum
from .constants import DSStatus, DSType
from threading import Lock
class KRB5HealthCheckFailReason(enum.IntEnum):
    """Reasons a kerberos health check can fail."""
    KRB5_NO_CONFIG = enum.auto()
    KRB5_CONFIG_PERM = enum.auto()
    KRB5_NO_CCACHE = enum.auto()
    KRB5_CCACHE_PERM = enum.auto()
    KRB5_NO_KEYTAB = enum.auto()
    KRB5_KEYTAB_PERM = enum.auto()
    KRB5_TKT_EXPIRED = enum.auto()


class IPAHealthCheckFailReason(enum.IntEnum):
    """Reasons an IPA health check can fail."""
    IPA_NO_CONFIG = enum.auto()
    IPA_CONFIG_PERM = enum.auto()
    IPA_NO_CACERT = enum.auto()
    IPA_CACERT_PERM = enum.auto()
    NTP_EXCESSIVE_SLEW = enum.auto()
    LDAP_BIND_FAILED = enum.auto()
    SSSD_STOPPED = enum.auto()


class ADHealthCheckFailReason(enum.IntEnum):
    """Reasons an Active Directory health check can fail."""
    AD_SECRET_ENTRY_MISSING = enum.auto()
    AD_SECRET_FILE_MISSING = enum.auto()
    AD_SECRET_INVALID = enum.auto()
    AD_KEYTAB_INVALID = enum.auto()
    AD_NETLOGON_FAILURE = enum.auto()
    AD_WBCLIENT_FAILURE = enum.auto()
    NTP_EXCESSIVE_SLEW = enum.auto()
    WINBIND_STOPPED = enum.auto()


class LDAPHealthCheckFailReason(enum.IntEnum):
    """Reasons an LDAP health check can fail."""
    LDAP_BIND_FAILED = enum.auto()
    SSSD_STOPPED = enum.auto()


class DirectoryServiceHealthError(Exception):
    """
    Base class for directory service health-check failures.

    Subclasses set `reasons` to the fail-reason enum appropriate for their
    directory service type; the constructor coerces `fail_reason` through
    it so that an invalid reason raises immediately.
    """
    reasons = None

    def __init__(self, fail_reason, errmsg):
        self.reason = self.reasons(fail_reason)
        self.errmsg = errmsg

    def __str__(self):
        return self.errmsg


class KRB5HealthError(DirectoryServiceHealthError):
    reasons = KRB5HealthCheckFailReason


class IPAHealthError(DirectoryServiceHealthError):
    reasons = IPAHealthCheckFailReason


class ADHealthError(DirectoryServiceHealthError):
    reasons = ADHealthCheckFailReason


class LDAPHealthError(DirectoryServiceHealthError):
    # BUG FIX: this was previously ADHealthCheckFailReason, which coerced
    # LDAP fail reasons through the wrong enum (silently accepting AD
    # reason values and mislabeling LDAP ones).
    reasons = LDAPHealthCheckFailReason
class DirectoryServiceHealth:
    """
    Thread-safe container for the cached health state of the configured
    directory service. A single shared instance (DSHealthObj) is created
    at module import time.
    """
    __slots__ = ('_dstype', '_status', '_status_msg', '_initialized', '_lock')

    def __init__(self):
        self._dstype = None
        self._status = None
        self._status_msg = None
        self._initialized = False
        self._lock = Lock()

    @property
    def initialized(self) -> bool:
        """Whether update() has been called at least once."""
        return self._initialized

    @property
    def dstype(self) -> DSType | None:
        return self._dstype

    @property
    def status(self) -> DSStatus | None:
        return self._status

    @property
    def status_msg(self):
        return self._status_msg

    def update(self, dstype_in, status_in, status_msg):
        """Atomically replace the cached state. None values clear the fields."""
        new_dstype = None if dstype_in is None else DSType(dstype_in)
        new_status = None if status_in is None else DSStatus(status_in)
        if not isinstance(status_msg, (str, type(None))):
            raise ValueError(f'{type(status_msg)}: status_msg must be string or None type')

        with self._lock:
            self._initialized = True
            self._dstype = new_dstype
            self._status = new_status
            self._status_msg = status_msg

    def dump(self) -> dict:
        """Return the current state as a JSON-serializable dict."""
        with self._lock:
            out = {'type': None, 'status': None, 'status_msg': self.status_msg}
            if self.dstype is not None:
                out['type'] = self.dstype.value
            if self.status is not None:
                out['status'] = self.status.name
            return out


DSHealthObj = DirectoryServiceHealth()
| 3,180 | Python | .py | 84 | 31.22619 | 91 | 0.67189 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,281 | ipactl_constants.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/ipactl_constants.py | # Shared constants between ipa_ctl script and middleware utils
import enum
class IpaOperation(enum.Enum):
    """Operations that may be requested from the ipa_ctl helper script."""
    JOIN = enum.auto()
    LEAVE = enum.auto()
    SET_NFS_PRINCIPAL = enum.auto()
    DEL_NFS_PRINCIPAL = enum.auto()
    SET_SMB_PRINCIPAL = enum.auto()
    DEL_SMB_PRINCIPAL = enum.auto()
    SMB_DOMAIN_INFO = enum.auto()
    GET_CACERT_FROM_LDAP = enum.auto()
class ExitCode(enum.IntEnum):
    """Process exit codes used by the ipa_ctl helper script to classify failures."""
    SUCCESS = 0
    GENERIC = 1  # unclassified failure
    USAGE = 2  # invalid command line usage
    KERBEROS = 3  # kerberos-related failure
    FREEIPA_CONFIG = 4  # problem with freeipa configuration
    JSON_ERROR = 5  # failed JSON encode/decode
    NO_SMB_SUPPORT = 6  # IPA domain lacks SMB support
| 533 | Python | .py | 19 | 23.631579 | 62 | 0.671906 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,282 | ad.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/ad.py | import errno
import json
import subprocess
from .ad_constants import (
ADUserAccountControl,
ADEncryptionTypes
)
from middlewared.plugins.smb_.constants import SMBCmd
from middlewared.service_exception import CallError
def _normalize_dict(dict_in) -> dict:
    """
    Normalize dictionary keys in-place to lower-case with spaces replaced
    by underscores. Nested dicts are normalized recursively.

    Returns the same (mutated) dictionary for caller convenience; the
    previous `-> None` annotation did not match this.
    """
    for key in list(dict_in.keys()):
        value = dict_in.pop(key)
        if isinstance(value, dict):
            _normalize_dict(value)

        dict_in[key.replace(' ', '_').lower()] = value

    return dict_in
def get_domain_info(domain: str) -> dict:
    """
    Use libads to query information about the specified domain.
    Returned dictionary contains following info:
    `ldap_server` IP address of current LDAP server to which TrueNAS is connected.
    `ldap_server_name` DNS name of LDAP server to which TrueNAS is connected
    `realm` Kerberos realm
    `ldap_port`
    `server_time` timestamp.
    `kdc_server` Kerberos KDC to which TrueNAS is connected
    `server_time_offset` current time offset from DC.
    `last_machine_account_password_change`. timestamp

    Raises:
        CallError(ENOENT) -- LDAP server discovery failed (likely DNS issue)
        CallError -- any other `net ads info` failure
    """
    netads = subprocess.run([
        SMBCmd.NET.value,
        '-S', domain,
        '--json',
        '--option', f'realm={domain}',
        'ads', 'info'
    ], check=False, capture_output=True)
    if netads.returncode == 0:
        data = json.loads(netads.stdout.decode())
        # keys are lower-cased / underscored in-place before returning
        return _normalize_dict(data)
    # NOTE(review): this compares against an exact error string emitted by
    # `net`; it will silently stop matching if Samba rewords the message --
    # verify on Samba upgrades.
    if (err_msg := netads.stderr.decode().strip()) == "Didn't find the ldap server!":
        raise CallError(
            'Failed to discover Active Directory Domain Controller '
            'for domain. This may indicate a DNS misconfiguration.',
            errno.ENOENT
        )
    raise CallError(err_msg)
def lookup_dc(domain_name: str) -> dict:
    """
    Run `net ads lookup` against the specified domain and return its
    JSON output with keys normalized to lower-case/underscored form.
    Raises CallError when the lookup fails.
    """
    cmd = [
        SMBCmd.NET.value,
        '-S', domain_name,
        '--json',
        '--realm', domain_name,
        'ads', 'lookup'
    ]
    lookup = subprocess.run(cmd, check=False, capture_output=True)
    if lookup.returncode != 0:
        raise CallError(
            'Failed to look up Domain Controller information: '
            f'{lookup.stderr.decode().strip()}'
        )

    return _normalize_dict(json.loads(lookup.stdout.decode()))
def get_machine_account_status(target_dc: str = None) -> dict:
    """
    Run `net ads status` (optionally against `target_dc`) and parse its
    output into a dict of selected machine-account attributes, with
    userAccountControl and msDS-SupportedEncryptionTypes decoded into
    lists of flag names.
    """
    def parse_result(data, out):
        # Attributes are emitted as "name: value"; ignore anything else.
        if ':' not in data:
            return

        key, value = data.split(':', 1)
        if key not in out:
            # This is not a line we're interested in
            return

        if type(out[key]) is list:
            # multi-valued attribute (servicePrincipalName)
            out[key].append(value.strip())
        elif out[key] == -1:
            # integer attribute still holding its -1 sentinel default
            out[key] = int(value.strip())
        else:
            # string attribute (objectSid, sAMAccountName, dNSHostName)
            out[key] = value.strip()

        return

    cmd = [SMBCmd.NET.value, '-P', 'ads', 'status']
    if target_dc:
        cmd.extend(['-S', target_dc])

    results = subprocess.run(cmd, capture_output=True)
    if results.returncode != 0:
        raise CallError(
            'Failed to retrieve machine account status: '
            f'{results.stderr.decode().strip()}'
        )

    # sentinel defaults: -1 marks not-yet-seen integer attributes,
    # None marks not-yet-seen string attributes
    output = {
        'userAccountControl': -1,
        'objectSid': None,
        'sAMAccountName': None,
        'dNSHostName': None,
        'servicePrincipalName': [],
        'msDS-SupportedEncryptionTypes': -1
    }
    for line in results.stdout.decode().splitlines():
        parse_result(line, output)

    # decode the raw integer bitmasks into lists of flag names
    output['userAccountControl'] = ADUserAccountControl.parse_flags(output['userAccountControl'])
    output['msDS-SupportedEncryptionTypes'] = ADEncryptionTypes.parse_flags(output['msDS-SupportedEncryptionTypes'])
    return output
| 3,720 | Python | .py | 103 | 28.728155 | 116 | 0.631315 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,283 | krb5_error.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/krb5_error.py | from enum import IntEnum
class KRB5ErrCode(IntEnum):
    """
    Kerberos error codes, mirroring the libkrb5 error table (protocol error
    codes 0-61 plus library-level codes 128+). KRB5Error derives one of these
    from the low byte of a GSSAPI minor status code.
    """
    KRB5KDC_ERR_NONE = 0 # No error
    KRB5KDC_ERR_NAME_EXP = 1 # Client's entry in database has expired
    KRB5KDC_ERR_SERVICE_EXP = 2 # Server's entry in database has expired
    KRB5KDC_ERR_BAD_PVNO = 3 # Requested protocol version not supported
    KRB5KDC_ERR_C_OLD_MAST_KVNO = 4 # Client's key is encrypted in an old master key
    KRB5KDC_ERR_S_OLD_MAST_KVNO = 5 # Server's key is encrypted in an old master key
    KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN = 6 # Client not found in Kerberos database
    KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN = 7 # Server not found in Kerberos database
    KRB5KDC_ERR_PRINCIPAL_NOT_UNIQUE = 8 # Principal has multiple entries in Kerberos database
    KRB5KDC_ERR_NULL_KEY = 9 # Client or server has a null key
    KRB5KDC_ERR_CANNOT_POSTDATE = 10 # Ticket is ineligible for postdating
    KRB5KDC_ERR_NEVER_VALID = 11 # Requested effective lifetime is negative or too short
    KRB5KDC_ERR_POLICY = 12 # KDC policy rejects request
    KRB5KDC_ERR_BADOPTION = 13 # KDC can't fulfill requested option
    KRB5KDC_ERR_ETYPE_NOSUPP = 14 # KDC has no support for encryption type
    KRB5KDC_ERR_SUMTYPE_NOSUPP = 15 # KDC has no support for checksum type
    KRB5KDC_ERR_PADATA_TYPE_NOSUPP = 16 # KDC has no support for padata type
    KRB5KDC_ERR_TRTYPE_NOSUPP = 17 # KDC has no support for transited type
    KRB5KDC_ERR_CLIENT_REVOKED = 18 # Clients credentials have been revoked
    KRB5KDC_ERR_SERVICE_REVOKED = 19 # Credentials for server have been revoked
    KRB5KDC_ERR_TGT_REVOKED = 20 # TGT has been revoked
    KRB5KDC_ERR_CLIENT_NOTYET = 21 # Client not yet valid - try again later
    KRB5KDC_ERR_SERVICE_NOTYET = 22 # Server not yet valid - try again later
    KRB5KDC_ERR_KEY_EXP = 23 # Password has expired
    KRB5KDC_ERR_PREAUTH_FAILED = 24 # Preauthentication failed
    KRB5KDC_ERR_PREAUTH_REQUIRED = 25 # Additional pre-authentication required
    KRB5KDC_ERR_SERVER_NOMATCH = 26 # Requested server and ticket don't match
    KRB5KRB_AP_ERR_BAD_INTEGRITY = 31 # Decrypt integrity check failed
    KRB5KRB_AP_ERR_TKT_EXPIRED = 32 # Ticket expired
    KRB5KRB_AP_ERR_TKT_NYV = 33 # Ticket not yet valid
    KRB5KRB_AP_ERR_REPEAT = 34 # Request is a replay
    KRB5KRB_AP_ERR_NOT_US = 35 # The ticket isn't for us
    KRB5KRB_AP_ERR_BADMATCH = 36 # Ticket/authenticator don't match
    KRB5KRB_AP_ERR_SKEW = 37 # Clock skew too great
    KRB5KRB_AP_ERR_BADADDR = 38 # Incorrect net address
    KRB5KRB_AP_ERR_BADVERSION = 39 # Protocol version mismatch
    KRB5KRB_AP_ERR_MSG_TYPE = 40 # Invalid message type
    KRB5KRB_AP_ERR_MODIFIED = 41 # Message stream modified
    KRB5KRB_AP_ERR_BADORDER = 42 # Message out of order
    KRB5KRB_AP_ERR_ILL_CR_TKT = 43 # Illegal cross-realm ticket
    KRB5KRB_AP_ERR_BADKEYVER = 44 # Key version is not available
    KRB5KRB_AP_ERR_NOKEY = 45 # Service key not available
    KRB5KRB_AP_ERR_MUT_FAIL = 46 # Mutual authentication failed
    KRB5KRB_AP_ERR_BADDIRECTION = 47 # Incorrect message direction
    KRB5KRB_AP_ERR_METHOD = 48 # Alternative authentication method required
    KRB5KRB_AP_ERR_BADSEQ = 49 # Incorrect sequence number in message
    KRB5KRB_AP_ERR_INAPP_CKSUM = 50 # Inappropriate type of checksum in message
    KRB5KRB_AP_PATH_NOT_ACCEPTED = 51 # Policy rejects transited path
    KRB5KRB_ERR_RESPONSE_TOO_BIG = 52 # Response too big for UDP, retry with TCP
    KRB5KRB_ERR_GENERIC = 60 # Generic error (see e-text)
    KRB5KRB_ERR_FIELD_TOOLONG = 61 # Field is too long for this implementation
    KRB5_ERR_RCSID = 128 # (RCS Id string for the krb5 error table)
    KRB5_LIBOS_BADLOCKFLAG = 129 # Invalid flag for file lock mode
    KRB5_LIBOS_CANTREADPWD = 130 # Cannot read password
    KRB5_LIBOS_BADPWDMATCH = 131 # Password mismatch
    KRB5_LIBOS_PWDINTR = 132 # Password read interrupted
    KRB5_PARSE_ILLCHAR = 133 # Illegal character in component name
    KRB5_PARSE_MALFORMED = 134 # Malformed representation of principal
    KRB5_CONFIG_CANTOPEN = 135 # Can't open/find Kerberos configuration file
    KRB5_CONFIG_BADFORMAT = 136 # Improper format of Kerberos configuration file
    KRB5_CONFIG_NOTENUFSPACE = 137 # Insufficient space to return complete information
    KRB5_BADMSGTYPE = 138 # Invalid message type specified for encoding
    KRB5_CC_BADNAME = 139 # Credential cache name malformed
    KRB5_CC_UNKNOWN_TYPE = 140 # Unknown credential cache type
    KRB5_CC_NOTFOUND = 141 # Matching credential not found
    KRB5_CC_END = 142 # End of credential cache reached
    KRB5_NO_TKT_SUPPLIED = 143 # Request did not supply a ticket
    KRB5KRB_AP_WRONG_PRINC = 144 # Wrong principal in request
    KRB5KRB_AP_ERR_TKT_INVALID = 145 # Ticket has invalid flag set
    KRB5_PRINC_NOMATCH = 146 # Requested principal and ticket don't match
    KRB5_KDCREP_MODIFIED = 147 # KDC reply did not match expectations
    KRB5_KDCREP_SKEW = 148 # Clock skew too great in KDC reply
    KRB5_IN_TKT_REALM_MISMATCH = 149 # Client/server realm mismatch in initial ticket request
    KRB5_PROG_ETYPE_NOSUPP = 150 # Program lacks support for encryption type
    KRB5_PROG_KEYTYPE_NOSUPP = 151 # Program lacks support for key type
    KRB5_WRONG_ETYPE = 152 # Requested encryption type not used in message
    KRB5_PROG_SUMTYPE_NOSUPP = 153 # Program lacks support for checksum type
    KRB5_REALM_UNKNOWN = 154 # Cannot find KDC for requested realm
    KRB5_SERVICE_UNKNOWN = 155 # Kerberos service unknown
    KRB5_KDC_UNREACH = 156 # Cannot contact any KDC for requested realm
    KRB5_NO_LOCALNAME = 157 # No local name found for principal name
    KRB5_MUTUAL_FAILED = 158 # Mutual authentication failed
    KRB5_RC_TYPE_EXISTS = 159 # Replay cache type is already registered
    KRB5_RC_MALLOC = 160 # No more memory to allocate (in replay cache code)
    KRB5_RC_TYPE_NOTFOUND = 161 # Replay cache type is unknown
    KRB5_RC_UNKNOWN = 162 # Generic unknown RC error
    KRB5_RC_REPLAY = 163 # Message is a replay
    KRB5_RC_IO = 164 # Replay cache I/O operation failed
    KRB5_RC_NOIO = 165 # Replay cache type does not support non-volatile storage
    KRB5_RC_PARSE = 166 # Replay cache name parse/format error
    KRB5_RC_IO_EOF = 167 # End-of-file on replay cache I/O
    KRB5_RC_IO_MALLOC = 168 # No more memory to allocate (in replay cache I/O code)
    KRB5_RC_IO_PERM = 169 # Permission denied in replay cache code
    KRB5_RC_IO_IO = 170 # I/O error in replay cache i/o code
    KRB5_RC_IO_UNKNOWN = 171 # Generic unknown RC/IO error
    KRB5_RC_IO_SPACE = 172 # Insufficient system space to store replay information
    KRB5_TRANS_CANTOPEN = 173 # Can't open/find realm translation file
    KRB5_TRANS_BADFORMAT = 174 # Improper format of realm translation file
    KRB5_LNAME_CANTOPEN = 175 # Can't open/find lname translation database
    KRB5_LNAME_NOTRANS = 176 # No translation available for requested principal
    KRB5_LNAME_BADFORMAT = 177 # Improper format of translation database entry
    KRB5_CRYPTO_INTERNAL = 178 # Cryptosystem internal error
    KRB5_KT_BADNAME = 179 # Key table name malformed
    KRB5_KT_UNKNOWN_TYPE = 180 # Unknown Key table type
    KRB5_KT_NOTFOUND = 181 # Key table entry not found
    KRB5_KT_END = 182 # End of key table reached
    KRB5_KT_NOWRITE = 183 # Cannot write to specified key table
    KRB5_KT_IOERR = 184 # Error writing to key table
    KRB5_NO_TKT_IN_RLM = 185 # Cannot find ticket for requested realm
    KRB5DES_BAD_KEYPAR = 186 # DES key has bad parity
    KRB5DES_WEAK_KEY = 187 # DES key is a weak key
    KRB5_BAD_ENCTYPE = 188 # Bad encryption type
    KRB5_BAD_KEYSIZE = 189 # Key size is incompatible with encryption type
    KRB5_BAD_MSIZE = 190 # Message size is incompatible with encryption type
    KRB5_CC_TYPE_EXISTS = 191 # Credentials cache type is already registered.
    KRB5_KT_TYPE_EXISTS = 192 # Key table type is already registered.
    KRB5_CC_IO = 193 # Credentials cache I/O operation failed
    KRB5_FCC_PERM = 194 # Credentials cache file permissions incorrect
    KRB5_FCC_NOFILE = 195 # No credentials cache found
    KRB5_FCC_INTERNAL = 196 # Internal credentials cache error
    KRB5_CC_WRITE = 197 # Error writing to credentials cache
    KRB5_CC_NOMEM = 198 # No more memory to allocate (in credentials cache code)
    KRB5_CC_FORMAT = 199 # Bad format in credentials cache
    KRB5_INVALID_FLAGS = 200 # Invalid KDC option combination (library internal error) [for dual tgt library calls]
    KRB5_NO_2ND_TKT = 201 # Request missing second ticket [for dual tgt library calls]
    KRB5_NOCREDS_SUPPLIED = 202 # No credentials supplied to library routine
    KRB5_SENDAUTH_BADAUTHVERS = 203 # Bad sendauth version was sent
    KRB5_SENDAUTH_BADAPPLVERS = 204 # Bad application version was sent (via sendauth)
    KRB5_SENDAUTH_BADRESPONSE = 205 # Bad response (during sendauth exchange)
    KRB5_SENDAUTH_REJECTED = 206 # Server rejected authentication (during sendauth exchange)
    KRB5_PREAUTH_BAD_TYPE = 207 # Unsupported preauthentication type
    KRB5_PREAUTH_NO_KEY = 208 # Required preauthentication key not supplied
    KRB5_PREAUTH_FAILED = 209 # Generic preauthentication failure
    KRB5_RCACHE_BADVNO = 210 # Unsupported replay cache format version number
    KRB5_CCACHE_BADVNO = 211 # Unsupported credentials cache format version number
    KRB5_KEYTAB_BADVNO = 212 # Unsupported key table format version number
    KRB5_PROG_ATYPE_NOSUPP = 213 # Program lacks support for address type
    KRB5_RC_REQUIRED = 214 # Message replay detection requires rcache parameter
    KRB5_ERR_BAD_HOSTNAME = 215 # Hostname cannot be canonicalized
    KRB5_ERR_HOST_REALM_UNKNOWN = 216 # Cannot determine realm for host
    KRB5_SNAME_UNSUPP_NAMETYPE = 217 # Conversion to service principal undefined for name type
    KRB5KRB_AP_ERR_V4_REPLY = 218 # Initial Ticket response appears to be Version 4 error
    KRB5_REALM_CANT_RESOLVE = 219 # Cannot resolve KDC for requested realm
    KRB5_TKT_NOT_FORWARDABLE = 220 # Requesting ticket can't get forwardable tickets
    KRB5_FWD_BAD_PRINCIPAL = 221 # Bad principal name while trying to forward credentials
    KRB5_GET_IN_TKT_LOOP = 222 # Looping detected inside krb5_get_in_tkt
    KRB5_CONFIG_NODEFREALM = 223 # Configuration file does not specify default realm
    KRB5_SAM_UNSUPPORTED = 224 # Bad SAM flags in obtain_sam_padata
    KRB5_KT_NAME_TOOLONG = 225 # Keytab name too long
    KRB5_KT_KVNONOTFOUND = 226 # Key version number for principal in key table is incorrect
    KRB5_APPL_EXPIRED = 227 # This application has expired
    KRB5_LIB_EXPIRED = 228 # This Krb5 library has expired
    KRB5_CHPW_PWDNULL = 229 # New password cannot be zero length
    KRB5_CHPW_FAIL = 230 # Password change failed
    KRB5_KT_FORMAT = 231 # Bad format in keytab
    KRB5_NOPERM_ETYPE = 232 # Encryption type not permitted
    KRB5_CONFIG_ETYPE_NOSUPP = 233 # No supported encryption types (config file error?)
    KRB5_OBSOLETE_FN = 234 # Program called an obsolete, deleted function
    KRB5_EAI_FAIL = 235 # unknown getaddrinfo failure
    KRB5_EAI_NODATA = 236 # no data available for host/domain name
    KRB5_EAI_NONAME = 237 # host/domain name not found
    KRB5_EAI_SERVICE = 238 # service name unknown
    KRB5_ERR_NUMERIC_REALM = 239 # Cannot determine realm for numeric host address
class KRB5Error(Exception):
    """
    Wrapper around generic GSSAPI errors that gives code dealing
    specifically with kerberos tickets explicit access to the underlying
    kerberos error code.

    Attributes:
        gss_major_code: major error code from the GSSAPI exception
        gss_minor_code: minor error code from the GSSAPI exception
        errmsg: human-readable message from the GSSAPI exception (parses the
            major and minor codes; produced by the exception's `gen_message()`
            method)
        krb5_code: KRB5ErrCode extracted from the low byte of the minor code
    """

    def __init__(self, gss_major: int, gss_minor: int, errmsg: str):
        self.gss_major_code = gss_major
        self.gss_minor_code = gss_minor
        self.errmsg = errmsg
        # The low byte of the GSSAPI minor status carries the kerberos
        # error code; map it onto our enum.
        self.krb5_code = KRB5ErrCode(gss_minor & 0xFF)

    def __str__(self):
        return '[{}] {}'.format(self.krb5_code.name, self.errmsg)
| 12,352 | Python | .py | 188 | 60.329787 | 116 | 0.726271 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,284 | krb5_conf.py | truenas_middleware/src/middlewared/middlewared/utils/directoryservices/krb5_conf.py | # This is a collection of utilities related to kerberos tickets
# and keytabs.
#
# Tests that do not require access to an actual KDC are provided
# in src/middlewared/middlewared/pytest/unit/utils/test_krb5.py
#
# Tests that require access to a KDC are provided as part of API
# test suite.
import logging
import os
from copy import deepcopy
from enum import auto, Enum
from tempfile import NamedTemporaryFile
from typing import Optional
from .krb5_constants import KRB_AppDefaults, KRB_ETYPE, KRB_LibDefaults, KRB_RealmProperty
logger = logging.getLogger(__name__)
KRB5_VALUE_BEGIN = '{'  # token opening a krb5.conf subsection (`name = {`)
KRB5_VALUE_END = '}'  # token closing a krb5.conf subsection
# Names of options permitted in each krb5.conf section (from krb5_constants)
APPDEFAULTS_SUPPORTED_OPTIONS = set(i.value[0] for i in KRB_AppDefaults)
LIBDEFAULTS_SUPPORTED_OPTIONS = set(i.value[0] for i in KRB_LibDefaults)
SUPPORTED_ETYPES = set(e.value for e in KRB_ETYPE)  # accepted kerberos enctype names
class KRB5ConfSection(Enum):
    """Sections of the krb5.conf file that this module understands."""
    LIBDEFAULTS = auto()
    REALMS = auto()
    DOMAIN_REALM = auto()
    CAPATHS = auto()
    APPDEFAULTS = auto()
    PLUGINS = auto()
def validate_krb5_parameter(section, param, value):
    """
    Perform validation of krb5.conf parameters. If invalid parameters are
    written to the configuration file, then services that depend on kerberos
    will potentially break.

    Raises:
        ValueError: the parameter or its value is not valid for `section`.
    """
    if isinstance(value, dict):
        # Subsection: validate every nested relation individually
        for subkey, subval in value.items():
            validate_krb5_parameter(section, subkey, subval)
        return

    # currently "auxiliary parameters" are only allowed in backend for
    # libdefaults and appdefaults sections of krb5.conf
    if section is KRB5ConfSection.APPDEFAULTS:
        section_enum = KRB_AppDefaults
    elif section is KRB5ConfSection.LIBDEFAULTS:
        section_enum = KRB_LibDefaults
    else:
        raise ValueError(f'{section}: unexpected section type')

    try:
        param_enum = section_enum[param.upper()]
    except KeyError:
        raise ValueError(
            f'{param}: unsupported option for [{section.name.lower()}] section'
        ) from None

    # The second element of the enum value identifies the expected value type
    expected_type = param_enum.value[1]
    if expected_type == 'boolean':
        if value not in ('true', 'false'):
            raise ValueError(f'{value}: not a boolean value for parameter {param}')
    elif expected_type == 'string':
        if not isinstance(value, str):
            raise ValueError(f'{value}: not a string for parameter {param}')
    elif expected_type == 'etypes':
        if ',' in value:
            raise ValueError('enctypes should be space-delimited list')

        for enctype in value.split():
            if enctype.strip() not in SUPPORTED_ETYPES:
                raise ValueError(f'{enctype}: unsupported enctype specified for parameter {param}')
    elif expected_type == 'time':
        # Technically krb5.conf allows multiple time formats
        # but for simplicity we only allow seconds
        if isinstance(value, str):
            if not value.isdigit():
                raise ValueError(f'{value}: time must be expressed in seconds for parameter {param}')
        elif not isinstance(value, int):
            raise ValueError(f'{value}: time must be expressed in seconds for parameter {param}')
def parse_krb_aux_params(
    section: KRB5ConfSection,
    section_conf: dict,
    aux_params: str
):
    """
    Parse auxiliary parameters and write them to the specified `section_conf`.

    `section` - portion of krb5.conf file for which auxiliary parameters are being parsed
    `section_conf` - dictionary containing existing krb5 configuration, which will be
    updated with configuration specified in the `aux_params`
    `aux_params` - auxiliary parameters text blob to be parsed and used to update `section_conf`.

    Raises:
        ValueError: on invalid subsection nesting or a parameter that fails
            validate_krb5_parameter().
    """
    target = section_conf
    is_subsection = False

    # Parse auxiliary parameters for the specified section the krb5.conf file
    # is set up in the style of a Windows INI file. Sections are headed by
    # the section name, in square brackets. Each section may contain zero
    # or more relations of the form
    # `foo = bar`
    # or
    # ```
    # fubar = {
    #     foo = bar
    #     baz = quux
    # }
    # ```
    for line in aux_params.splitlines():
        if not line.strip():
            continue

        entry = line.split('=')
        param = entry[0].strip()

        if entry[-1].strip() == KRB5_VALUE_BEGIN:
            # `fubar = {` line, set `fubar` as target so that we properly
            # consolidate values if our defaults are overridden
            if is_subsection:
                raise ValueError('Invalid nesting of parameters')

            section_conf[param] = {}
            target = section_conf[param]
            is_subsection = True
            continue
        elif param == KRB5_VALUE_END:
            # `}` line ending previous
            target = section_conf
            is_subsection = False
            continue
        elif len(entry) < 2:
            # Line contains no `=` at all. The previous check of
            # `len(entry) < 1` was dead code (str.split always returns at
            # least one element), so such lines crashed with IndexError below.
            # Preserve the documented legacy behavior of silently skipping
            # invalid lines.
            continue

        value = entry[1].strip()
        validate_krb5_parameter(section, param, value)
        target[param] = value
class KRB5Conf():
def __init__(self):
self.libdefaults = {} # settings used by KRB5 library
self.appdefaults = {} # settings used by some KRB5 applications
self.realms = {} # realm-specific settings
def __add_parameters(self, section: str, config: dict, auxiliary_parameters: Optional[list] = None):
for param, value in config.items():
validate_krb5_parameter(section, param, value)
data = deepcopy(config)
if auxiliary_parameters:
parse_krb_aux_params(section, data, auxiliary_parameters)
match section:
case KRB5ConfSection.APPDEFAULTS:
self.appdefaults = data
case KRB5ConfSection.LIBDEFAULTS:
self.libdefaults = data
case _:
raise ValueError(f'{section}: unexpected section type')
def add_libdefaults(
self,
config: dict,
auxiliary_parameters: Optional[list] = None
):
"""
Add configuration for the [libdefaults] section of the krb5.conf file, replacing
any existing configuration.
Parameters may be specified in two ways (non-exclusive):
`config` - a dictionary containing key-value pairs. Valid parameters are defined
in krb5_constants.KRB_LibDefaults. These may be specified either as:
`{"rdns": false}`
to apply globally or
`{"MYDOM.TEST": {"rdns": False}}` to apply only to a specific application.
`auxiliary_parameters` - text field formatted per krb5.conf guidelines with parameters
that are valid for the [appdefaults] section.
```
MYDOM.TEST = {
rdns = false
}
```
"""
self.__add_parameters(
KRB5ConfSection.LIBDEFAULTS,
config,
auxiliary_parameters
)
def add_appdefaults(
self,
config: dict,
auxiliary_parameters: Optional[str] = None
):
"""
Add configuration for the [appdefaults] section of the krb5.conf file, replacing
any existing configuration.
Parameters may be specified in two ways (non-exclusive):
`config` - a dictionary containing key-value pairs. Valid parameters are defined
in krb5_constants.KRB_AppDefaults. These may be specified either as:
`{"forwardable": True}`
to apply globally or
`{"pam": {"forwardable": True}}` to apply only to a specific application.
`auxiliary_parameters` - text field formatted per krb5.conf guidelines with parameters
that are valid for the [appdefaults] section.
```
pam = {
forwardable = true
}
```
"""
self.__add_parameters(
KRB5ConfSection.APPDEFAULTS,
config,
auxiliary_parameters
)
def __parse_realm(self, realm_info: dict) -> dict:
if 'realm' not in realm_info:
raise ValueError('Realm information does not specify realm')
for prop in (
KRB_RealmProperty.ADMIN_SERVER.value[0],
KRB_RealmProperty.KDC.value[0],
KRB_RealmProperty.KPASSWD_SERVER.value[0]
):
if prop in realm_info and not isinstance(realm_info[prop], list):
raise ValueError(f'{prop}: property must be list')
return {realm_info['realm']: {
'realm': realm_info['realm'],
'admin_server': realm_info[KRB_RealmProperty.ADMIN_SERVER.value[0]].copy(),
'kdc': realm_info[KRB_RealmProperty.KDC.value[0]].copy(),
'kpasswd_server': realm_info[KRB_RealmProperty.KPASSWD_SERVER.value[0]].copy(),
}}
def add_realms(self, realms: list) -> None:
"""
Add configuration for [realms] section of krb5.conf file
Realms are specified as a list of dictionaries containing the following keys
`realm` - name of realm
`admin_server` - list of hosts where administration server is running. Typically
this is the primary kerberos server.
`kdc` - list of kerberos domain controllers
`kpasswd_server` list of servers where all password changes are performed
NOTE: if admin_server, kdc, or kpasswd_server are unspecified, then they will be
resolved through DNS.
"""
clean_realms = {}
for realm in realms:
clean_realms.update(self.__parse_realm(realm))
self.realms = clean_realms
def __dump_a_parameter(self, parm: str, value):
if isinstance(value, dict):
out = f'\t{parm} = {KRB5_VALUE_BEGIN}\n'
for k, v in value.items():
if (val := self.__dump_a_parameter(k, v)) is None:
continue
out += f'\t{val}'
out += f'\t{KRB5_VALUE_END}\n'
return out
elif isinstance(value, list):
if len(value) == 0:
return None
match parm:
case 'kdc' | 'admin_server' | 'kpasswd_server':
# some krb5.conf parameters may be specified multiple times
# (MIT kerberos). Heimdal requires these to be placed on
# single line.
out = ''
for srv in value:
out += f'\t{parm} = {srv}\n'
return out
case _:
# most parameters take a space-delimited list
return f'\t{parm} = {" ".join(value)}\n'
else:
return f'\t{parm} = {value}\n'
def __generate_libdefaults(self):
kconf = "[libdefaults]\n"
for parm, value in self.libdefaults.items():
kconf += self.__dump_a_parameter(parm, value)
return kconf + '\n'
def __generate_appdefaults(self):
kconf = "[appdefaults]\n"
for parm, value in self.appdefaults.items():
kconf += self.__dump_a_parameter(parm, value)
return kconf + '\n'
def __generate_realms(self):
kconf = '[realms]\n'
for realm in list(self.realms.keys()):
this_realm = self.realms[realm].copy()
this_realm.pop('realm')
kconf += self.__dump_a_parameter(
realm, {'default_domain': realm} | this_realm
)
return kconf + '\n'
def __generate_domain_realms(self):
kconf = '[domain_realms]\n'
for realm in self.realms.keys():
kconf += f'\t{realm.lower()} = {realm}\n'
kconf += f'\t.{realm.lower()} = {realm}\n'
kconf += f'\t{realm.upper()} = {realm}\n'
kconf += f'\t.{realm.upper()} = {realm}\n'
return kconf + '\n'
def generate(self):
"""
Generate krb5.conf file and return as string
"""
kconf = self.__generate_libdefaults()
kconf += self.__generate_appdefaults()
kconf += self.__generate_realms()
kconf += self.__generate_domain_realms()
return kconf
def write(self, path: Optional[str] = '/etc/krb5.conf'):
"""
Write the stored krb5.conf file to the specified `path`
"""
config = self.generate()
with NamedTemporaryFile(delete=False, dir=os.path.dirname(path)) as f:
f.write(config.encode())
f.flush()
os.fchmod(f.fileno(), 0o644)
os.rename(f.name, path)
| 12,608 | Python | .py | 302 | 31.980132 | 105 | 0.603025 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
async def migrate(middleware):
    """Stamp the default vendor 'TrueNAS' on iSCSI extents that have none."""
    unbranded = await middleware.call(
        'iscsi.extent.query', [['vendor', '=', None]], {'select': ['id', 'vendor']}
    )
    for extent in unbranded:
        await middleware.call(
            'datastore.update',
            'services.iscsitargetextent',
            extent['id'],
            {'iscsi_target_extent_vendor': 'TrueNAS'},
        )

    if unbranded:
        # only poke the service when something actually changed
        await middleware.call('service.reload', 'iscsitarget')
| 478 | Python | .py | 14 | 25.071429 | 83 | 0.557235 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,286 | 0002_shell_linux.py | truenas_middleware/src/middlewared/middlewared/migration/0002_shell_linux.py | import os
nologin = "/usr/sbin/nologin"  # fallback shell assigned to non-root users with an invalid shell
def is_valid_shell(shell):
    """Return True when *shell* names an existing, executable path."""
    if not os.path.exists(shell):
        return False
    return os.access(shell, os.X_OK)
def migrate(middleware):
    """Repair user login shells that became invalid after the platform move."""
    any_updated = False
    users = middleware.call_sync("datastore.query", "account.bsdusers", [], {"prefix": "bsdusr_"})
    for user in users:
        current_shell = user["shell"]
        if not current_shell or is_valid_shell(current_shell):
            continue

        # First try the obvious FreeBSD -> Linux path translation
        replacement = current_shell.replace("/usr/local/", "/usr/")
        if not is_valid_shell(replacement):
            # fall back to zsh for root, nologin for everyone else
            replacement = "/usr/bin/zsh" if user["username"] == "root" else nologin

        middleware.logger.info("Updating user %r shell from %r to %r", user["username"], current_shell, replacement)
        middleware.call_sync(
            "datastore.update", "account.bsdusers", user["id"], {"shell": replacement}, {"prefix": "bsdusr_"}
        )
        any_updated = True

    if any_updated:
        middleware.call_sync("service.reload", "user")
| 976 | Python | .py | 22 | 35.727273 | 114 | 0.593023 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
async def migrate(middleware):
    # Earlier middleware ran with openssl security level 1, but the move to
    # debian raised the default to 2, which enforces stricter standards. A UI
    # certificate configured under the old level may no longer validate,
    # making the UI inaccessible — fall back to a fresh self-signed cert.
    ui_cert = (await middleware.call('system.general.config'))['ui_certificate']
    cert_invalid = not ui_cert or await middleware.call(
        'certificate.cert_services_validation', ui_cert['id'], 'certificate', False
    )
    if cert_invalid:
        await middleware.call('certificate.setup_self_signed_cert_for_ui')
        await middleware.call('service.restart', 'http')
| 772 | Python | .py | 11 | 64.454545 | 114 | 0.749014 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
async def migrate(middleware):
    """Reset the configured keyboard map to 'us' when it is no longer a valid choice."""
    config = await middleware.call('system.general.config')
    valid_keymaps = await middleware.call('system.general.kbdmap_choices')
    if config['kbdmap'] in valid_keymaps:
        return

    await middleware.call(
        'datastore.update',
        'system.settings',
        config['id'],
        {'stg_kbdmap': 'us'},
    )
    await middleware.call('system.general.set_kbdlayout')
| 404 | Python | .py | 10 | 31.4 | 86 | 0.619289 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,289 | 0009_system_global_id.py | truenas_middleware/src/middlewared/middlewared/migration/0009_system_global_id.py | import uuid
async def migrate(middleware):
    """Seed the system.globalid table with a freshly generated UUID."""
    new_system_uuid = str(uuid.uuid4())
    await middleware.call(
        'datastore.insert',
        'system.globalid',
        {'system_uuid': new_system_uuid},
    )
| 190 | Python | .py | 8 | 17 | 44 | 0.583333 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,290 | 0010_nvram_attr_vms.py | truenas_middleware/src/middlewared/middlewared/migration/0010_nvram_attr_vms.py | import os
import shutil
from middlewared.plugins.vm.utils import SYSTEM_NVRAM_FOLDER_PATH, get_vm_nvram_file_name
DEFAULT_NVRAM_FOLDER_PATH = '/var/lib/libvirt/qemu/nvram'  # stock libvirt nvram location we migrate from
# uid/gid applied to migrated nvram files — presumably the libvirt-qemu
# account; TODO confirm against the base system's passwd/group database
LIBVIRT_QEMU_UID = 64055
LIBVIRT_QEMU_GID = 64055
def migrate(middleware):
    """Move UEFI VM nvram files from libvirt's default path to the system dataset."""
    os.makedirs(SYSTEM_NVRAM_FOLDER_PATH, exist_ok=True)
    os.chown(SYSTEM_NVRAM_FOLDER_PATH, LIBVIRT_QEMU_UID, LIBVIRT_QEMU_GID)

    if middleware.call_sync('system.is_ha_capable'):
        middleware.logger.debug('Skipping nvram migration as system is HA capable')
        return

    uefi_vms = middleware.call_sync('vm.query', [['bootloader', '=', 'UEFI']])
    for vm in uefi_vms:
        # best-effort per VM: one failure must not block the others
        try:
            migrate_vm_nvram_file(middleware, vm)
        except Exception:
            middleware.logger.error('Failed to migrate nvram file for VM %r(%r)', vm['name'], vm['id'], exc_info=True)
def migrate_vm_nvram_file(middleware, vm):
    """Copy one VM's nvram file from the default libvirt path to the system location."""
    file_name = get_vm_nvram_file_name(vm)
    destination = os.path.join(SYSTEM_NVRAM_FOLDER_PATH, file_name)
    source = os.path.join(DEFAULT_NVRAM_FOLDER_PATH, file_name)
    if not os.path.exists(source):
        # File does not exist for us to copy, so we need to just log it
        middleware.logger.debug(
            'No nvram file found for VM %r(%r), hence setting it up with %r', vm['name'], vm['id'], destination
        )
        return

    shutil.copy2(source, destination)
    os.chown(destination, LIBVIRT_QEMU_UID, LIBVIRT_QEMU_GID)
| 1,427 | Python | .py | 29 | 42.931034 | 118 | 0.683225 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
async def migrate(middleware):
    """Drop obsolete FreeBSD-only tunables (types RC and LOADER) from the database."""
    legacy_tunables = await middleware.call('tunable.query', [['type', 'in', ['RC', 'LOADER']]])
    await middleware.call(
        'datastore.delete',
        'system.tunable',
        [['id', 'in', [tunable['id'] for tunable in legacy_tunables]]],
    )
| 250 | Python | .py | 7 | 28.428571 | 119 | 0.534979 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
async def migrate(middleware):
    """Rename default NTP servers from the freebsd pool to the debian pool."""
    freebsd_pool = [f'{i}.freebsd.pool.ntp.org' for i in range(3)]
    servers = await middleware.call(
        'datastore.query', 'system.ntpserver', [['ntp_address', 'in', freebsd_pool]]
    )
    for server in servers:
        await middleware.call(
            'datastore.update',
            'system.ntpserver',
            server['id'],
            {'ntp_address': server['ntp_address'].replace('freebsd', 'debian')},
        )

    if servers:
        await middleware.call('service.restart', 'ntpd')
| 524 | Python | .py | 14 | 28.357143 | 119 | 0.569745 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,293 | 0011_catalog_community_train.py | truenas_middleware/src/middlewared/middlewared/migration/0011_catalog_community_train.py | from middlewared.plugins.catalog.utils import COMMUNITY_TRAIN, OFFICIAL_LABEL
async def migrate(middleware):
    """Prepend the community train to the official catalog's preferred trains."""
    config = await middleware.call('catalog.config')
    if COMMUNITY_TRAIN not in config['preferred_trains']:
        updated_trains = [COMMUNITY_TRAIN] + config['preferred_trains']
        await middleware.call(
            'datastore.update', 'services.catalog', OFFICIAL_LABEL, {
                'preferred_trains': updated_trains,
            },
        )

    # always reconcile the enterprise train selection afterwards
    await middleware.call('catalog.update_train_for_enterprise')
| 497 | Python | .py | 10 | 41.7 | 83 | 0.68866 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,294 | 0001_cloudsync_sftp_private_key_file.py | truenas_middleware/src/middlewared/middlewared/migration/0001_cloudsync_sftp_private_key_file.py | # -*- coding=utf-8 -*-
import os
def migrate(middleware):
    """
    Move SFTP cloud credentials that reference an on-disk private key file
    into the keychain: reuse an existing SSH_KEY_PAIR with the same key when
    one exists, otherwise create one, then point the credential at it.
    Failures are logged per-credential and do not abort the migration.
    """
    for credential in middleware.call_sync("cloudsync.credentials.query", [["provider", "=", "SFTP"]]):
        # only migrate credentials whose key file actually exists on disk
        if "key_file" in credential["attributes"] and os.path.exists(credential["attributes"]["key_file"]):
            middleware.logger.info("Migrating SFTP cloud credential %d to keychain", credential['id'])
            try:
                with open(credential["attributes"]["key_file"]) as f:
                    private_key = f.read()

                # for/else: reuse the first keychain pair with a matching
                # private key; create a new one only when none matched
                for keypair in middleware.call_sync("keychaincredential.query", [["type", "=", "SSH_KEY_PAIR"]]):
                    if keypair["attributes"]["private_key"].strip() == private_key.strip():
                        break
                else:
                    keypair = middleware.call_sync("keychaincredential.create", {
                        "name": credential["name"],
                        "type": "SSH_KEY_PAIR",
                        "attributes": {
                            "private_key": private_key,
                        }
                    })

                # swap the file reference for the keychain reference
                del credential["attributes"]["key_file"]
                credential["attributes"]["private_key"] = keypair["id"]
                middleware.call_sync("datastore.update", "system.cloudcredentials", credential["id"], {
                    "attributes": credential["attributes"],
                })
            except Exception as e:
                middleware.logger.warning("Error migrating SFTP cloud credential %d to keychain: %r",
                                          credential['id'], e)
| 1,606 | Python | .py | 28 | 39.5 | 113 | 0.514631 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,295 | rdma.py | truenas_middleware/src/middlewared/middlewared/api/v25_04_0/rdma.py | from middlewared.api.base import BaseModel, single_argument_result
from typing import Literal
__all__ = [
"RdmaLinkConfigArgs", "RdmaLinkConfigResult",
"RdmaCardConfigArgs", "RdmaCardConfigResult",
"RdmaCapableServicesArgs", "RdmaCapableServicesResult"
]
class RdmaLinkConfigArgs(BaseModel):
    """Arguments for querying RDMA link configuration."""
    all: bool = False
class RdmaLinkConfig(BaseModel):
    """Pairing of an RDMA device with its backing network interface."""
    rdma: str  # RDMA device name
    netdev: str  # associated network device name
class RdmaLinkConfigResult(BaseModel):
    """Return value: list of RDMA link configurations."""
    result: list[RdmaLinkConfig]
class RdmaCardConfigArgs(BaseModel):
    """No arguments."""
    pass
@single_argument_result
class RdmaCardConfigResult(BaseModel):
    """Return value: identifying details and links of an RDMA card."""
    serial: str
    product: str
    part_number: str
    links: list[RdmaLinkConfig]
class RdmaCapableServicesArgs(BaseModel):
    """No arguments."""
    pass
class RdmaCapableServicesResult(BaseModel):
    """Return value: services that may use RDMA (currently only NFS)."""
    result: list[Literal["NFS"]]
| 799 | Python | .py | 26 | 27 | 66 | 0.788918 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,296 | acme_protocol.py | truenas_middleware/src/middlewared/middlewared/api/v25_04_0/acme_protocol.py | from pydantic import Field, Secret
from middlewared.api.base import BaseModel, Excluded, excluded_field, single_argument_args, ForUpdateMetaclass
__all__ = [
'ACMERegistrationCreateArgs', 'ACMERegistrationCreateResult', 'DNSAuthenticatorUpdateArgs',
'DNSAuthenticatorUpdateResult', 'DNSAuthenticatorCreateArgs', 'DNSAuthenticatorCreateResult',
'DNSAuthenticatorDeleteArgs', 'DNSAuthenticatorDeleteResult', 'DNSAuthenticatorSchemasArgs',
'DNSAuthenticatorSchemasResult', 'ACMERegistrationEntry', 'ACMEDNSAuthenticatorEntry',
]
class JWKCreate(BaseModel):
    """RSA key-generation parameters for a new ACME account JSON Web Key."""
    key_size: int = 2048
    public_exponent: int = 65537
class ACMERegistrationEntry(BaseModel):
    """An ACME account registration and the directory endpoints of its server."""
    id: int
    uri: str
    directory: str
    tos: str  # terms-of-service URI
    new_account_uri: str
    new_nonce_uri: str
    new_order_uri: str
    revoke_cert_uri: str
class ACMEDNSAuthenticatorEntry(BaseModel):
id: int
authenticator: str
attributes: Secret[dict]
name: str
class DNSAuthenticatorCreate(ACMEDNSAuthenticatorEntry):
id: Excluded = excluded_field()
class DNSAuthenticatorUpdate(DNSAuthenticatorCreate, metaclass=ForUpdateMetaclass):
authenticator: Excluded = excluded_field()
class DNSAuthenticatorAttributeSchema(BaseModel):
_name_: str
title: str
_required_: bool
class DNSAuthenticatorSchemaEntry(BaseModel):
key: str
schema_: list[DNSAuthenticatorAttributeSchema] = Field(..., alias='schema')
################### Arguments ###################
@single_argument_args('acme_registration_create')
class ACMERegistrationCreateArgs(BaseModel):
tos: bool = False
jwk_create: JWKCreate = Field(default=JWKCreate())
acme_directory_uri: str
class DNSAuthenticatorCreateArgs(BaseModel):
dns_authenticator_create: DNSAuthenticatorCreate
class DNSAuthenticatorUpdateArgs(BaseModel):
id: int
dns_authenticator_update: DNSAuthenticatorUpdate
class DNSAuthenticatorDeleteArgs(BaseModel):
id: int
class DNSAuthenticatorSchemasArgs(BaseModel):
pass
################### Returns ###################
class ACMERegistrationCreateResult(BaseModel):
result: ACMERegistrationEntry
class DNSAuthenticatorCreateResult(BaseModel):
result: ACMEDNSAuthenticatorEntry
class DNSAuthenticatorUpdateResult(BaseModel):
result: ACMEDNSAuthenticatorEntry
class DNSAuthenticatorDeleteResult(BaseModel):
result: bool
class DNSAuthenticatorSchemasResult(BaseModel):
result: list[DNSAuthenticatorSchemaEntry]
| 2,484 | Python | .py | 62 | 35.951613 | 110 | 0.777404 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,297 | user.py | truenas_middleware/src/middlewared/middlewared/api/v25_04_0/user.py | from typing import Literal
from annotated_types import Ge, Le
from pydantic import EmailStr, Field, Secret
from typing_extensions import Annotated
from middlewared.api.base import (BaseModel, Excluded, excluded_field, ForUpdateMetaclass, LocalUsername, RemoteUsername,
LocalUID, LongString, NonEmptyString, single_argument_args, single_argument_result)
__all__ = ["UserEntry",
"UserCreateArgs", "UserCreateResult",
"UserUpdateArgs", "UserUpdateResult",
"UserDeleteArgs", "UserDeleteResult",
"UserShellChoicesArgs", "UserShellChoicesResult",
"UserGetUserObjArgs", "UserGetUserObjResult",
"UserGetNextUidArgs", "UserGetNextUidResult",
"UserHasLocalAdministratorSetUpArgs", "UserHasLocalAdministratorSetUpResult",
"UserSetupLocalAdministratorArgs", "UserSetupLocalAdministratorResult",
"UserSetPasswordArgs", "UserSetPasswordResult",
"UserProvisioningUriArgs", "UserProvisioningUriResult",
"UserTwofactorConfigArgs", "UserTwofactorConfigResult",
"UserVerifyTwofactorTokenArgs", "UserVerifyTwofactorTokenResult",
"UserUnset2faSecretArgs", "UserUnset2faSecretResult",
"UserRenew2faSecretArgs", "UserRenew2faSecretResult"]
DEFAULT_HOME_PATH = "/var/empty"
class UserEntry(BaseModel):
id: int
uid: int
username: LocalUsername | RemoteUsername
unixhash: Secret[str | None]
smbhash: Secret[str | None]
home: NonEmptyString = DEFAULT_HOME_PATH
shell: NonEmptyString = "/usr/bin/zsh"
"Available choices can be retrieved with `user.shell_choices`."
full_name: str
builtin: bool
smb: bool = True
group: dict
groups: list[int] = []
"""Specifies whether the user should be allowed access to SMB shares. User will also automatically be added to
the `builtin_users` group."""
password_disabled: bool = False
ssh_password_enabled: bool = False
"Required if `password_disabled` is false."
sshpubkey: LongString | None = None
locked: bool = False
sudo_commands: list[NonEmptyString] = []
sudo_commands_nopasswd: list[NonEmptyString] = []
email: EmailStr | None = None
id_type_both: bool
local: bool
immutable: bool
twofactor_auth_configured: bool
sid: str | None
roles: list[str]
api_keys: list[int]
class UserCreate(UserEntry):
id: Excluded = excluded_field()
unixhash: Excluded = excluded_field()
smbhash: Excluded = excluded_field()
builtin: Excluded = excluded_field()
id_type_both: Excluded = excluded_field()
local: Excluded = excluded_field()
immutable: Excluded = excluded_field()
twofactor_auth_configured: Excluded = excluded_field()
sid: Excluded = excluded_field()
roles: Excluded = excluded_field()
api_keys: Excluded = excluded_field()
uid: LocalUID | None = None
"UNIX UID. If not provided, it is automatically filled with the next one available."
full_name: NonEmptyString
group_create: bool = False
group: int | None = None
"Required if `group_create` is `false`."
home_create: bool = False
home_mode: str = "700"
password: Secret[str | None] = None
class UserUpdate(UserCreate, metaclass=ForUpdateMetaclass):
uid: Excluded = excluded_field()
group_create: Excluded = excluded_field()
class UserCreateArgs(BaseModel):
user_create: UserCreate
class UserCreateResult(BaseModel):
result: int
class UserUpdateArgs(BaseModel):
id: int
user_update: UserUpdate
class UserUpdateResult(BaseModel):
result: int
class UserDeleteOptions(BaseModel):
delete_group: bool = True
"Deletes the user primary group if it is not being used by any other user."
class UserDeleteArgs(BaseModel):
id: int
options: UserDeleteOptions = Field(default=UserDeleteOptions())
class UserDeleteResult(BaseModel):
result: int
class UserShellChoicesArgs(BaseModel):
group_ids: list[int] = []
class UserShellChoicesResult(BaseModel):
result: dict = Field(examples=[
{
'/usr/bin/bash': 'bash',
'/usr/bin/rbash': 'rbash',
'/usr/bin/dash': 'dash',
'/usr/bin/sh': 'sh',
'/usr/bin/zsh': 'zsh',
'/usr/bin/tmux': 'tmux',
'/usr/sbin/nologin': 'nologin'
},
])
@single_argument_args("get_user_obj")
class UserGetUserObjArgs(BaseModel):
username: str | None = None
uid: int | None = None
get_groups: bool = False
"retrieve group list for the specified user."
sid_info: bool = False
"retrieve SID and domain information for the user."
@single_argument_result
class UserGetUserObjResult(BaseModel):
pw_name: str
"name of the user"
pw_gecos: str
"full username or comment field"
pw_dir: str
"user home directory"
pw_shell: str
"user command line interpreter"
pw_uid: int
"numerical user id of the user"
pw_gid: int
"numerical group id for the user's primary group"
grouplist: list[int] | None
"""
optional list of group ids for groups of which this account is a member. If `get_groups` is not specified,
this value will be null.
"""
sid: str | None
"optional SID value for the account that is present if `sid_info` is specified in payload."
source: Literal['LOCAL', 'ACTIVEDIRECTORY', 'LDAP']
"the source for the user account."
local: bool
"boolean value indicating whether the account is local to TrueNAS or provided by a directory service."
class UserGetNextUidArgs(BaseModel):
pass
class UserGetNextUidResult(BaseModel):
result: int
class UserHasLocalAdministratorSetUpArgs(BaseModel):
pass
class UserHasLocalAdministratorSetUpResult(BaseModel):
result: bool
class UserSetupLocalAdministratorEC2Options(BaseModel):
instance_id: NonEmptyString
class UserSetupLocalAdministratorOptions(BaseModel):
ec2: UserSetupLocalAdministratorEC2Options | None = None
class UserSetupLocalAdministratorArgs(BaseModel):
username: Literal['root', 'truenas_admin']
password: Secret[str]
options: UserSetupLocalAdministratorOptions = Field(default=UserSetupLocalAdministratorOptions())
class UserSetupLocalAdministratorResult(BaseModel):
result: None
@single_argument_args("set_password_data")
class UserSetPasswordArgs(BaseModel):
username: str
old_password: Secret[str | None] = None
new_password: Secret[NonEmptyString]
class UserSetPasswordResult(BaseModel):
result: None
class UserProvisioningUriArgs(BaseModel):
username: str
class UserProvisioningUriResult(BaseModel):
result: str
class UserTwofactorConfigArgs(BaseModel):
username: str
@single_argument_result
class UserTwofactorConfigResult(BaseModel):
provisioning_uri: str | None
secret_configured: bool
interval: int
otp_digits: int
class UserVerifyTwofactorTokenArgs(BaseModel):
username: str
token: Secret[str | None] = None
class UserVerifyTwofactorTokenResult(BaseModel):
result: bool
class UserUnset2faSecretArgs(BaseModel):
username: str
class UserUnset2faSecretResult(BaseModel):
result: None
class TwofactorOptions(BaseModel, metaclass=ForUpdateMetaclass):
otp_digits: Annotated[int, Ge(6), Le(8)]
"Represents number of allowed digits in the OTP"
interval: Annotated[int, Ge(5)]
"Time duration in seconds specifying OTP expiration time from its creation time"
class UserRenew2faSecretArgs(BaseModel):
username: str
twofactor_options: TwofactorOptions
UserRenew2faSecretResult = single_argument_result(UserEntry, "UserRenew2faSecretResult")
| 7,708 | Python | .py | 196 | 33.892857 | 121 | 0.728178 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,298 | alertservice.py | truenas_middleware/src/middlewared/middlewared/api/v25_04_0/alertservice.py | from middlewared.api.base import BaseModel, NonEmptyString
__all__ = [
'AlertServiceEntry', 'AlertServiceCreateArgs', 'AlertServiceUpdateArgs', 'AlertServiceDeleteArgs',
'AlertServiceTestArgs', 'AlertServiceCreateResult', 'AlertServiceUpdateResult', 'AlertServiceDeleteResult',
'AlertServiceTestResult',
]
class AlertServiceCreate(BaseModel):
name: NonEmptyString
type: str
attributes: dict
level: str
enabled: bool = True
class AlertServiceEntry(AlertServiceCreate):
id: int
type__title: str
########### Arguments ###########
class AlertServiceCreateArgs(BaseModel):
alert_service_create: AlertServiceCreate
class AlertServiceUpdateArgs(BaseModel):
id: int
alert_service_update: AlertServiceCreate
class AlertServiceDeleteArgs(BaseModel):
id: int
class AlertServiceTestArgs(BaseModel):
alert_service_create: AlertServiceCreate
########### Returns ###########
class AlertServiceCreateResult(BaseModel):
result: AlertServiceEntry
class AlertServiceUpdateResult(BaseModel):
result: AlertServiceEntry
class AlertServiceDeleteResult(BaseModel):
result: bool
class AlertServiceTestResult(BaseModel):
result: bool
| 1,218 | Python | .py | 34 | 31.823529 | 111 | 0.772021 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,299 | iscsi_auth.py | truenas_middleware/src/middlewared/middlewared/api/v25_04_0/iscsi_auth.py | from typing import Literal
from pydantic import Secret
from middlewared.api.base import BaseModel, Excluded, excluded_field, ForUpdateMetaclass
__all__ = [
"IscsiAuthEntry",
"IscsiAuthCreateArgs",
"IscsiAuthCreateResult",
"IscsiAuthUpdateArgs",
"IscsiAuthUpdateResult",
"IscsiAuthDeleteArgs",
"IscsiAuthDeleteResult",
]
class IscsiAuthEntry(BaseModel):
id: int
tag: int
user: str
secret: Secret[str]
peeruser: str = ''
peersecret: Secret[str] = ''
class IscsiAuthCreate(IscsiAuthEntry):
id: Excluded = excluded_field()
class IscsiAuthCreateArgs(BaseModel):
data: IscsiAuthCreate
class IscsiAuthCreateResult(BaseModel):
result: IscsiAuthEntry
class IscsiAuthUpdate(IscsiAuthCreate, metaclass=ForUpdateMetaclass):
pass
class IscsiAuthUpdateArgs(BaseModel):
id: int
data: IscsiAuthUpdate
class IscsiAuthUpdateResult(BaseModel):
result: IscsiAuthEntry
class IscsiAuthDeleteArgs(BaseModel):
id: int
class IscsiAuthDeleteResult(BaseModel):
result: Literal[True]
| 1,065 | Python | .py | 36 | 25.555556 | 88 | 0.776786 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |