id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,000 | generate_self_signed.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/generate_self_signed.py | import typing
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from .generate_utils import generate_builder, normalize_san
from .key_utils import generate_private_key
from .utils import DEFAULT_LIFETIME_DAYS
def generate_self_signed_certificate() -> typing.Tuple[str, str]:
    """Create a default self-signed RSA certificate for localhost.

    Returns a 2-tuple of PEM-encoded strings: (certificate, private key).
    """
    # Generate the RSA key pair first so the builder can embed its public half.
    private_key = generate_private_key({
        'serialize': False,
        'key_length': 2048,
        'type': 'RSA'
    })
    builder = generate_builder({
        'crypto_subject_name': {
            'country_name': 'US',
            'organization_name': 'iXsystems',
            'common_name': 'localhost',
            'email_address': 'info@ixsystems.com',
            'state_or_province_name': 'Tennessee',
            'locality_name': 'Maryville',
        },
        'lifetime': DEFAULT_LIFETIME_DAYS,
        'san': normalize_san(['localhost'])
    })
    builder = builder.public_key(private_key.public_key())
    # Mark the certificate as usable for server authentication (non-critical).
    builder = builder.add_extension(
        x509.ExtendedKeyUsage([x509.oid.ExtendedKeyUsageOID.SERVER_AUTH]), False
    )
    signed_cert = builder.sign(private_key, hashes.SHA256(), default_backend())
    cert_pem = signed_cert.public_bytes(serialization.Encoding.PEM).decode()
    key_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()
    return cert_pem, key_pem
| 1,448 | Python | .py | 40 | 28.75 | 80 | 0.649073 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,001 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/attachments.py | from middlewared.service import private, Service
class CertificateService(Service):

    class Config:
        cli_namespace = 'system.certificate'

    def __init__(self, *args, **kwargs):
        """Track the attachment delegates registered at runtime."""
        super().__init__(*args, **kwargs)
        self.delegates = []

    @private
    async def register_attachment_delegate(self, delegate):
        """Register a delegate that can report/redeploy certificate attachments."""
        self.delegates.append(delegate)

    @private
    async def in_use_attachments(self, cert_id):
        """Return every delegate whose state says it is using `cert_id`."""
        in_use = []
        for delegate in self.delegates:
            if await delegate.state(cert_id):
                in_use.append(delegate)
        return in_use

    @private
    async def get_attachments(self, cert_id):
        """Return non-empty human-readable outputs for consumers of `cert_id`."""
        outputs = [
            await delegate.consuming_cert_human_output(cert_id)
            for delegate in self.delegates
        ]
        return [output for output in outputs if output]

    @private
    async def redeploy_cert_attachments(self, cert_id):
        """Ask each in-use delegate to redeploy the certificate `cert_id`."""
        for delegate in await self.in_use_attachments(cert_id):
            await delegate.redeploy(cert_id)
| 912 | Python | .py | 20 | 38.8 | 119 | 0.702489 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,002 | crypto_key.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/crypto_key.py | from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, Str
from middlewared.service import Service
from middlewared.validators import Email
from .generate_ca import generate_certificate_authority
from .generate_certs import generate_certificate
from .generate_self_signed import generate_self_signed_certificate
from .generate_utils import normalize_san, sign_csr_with_ca
from .utils import EKU_OIDS
class CryptoKeyService(Service):
    """Private service exposing the crypto_ helper functions over middleware."""

    class Config:
        private = True

    def normalize_san(self, san_list):
        """Normalize subject-alternative-name entries via the shared helper."""
        return normalize_san(san_list)

    def generate_self_signed_certificate(self):
        """Return a (certificate, private key) PEM pair for a default localhost cert."""
        return generate_self_signed_certificate()

    @accepts(
        Dict(
            'certificate_cert_info',
            Int('key_length'),
            Int('serial', required=False, null=True),
            Int('lifetime', required=True),
            Str('ca_certificate', required=False, max_length=None),
            Str('ca_privatekey', required=False, max_length=None),
            Str('key_type', required=False),
            Str('ec_curve', required=False),
            Str('country', required=True),
            Str('state', required=True),
            Str('city', required=True),
            Str('organization', required=True),
            Str('organizational_unit'),
            Str('common', null=True),
            Str('email', validators=[Email()], required=True),
            Str('digest_algorithm', enum=['SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512']),
            List('san', items=[Str('san')], required=True, empty=False),
            Dict(
                'cert_extensions',
                Dict(
                    'BasicConstraints',
                    Bool('ca', default=False),
                    Bool('enabled', default=False),
                    Int('path_length', null=True, default=None),
                    Bool('extension_critical', default=False)
                ),
                Dict(
                    'AuthorityKeyIdentifier',
                    Bool('authority_cert_issuer', default=False),
                    Bool('enabled', default=False),
                    Bool('extension_critical', default=False)
                ),
                Dict(
                    'ExtendedKeyUsage',
                    List('usages', items=[Str('usage', enum=EKU_OIDS)]),
                    Bool('enabled', default=False),
                    Bool('extension_critical', default=False)
                ),
                Dict(
                    'KeyUsage',
                    Bool('enabled', default=False),
                    Bool('digital_signature', default=False),
                    Bool('content_commitment', default=False),
                    Bool('key_encipherment', default=False),
                    Bool('data_encipherment', default=False),
                    Bool('key_agreement', default=False),
                    Bool('key_cert_sign', default=False),
                    Bool('crl_sign', default=False),
                    Bool('encipher_only', default=False),
                    Bool('decipher_only', default=False),
                    Bool('extension_critical', default=False)
                ),
                register=True
            ),
            register=True
        )
    )
    def generate_certificate(self, data):
        """Generate a certificate from `data` (schema: ``certificate_cert_info``)."""
        return generate_certificate(data)

    @accepts(Ref('certificate_cert_info'))
    def generate_self_signed_ca(self, data):
        # Delegates to generate_certificate_authority; same schema, same result.
        return self.generate_certificate_authority(data)

    @accepts(Ref('certificate_cert_info'))
    def generate_certificate_authority(self, data):
        """Generate a certificate authority from `data`."""
        return generate_certificate_authority(data)

    @accepts(
        Dict(
            'sign_csr',
            Str('ca_certificate', required=True, max_length=None),
            Str('ca_privatekey', required=True, max_length=None),
            Str('csr', required=True, max_length=None),
            Str('csr_privatekey', required=True, max_length=None),
            Int('serial', required=True),
            Str('digest_algorithm', default='SHA256'),
            Ref('cert_extensions')
        )
    )
    def sign_csr_with_ca(self, data):
        """Sign the CSR in `data` with the supplied CA certificate/key pair."""
        return sign_csr_with_ca(data)
| 4,180 | Python | .py | 96 | 30.604167 | 91 | 0.556946 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,003 | common_validation.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/common_validation.py | import datetime
import re
from middlewared.async_validators import validate_country
from middlewared.utils.time_utils import utc_now
from .utils import RE_CERTIFICATE
async def validate_cert_name(middleware, cert_name, datastore, verrors, name):
    """Validate that `cert_name` is unique, not reserved and well formed.

    Any problem found is recorded on `verrors` under the `name` schema key.
    """
    duplicates = await middleware.call(
        'datastore.query',
        datastore,
        [('cert_name', '=', cert_name)]
    )
    if duplicates:
        verrors.add(
            name,
            'A certificate with this name already exists'
        )

    reserved_names = ("external", "self-signed", "external - signature pending")
    if cert_name in reserved_names:
        verrors.add(
            name,
            f'{cert_name} is a reserved internal keyword for Certificate Management'
        )

    # Names are limited to alphanumerics plus "_" and "-" (case-insensitive).
    if re.search(r'^[a-z0-9_\-]+$', cert_name or '', re.I) is None:
        verrors.add(
            name,
            'Use alphanumeric characters, "_" and "-".'
        )
async def _validate_common_attributes(middleware, data, verrors, schema_name):
    """Validate fields shared by certificate/CA create payloads.

    Checks country, certificate/CSR PEM blobs, private key + passphrase,
    signing CA reference, key-type requirements and lifetime, adding any
    problems to `verrors` under ``schema_name.<field>``.
    """
    country = data.get('country')
    if country:
        await validate_country(middleware, country, verrors, f'{schema_name}.country')
    certificate = data.get('certificate')
    if certificate:
        matches = RE_CERTIFICATE.findall(certificate)
        # The PEM must both look like a certificate and be loadable by the backend.
        if not matches or not await middleware.call('cryptokey.load_certificate', certificate):
            verrors.add(
                f'{schema_name}.certificate',
                'Not a valid certificate'
            )
    private_key = data.get('privatekey')
    passphrase = data.get('passphrase')
    if private_key:
        await middleware.call('cryptokey.validate_private_key', private_key, verrors, schema_name, passphrase)
    signedby = data.get('signedby')
    if signedby:
        # The signing CA must exist and have a non-empty certificate and private key.
        valid_signing_ca = await middleware.call(
            'certificateauthority.query',
            [
                ('certificate', '!=', None),
                ('privatekey', '!=', None),
                ('certificate', '!=', ''),
                ('privatekey', '!=', ''),
                ('id', '=', signedby)
            ],
        )
        if not valid_signing_ca:
            verrors.add(
                f'{schema_name}.signedby',
                'Please provide a valid signing authority'
            )
    csr = data.get('CSR')
    if csr:
        if not await middleware.call('cryptokey.load_certificate_request', csr):
            verrors.add(
                f'{schema_name}.CSR',
                'Please provide a valid CSR'
            )
    csr_id = data.get('csr_id')
    if csr_id and not await middleware.call('certificate.query', [['id', '=', csr_id], ['CSR', '!=', None]]):
        verrors.add(
            f'{schema_name}.csr_id',
            'Please provide a valid csr_id which has a valid CSR filed'
        )
    await middleware.call(
        'cryptokey.validate_certificate_with_key', certificate, private_key, schema_name, verrors, passphrase
    )
    key_type = data.get('key_type')
    if key_type:
        # EC keys need neither field; RSA-style keys require both of them.
        if key_type != 'EC':
            if not data.get('key_length'):
                verrors.add(
                    f'{schema_name}.key_length',
                    'RSA-based keys require an entry in this field.'
                )
            if not data.get('digest_algorithm'):
                verrors.add(
                    f'{schema_name}.digest_algorithm',
                    'This field is required.'
                )
    if not verrors and data.get('cert_extensions'):
        verrors.extend(
            (await middleware.call('cryptokey.validate_extensions', data['cert_extensions'], schema_name))
        )
    if lifetime := data.get('lifetime'):
        # Guard against lifetimes so large that the expiry date overflows datetime.
        try:
            utc_now() + datetime.timedelta(days=lifetime)
        except OverflowError:
            verrors.add(
                f'{schema_name}.lifetime',
                'Lifetime for the certificate is too long.'
            )
| 3,878 | Python | .py | 101 | 28.019802 | 110 | 0.565726 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,004 | dhparams.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/dhparams.py | import os
import subprocess
from middlewared.service import job, private, Service
DHPARAM_PEM_PATH = '/data/dhparam.pem'
class CertificateService(Service):

    class Config:
        cli_namespace = 'system.certificate'

    @private
    async def dhparam(self):
        """Return the path of the DH parameters PEM file."""
        return DHPARAM_PEM_PATH

    @private
    @job()
    def dhparam_setup(self, job):
        """Generate dhparam.pem if it doesn't exist, or has no data in it"""
        # 'a+' creates the file if it is missing without truncating existing
        # contents; the fd is used for fstat/fchmod while openssl writes by path.
        with open(DHPARAM_PEM_PATH, 'a+') as f:
            # Restrict permissions *before* generating parameters rather than
            # after: dhparam generation can take a long time and the freshly
            # created file would otherwise sit with default umask permissions
            # for that whole window.
            os.fchmod(f.fileno(), 0o600)
            if os.fstat(f.fileno()).st_size == 0:
                subprocess.run(
                    ['openssl', 'dhparam', '-out', DHPARAM_PEM_PATH, '-rand', '/dev/urandom', '2048'],
                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True,
                )
| 817 | Python | .py | 21 | 30.142857 | 102 | 0.607098 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,005 | extensions_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/extensions_utils.py | import functools
import inspect
import typing
from cryptography import x509
@functools.cache
def extensions() -> dict:
    """Map each supported x509 extension name to its constructor argument names.

    SubjectAlternativeName is handled natively when the user provides SAN
    entries, so it is not exposed here; subjectKeyIdentifier is likewise added
    automatically. Keeping the exposed set small makes certificate/CA creation
    simpler for end users who don't care what an extension is or does.
    """
    supported = ['BasicConstraints', 'AuthorityKeyIdentifier', 'ExtendedKeyUsage', 'KeyUsage']
    mapping = {}
    for name in supported:
        init = getattr(x509.extensions, name).__init__
        # args[1:] skips `self`, leaving the positional parameters of the class.
        mapping[name] = inspect.getfullargspec(init).args[1:]
    return mapping
def get_extension_params(
    extension: list, cert: typing.Union[x509.CertificateSigningRequestBuilder, x509.CertificateBuilder, None] = None,
    issuer: typing.Optional[x509.Certificate] = None
) -> list:
    """Return the positional arguments used to instantiate an x509 extension.

    `extension` is a (name, config) pair as produced by iterating the
    `cert_extensions` dict; `cert` is the builder under construction and
    `issuer` the signing CA certificate, when available.
    """
    params = []
    if extension[0] == 'BasicConstraints':
        params = [extension[1].get('ca'), extension[1].get('path_length')]
    elif extension[0] == 'ExtendedKeyUsage':
        usages = []
        for ext_usage in extension[1].get('usages', []):
            # Map usage names (e.g. 'SERVER_AUTH') to their OID constants.
            usages.append(getattr(x509.oid.ExtendedKeyUsageOID, ext_usage))
        params = [usages]
    elif extension[0] == 'KeyUsage':
        # Flag order must match KeyUsage.__init__'s positional parameters.
        params = [extension[1].get(k, False) for k in extensions()['KeyUsage']]
    elif extension[0] == 'AuthorityKeyIdentifier':
        # key_identifier derives from the issuer's public key when a CA signs,
        # otherwise from the builder's own key (self-signed case).
        # NOTE(review): relies on the builders' private attributes
        # `_public_key`, `_issuer_name` and `_serial_number` — verify these
        # still exist when upgrading the cryptography package.
        params = [
            x509.SubjectKeyIdentifier.from_public_key(
                issuer.public_key() if issuer else cert._public_key
            ).digest if cert or issuer else None,
            None, None
        ]
        if extension[1]['authority_cert_issuer'] and cert:
            params[1:] = [
                [x509.DirectoryName(cert._issuer_name)],
                issuer.serial_number if issuer else cert._serial_number
            ]
    return params
def add_extensions(
    cert: typing.Union[x509.CertificateSigningRequestBuilder, x509.CertificateBuilder], extensions_data: dict,
    key, issuer=None
) -> typing.Union[x509.CertificateSigningRequestBuilder, x509.CertificateBuilder]:
    """Attach the enabled extensions in `extensions_data` to the builder `cert`.

    `issuer` must be a certificate object when signing with a CA.
    """
    # Certificate builders (not CSRs) always get the public key plus a
    # SubjectKeyIdentifier derived from it.
    if not isinstance(cert, x509.CertificateSigningRequestBuilder):
        public_key = key.public_key()
        cert = cert.public_key(public_key).add_extension(
            x509.SubjectKeyIdentifier.from_public_key(public_key), False
        )

    for name, config in extensions_data.items():
        if not config['enabled']:
            continue
        extension_cls = getattr(x509.extensions, name)
        params = get_extension_params((name, config), cert, issuer)
        cert = cert.add_extension(
            extension_cls(*params),
            config.get('extension_critical') or False
        )
    return cert
| 2,871 | Python | .py | 62 | 38.83871 | 117 | 0.677754 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,006 | cryptokey_crl.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/cryptokey_crl.py | import datetime
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from middlewared.service import Service
from middlewared.utils.time_utils import utc_now
from .load_utils import load_private_key
from .key_utils import retrieve_signing_algorithm
from .utils import CERT_BACKEND_MAPPINGS
class CryptoKeyService(Service):

    class Config:
        private = True

    def generate_crl(self, ca, certs, next_update=1):
        """Build a PEM-encoded CRL signed by `ca` revoking every cert in `certs`.

        `next_update` is the number of days until the CRL's nextUpdate time.
        Returns None when the CA's private key cannot be loaded.
        """
        # There is a tricky case here - what happens if the root CA is compromised ?
        # In normal world scenarios, that CA is removed from app's trust store and any
        # subsequent certs it had issued wouldn't be validated by the app then. Making a CRL
        # for a revoked root CA in normal cases doesn't make sense as the thief can sign a
        # counter CRL saying that everything is fine. As our environment is controlled,
        # i think we are safe to create a crl for root CA as well which we can publish for
        # services which make use of it i.e openvpn and they'll know that the certs/ca's have been
        # compromised.
        #
        # `ca` is root ca from where the chain `certs` starts.
        # `certs` is a list of all certs ca inclusive which are to be
        # included in the CRL ( if root ca is compromised, it will be in `certs` as well ).
        private_key = load_private_key(ca['privatekey'])
        ca_cert = x509.load_pem_x509_certificate(ca['certificate'].encode(), default_backend())
        if not private_key:
            return None
        # Rebuild the issuer name from the parsed CA certificate's subject fields.
        ca_data = self.middleware.call_sync('cryptokey.load_certificate', ca['certificate'])
        issuer = {k: ca_data.get(v) for k, v in CERT_BACKEND_MAPPINGS.items()}
        crl_builder = x509.CertificateRevocationListBuilder().issuer_name(x509.Name([
            x509.NameAttribute(getattr(NameOID, k.upper()), v)
            for k, v in issuer.items() if v
        ])).last_update(
            utc_now()
        ).next_update(
            # timedelta(days=next_update, seconds=300): small grace period past the day boundary.
            utc_now() + datetime.timedelta(next_update, 300, 0)
        )
        for cert in certs:
            crl_builder = crl_builder.add_revoked_certificate(
                x509.RevokedCertificateBuilder().serial_number(
                    self.middleware.call_sync('cryptokey.load_certificate', cert['certificate'])['serial']
                ).revocation_date(
                    cert['revoked_date']
                ).build(
                    default_backend()
                )
            )
        # https://www.ietf.org/rfc/rfc5280.txt
        # We should add AuthorityKeyIdentifier and CRLNumber at the very least
        crl = crl_builder.add_extension(
            x509.AuthorityKeyIdentifier(
                x509.SubjectKeyIdentifier.from_public_key(
                    ca_cert.public_key()
                ).digest, [x509.DirectoryName(
                    x509.Name([
                        x509.NameAttribute(getattr(NameOID, k.upper()), v)
                        for k, v in issuer.items() if v
                    ])
                )], ca_cert.serial_number
            ), False
        ).add_extension(
            x509.CRLNumber(1), False
        ).sign(
            private_key=private_key, algorithm=retrieve_signing_algorithm({}, private_key), backend=default_backend()
        )
        return crl.public_bytes(serialization.Encoding.PEM).decode()
| 3,482 | Python | .py | 69 | 39.695652 | 117 | 0.632539 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,007 | pci.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/pci.py | import collections
import os
import re
from pyudev import Context
from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str
from middlewared.service import private, Service, ValidationErrors
from middlewared.utils.gpu import get_gpus
from middlewared.utils.iommu import get_iommu_groups_info
from middlewared.utils.pci import get_pci_device_class, SENSITIVE_PCI_DEVICE_TYPES
from .utils import convert_pci_id_to_vm_pci_slot
RE_DEVICE_PATH = re.compile(r'pci_(\w+)_(\w+)_(\w+)_(\w+)')
class VMDeviceService(Service):
    """PCI passthrough device discovery for virtual machines."""

    class Config:
        namespace = 'vm.device'

    @accepts(roles=['VM_DEVICE_READ'])
    @returns(Bool())
    def iommu_enabled(self):
        """Returns "true" if iommu is enabled, "false" otherwise"""
        return os.path.exists('/sys/kernel/iommu_groups')

    @private
    def get_pci_device_default_data(self):
        # Skeleton returned for every PCI device; fields are filled in by
        # `get_pci_device_details` or left at these defaults when lookup fails.
        return {
            'capability': {
                'class': None,
                'domain': None,
                'bus': None,
                'slot': None,
                'function': None,
                'product': 'Not Available',
                'vendor': 'Not Available',
            },
            'controller_type': None,
            'critical': False,
            'iommu_group': {},
            'available': False,
            'drivers': [],
            'error': None,
            'device_path': None,
            'reset_mechanism_defined': False,
            'description': '',
        }

    @private
    def get_pci_device_details(self, obj, iommu_info):
        """Build the detail dict for one pyudev PCI device `obj`.

        `iommu_info` is the mapping returned by `get_iommu_groups_info()`.
        """
        data = self.get_pci_device_default_data()
        if not (igi := iommu_info.get(obj.sys_name)):
            data['error'] = 'Unable to determine iommu group'
        # sys_name looks like "0000:00:1f.3" -> domain:bus:slot.function
        dbs, func = obj.sys_name.split('.')
        dom, bus, slot = dbs.split(':')
        device_path = os.path.join('/sys/bus/pci/devices', obj.sys_name)
        cap_class = f'{(obj.attributes.get("class") or b"").decode()}' or get_pci_device_class(device_path)
        controller_type = obj.properties.get('ID_PCI_SUBCLASS_FROM_DATABASE') or SENSITIVE_PCI_DEVICE_TYPES.get(
            cap_class[:6]
        )
        drivers = []
        if driver := obj.properties.get('DRIVER'):
            drivers.append(driver)
        data['capability']['class'] = cap_class or None
        data['capability']['domain'] = f'{int(dom, base=16)}'
        data['capability']['bus'] = f'{int(bus, base=16)}'
        data['capability']['slot'] = f'{int(slot, base=16)}'
        data['capability']['function'] = f'{int(func, base=16)}'
        data['capability']['product'] = obj.properties.get('ID_MODEL_FROM_DATABASE', 'Not Available')
        data['capability']['vendor'] = obj.properties.get('ID_VENDOR_FROM_DATABASE', 'Not Available')
        data['controller_type'] = controller_type
        # A device is "critical" when its class is unknown or in the sensitive list
        # (something the host itself needs, e.g. per SENSITIVE_PCI_DEVICE_TYPES).
        data['critical'] = bool(not cap_class or SENSITIVE_PCI_DEVICE_TYPES.get(cap_class[:6]))
        # NOTE(review): when the iommu lookup above failed, `igi` is falsy and this
        # overwrites the `{}` default with that falsy value — confirm intended,
        # as the `passthrough_device` schema declares `iommu_group` as a Dict.
        data['iommu_group'] = igi
        # Passthrough-ready only if every bound driver is vfio-pci and not critical.
        data['available'] = all(i == 'vfio-pci' for i in drivers) and not data['critical']
        data['drivers'] = drivers
        data['device_path'] = os.path.join('/sys/bus/pci/devices', obj.sys_name)
        data['reset_mechanism_defined'] = os.path.exists(os.path.join(data['device_path'], 'reset'))
        prefix = obj.sys_name + (f' {controller_type!r}' if controller_type else '')
        vendor = data['capability']['vendor'].strip()
        suffix = data['capability']['product'].strip()
        if vendor and suffix:
            data['description'] = f'{prefix}: {suffix} by {vendor!r}'
        else:
            data['description'] = prefix
        return data

    @private
    def get_all_pci_devices_details(self):
        """Details of every PCI device on the system, keyed by `pci_<addr>` id."""
        result = dict()
        iommu_info = get_iommu_groups_info()
        for i in Context().list_devices(subsystem='pci'):
            key = f"pci_{i.sys_name.replace(':', '_').replace('.', '_')}"
            result[key] = self.get_pci_device_details(i, iommu_info)
        return result

    @private
    def get_single_pci_device_details(self, pcidev):
        """Details of the single PCI device whose sys_name equals `pcidev`."""
        result = dict()
        iommu_info = get_iommu_groups_info()
        for i in filter(lambda x: x.sys_name == pcidev, Context().list_devices(subsystem='pci')):
            key = f"pci_{i.sys_name.replace(':', '_').replace('.', '_')}"
            result[key] = self.get_pci_device_details(i, iommu_info)
        return result

    @accepts(Str('device'), roles=['VM_DEVICE_READ'])
    @returns(Dict(
        'passthrough_device',
        Dict(
            'capability',
            Str('class', null=True, required=True),
            Str('domain', null=True, required=True),
            Str('bus', null=True, required=True),
            Str('slot', null=True, required=True),
            Str('function', null=True, required=True),
            Str('product', null=True, required=True),
            Str('vendor', null=True, required=True),
            required=True,
        ),
        Str('controller_type', null=True, required=True),
        Dict(
            'iommu_group',
            Int('number', required=True),
            List('addresses', items=[Dict(
                'address',
                Str('domain', required=True),
                Str('bus', required=True),
                Str('slot', required=True),
                Str('function', required=True),
            )]),
            required=True,
        ),
        Bool('available', required=True),
        List('drivers', items=[Str('driver', required=False)], required=True),
        Str('error', null=True, required=True),
        Str('device_path', null=True, required=True),
        Bool('reset_mechanism_defined', required=True),
        Str('description', empty=True, required=True),
        register=True,
    ))
    def passthrough_device(self, device):
        """Retrieve details about `device` PCI device"""
        self.middleware.call_sync('vm.check_setup_libvirt')
        # `device` arrives as "pci_DDDD_BB_SS_F"; convert back to "DDDD:BB:SS.F".
        if device_details := self.get_single_pci_device_details(RE_DEVICE_PATH.sub(r'\1:\2:\3.\4', device)):
            return device_details[device]
        else:
            return {
                **self.get_pci_device_default_data(),
                'error': 'Device not found',
            }

    @accepts(roles=['VM_DEVICE_READ'])
    @returns(List(items=[Ref('passthrough_device')], register=True))
    def passthrough_device_choices(self):
        """Available choices for PCI passthru devices"""
        return self.get_all_pci_devices_details()

    @accepts()
    @returns(Ref('passthrough_device_choices'))
    def pptdev_choices(self):
        """Available choices for PCI passthru device"""
        return self.get_all_pci_devices_details()

    @accepts(Str('gpu_pci_id', empty=False))
    @returns(List(items=[Str('pci_ids')]))
    def get_pci_ids_for_gpu_isolation(self, gpu_pci_id):
        """
        Get PCI IDs of devices which are required to be isolated for `gpu_pci_id` GPU isolation.

        Basically when a GPU passthrough is desired for a VM, we need to isolate all the devices which are in the same
        IOMMU group as the GPU. This is required because if we don't do this, the VM will not be able to start because
        the devices in the same IOMMU group as the GPU will be in use by the host and will not be available for the VM
        to use.

        This endpoints retrieves all the PCI devices which are in the same IOMMU group as the GPU and returns their PCI
        IDs so UI can use those and create PCI devices for them and isolate them.
        """
        gpu = next((gpu for gpu in get_gpus() if gpu['addr']['pci_slot'] == gpu_pci_id), None)
        verrors = ValidationErrors()
        if not gpu:
            verrors.add('gpu_pci_id', f'GPU with {gpu_pci_id!r} PCI ID not found')
        verrors.check()
        iommu_groups = get_iommu_groups_info()
        # Group all PCI slots by IOMMU group number, so any group touched by one
        # of the GPU's functions can contribute all of its member devices.
        iommu_groups_mapping_with_group_no = collections.defaultdict(set)
        for pci_slot, pci_details in iommu_groups.items():
            iommu_groups_mapping_with_group_no[pci_details['number']].add(convert_pci_id_to_vm_pci_slot(pci_slot))
        pci_ids = set()
        for device in gpu['devices']:
            if not (device_info := iommu_groups.get(device['pci_slot'])):
                continue
            pci_ids.update(iommu_groups_mapping_with_group_no[device_info['number']])
        return list(pci_ids)
| 8,306 | Python | .py | 176 | 37.528409 | 119 | 0.598074 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,008 | vm_memory_info.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/vm_memory_info.py | import psutil
from middlewared.schema import accepts, Bool, Dict, Int, returns, Str
from middlewared.service import CallError, Service
from middlewared.validators import MACAddr
from .devices import NIC
from .utils import ACTIVE_STATES
class VMService(Service):
    """Memory accounting helpers for virtual machines."""

    @accepts()
    @returns(Dict(
        'vmemory_in_use',
        Int('RNP', required=True, description='Running but not provisioned'),
        Int('PRD', required=True, description='Provisioned but not running'),
        Int('RPRD', required=True, description='Running and provisioned'),
    ))
    async def get_vmemory_in_use(self):
        """
        The total amount of virtual memory in MB used by guests

        Returns a dict with the following information:
            RNP - Running but not provisioned
            PRD - Provisioned but not running
            RPRD - Running and provisioned
        """
        memory_allocation = {'RNP': 0, 'PRD': 0, 'RPRD': 0}
        guests = await self.middleware.call('datastore.query', 'vm.vm')
        for guest in guests:
            status = await self.middleware.call('vm.status', guest['id'])
            # NOTE(review): guest['memory'] (MB) is multiplied by 1024*1024, so the
            # accumulated values are bytes even though the docstring says MB — confirm.
            if status['state'] in ACTIVE_STATES:
                memory_allocation['RPRD' if guest['autostart'] else 'RNP'] += guest['memory'] * 1024 * 1024
            elif guest['autostart']:
                memory_allocation['PRD'] += guest['memory'] * 1024 * 1024
        return memory_allocation

    @accepts(Bool('overcommit', default=False), roles=['VM_READ'])
    @returns(Int('available_memory'))
    async def get_available_memory(self, overcommit):
        """
        Get the current maximum amount of available memory to be allocated for VMs.

        In case of `overcommit` being `true`, calculations are done in the following manner:
        1. If a VM has requested 10G but is only consuming 5G, only 5G will be counted
        2. System will consider shrinkable ZFS ARC as free memory ( shrinkable ZFS ARC is current ZFS ARC
           minus ZFS ARC minimum )

        In case of `overcommit` being `false`, calculations are done in the following manner:
        1. Complete VM requested memory will be taken into account regardless of how much actual physical
           memory the VM is consuming
        2. System will not consider shrinkable ZFS ARC as free memory

        Memory is of course a very "volatile" resource, values may change abruptly between a
        second but I deem it good enough to give the user a clue about how much memory is
        available at the current moment and if a VM should be allowed to be launched.
        """
        # Use 90% of available memory to play safe
        free = int(psutil.virtual_memory().available * 0.9)
        # Difference between current ARC total size and the minimum allowed
        arc_total = await self.middleware.call('sysctl.get_arcstats_size')
        arc_min = await self.middleware.call('sysctl.get_arc_min')
        arc_shrink = max(0, arc_total - arc_min)
        total_free = free + arc_shrink
        vms_memory_used = 0
        if overcommit is False:
            # If overcommit is not wanted its verified how much physical memory
            # the vm process is currently using and add the maximum memory its
            # supposed to have.
            for vm in await self.middleware.call('vm.query', [['status.state', 'in', ACTIVE_STATES]]):
                try:
                    current_vm_mem = await self.middleware.call('vm.get_memory_usage_internal', vm)
                except Exception:
                    self.logger.error('Unable to retrieve %r vm memory usage', vm['name'], exc_info=True)
                    continue
                else:
                    vm_max_mem = vm['memory'] * 1024 * 1024
                    # We handle edge case with vm_max_mem < current_vm_mem
                    if vm_max_mem > current_vm_mem:
                        vms_memory_used += vm_max_mem - current_vm_mem
        return max(0, total_free - vms_memory_used)

    @accepts(Int('vm_id'), roles=['VM_READ'])
    @returns(Dict(
        Int('minimum_memory_requested', description='Minimum memory requested by the VM'),
        Int('total_memory_requested', description='Maximum / total memory requested by the VM'),
        Bool('overcommit_required', description='Overcommit of memory is required to start VM'),
        Bool(
            'memory_req_fulfilled_after_overcommit',
            description='Memory requirements of VM are fulfilled if over-committing memory is specified'
        ),
        Int('arc_to_shrink', description='Size of ARC to shrink in bytes', null=True),
        Int('current_arc_max', description='Current size of max ARC in bytes'),
        Int('arc_min', description='Minimum size of ARC in bytes'),
        Int('arc_max_after_shrink', description='Size of max ARC in bytes after shrinking'),
        Int(
            'actual_vm_requested_memory',
            description='VM memory in bytes to consider when making calculations for available/required memory.'
                        ' If VM ballooning is specified for the VM, the minimum VM memory specified by user will'
                        ' be taken into account otherwise total VM memory requested will be taken into account.'
        ),
    ))
    async def get_vm_memory_info(self, vm_id):
        """
        Returns memory information for `vm_id` VM if it is going to be started.

        All memory attributes are expressed in bytes.
        """
        vm = await self.middleware.call('vm.get_instance', vm_id)
        if vm['status']['state'] in ACTIVE_STATES:
            # TODO: Let's add this later as we have a use case in the UI - could be useful to
            #  show separate info of each VM in the UI moving on
            raise CallError(f'Unable to retrieve {vm["name"]!r} VM information as it is already running.')
        arc_max = await self.middleware.call('sysctl.get_arc_max')
        arc_min = await self.middleware.call('sysctl.get_arc_min')
        shrinkable_arc_max = max(0, arc_max - arc_min)
        available_memory = await self.get_available_memory(False)
        available_memory_with_overcommit = await self.get_available_memory(True)
        # VM memory fields are stored in MB; everything below works in bytes.
        vm_max_memory = vm['memory'] * 1024 * 1024
        vm_min_memory = vm['min_memory'] * 1024 * 1024 if vm['min_memory'] else None
        # With ballooning, the minimum memory is what the VM actually needs to start.
        vm_requested_memory = vm_min_memory or vm_max_memory
        overcommit_required = vm_requested_memory > available_memory
        arc_to_shrink = 0
        if overcommit_required:
            arc_to_shrink = min(shrinkable_arc_max, vm_requested_memory - available_memory)
        return {
            'minimum_memory_requested': vm_min_memory,
            'total_memory_requested': vm_max_memory,
            'overcommit_required': overcommit_required,
            'arc_to_shrink': arc_to_shrink,
            'memory_req_fulfilled_after_overcommit': vm_requested_memory < available_memory_with_overcommit,
            'current_arc_max': arc_max,
            'arc_min': arc_min,
            'arc_max_after_shrink': arc_max - arc_to_shrink,
            'actual_vm_requested_memory': vm_requested_memory,
        }

    @accepts()
    @returns(Str('mac', validators=[MACAddr(separator=':')]),)
    def random_mac(self):
        """
        Create a random mac address.

        Returns:
            str: with six groups of two hexadecimal digits
        """
        return NIC.random_mac()
| 7,437 | Python | .py | 134 | 44.977612 | 113 | 0.635902 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,009 | vm_devices.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/vm_devices.py | import os
import re
import middlewared.sqlalchemy as sa
from middlewared.plugins.vm.devices.storage_devices import IOTYPE_CHOICES
from middlewared.plugins.zfs_.utils import zvol_name_to_path, zvol_path_to_name
from middlewared.schema import accepts, Bool, Dict, Int, OROperator, Patch, returns, Str
from middlewared.service import CallError, CRUDService, private
from middlewared.utils import run
from middlewared.async_validators import check_path_resides_within_volume
from .devices import DEVICES
from .utils import ACTIVE_STATES
RE_PPTDEV_NAME = re.compile(r'([0-9]+/){2}[0-9]+')
class VMDeviceModel(sa.Model):
    """Datastore model backing the `vm.device` table."""
    __tablename__ = 'vm_device'

    # Primary key.
    id = sa.Column(sa.Integer(), primary_key=True)
    # Device type, e.g. 'NIC', 'DISK', 'CDROM', 'PCI', 'DISPLAY', 'RAW', 'USB'.
    dtype = sa.Column(sa.String(50))
    # Type-specific attributes stored as JSON.
    attributes = sa.Column(sa.JSON())
    # Owning virtual machine.
    vm_id = sa.Column(sa.ForeignKey('vm_vm.id'), index=True)
    # Boot/attach ordering; may be NULL and is defaulted by dtype on extend.
    order = sa.Column(sa.Integer(), nullable=True)
class VMDeviceService(CRUDService):
    """CRUD service managing devices (disks, NICs, displays, PCI/USB passthrough, ...) attached to VMs."""

    ENTRY = Patch(
        'vmdevice_create', 'vm_device_entry',
        ('add', Int('id')),
        ('rm', {'name': 'attributes'}),
        # 'attributes' is polymorphic: its valid schema depends on the device's dtype.
        ('add', OROperator(*[device.schema for device in DEVICES.values()], name='attributes')),
    )

    class Config:
        namespace = 'vm.device'
        datastore = 'vm.device'
        datastore_extend = 'vm.device.extend_device'
        cli_namespace = 'service.vm.device'
        role_prefix = 'VM_DEVICE'

    @accepts()
    @returns(Dict(additional_attrs=True, example={'vms/test 1': '/dev/zvol/vms/test+1'}))
    async def disk_choices(self):
        """
        Returns disk choices for device type "DISK".
        """
        out = {}
        # Only offer unlocked, writable zvols that are either unattached or already
        # attached through the VM subsystem itself.
        zvols = await self.middleware.call(
            'zfs.dataset.unlocked_zvols_fast', [
                ['OR', [['attachment', '=', None], ['attachment.method', '=', 'vm.devices.query']]],
                ['ro', '=', False],
            ],
            {}, ['ATTACHMENT', 'RO']
        )
        for zvol in zvols:
            # NOTE(review): mapping is device path -> zvol name, which is the inverse
            # of the example shown in the @returns schema above — confirm intent.
            out[zvol['path']] = zvol['name']
        return out

    @accepts()
    @returns(Dict(
        *[Str(k, enum=[k]) for k in IOTYPE_CHOICES]
    ))
    async def iotype_choices(self):
        """
        IO-type choices for storage devices.
        """
        return {k: k for k in IOTYPE_CHOICES}

    @private
    async def extend_device(self, device):
        # datastore_extend hook: flatten the FK dict to an id and give devices without
        # an explicit order a type-based default (CDROMs first, then disks, then the rest).
        if device['vm']:
            device['vm'] = device['vm']['id']
        if not device['order']:
            if device['dtype'] == 'CDROM':
                device['order'] = 1000
            elif device['dtype'] in ('DISK', 'RAW'):
                device['order'] = 1001
            else:
                device['order'] = 1002
        return device

    @accepts(roles=['VM_DEVICE_READ'])
    @returns(Dict(additional_attrs=True))
    def nic_attach_choices(self):
        """
        Available choices for NIC Attach attribute.
        """
        return self.middleware.call_sync('interface.choices', {'exclude': ['epair', 'tap', 'vnet']})

    @accepts(roles=['VM_DEVICE_READ'])
    @returns(Dict(additional_attrs=True))
    async def bind_choices(self):
        """
        Available choices for Bind attribute.
        """
        return {
            d['address']: d['address'] for d in await self.middleware.call(
                'interface.ip_in_use', {'static': True, 'any': True, 'loopback': True}
            )
        }

    @private
    async def update_device(self, data, old=None):
        """Create/resize backing storage (zvol or raw file) for a device when requested."""
        if data['dtype'] == 'DISK':
            create_zvol = data['attributes'].pop('create_zvol', False)
            if create_zvol:
                ds_options = {
                    'name': data['attributes'].pop('zvol_name'),
                    'type': 'VOLUME',
                    'volsize': data['attributes'].pop('zvol_volsize'),
                }
                self.logger.debug(f'Creating ZVOL {ds_options["name"]} with volsize {ds_options["volsize"]}')
                # Pick the recommended block size for the pool the zvol will live on.
                zvol_blocksize = await self.middleware.call(
                    'pool.dataset.recommended_zvol_blocksize', ds_options['name'].split('/', 1)[0]
                )
                ds_options['volblocksize'] = zvol_blocksize
                await self.middleware.call('pool.dataset.create', ds_options)
        elif data['dtype'] == 'RAW' and (
            not data['attributes'].pop('exists', True) or (
                old and old['attributes']['size'] != data['attributes']['size']
            )
        ):
            # New raw file requested, or size changed on update: (re)size it with truncate.
            path = data['attributes']['path']
            cp = await run(['truncate', '-s', str(data['attributes']['size']), path], check=False)
            if cp.returncode:
                raise CallError(f'Failed to create or update raw file {path}: {cp.stderr}')
        return data

    @accepts(
        Dict(
            'vmdevice_create',
            Str('dtype', enum=['NIC', 'DISK', 'CDROM', 'PCI', 'DISPLAY', 'RAW', 'USB'], required=True),
            Int('vm', required=True),
            Dict('attributes', additional_attrs=True, default=None),
            Int('order', default=None, null=True),
            register=True,
        ),
    )
    async def do_create(self, data):
        """
        Create a new device for the VM of id `vm`.
        If `dtype` is the `RAW` type and a new raw file is to be created, `attributes.exists` will be passed as false.
        This means the API handles creating the raw file and raises the appropriate exception if file creation fails.
        If `dtype` is of `DISK` type and a new Zvol is to be created, `attributes.create_zvol` will be passed as
        true with valid `attributes.zvol_name` and `attributes.zvol_volsize` values.
        """
        data = await self.validate_device(data, update=False)
        data = await self.update_device(data)
        id_ = await self.middleware.call(
            'datastore.insert', self._config.datastore, data
        )
        # Resolve boot-order collisions with the VM's other devices.
        await self.__reorder_devices(id_, data['vm'], data['order'])
        return await self.get_instance(id_)

    async def do_update(self, id_, data):
        """
        Update a VM device of `id`.
        Pass `attributes.size` to resize a `dtype` `RAW` device. The raw file will be resized.
        """
        device = await self.get_instance(id_)
        new = device.copy()
        new.update(data)
        new = await self.validate_device(new, device)
        new = await self.update_device(new, device)
        await self.middleware.call('datastore.update', self._config.datastore, id_, new)
        await self.__reorder_devices(id_, device['vm'], new['order'])
        return await self.get_instance(id_)

    @private
    async def delete_resource(self, options, device):
        """Destroy backing resources (zvol and/or raw file) for `device` per `options`."""
        if options['zvol']:
            if device['dtype'] != 'DISK':
                raise CallError('The device is not a disk and has no zvol to destroy.')
            if not device['attributes'].get('path', '').startswith('/dev/zvol'):
                raise CallError('Unable to destroy zvol as disk device has misconfigured path')
            zvol_id = zvol_path_to_name(device['attributes']['path'])
            if await self.middleware.call('pool.dataset.query', [['id', '=', zvol_id]]):
                # FIXME: We should use pool.dataset.delete but right now FS attachments will consider
                # the current device as a valid reference. Also should we stopping the vm only when deleting an
                # attachment ?
                await self.middleware.call('zfs.dataset.delete', zvol_id)
        if options['raw_file']:
            if device['dtype'] != 'RAW':
                raise CallError('Device is not of RAW type.')
            try:
                os.unlink(device['attributes']['path'])
            except OSError:
                raise CallError(f'Failed to destroy {device["attributes"]["path"]}')

    @accepts(
        Int('id'),
        Dict(
            'vm_device_delete',
            Bool('zvol', default=False),
            Bool('raw_file', default=False),
            Bool('force', default=False),
        )
    )
    async def do_delete(self, id_, options):
        """
        Delete a VM device of `id`.
        """
        device = await self.get_instance(id_)
        status = await self.middleware.call('vm.status', device['vm'])
        if status['state'] in ACTIVE_STATES:
            raise CallError('Please stop/resume associated VM before deleting VM device.')
        try:
            await self.delete_resource(options, device)
        except CallError:
            # With `force`, resource cleanup failures do not block removal of the db row.
            if not options['force']:
                raise
        return await self.middleware.call('datastore.delete', self._config.datastore, id_)

    async def __reorder_devices(self, id_, vm_id, order):
        # Ensure no two devices of `vm_id` share the same boot order; bump any
        # colliding device to the next free slot.
        if order is None:
            return
        filters = [('vm', '=', vm_id), ('id', '!=', id_)]
        if await self.middleware.call('vm.device.query', filters + [('order', '=', order)]):
            used_order = [order]
            for device in await self.middleware.call('vm.device.query', filters, {'order_by': ['order']}):
                if not device['order']:
                    continue
                if device['order'] not in used_order:
                    used_order.append(device['order'])
                    continue
                device['order'] = min(used_order) + 1
                while device['order'] in used_order:
                    device['order'] += 1
                used_order.append(device['order'])
                await self.middleware.call('datastore.update', self._config.datastore, device['id'], device)

    @private
    async def disk_uniqueness_integrity_check(self, device, vm):
        # This ensures that the disk is not already present for `vm`
        def translate_device(dev):
            # A disk should have a path configured at all times, when that is not the case, that means `dtype` is DISK
            # and end user wants to create a new zvol in this case.
            return dev['attributes'].get('path') or zvol_name_to_path(dev['attributes']['zvol_name'])

        disks = [
            d for d in vm['devices']
            if d['dtype'] in ('DISK', 'RAW', 'CDROM') and translate_device(d) == translate_device(device)
        ]
        if not disks:
            # We don't have that disk path in vm devices, we are good to go
            return True
        elif len(disks) > 1:
            # VM is mis-configured
            return False
        elif not device.get('id') and disks:
            # A new device is being created, however it already exists in vm. This can also happen when VM instance
            # is being created, in that case it's okay. Key here is that we won't have the id field present
            return not bool(disks[0].get('id'))
        elif device.get('id'):
            # The device is being updated, if the device is same as we have in db, we are okay
            return device['id'] == disks[0].get('id')
        else:
            return False

    @private
    async def validate_path_field(self, verrors, schema, path):
        # Paths used by devices must live on a pool, not the boot device.
        await check_path_resides_within_volume(verrors, self.middleware, schema, path)

    @private
    async def validate_device(self, device, old=None, update=True):
        # Delegate validation to the concrete device class for this dtype.
        vm_instance = await self.middleware.call('vm.get_instance', device['vm'])
        device_obj = DEVICES[device['dtype']](device, self.middleware)
        await self.middleware.run_in_thread(device_obj.validate, device, old, vm_instance, update)
        return device

    @private
    async def validate_display_devices(self, verrors, vm_instance):
        devs = await self.get_display_devices(vm_instance)
        if len(devs['spice']) > 1:
            verrors.add('attributes.type', 'Only one SPICE Display device is supported')

    @private
    async def get_display_devices(self, vm_instance):
        # Group display devices by protocol; only SPICE is tracked currently.
        devs = {'spice': []}
        for dev in filter(lambda d: d['dtype'] == 'DISPLAY', vm_instance['devices']):
            if dev['attributes']['type'] == 'SPICE':
                devs['spice'].append(dev)
        return devs
# ==== file: truenas_middleware/src/middlewared/middlewared/plugins/vm/vm_lifecycle.py ====
from middlewared.schema import accepts, Bool, Dict, Int, returns
from middlewared.service import CallError, item_method, job, private, Service
from .vm_supervisor import VMSupervisorMixin
class VMService(Service, VMSupervisorMixin):
    """Lifecycle operations (start/stop/restart/suspend/resume) for VMs."""

    @private
    async def lifecycle_action_check(self):
        # Guard: VM lifecycle actions require an active VM license on this system.
        if not await self.middleware.call('vm.license_active'):
            raise CallError('Requested action cannot be performed as system is not licensed to use VMs')

    @item_method
    @accepts(
        Int('id'),
        Dict('options', Bool('overcommit', default=False)),
        roles=['VM_WRITE']
    )
    @returns()
    async def start(self, id_, options):
        """
        Start a VM.
        options.overcommit defaults to false, meaning VMs are not allowed to
        start if there is not enough available memory to hold all configured VMs.
        If true, VM starts even if there is not enough memory for all configured VMs.
        Error codes:
        ENOMEM(12): not enough free memory to run the VM without overcommit
        """
        await self.lifecycle_action_check()
        await self.middleware.run_in_thread(self._check_setup_connection)
        vm = await self.middleware.call('vm.get_instance', id_)
        vm_state = vm['status']['state']
        if vm_state == 'RUNNING':
            raise CallError(f'{vm["name"]!r} is already running')
        if vm_state == 'SUSPENDED':
            raise CallError(f'{vm["name"]!r} VM is suspended and can only be resumed/powered off')
        if vm['bootloader'] not in await self.middleware.call('vm.bootloader_options'):
            raise CallError(f'"{vm["bootloader"]}" is not supported on this platform.')
        if await self.middleware.call('system.is_ha_capable'):
            # Passthrough devices are host-specific and break failover.
            for device in vm['devices']:
                if device['dtype'] in ('PCI', 'USB'):
                    raise CallError(
                        'Please remove PCI/USB devices from VM before starting it in HA capable machines as '
                        'they are not supported.'
                    )
        # Perhaps we should have a default config option for VMs?
        await self.middleware.call('vm.init_guest_vmemory', vm, options['overcommit'])
        try:
            await self.middleware.run_in_thread(self._start, vm['name'])
        except Exception:
            # Start failed: return the reserved memory unless the VM came up anyway.
            if (await self.middleware.call('vm.get_instance', id_))['status']['state'] != 'RUNNING':
                await self.middleware.call('vm.teardown_guest_vmemory', id_)
            raise
        await self.middleware.call('service.reload', 'http')

    @item_method
    @accepts(
        Int('id'),
        Dict(
            'options',
            Bool('force', default=False),
            Bool('force_after_timeout', default=False),
        ),
        roles=['VM_WRITE']
    )
    @returns()
    @job(lock=lambda args: f'stop_vm_{args[0]}')
    def stop(self, job, id_, options):
        """
        Stops a VM.
        For unresponsive guests who have exceeded the `shutdown_timeout` defined by the user and have become
        unresponsive, they required to be powered down using `vm.poweroff`. `vm.stop` is only going to send a
        shutdown signal to the guest and wait the desired `shutdown_timeout` value before tearing down guest vmemory.
        `force_after_timeout` when supplied, it will initiate poweroff for the VM forcing it to exit if it has
        not already stopped within the specified `shutdown_timeout`.
        """
        self._check_setup_connection()
        vm_data = self.middleware.call_sync('vm.get_instance', id_)
        if options['force']:
            self._poweroff(vm_data['name'])
        else:
            self._stop(vm_data['name'], vm_data['shutdown_timeout'])
        if options['force_after_timeout'] and self.middleware.call_sync('vm.status', id_)['state'] == 'RUNNING':
            self._poweroff(vm_data['name'])

    @item_method
    @accepts(Int('id'), roles=['VM_WRITE'])
    @returns()
    def poweroff(self, id_):
        """
        Poweroff a VM.
        """
        self._check_setup_connection()
        vm_data = self.middleware.call_sync('vm.get_instance', id_)
        self._poweroff(vm_data['name'])

    @item_method
    @accepts(Int('id'), roles=['VM_WRITE'])
    @returns()
    @job(lock=lambda args: f'restart_vm_{args[0]}')
    def restart(self, job, id_):
        """
        Restart a VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id_)
        # A restart is a forced-after-timeout stop followed by an over-committed start.
        stop_job = self.middleware.call_sync('vm.stop', id_, {'force_after_timeout': True})
        stop_job.wait_sync()
        if stop_job.error:
            raise CallError(f'Failed to stop {vm["name"]!r} vm: {stop_job.error}')
        self.middleware.call_sync('vm.start', id_, {'overcommit': True})

    @item_method
    @accepts(Int('id'), roles=['VM_WRITE'])
    @returns()
    def suspend(self, id_):
        """
        Suspend `id` VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id_)
        self._suspend(vm['name'])

    @item_method
    @accepts(Int('id'), roles=['VM_WRITE'])
    @returns()
    def resume(self, id_):
        """
        Resume suspended `id` VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id_)
        self._resume(vm['name'])

    @private
    def suspend_vms(self, vm_ids):
        # Best-effort bulk suspend; only RUNNING VMs are touched, failures are logged.
        vms = {vm['id']: vm for vm in self.middleware.call_sync('vm.query')}
        for vm_id in filter(
            lambda vm_id: vms.get(vm_id).get('status', {}).get('state') == 'RUNNING',
            map(int, vm_ids)
        ):
            try:
                self.suspend(vm_id)
            except Exception:
                self.logger.error('Failed to suspend %r vm', vms[vm_id]['name'], exc_info=True)

    @private
    def resume_suspended_vms(self, vm_ids):
        # Best-effort bulk resume; only SUSPENDED VMs are touched, failures are logged.
        vms = {vm['id']: vm for vm in self.middleware.call_sync('vm.query')}
        for vm_id in filter(
            lambda vm_id: vms.get(vm_id).get('status', {}).get('state') == 'SUSPENDED',
            map(int, vm_ids)
        ):
            try:
                self.resume(vm_id)
            except Exception:
                self.logger.error('Failed to resume %r vm', vms[vm_id]['name'], exc_info=True)
async def _event_vms(middleware, event_type, args):
    """Tear down guest memory once a vm.query event reports a clean SHUTOFF."""
    vm_id = args['id']
    result = await middleware.call('vm.query', [['id', '=', vm_id]])
    should_teardown = (
        bool(result)
        and result[0]['status']['state'] == 'STOPPED'
        and args.get('state') == 'SHUTOFF'
    )
    if should_teardown:
        middleware.create_task(middleware.call('vm.teardown_guest_vmemory', vm_id))
async def setup(middleware):
    # Plugin hook: watch VM state changes so guest memory is reclaimed on shutdown.
    middleware.event_subscribe('vm.query', _event_vms)
# ==== file: truenas_middleware/src/middlewared/middlewared/plugins/vm/vm_display_info.py ====
from socket import AF_INET6
from middlewared.schema import accepts, Dict, Int, List, returns, Str
from middlewared.service import pass_app, private, Service
from .devices import DISPLAY
from .utils import NGINX_PREFIX
class VMService(Service):
    """Helpers around VM DISPLAY devices: port allocation, resolutions, web URIs."""

    @accepts(roles=['VM_READ'])
    @returns(Dict(
        'available_display_port',
        Int('port', required=True, description='Available server port'),
        Int('web', required=True, description='Web port to be used based on available `port`'),
    ))
    async def port_wizard(self):
        """
        It returns the next available Display Server Port and Web Port.
        Returns a dict with two keys `port` and `web`.
        """
        all_ports = await self.middleware.call('port.get_all_used_ports')

        # Lazily walk the display port range (5900+) skipping anything in use.
        def get_next_port():
            for i in filter(lambda i: i not in all_ports, range(5900, 65535)):
                yield i

        gen = get_next_port()
        # First free port is the display port, next free one is the web port.
        return {'port': next(gen), 'web': next(gen)}

    @private
    async def all_used_display_device_ports(self, additional_filters=None):
        # 6000 is always considered taken; add every configured display/web port.
        all_ports = [6000]
        additional_filters = additional_filters or []
        for device in await self.middleware.call('vm.device.query', [['dtype', '=', 'DISPLAY']] + additional_filters):
            all_ports.extend([device['attributes']['port'], device['attributes']['web_port']])
        return all_ports

    @accepts()
    @returns(Dict(
        *[Str(r, enum=[r]) for r in DISPLAY.RESOLUTION_ENUM]
    ))
    async def resolution_choices(self):
        """
        Retrieve supported resolution choices for VM Display devices.
        """
        return {r: r for r in DISPLAY.RESOLUTION_ENUM}

    @accepts(Int('id'), roles=['VM_READ'])
    @returns(List(
        'vmdevice', items=[
            Dict(
                'vmdevice',
                Int('id'),
                Str('dtype'),
                DISPLAY.schema,
                Int('order'),
                Int('vm'),
            ),
        ]
    ))
    async def get_display_devices(self, id_):
        """
        Get the display devices from a given guest. If a display device has password configured,
        `attributes.password_configured` will be set to `true`.
        """
        devices = []
        for device in await self.middleware.call('vm.device.query', [['vm', '=', id_], ['dtype', '=', 'DISPLAY']]):
            device['attributes']['password_configured'] = bool(device['attributes'].get('password'))
            devices.append(device)
        return devices

    @accepts(
        Int('id'),
        Str('host', default=''),
        Dict(
            'options',
            Str('protocol', default='HTTP', enum=['HTTP', 'HTTPS']),
        ),
        roles=['VM_READ']
    )
    @returns(Dict(
        'display_devices_uri',
        Str('error', null=True),
        Str('uri', null=True),
    ))
    @pass_app()
    async def get_display_web_uri(self, app, id_, host, options):
        """
        Retrieve Display URI for a given VM or appropriate error if there is no display device available
        or if it is not configured to use web interface
        """
        uri_data = {'error': None, 'uri': None}
        protocol = options['protocol'].lower()
        if not host:
            # Fall back to the address the client connected on; wrap IPv6 in brackets.
            try:
                if app.origin.is_tcp_ip_family and (_h := app.origin.loc_addr):
                    host = _h
                    if app.origin.family == AF_INET6:
                        host = f'[{_h}]'
            except AttributeError:
                pass

        if display_devices := await self.get_display_devices(id_):
            for device in map(lambda d: DISPLAY(d, middleware=self.middleware), display_devices):
                if device.data['attributes'].get('web'):
                    uri_data['uri'] = device.web_uri(host, protocol=protocol)
                else:
                    uri_data['error'] = 'Web display is not configured'
        else:
            uri_data['error'] = 'Display device is not configured for this VM'
        return uri_data

    @private
    async def get_display_devices_ui_info(self):
        # Info consumed by the UI/nginx layer for proxying display connections.
        devices = []
        for device in await self.middleware.call('vm.device.query', [['dtype', '=', 'DISPLAY']]):
            devices.append(DISPLAY(device, middleware=self.middleware).get_webui_info())
        return devices

    @private
    async def get_vm_display_nginx_route(self):
        return NGINX_PREFIX
# ==== file: truenas_middleware/src/middlewared/middlewared/plugins/vm/memory.py ====
import errno
from middlewared.schema import accepts, Int, returns
from middlewared.service import CallError, private, Service
from .utils import ACTIVE_STATES
from .vm_supervisor import VMSupervisorMixin
class VMService(Service, VMSupervisorMixin):
    """Guest memory accounting: reserve memory before start, return it to ARC after stop."""

    async def _set_guest_vmemory(self, vm_id, overcommit):
        """
        Ensure memory for `vm_id`, shrinking the ZFS ARC when over-commit is required.

        Raises `CallError` with ENOMEM when over-commit is required but not permitted.
        """
        vm = await self.middleware.call('vm.get_instance', vm_id)
        memory_details = await self.middleware.call('vm.get_vm_memory_info', vm_id)
        if not memory_details['overcommit_required']:
            # There really isn't anything to be done if over-committing is not required
            return
        if not overcommit:
            raise CallError(f'Cannot guarantee memory for guest {vm["name"]}', errno.ENOMEM)

        if memory_details['current_arc_max'] != memory_details['arc_max_after_shrink']:
            self.logger.debug(
                'Setting ARC from %s to %s', memory_details['current_arc_max'], memory_details['arc_max_after_shrink']
            )
            await self.middleware.call('sysctl.set_arc_max', memory_details['arc_max_after_shrink'])

    @private
    async def init_guest_vmemory(self, vm, overcommit):
        """Reserve guest memory prior to starting a VM; refuses if the VM is already active."""
        guest_status = await self.middleware.call('vm.status', vm['id'])
        if guest_status.get('state') not in ACTIVE_STATES:
            await self._set_guest_vmemory(vm['id'], overcommit)
        else:
            raise CallError('VM process is running, we won\'t allocate memory')

    @private
    async def teardown_guest_vmemory(self, vm_id):
        """Return a stopped VM's memory to the ZFS ARC, capped at the default arc_max."""
        vm = await self.middleware.call('vm.get_instance', vm_id)
        if vm['status']['state'] != 'STOPPED':
            return

        guest_memory = vm['memory'] * 1024 * 1024  # configured memory is in MiB
        arc_max = await self.middleware.call('sysctl.get_arc_max')
        arc_min = await self.middleware.call('sysctl.get_arc_min')
        new_arc_max = min(
            await self.middleware.call('sysctl.get_default_arc_max'),
            arc_max + guest_memory
        )
        if arc_max != new_arc_max:
            if new_arc_max > arc_min:
                self.logger.debug(f'Giving back guest memory to ARC: {new_arc_max}')
                await self.middleware.call('sysctl.set_arc_max', new_arc_max)
            else:
                # `Logger.warn` is a deprecated alias; use `warning`.
                self.logger.warning(
                    f'Not giving back memory to ARC because new arc_max ({new_arc_max}) <= arc_min ({arc_min})'
                )

    @accepts(Int('vm_id'), roles=['VM_READ'])
    @returns(Int('memory_usage', description='Memory usage of a VM in bytes'))
    def get_memory_usage(self, vm_id):
        """Return current memory usage (bytes) for VM `vm_id`."""
        return self.get_memory_usage_internal(self.middleware.call_sync('vm.get_instance', vm_id))

    @private
    def get_memory_usage_internal(self, vm):
        # Delegates to the libvirt supervisor mixin, keyed by domain name.
        return self._memory_info(vm['name'])
# ==== file: truenas_middleware/src/middlewared/middlewared/plugins/vm/usb.py ====
import re
from xml.etree import ElementTree as etree
from middlewared.schema import accepts, Bool, Dict, List, Ref, Str, returns
from middlewared.service import CallError, private, Service
from middlewared.utils import run
from .devices.usb import USB_CONTROLLER_CHOICES
from .utils import get_virsh_command_args
# Matches libvirt node-device names like "usb_1_2" or "usb_1_2_3" (bus/port chain).
RE_VALID_USB_DEVICE = re.compile(r'^usb_\d+_\d+(?:_\d)*$')
class VMDeviceService(Service):
    """USB passthrough helpers: enumerate host USB devices via libvirt's virsh."""

    class Config:
        namespace = 'vm.device'

    @accepts()
    @returns(Dict(*[Str(k, enum=[k]) for k in USB_CONTROLLER_CHOICES]))
    async def usb_controller_choices(self):
        """
        Retrieve USB controller type choices
        """
        return {k: k for k in USB_CONTROLLER_CHOICES}

    @private
    def retrieve_usb_device_information(self, xml_str):
        """
        Parse `virsh nodedev-dumpxml` output; return a dict with all capability
        keys (product/vendor/ids/bus/device) or None when any key is missing.
        """
        xml = etree.fromstring(xml_str.strip())
        capability = next((e for e in list(xml) if e.tag == 'capability'), None)
        if capability is None:
            return capability
        required_keys = set(self.get_capability_keys())
        capability_info = {}
        for element in filter(lambda e: e.tag in required_keys and e.text is not None, capability):
            capability_info[element.tag] = element.text
            # <product id="..."> / <vendor id="..."> carry the numeric ids as attributes.
            if element.tag in ('product', 'vendor') and element.get('id'):
                capability_info[f'{element.tag}_id'] = element.get('id')

        return None if set(capability_info) != required_keys else capability_info

    @private
    def get_capability_keys(self):
        # Template of every capability field we expect to resolve for a USB device.
        return {
            'product': None,
            'vendor': None,
            'product_id': None,
            'vendor_id': None,
            'bus': None,
            'device': None,
        }

    @accepts(Str('device', empty=False), roles=['VM_DEVICE_READ'])
    @returns(Dict(
        Dict(
            'capability',
            Str('product', required=True, null=True),
            Str('product_id', required=True, null=True),
            Str('vendor', required=True, null=True),
            Str('vendor_id', required=True, null=True),
            Str('bus', required=True, null=True),
            Str('device', required=True, null=True),
        ),
        Bool('available', required=True),
        Str('error', required=True, null=True),
        register=True,
    ))
    async def usb_passthrough_device(self, device):
        """
        Retrieve details about `device` USB device.
        """
        await self.middleware.call('vm.check_setup_libvirt')
        data = await self.get_basic_usb_passthrough_device_data()
        cp = await run(get_virsh_command_args() + ['nodedev-dumpxml', device], check=False)
        if cp.returncode:
            # virsh failed: report the error with `available` still False.
            data['error'] = cp.stderr.decode()
            return data

        capability_info = await self.middleware.call(
            'vm.device.retrieve_usb_device_information', cp.stdout.decode()
        )
        if not capability_info:
            data['error'] = 'Unable to determine capabilities of USB device'
        else:
            data['capability'] = capability_info

        return {
            **data,
            'available': not data['error'],
        }

    @private
    async def get_basic_usb_passthrough_device_data(self):
        # Default response skeleton before any probing has happened.
        return {
            'capability': self.get_capability_keys(),
            'available': False,
            'error': None,
        }

    @accepts(roles=['VM_DEVICE_READ'])
    @returns(List(items=[Ref('usb_passthrough_device')]))
    async def usb_passthrough_choices(self):
        """
        Available choices for USB passthrough devices.
        """
        await self.middleware.call('vm.check_setup_libvirt')

        cp = await run(get_virsh_command_args() + ['nodedev-list', 'usb_device'], check=False)
        if cp.returncode:
            raise CallError(f'Unable to retrieve USB devices: {cp.stderr.decode()}')

        devices = [k for k in map(str.strip, cp.stdout.decode().split('\n')) if RE_VALID_USB_DEVICE.findall(k)]
        mapping = {}
        for device in devices:
            details = await self.usb_passthrough_device(device)
            if details['error']:
                # Skip devices whose capabilities could not be determined.
                continue
            mapping[device] = details

        return mapping

    @private
    async def get_usb_port_from_usb_details(self, usb_data):
        # Resolve a libvirt node-device name from a product_id/vendor_id pair.
        if any(not usb_data.get(k) for k in ('product_id', 'vendor_id')):
            raise CallError('Product / Vendor ID must be specified for USBs')

        for device, device_details in (await self.usb_passthrough_choices()).items():
            capability = device_details['capability']
            if all(usb_data[k] == capability[k] for k in ('product_id', 'vendor_id')):
                return device
# ==== file: truenas_middleware/src/middlewared/middlewared/plugins/vm/clone.py ====
import errno
import re
import uuid
from middlewared.plugins.zfs_.utils import zvol_name_to_path, zvol_path_to_name
from middlewared.schema import accepts, Bool, Int, returns, Str
from middlewared.service import CallError, item_method, Service, private
from middlewared.service_exception import ValidationErrors
# Suffix appended (with an incrementing index) to cloned VM / snapshot / zvol names.
ZVOL_CLONE_SUFFIX = '_clone'
# Matches a name that already ends in '_clone<N>' so the index can be bumped in place.
ZVOL_CLONE_RE = re.compile(rf'^(.*){ZVOL_CLONE_SUFFIX}\d+$')
class VMService(Service):
    """VM cloning: duplicate a VM's config and clone its zvol-backed disks."""

    async def __next_clone_name(self, name):
        """Return the first unused '<name>_clone<N>' VM name."""
        vm_names = [
            i['name']
            for i in await self.middleware.call('vm.query', [
                ('name', '~', rf'{name}{ZVOL_CLONE_SUFFIX}\d+')
            ])
        ]
        clone_index = 0
        while True:
            clone_name = f'{name}{ZVOL_CLONE_SUFFIX}{clone_index}'
            if clone_name not in vm_names:
                break
            clone_index += 1
        return clone_name

    async def __clone_zvol(self, name, zvol, created_snaps, created_clones):
        """
        Snapshot `zvol` and clone it for the VM clone named `name`.

        Colliding snapshot/clone names get an incrementing '_clone<N>' suffix.
        Every snapshot/clone created is appended to the caller-provided lists so
        the caller can roll back on failure. Returns the clone dataset name.
        """
        if not await self.middleware.call('zfs.dataset.query', [('id', '=', zvol)]):
            raise CallError(f'zvol {zvol} does not exist.', errno.ENOENT)

        snapshot_name = name
        i = 0
        while True:
            zvol_snapshot = f'{zvol}@{snapshot_name}'
            if await self.middleware.call('zfs.snapshot.query', [('id', '=', zvol_snapshot)]):
                if ZVOL_CLONE_RE.search(snapshot_name):
                    snapshot_name = ZVOL_CLONE_RE.sub(rf'\1{ZVOL_CLONE_SUFFIX}{i}', snapshot_name)
                else:
                    snapshot_name = f'{name}{ZVOL_CLONE_SUFFIX}{i}'
                i += 1
                continue
            break
        await self.middleware.call('zfs.snapshot.create', {'dataset': zvol, 'name': snapshot_name})
        created_snaps.append(zvol_snapshot)

        clone_suffix = name
        i = 0
        while True:
            clone_dst = f'{zvol}_{clone_suffix}'
            if await self.middleware.call('zfs.dataset.query', [('id', '=', clone_dst)]):
                if ZVOL_CLONE_RE.search(clone_suffix):
                    clone_suffix = ZVOL_CLONE_RE.sub(rf'\1{ZVOL_CLONE_SUFFIX}{i}', clone_suffix)
                else:
                    clone_suffix = f'{name}{ZVOL_CLONE_SUFFIX}{i}'
                i += 1
                continue
            break
        await self.middleware.call('zfs.snapshot.clone', {'snapshot': zvol_snapshot, 'dataset_dst': clone_dst})
        created_clones.append(clone_dst)

        return clone_dst

    @item_method
    @accepts(
        Int('id'),
        Str('name', default=None),
        roles=['VM_WRITE']
    )
    @returns(Bool())
    async def clone(self, id_, name):
        """
        Clone the VM `id`.
        `name` is an optional parameter for the cloned VM.
        If not provided it will append the next number available to the VM name.
        """
        vm = await self.middleware.call('vm.get_instance', id_)
        await self.validate(vm)
        origin_name = vm['name']
        for key in ('id', 'status', 'display_available'):
            vm.pop(key, None)
        devices = vm.pop('devices')
        # Only derive an automatic clone name when the caller did not supply one
        # (avoids an unnecessary vm.query round-trip).
        vm['name'] = name if name is not None else await self.__next_clone_name(vm['name'])
        vm['uuid'] = str(uuid.uuid4())  # We want to use a newer uuid here as it is supposed to be unique per VM

        # In case we need to rollback
        created_snaps = []
        created_clones = []
        try:
            new_vm = await self.middleware.call('vm.do_create', vm)
            for item in devices:
                item.pop('id', None)
                item['vm'] = new_vm['id']
                if item['dtype'] == 'NIC':
                    # Drop the MAC so the cloned NIC gets a freshly generated one.
                    if 'mac' in item['attributes']:
                        del item['attributes']['mac']
                if item['dtype'] == 'DISPLAY':
                    # Allocate new, unused display/web ports for the clone.
                    if 'port' in item['attributes']:
                        dev_dict = await self.middleware.call('vm.port_wizard')
                        item['attributes']['port'] = dev_dict['port']
                        item['attributes']['web_port'] = dev_dict['web']
                if item['dtype'] == 'DISK':
                    zvol = zvol_path_to_name(item['attributes']['path'])
                    item['attributes']['path'] = zvol_name_to_path(
                        await self.__clone_zvol(vm['name'], zvol, created_snaps, created_clones)
                    )
                if item['dtype'] == 'RAW':
                    self.logger.warning('RAW disks must be copied manually. Skipping...')
                    continue
                await self.middleware.call('vm.device.create', item)
        except Exception:
            # Roll back any zvol clones/snapshots we created, then re-raise with
            # the original traceback intact (bare raise instead of `raise e`).
            for i in reversed(created_clones):
                try:
                    await self.middleware.call('zfs.dataset.delete', i)
                except Exception:
                    self.logger.warning('Rollback of VM clone left dangling zvol: %s', i)
            for i in reversed(created_snaps):
                try:
                    dataset, snap = i.split('@')
                    await self.middleware.call('zfs.snapshot.remove', {
                        'dataset': dataset,
                        'name': snap,
                        'defer_delete': True,
                    })
                except Exception:
                    # `Logger.warn` is a deprecated alias; use `warning`.
                    self.logger.warning('Rollback of VM clone left dangling snapshot: %s', i)
            raise
        self.logger.info('VM cloned from %s to %s', origin_name, vm['name'])

        return True

    @private
    async def validate(self, vm):
        """Clone preconditions: every DISPLAY device must have a password configured."""
        verrors = ValidationErrors()
        for index, device in enumerate(vm['devices']):
            if device['dtype'] == 'DISPLAY' and not device['attributes'].get('password'):
                verrors.add(
                    f'vm.devices.{index}.attributes.password',
                    'Password must be configured for display device in order to clone the VM.'
                )
        verrors.check()
# ==== file: truenas_middleware/src/middlewared/middlewared/plugins/vm/lifecycle.py ====
import asyncio
import contextlib
from middlewared.schema import accepts, Bool, Dict
from middlewared.service import CallError, private, Service
from middlewared.utils.asyncio_ import asyncio_map
from .utils import ACTIVE_STATES
from .vm_supervisor import VMSupervisorMixin
# Serializes libvirt teardown between vm.terminate and the system.shutdown handler.
SHUTDOWN_LOCK = asyncio.Lock()
class VMService(Service, VMSupervisorMixin):
    """libvirt connection lifecycle: bring-up, VM initialization, and teardown."""

    @private
    async def wait_for_libvirtd(self, timeout):
        # Start libvirtd (if needed), wait until it reports started, then open
        # and verify our libvirt connection and subscribe to libvirt events.
        async def libvirtd_started(middleware):
            await middleware.call('service.start', 'libvirtd')
            while not await middleware.call('service.started', 'libvirtd'):
                await asyncio.sleep(2)

        try:
            self._system_supports_virtualization()
            if not await self.middleware.call('service.started', 'libvirtd'):
                await asyncio.wait_for(self.middleware.create_task(libvirtd_started(self.middleware)), timeout=timeout)
            # We want to do this before initializing libvirt connection
            await self.middleware.run_in_thread(self._open)
            await self.middleware.run_in_thread(self._check_connection_alive)
            await self.middleware.call('vm.setup_libvirt_events')
        except (asyncio.TimeoutError, CallError):
            self.middleware.logger.error('Failed to setup libvirt', exc_info=True)

    @private
    def setup_libvirt_connection(self, timeout=30):
        self.middleware.call_sync('vm.wait_for_libvirtd', timeout)

    @private
    async def check_setup_libvirt(self):
        # Lazily establish the libvirt connection if libvirtd is not yet running.
        if not await self.middleware.call('service.started', 'libvirtd'):
            await self.middleware.call('vm.setup_libvirt_connection')

    @private
    def initialize_vms(self, timeout=30):
        # Register every configured VM with the supervisor; failures are logged
        # per-VM so a single bad VM cannot prevent middlewared from booting.
        vms = self.middleware.call_sync('vm.query')
        if vms and self._is_kvm_supported():
            self.setup_libvirt_connection(timeout)
        else:
            return

        if self._is_connection_alive():
            for vm_data in vms:
                try:
                    self._add_with_vm_data(vm_data)
                except Exception as e:
                    # Whatever happens, we don't want middlewared not booting
                    self.middleware.logger.error(
                        'Unable to setup %r VM object: %s', vm_data['name'], str(e), exc_info=True
                    )
            self.middleware.call_sync('service.reload', 'http')
        else:
            self.middleware.logger.error('Failed to establish libvirt connection')

    @private
    async def start_on_boot(self):
        for vm in await self.middleware.call('vm.query', [('autostart', '=', True)], {'force_sql_filters': True}):
            try:
                await self.middleware.call('vm.start', vm['id'])
            except Exception as e:
                self.middleware.logger.error(f'Failed to start VM {vm["name"]}: {e}')

    @private
    @accepts(
        Dict(
            'deinitialize_vms_options',
            Bool('reload_ui', default=True),
            Bool('stop_libvirt', default=True),
        )
    )
    async def deinitialize_vms(self, options):
        # Close the libvirt connection, then optionally reload the UI proxy config
        # and stop libvirtd itself.
        await self.middleware.call('vm.close_libvirt_connection')
        if options['reload_ui']:
            await self.middleware.call('service.reload', 'http')
        if options['stop_libvirt']:
            await self.middleware.call('service.stop', 'libvirtd')

    @private
    def close_libvirt_connection(self):
        if self.LIBVIRT_CONNECTION:
            with contextlib.suppress(CallError):
                self._close()

    @private
    def setup_details(self):
        # Debug snapshot of supervisor state vs. what libvirt reports.
        return {
            'connected': self._is_connection_alive(),
            'connection_initialised': bool(self.LIBVIRT_CONNECTION),
            'domains': list(self.vms.keys()),
            'libvirt_domains': self._list_domains() if self.LIBVIRT_CONNECTION else None,
        }

    @private
    async def terminate(self):
        async with SHUTDOWN_LOCK:
            await self.middleware.call('vm.close_libvirt_connection')

    @private
    async def terminate_timeout(self):
        # Longest configured VM shutdown timeout (minimum 10s) for middleware teardown.
        return max(map(lambda v: v['shutdown_timeout'], await self.middleware.call('vm.query')), default=10)
async def __event_system_ready(middleware, event_type, args):
    """
    Method called when system is ready, supposed to start VMs
    flagged that way.
    """
    await middleware.call('vm.initialize_vms')

    # we ignore the 'ready' event on an HA system since the failover event plugin
    # is responsible for starting this service, however, the VMs still need to be
    # initialized (which is what the above callers are doing)
    if await middleware.call('failover.licensed'):
        return

    middleware.create_task(middleware.call('vm.start_on_boot'))
async def __event_system_shutdown(middleware, event_type, args):
    """On shutdown: stop (or power off) all active VMs, then tear down libvirt."""
    async def poweroff_stop_vm(vm):
        # RUNNING VMs get a graceful stop (forced after the configured timeout);
        # other active states (e.g. SUSPENDED) are powered off directly.
        if vm['status']['state'] == 'RUNNING':
            stop_job = await middleware.call('vm.stop', vm['id'], {'force_after_timeout': True})
            await stop_job.wait()
            if stop_job.error:
                middleware.logger.error('Stopping %r VM failed: %r', vm['name'], stop_job.error)
        else:
            try:
                await middleware.call('vm.poweroff', vm['id'])
            except Exception:
                middleware.logger.error('Powering off %r VM failed', vm['name'], exc_info=True)

    vms = await middleware.call('vm.query', [('status.state', 'in', ACTIVE_STATES)])
    if vms:
        async with SHUTDOWN_LOCK:  # FIXME: Why a global lock?? Not needed....
            # stop up to 16 VMs concurrently
            await asyncio_map(poweroff_stop_vm, vms, 16)
            middleware.logger.debug('VM(s) stopped successfully')

    # We do this in vm.terminate as well, reasoning for repeating this here is that we don't want to
    # stop libvirt on middlewared restarts, we only want that to happen if a shutdown has been initiated
    # and we have cleanly exited
    await middleware.call('vm.deinitialize_vms', {'reload_ui': False})
async def setup(middleware):
    """Plugin entry point: capture ZFS ARC defaults, kick off VM initialization
    and subscribe to system lifecycle events."""
    # it's _very_ important that we run this before we do
    # any type of VM initialization. We have to capture the
    # zfs c_max value before we start manipulating these
    # sysctls during vm start/stop
    await middleware.call('sysctl.store_default_arc_max')

    if await middleware.call('system.ready'):
        # System already reported ready (e.g. middleware restart after boot),
        # so the 'system.ready' event will not fire again — initialize now.
        middleware.create_task(middleware.call('vm.initialize_vms', 5))  # We use a short timeout here deliberately
    middleware.event_subscribe('system.ready', __event_system_ready)
    middleware.event_subscribe('system.shutdown', __event_system_shutdown)
| 6,606 | Python | .py | 135 | 39.488889 | 119 | 0.646438 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,016 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/utils.py | from xml.etree import ElementTree as etree
# Domain states treated as "active" (eligible for stop/poweroff handling)
ACTIVE_STATES = ['RUNNING', 'SUSPENDED']
# Where per-VM UEFI nvram (VARS) files are persisted
SYSTEM_NVRAM_FOLDER_PATH = '/data/subsystems/vm/nvram'
# Local qemu system connection over the TrueNAS-specific libvirt socket
LIBVIRT_URI = 'qemu+unix:///system?socket=/run/truenas_libvirt/libvirt-sock'
LIBVIRT_USER = 'libvirt-qemu'
# URL prefix for VM display endpoints (consumed by the HTTP proxy config — confirm)
NGINX_PREFIX = '/vm/display'
def create_element(*args, **kwargs):
    """
    Build an ElementTree element.

    Positional/keyword arguments are forwarded to `etree.Element`, except the
    special `attribute_dict` keyword which may carry `text`, `tail` and a list
    of `children` elements to append.
    """
    extra = kwargs.pop('attribute_dict', {})
    node = etree.Element(*args, **kwargs)
    node.text = extra.get('text')
    node.tail = extra.get('tail')
    node.extend(extra.get('children', []))
    return node
def get_virsh_command_args():
    # Base argv for shelling out to virsh against the TrueNAS libvirt socket.
    return ['virsh', '-c', LIBVIRT_URI]
def convert_pci_id_to_vm_pci_slot(pci_id: str) -> str:
    """Translate a PCI address such as '0000:26:00.0' into the libvirt
    node-device style name ('pci_0000_26_00_0')."""
    return 'pci_' + pci_id.translate(str.maketrans('.:', '__'))
def get_vm_nvram_file_name(vm_data: dict) -> str:
    """File name of a VM's persisted UEFI nvram: '<id>_<name>_VARS.fd'."""
    return '{}_{}_VARS.fd'.format(vm_data['id'], vm_data['name'])
def get_default_status() -> dict:
    """Status reported when the libvirt connection is unavailable."""
    return dict(state='ERROR', pid=None, domain_state='ERROR')
| 1,044 | Python | .py | 26 | 35.769231 | 76 | 0.656064 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
class NumericSet:
    """
    Schema validator accepting cpuset-style strings (e.g. '0,2,5-7').

    Syntax checking is delegated to `parse_numeric_set`, which raises
    ValueError for malformed input; the parsed result is discarded.
    """

    def __call__(self, value):
        parse_numeric_set(value)
def parse_numeric_set(value):
    """
    Parse a cpuset-style numeric set string (e.g. '0,2,5-7') into a list of ints.

    Comma-separated parts are either single numbers or inclusive 'start-end'
    ranges. Duplicates are dropped while preserving first-seen order. An empty
    string yields an empty list.

    Raises ValueError for non-numeric tokens, malformed parts, or ranges whose
    start is not strictly less than their end.
    """
    if value == '':
        return []

    # dict keys provide ordered de-duplication
    cpus = {}
    for part in value.split(','):
        bounds = part.split('-')
        if len(bounds) == 1:
            cpus[int(bounds[0])] = None
        elif len(bounds) == 2:
            start = int(bounds[0])
            end = int(bounds[1])
            if start >= end:
                # fixed grammar of the original message ("has to greater that")
                raise ValueError(f'End of range has to be greater than start: {start}-{end}')
            for cpu in range(start, end + 1):
                cpus[cpu] = None
        else:
            # report the original token, not the split list it became
            raise ValueError(f'Range has to be in format start-end: {part}')

    return list(cpus)
| 743 | Python | .py | 23 | 22.956522 | 90 | 0.509777 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,018 | events.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/events.py | import libvirt
import threading
from middlewared.service import private, Service
from .connection import LibvirtConnectionMixin
class VMService(Service, LibvirtConnectionMixin):

    @private
    def setup_libvirt_events(self):
        """
        Register a libvirt domain lifecycle callback and start a daemon thread
        running libvirt's default event loop. Keepalive probing is enabled so a
        dead connection causes the loop's exit condition to trip.
        """
        self._check_setup_connection()

        def callback(conn, dom, event, detail, opaque):
            """
            0: 'DEFINED',
            1: 'UNDEFINED',
            2: 'STARTED',
            3: 'SUSPENDED',
            4: 'RESUMED',
            5: 'STOPPED',
            6: 'SHUTDOWN',
            7: 'PMSUSPENDED'
            Above is event mapping for internal reference
            """
            # Domain names have the form '<vm id>_<vm name>'; recover the VM row
            # and confirm the full name matches to guard against stale domains.
            vm_id = dom.name().split('_')[0]
            vm = None
            if vm_id.isdigit():
                if vms := self.middleware.call_sync('vm.query', [['id', '=', int(vm_id)]], {'force_sql_filters': True}):
                    if dom.name() == f'{vms[0]["id"]}_{vms[0]["name"]}':
                        vm = vms[0]
                        vm.pop('devices', None)

            if vm is None:
                emit_type = 'REMOVED'
            elif event == 0:
                emit_type = 'ADDED'
            else:
                emit_type = 'CHANGED'

            vm_state_mapping = {
                0: 'NOSTATE',
                1: 'RUNNING',
                2: 'BLOCKED',
                3: 'SUSPENDED',  # Actual libvirt event here is PAUSED
                4: 'SHUTDOWN',
                5: 'SHUTOFF',
                6: 'CRASHED',
                7: 'PMSUSPENDED',
            }
            try:
                if event == 1:
                    if emit_type == 'REMOVED':
                        state = 'NOSTATE'
                    else:
                        # We undefine/define domain numerous times based on if vm has any new changes
                        # registered, this is going to reflect that
                        state = 'UPDATING CONFIGURATION'
                else:
                    state = vm_state_mapping.get(dom.state()[0], 'UNKNOWN')
            except libvirt.libvirtError:
                state = 'UNKNOWN'

            # We do not send an event on removed because that would already be done by vm.delete
            if vm is not None:
                vm['status']['state'] = state
                # NOTE(review): `state=` below maps the *event* code through the
                # domain-*state* table; the two code spaces differ — confirm intended.
                self.middleware.send_event(
                    'vm.query', emit_type, id=int(vm_id), fields=vm, state=vm_state_mapping.get(event, 'UNKNOWN')
                )

        def event_loop_execution():
            # Pump libvirt events for as long as the shared connection is alive.
            while self.LIBVIRT_CONNECTION and self.LIBVIRT_CONNECTION._o and self.LIBVIRT_CONNECTION.isAlive():
                libvirt.virEventRunDefaultImpl()

        event_thread = threading.Thread(target=event_loop_execution, name='libvirt_event_loop')
        # Thread.setDaemon() is deprecated since Python 3.10; assign the attribute.
        event_thread.daemon = True
        event_thread.start()
        self.LIBVIRT_CONNECTION.domainEventRegister(callback, None)
        self.LIBVIRT_CONNECTION.setKeepAlive(5, 3)
| 2,926 | Python | .py | 69 | 28.086957 | 120 | 0.507379 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,019 | capabilities.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/capabilities.py | from collections import defaultdict
from xml.etree import ElementTree as etree
from middlewared.schema import accepts, Dict, returns
from middlewared.service import private, Service
from .connection import LibvirtConnectionMixin
class VMService(Service, LibvirtConnectionMixin):

    # Cached {arch_name: [machine_type, ...]} mapping built from host capabilities
    CAPABILITIES = None

    @private
    def update_capabilities_cache(self):
        """Parse libvirt host capabilities XML and cache the supported guest
        architectures along with their machine types."""
        self._check_setup_connection()
        xml = etree.fromstring(self.LIBVIRT_CONNECTION.getCapabilities())
        supported_archs = defaultdict(list)
        for guest in xml.findall('guest'):
            arch = guest.find('arch')
            # Element truthiness reflects child count, so `not arch` would also
            # skip a childless <arch> element (and is a deprecated idiom);
            # test explicitly for None instead.
            if arch is None or not arch.get('name'):
                continue

            arch_name = arch.get('name')
            # Only machine entries with actual text content are recorded
            for machine_type in filter(lambda m: m.text, arch.findall('machine')):
                supported_archs[arch_name].append(machine_type.text)

        self.CAPABILITIES = supported_archs

    @accepts(roles=['VM_READ'])
    @returns(Dict(
        additional_attrs=True,
        example={
            'x86_64': ['pc-i440fx-5.2', 'pc-q35-5.2', 'pc-i440fx-2.7'],
            'i686': ['pc-i440fx-3.0', 'xenfv'],
        }
    ))
    async def guest_architecture_and_machine_choices(self):
        """
        Retrieve choices for supported guest architecture types and machine choices.

        Keys in the response would be supported guest architecture(s) on the host and their respective values would
        be supported machine type(s) for the specific architecture on the host.
        """
        # Populate the cache lazily on first use
        if not self.CAPABILITIES:
            await self.middleware.call('vm.update_capabilities_cache')
        return self.CAPABILITIES
| 1,656 | Python | .py | 37 | 36.351351 | 115 | 0.668117 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,020 | vms.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/vms.py | import asyncio
import errno
import functools
import os
import re
import shlex
import shutil
import uuid
import middlewared.sqlalchemy as sa
from middlewared.plugins.zfs_.utils import zvol_path_to_name
from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, returns, Str, ValidationErrors
from middlewared.service import CallError, CRUDService, item_method, job, private
from middlewared.validators import Range, UUID
from middlewared.plugins.vm.numeric_set import parse_numeric_set, NumericSet
from .utils import ACTIVE_STATES, get_default_status, get_vm_nvram_file_name, SYSTEM_NVRAM_FOLDER_PATH
from .vm_supervisor import VMSupervisorMixin
# Internal bootloader identifier -> human readable label
BOOT_LOADER_OPTIONS = {
    'UEFI': 'UEFI',
    'UEFI_CSM': 'Legacy BIOS',
}
# Serialises create/delete interactions with libvirt
LIBVIRT_LOCK = asyncio.Lock()
# VM names: alphanumeric characters and underscores only
RE_NAME = re.compile(r'^[a-zA-Z_0-9]+$')
class VMModel(sa.Model):
    """SQLAlchemy model backing the `vm_vm` table (one row per virtual machine)."""
    __tablename__ = 'vm_vm'

    id = sa.Column(sa.Integer(), primary_key=True)
    name = sa.Column(sa.String(150))
    description = sa.Column(sa.String(250))
    # CPU topology: total sockets, cores per socket, threads per core
    vcpus = sa.Column(sa.Integer(), default=1)
    memory = sa.Column(sa.Integer())
    min_memory = sa.Column(sa.Integer(), nullable=True)
    autostart = sa.Column(sa.Boolean(), default=False)
    time = sa.Column(sa.String(5), default='LOCAL')
    bootloader = sa.Column(sa.String(50), default='UEFI')
    cores = sa.Column(sa.Integer(), default=1)
    threads = sa.Column(sa.Integer(), default=1)
    hyperv_enlightenments = sa.Column(sa.Boolean(), default=False)
    shutdown_timeout = sa.Column(sa.Integer(), default=90)
    cpu_mode = sa.Column(sa.Text())
    cpu_model = sa.Column(sa.Text(), nullable=True)
    # cpuset/nodeset hold cpuset-style strings (see parse_numeric_set)
    cpuset = sa.Column(sa.Text(), default=None, nullable=True)
    nodeset = sa.Column(sa.Text(), default=None, nullable=True)
    pin_vcpus = sa.Column(sa.Boolean(), default=False)
    hide_from_msr = sa.Column(sa.Boolean(), default=False)
    suspend_on_snapshot = sa.Column(sa.Boolean(), default=False)
    ensure_display_device = sa.Column(sa.Boolean(), default=True)
    # Guest architecture / machine type (null = pick a reasonable host default)
    arch_type = sa.Column(sa.String(255), default=None, nullable=True)
    machine_type = sa.Column(sa.String(255), default=None, nullable=True)
    uuid = sa.Column(sa.String(255))
    command_line_args = sa.Column(sa.Text(), default='', nullable=False)
    bootloader_ovmf = sa.Column(sa.String(1024), default='OVMF_CODE.fd')
    trusted_platform_module = sa.Column(sa.Boolean(), default=False)
    enable_cpu_topology_extension = sa.Column(sa.Boolean(), default=False)
@functools.cache
def ovmf_options(firmware_dir='/usr/share/OVMF'):
    """
    List OVMF firmware code images available in `firmware_dir`.

    Matches file names of the 'OVMF_CODE*.fd' form. The previous regex
    (`^OVMF_CODE.*.fd`) left the dot unescaped and unanchored, so it also
    matched names without a literal '.fd' suffix; explicit prefix/suffix
    checks implement the evident intent. Results are cached per directory
    for the lifetime of the process.
    """
    return [
        path for path in os.listdir(firmware_dir)
        if path.startswith('OVMF_CODE') and path.endswith('.fd')
    ]
class VMService(CRUDService, VMSupervisorMixin):
    """CRUD service managing virtual machines backed by libvirt/qemu."""

    class Config:
        # CRUD plumbing: rows live in the `vm.vm` datastore and are extended
        # with devices/display/status via vm.extend_vm + vm.extend_context.
        namespace = 'vm'
        datastore = 'vm.vm'
        datastore_extend = 'vm.extend_vm'
        datastore_extend_context = 'vm.extend_context'
        cli_namespace = 'service.vm'
        role_prefix = 'VM'

    # Entry schema = the create schema plus read-only fields filled in by extend_vm
    ENTRY = Patch(
        'vm_create',
        'vm_entry',
        ('add', List('devices')),
        ('add', Dict(
            'status',
            Str('state', required=True),
            Int('pid', null=True, required=True),
            Str('domain_state', required=True),
        )),
        ('add', Bool('display_available')),
        ('add', Int('id')),
    )
@accepts(roles=['VM_READ'])
@returns(Dict(additional_attrs=True))
def bootloader_ovmf_choices(self):
"""
Retrieve bootloader ovmf choices
"""
return {path: path for path in ovmf_options()}
    @private
    def extend_context(self, rows, extra):
        # Compute per-VM status once for the whole query result. Talking to
        # libvirt is skipped while the system is shutting down or KVM is absent.
        status = {}
        shutting_down = self.middleware.call_sync('system.state') == 'SHUTTING_DOWN'
        kvm_supported = self._is_kvm_supported()
        if shutting_down is False and rows and kvm_supported:
            # best-effort connection (re)establishment with a short timeout
            self._safely_check_setup_connection(5)

        libvirt_running = shutting_down is False and self._is_connection_alive()
        for row in rows:
            # Fall back to the ERROR placeholder status when libvirt is unusable
            status[row['id']] = self.status_impl(row) if libvirt_running else get_default_status()

        return {
            'status': status,
        }
    @accepts(roles=['VM_READ'])
    @returns(Dict(
        *[Str(k, enum=[v]) for k, v in BOOT_LOADER_OPTIONS.items()],
    ))
    async def bootloader_options(self):
        """
        Supported motherboard firmware options.
        """
        # Static mapping: internal identifier -> human readable label
        return BOOT_LOADER_OPTIONS
@private
async def extend_vm(self, vm, context):
vm['devices'] = await self.middleware.call(
'vm.device.query',
[('vm', '=', vm['id'])],
{'force_sql_filters': True},
)
vm['display_available'] = any(device['dtype'] == 'DISPLAY' for device in vm['devices'])
vm['status'] = context['status'][vm['id']]
return vm
    @accepts(Dict(
        'vm_create',
        Str('command_line_args', default=''),
        Str('cpu_mode', default='CUSTOM', enum=[
            'CUSTOM', 'HOST-MODEL', 'HOST-PASSTHROUGH']),
        Str('cpu_model', default=None, null=True),
        Str('name', required=True),
        Str('description'),
        Int('vcpus', default=1),
        Int('cores', default=1),
        Int('threads', default=1),
        Str('cpuset', default=None, null=True, validators=[NumericSet()]),
        Str('nodeset', default=None, null=True, validators=[NumericSet()]),
        Bool('enable_cpu_topology_extension', default=False),
        Bool('pin_vcpus', default=False),
        Bool('suspend_on_snapshot', default=False),
        Bool('trusted_platform_module', default=False),
        Int('memory', required=True, validators=[Range(min_=20)]),
        Int('min_memory', null=True, validators=[Range(min_=20)], default=None),
        Bool('hyperv_enlightenments', default=False),
        Str('bootloader', enum=list(BOOT_LOADER_OPTIONS.keys()), default='UEFI'),
        Str('bootloader_ovmf', default='OVMF_CODE.fd'),
        Bool('autostart', default=True),
        Bool('hide_from_msr', default=False),
        Bool('ensure_display_device', default=True),
        Str('time', enum=['LOCAL', 'UTC'], default='LOCAL'),
        Int('shutdown_timeout', default=90,
            validators=[Range(min_=5, max_=300)]),
        Str('arch_type', null=True, default=None),
        Str('machine_type', null=True, default=None),
        Str('uuid', null=True, default=None, validators=[UUID()]),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create a Virtual Machine (VM).

        Maximum of 16 guest virtual CPUs are allowed. By default, every virtual CPU is configured as a
        separate package. Multiple cores can be configured per CPU by specifying `cores` attributes.
        `vcpus` specifies total number of CPU sockets. `cores` specifies number of cores per socket. `threads`
        specifies number of threads per core.

        `ensure_display_device` when set ( the default ) will ensure that the guest always has access to a video device.
        For headless installations like ubuntu server this is required for the guest to operate properly. However
        for cases where consumer would like to use GPU passthrough and does not want a display device added should set
        this to `false`.

        `arch_type` refers to architecture type and can be specified for the guest. By default the value is `null` and
        system in this case will choose a reasonable default based on host.

        `machine_type` refers to machine type of the guest based on the architecture type selected with `arch_type`.
        By default the value is `null` and system in this case will choose a reasonable default based on `arch_type`
        configuration.

        `shutdown_timeout` indicates the time in seconds the system waits for the VM to cleanly shutdown. During system
        shutdown, if the VM hasn't exited after a hardware shutdown signal has been sent by the system within
        `shutdown_timeout` seconds, system initiates poweroff for the VM to stop it.

        `hide_from_msr` is a boolean which when set will hide the KVM hypervisor from standard MSR based discovery and
        is useful to enable when doing GPU passthrough.

        `hyperv_enlightenments` can be used to enable subset of predefined Hyper-V enlightenments for Windows guests.
        These enlightenments improve performance and enable otherwise missing features.

        `suspend_on_snapshot` is a boolean attribute which when enabled will automatically pause/suspend VMs when
        a snapshot is being taken for periodic snapshot tasks. For manual snapshots, if user has specified vms to
        be paused, they will be in that case.
        """
        async with LIBVIRT_LOCK:
            await self.middleware.run_in_thread(self._check_setup_connection)

            verrors = ValidationErrors()
            await self.common_validation(verrors, 'vm_create', data)
            verrors.check()

            vm_id = await self.middleware.call('datastore.insert', 'vm.vm', data)
            # Register/define the libvirt domain for the new VM (blocking call)
            await self.middleware.run_in_thread(self._add, vm_id)
            # Refresh libvirt-guests config so shutdown handling knows this VM
            await self.middleware.call('etc.generate', 'libvirt_guests')

        return await self.get_instance(vm_id)
    @private
    async def common_validation(self, verrors, schema_name, data, old=None):
        """
        Shared create/update validation. Records problems on `verrors` and also
        mutates `data`: a uuid is generated when one was not supplied.
        """
        if data['bootloader_ovmf'] not in await self.middleware.call('vm.bootloader_ovmf_choices'):
            verrors.add(
                f'{schema_name}.bootloader_ovmf',
                'Invalid bootloader ovmf choice specified'
            )

        if not data.get('uuid'):
            data['uuid'] = str(uuid.uuid4())

        if not await self.middleware.call('vm.license_active'):
            verrors.add(
                f'{schema_name}.name',
                'System is not licensed to use VMs'
            )

        if data['min_memory'] and data['min_memory'] > data['memory']:
            verrors.add(
                f'{schema_name}.min_memory',
                'Minimum memory should not be greater than defined/maximum memory'
            )

        # Only verify that the extra qemu arguments are shell-parseable
        try:
            shlex.split(data['command_line_args'])
        except ValueError as e:
            verrors.add(
                f'{schema_name}.command_line_args',
                f'Parse error: {e.args[0]}'
            )

        # Total vcpus = sockets * cores-per-socket * threads-per-core
        vcpus = data['vcpus'] * data['cores'] * data['threads']
        if vcpus:
            flags = await self.middleware.call('vm.flags')
            max_vcpus = await self.middleware.call('vm.maximum_supported_vcpus')
            if vcpus > max_vcpus:
                verrors.add(
                    f'{schema_name}.vcpus',
                    f'Maximum {max_vcpus} vcpus are supported.'
                    f'Please ensure the product of "{schema_name}.vcpus", "{schema_name}.cores" and '
                    f'"{schema_name}.threads" is less then {max_vcpus}.'
                )
            elif flags['intel_vmx']:
                if vcpus > 1 and flags['unrestricted_guest'] is False:
                    verrors.add(
                        f'{schema_name}.vcpus', 'Only one Virtual CPU is allowed in this system.')
            elif flags['amd_rvi']:
                if vcpus > 1 and flags['amd_asids'] is False:
                    verrors.add(
                        f'{schema_name}.vcpus', 'Only one virtual CPU is allowed in this system.'
                    )
            elif not await self.middleware.call('vm.supports_virtualization'):
                verrors.add(
                    schema_name, 'This system does not support virtualization.'
                )

        # machine_type is only meaningful relative to a chosen arch_type
        if data.get('arch_type') or data.get('machine_type'):
            choices = await self.middleware.call('vm.guest_architecture_and_machine_choices')
            if data.get('arch_type') and data['arch_type'] not in choices:
                verrors.add(f'{schema_name}.arch_type',
                            'Specified architecture type is not supported on this system')
            if data.get('machine_type'):
                if not data.get('arch_type'):
                    verrors.add(
                        f'{schema_name}.arch_type', f'Must be specified when "{schema_name}.machine_type" is set'
                    )
                elif data['arch_type'] in choices and data['machine_type'] not in choices[data['arch_type']]:
                    verrors.add(
                        f'{schema_name}.machine_type',
                        f'Specified machine type is not supported for {choices[data["arch_type"]]!r} architecture type'
                    )

        # A specific cpu_model only makes sense with the CUSTOM cpu mode
        if data.get('cpu_mode') != 'CUSTOM' and data.get('cpu_model'):
            verrors.add(
                f'{schema_name}.cpu_model',
                'This attribute should not be specified when "cpu_mode" is not "CUSTOM".'
            )
        elif data.get('cpu_model') and data['cpu_model'] not in await self.middleware.call('vm.cpu_model_choices'):
            verrors.add(f'{schema_name}.cpu_model',
                        'Please select a valid CPU model.')

        if 'name' in data:
            # Name must be unique (excluding the row being updated) and match RE_NAME
            filters = [('name', '=', data['name'])]
            if old:
                filters.append(('id', '!=', old['id']))
            if await self.middleware.call('vm.query', filters):
                verrors.add(
                    f'{schema_name}.name',
                    'This name already exists.', errno.EEXIST
                )
            elif not RE_NAME.search(data['name']):
                verrors.add(
                    f'{schema_name}.name',
                    'Only alphanumeric characters are allowed.'
                )

        if data['pin_vcpus']:
            # Pinning requires exactly one host cpu per guest vcpu
            if not data['cpuset']:
                verrors.add(
                    f'{schema_name}.cpuset',
                    f'Must be specified when "{schema_name}.pin_vcpus" is set.'
                )
            elif len(parse_numeric_set(data['cpuset'])) != vcpus:
                verrors.add(
                    f'{schema_name}.pin_vcpus',
                    f'Number of cpus in "{schema_name}.cpuset" must be equal to total number vpcus if pinning is enabled.'
                )

        # TODO: Let's please implement PCI express hierarchy as the limit on devices in KVM is quite high
        # with reports of users having thousands of disks
        # Let's validate that the VM has the correct no of slots available to accommodate currently configured devices
    @accepts(
        Int('id', required=True),
        Patch(
            'vm_entry',
            'vm_update',
            ('rm', {'name': 'devices'}),
            ('rm', {'name': 'display_available'}),
            ('rm', {'name': 'status'}),
            ('attr', {'update': True}),
        )
    )
    async def do_update(self, id_, data):
        """
        Update all information of a specific VM.

        `devices` is a list of virtualized hardware to attach to the virtual machine. If `devices` is not present,
        no change is made to devices. If either the device list order or data stored by the device changes when the
        attribute is passed, these actions are taken:

        1) If there is no device in the `devices` list which was previously attached to the VM, that device is
           removed from the virtual machine.
        2) Devices are updated in the `devices` list when they contain a valid `id` attribute that corresponds to
           an existing device.
        3) Devices that do not have an `id` attribute are created and attached to `id` VM.
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)
        if new['name'] != old['name']:
            # Renames touch the libvirt domain and are only safe while inactive
            await self.middleware.run_in_thread(self._check_setup_connection)
            if old['status']['state'] in ACTIVE_STATES:
                raise CallError('VM name can only be changed when VM is inactive')

            if old['name'] not in self.vms:
                raise CallError(f'Unable to locate domain for {old["name"]}')

        verrors = ValidationErrors()
        await self.common_validation(verrors, 'vm_update', new, old=old)
        verrors.check()

        # Strip read-only fields computed by extend_vm before persisting the row
        for key in ('devices', 'status', 'display_available'):
            new.pop(key, None)

        await self.middleware.call('datastore.update', 'vm.vm', id_, new)

        vm_data = await self.get_instance(id_)
        if new['name'] != old['name']:
            await self.middleware.run_in_thread(self._rename_domain, old, vm_data)
            try:
                # Keep the persisted UEFI nvram file in sync with the new name
                new_path = os.path.join(SYSTEM_NVRAM_FOLDER_PATH, get_vm_nvram_file_name(new))
                await self.middleware.run_in_thread(
                    os.rename, os.path.join(SYSTEM_NVRAM_FOLDER_PATH, get_vm_nvram_file_name(old)), new_path
                )
            except FileNotFoundError:
                if old['bootloader'] == new['bootloader'] == 'UEFI':
                    # So we only want to raise an error if bootloader is UEFI because for BIOS
                    # nvram file will not exist and it is fine. If bootloader is changed from
                    # BIOS to UEFI, even then we will not have it and it is fine so we don't want
                    # to raise an error in that case.
                    raise CallError(
                        f'VM name has been updated but nvram file for {old["name"]} does not exist '
                        f'which can result in {new["name"]} VM not booting properly.'
                    )

        if old['shutdown_timeout'] != new['shutdown_timeout']:
            # libvirt-guests config embeds the shutdown timeout; regenerate it
            await self.middleware.call('etc.generate', 'libvirt_guests')

        return await self.get_instance(id_)
    @accepts(
        Int('id'),
        Dict(
            'vm_delete',
            Bool('zvols', default=False),
            Bool('force', default=False),
        ),
    )
    async def do_delete(self, id_, data):
        """
        Delete a VM.

        `zvols` additionally deletes zvol-backed DISK devices; `force` turns
        zvol/domain cleanup failures into logged errors instead of aborting.
        """
        async with LIBVIRT_LOCK:
            vm = await self.get_instance(id_)
            # Deletion should be allowed even if host does not support virtualization
            if self._is_kvm_supported():
                await self.middleware.run_in_thread(self._check_setup_connection)
                status = await self.middleware.call('vm.status', id_)
            else:
                status = vm['status']

            force_delete = data.get('force')
            if status['state'] in ACTIVE_STATES:
                await self.middleware.call('vm.poweroff', id_)
                # We would like to wait at least 7 seconds to have the vm
                # complete it's post vm actions which might require interaction with it's domain
                await asyncio.sleep(7)
            elif status.get('state') == 'ERROR' and not force_delete:
                raise CallError('Unable to retrieve VM status. Failed to destroy VM')

            if data['zvols']:
                devices = await self.middleware.call('vm.device.query', [
                    ('vm', '=', id_), ('dtype', '=', 'DISK')
                ])

                for zvol in devices:
                    # Only paths under /dev/zvol/ are actual zvol-backed disks
                    if not zvol['attributes']['path'].startswith('/dev/zvol/'):
                        continue

                    disk_name = zvol_path_to_name(zvol['attributes']['path'])
                    try:
                        await self.middleware.call('zfs.dataset.delete', disk_name, {'recursive': True})
                    except Exception:
                        if not force_delete:
                            raise
                        else:
                            self.logger.error(
                                'Failed to delete %r volume when removing %r VM', disk_name, vm['name'], exc_info=True
                            )

            try:
                await self.middleware.run_in_thread(self._undefine_domain, vm['name'])
            except Exception:
                if not force_delete:
                    raise
                else:
                    self.logger.error('Failed to un-define %r VM\'s domain', vm['name'], exc_info=True)

            # We remove vm devices first
            for device in vm['devices']:
                await self.middleware.call('vm.device.delete', device['id'], {'force': data['force']})
            result = await self.middleware.call('datastore.delete', 'vm.vm', id_)
            if not await self.middleware.call('vm.query'):
                # Last VM gone: tear down libvirt and the supervisor registry
                await self.middleware.call('vm.deinitialize_vms')
                self._clear()
            else:
                await self.middleware.call('etc.generate', 'libvirt_guests')
            return result
    @item_method
    @accepts(Int('id'), roles=['VM_READ'])
    @returns(Dict(
        'vm_status',
        Str('state', required=True),
        Int('pid', null=True, required=True),
        Str('domain_state', required=True),
    ))
    def status(self, id_):
        """
        Get the status of `id` VM.

        Returns a dict:
            - state, RUNNING / STOPPED / SUSPENDED
            - pid, process id if RUNNING
        """
        # Query the raw datastore row: vm.query would itself recompute status
        # via extend_context, which ultimately calls back into status_impl.
        vm = self.middleware.call_sync('datastore.query', 'vm.vm', [['id', '=', id_]], {'get': True})
        self._check_setup_connection()
        return self.status_impl(vm)
@private
def status_impl(self, vm):
if self._has_domain(vm['name']):
try:
# Whatever happens, query shouldn't fail
return self._status(vm['name'])
except Exception:
self.logger.debug('Failed to retrieve VM status for %r', vm['name'], exc_info=True)
return get_default_status()
@accepts(Int('id'), roles=['VM_READ'])
@returns(Str(null=True))
def log_file_path(self, vm_id):
"""
Retrieve log file path of `id` VM.
It will return path of the log file if it exists and `null` otherwise.
"""
vm = self.middleware.call_sync('vm.get_instance', vm_id)
path = f'/var/log/libvirt/qemu/{vm["id"]}_{vm["name"]}.log'
return path if os.path.exists(path) else None
    @accepts(Int('id'), roles=['VM_READ'])
    @returns()
    @job(pipes=['output'])
    def log_file_download(self, job, vm_id):
        """
        Retrieve log file contents of `id` VM.

        It will download empty file if log file does not exist.
        """
        # When the log file is missing nothing is written to the pipe, so the
        # consumer simply receives an empty download.
        if path := self.log_file_path(vm_id):
            with open(path, 'rb') as f:
                shutil.copyfileobj(f, job.pipes.output.w)
| 22,323 | Python | .py | 454 | 37.817181 | 122 | 0.589064 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,021 | vm_supervisor.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/vm_supervisor.py | import contextlib
from middlewared.service import CallError
from .connection import LibvirtConnectionMixin
from .supervisor import VMSupervisor
from .utils import ACTIVE_STATES
class VMSupervisorMixin(LibvirtConnectionMixin):
vms = {}
def _add(self, vm_id):
vm = self.middleware.call_sync('vm.get_instance', vm_id)
self._add_with_vm_data(vm)
def _add_with_vm_data(self, vm):
self.vms[vm['name']] = VMSupervisor(vm, self.middleware)
def _has_domain(self, vm_name):
return vm_name in self.vms and self.vms[vm_name].domain
def _rename_domain(self, old, new):
vm = self.vms.pop(old['name'])
vm.update_domain(new)
self.vms[new['name']] = vm
def _clear(self):
VMSupervisorMixin.vms = {}
def _vm_from_name(self, vm_name):
return self.middleware.call_sync('vm.query', [['name', '=', vm_name]], {'get': True, 'force_sql_filters': True})
def _undefine_domain(self, vm_name):
domain = self.vms.pop(vm_name, None)
if domain and domain.domain:
domain.undefine_domain()
else:
VMSupervisor(self._vm_from_name(vm_name), self.middleware).undefine_domain()
def _check_add_domain(self, vm_name):
if not self._has_domain(vm_name):
try:
self._add(self._vm_from_name(vm_name)['id'])
except Exception as e:
raise CallError(f'Unable to define domain for {vm_name}: {e}')
if not self._has_domain(vm_name):
raise CallError(f'Libvirt domain for {vm_name} does not exist')
def _check_domain_status(self, vm_name, desired_status='RUNNING'):
if not self._has_domain(vm_name):
raise CallError(f'Libvirt Domain for {vm_name} does not exist')
desired_status = desired_status if isinstance(desired_status, list) else [desired_status]
configured_status = 'ERROR'
with contextlib.suppress(Exception):
configured_status = self._status(vm_name)['state']
if configured_status == 'ERROR':
raise CallError(f'Unable to determine {vm_name!r} VM state')
if configured_status not in desired_status:
raise CallError(f'VM state is currently not {" / ".join(desired_status)!r}')
def _start(self, vm_name):
self._check_add_domain(vm_name)
self.vms[vm_name].start(vm_data=self._vm_from_name(vm_name))
def _poweroff(self, vm_name):
self._check_domain_status(vm_name, ACTIVE_STATES)
self.vms[vm_name].poweroff()
def _stop(self, vm_name, shutdown_timeout):
self._check_domain_status(vm_name)
self.vms[vm_name].stop(shutdown_timeout)
def _suspend(self, vm_name):
self._check_domain_status(vm_name)
self.vms[vm_name].suspend()
def _resume(self, vm_name):
self._check_domain_status(vm_name, 'SUSPENDED')
self.vms[vm_name].resume()
def _status(self, vm_name):
self._check_setup_connection()
return self.vms[vm_name].status()
def _memory_info(self, vm_name):
self._check_setup_connection()
self._check_domain_status(vm_name, ACTIVE_STATES)
return self.vms[vm_name].memory_usage()
| 3,234 | Python | .py | 69 | 38.449275 | 120 | 0.640242 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,022 | connection.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/connection.py | import contextlib
import libvirt
import os
from middlewared.service import CallError
from .utils import LIBVIRT_URI
class LibvirtConnectionMixin:
    """Mixin managing one shared libvirt connection per process."""

    # Shared across all mixin users (assigned on the class in _open/_close)
    LIBVIRT_CONNECTION = None
    # Cached result of the /dev/kvm probe (None = not yet checked)
    KVM_SUPPORTED = None

    def _open(self):
        """Open the shared libvirt connection (registering libvirt's default
        event implementation first)."""
        try:
            # We want to do this before initializing libvirt connection
            libvirt.virEventRegisterDefaultImpl()
            LibvirtConnectionMixin.LIBVIRT_CONNECTION = libvirt.open(LIBVIRT_URI)
        except libvirt.libvirtError as e:
            raise CallError(f'Failed to open libvirt connection: {e}')

    def _close(self):
        """Close the shared connection and forget it on success."""
        try:
            self.LIBVIRT_CONNECTION.close()
        except libvirt.libvirtError as e:
            raise CallError(f'Failed to close libvirt connection: {e}')
        else:
            LibvirtConnectionMixin.LIBVIRT_CONNECTION = None

    def _is_kvm_supported(self):
        # We check if /dev/kvm exists to ensure that kvm can be consumed on this machine.
        # Libvirt will still start even if kvm cannot be used on the machine which would falsely
        # give the impression that virtualization can be used. We have checks in place to check if system
        # supports virtualization but if we incorporate that check in all of the vm exposed methods which
        # consume libvirt, it would be an expensive call as we figure that out by making a subprocess call
        if self.KVM_SUPPORTED is None:
            self.KVM_SUPPORTED = os.path.exists('/dev/kvm')
        return self.KVM_SUPPORTED

    def _is_libvirt_connection_alive(self):
        with contextlib.suppress(libvirt.libvirtError):
            # We see isAlive call failed for a user in NAS-109072, it would be better
            # if we handle this to ensure that system recognises libvirt connection
            # is no longer active and a new one should be initiated.
            return (
                self.LIBVIRT_CONNECTION and self.LIBVIRT_CONNECTION.isAlive() and
                isinstance(self.LIBVIRT_CONNECTION.listAllDomains(), list)
            )
        return False

    def _list_domains(self):
        # Returns None (not {}) when libvirt raises; callers must handle both.
        with contextlib.suppress(libvirt.libvirtError):
            return {domain.name(): domain.state() for domain in self.LIBVIRT_CONNECTION.listAllDomains()}

    def _is_connection_alive(self):
        return self._is_kvm_supported() and self._is_libvirt_connection_alive()

    def _system_supports_virtualization(self):
        # Raises (rather than returning False) so callers surface the error directly
        if not self._is_kvm_supported():
            raise CallError('This system does not support virtualization.')

    def _check_connection_alive(self):
        self._system_supports_virtualization()
        if not self._is_libvirt_connection_alive():
            raise CallError('Failed to connect to libvirt')

    def _safely_check_setup_connection(self, timeout: int = 10):
        # Attempt to (re)establish the connection when it is not alive.
        if not self._is_connection_alive():
            self.middleware.call_sync('vm.setup_libvirt_connection', timeout)

    def _check_setup_connection(self):
        """Ensure a live libvirt connection, raising CallError when impossible."""
        self._safely_check_setup_connection()
        self._check_connection_alive()
| 3,040 | Python | .py | 59 | 42.474576 | 106 | 0.685098 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,023 | disk_utils.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/disk_utils.py | import errno
import os
import re
import shlex
import subprocess
from middlewared.plugins.zfs_.utils import zvol_name_to_path
from middlewared.schema import accepts, Bool, Dict, returns, Str
from middlewared.service import CallError, Service, job
# Disk image formats qemu-img can auto-detect and convert for us
VALID_DISK_FORMATS = ['qcow2', 'qed', 'raw', 'vdi', 'vpc', 'vmdk']
def _run_qemu_img_convert(job, logger, cmd_args, progress_description, verb):
    """Run a ``qemu-img convert`` command, relaying its ``-p`` progress output.

    `cmd_args` is the full argument vector (executed without a shell). Progress
    percentages are forwarded to `job`; any non-progress output is collected and
    included in the CallError raised when qemu-img exits non-zero.
    """
    logger.debug('Running disk %s using: %r', verb, cmd_args)
    re_progress = re.compile(r'(\d+\.\d+)')
    captured_output = ''
    cp = subprocess.Popen(
        cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        bufsize=1, universal_newlines=True,
    )
    for line in iter(cp.stdout.readline, ""):
        progress = re_progress.search(line.lstrip())
        if progress:
            try:
                job.set_progress(round(float(progress.group(1))), progress_description)
            except ValueError:
                logger.warning('Invalid progress in: %r', progress.group(1))
        else:
            captured_output += line
            logger.warning('No progress reported from qemu-img: %r', line.lstrip())
    cp.wait()
    if cp.returncode:
        raise CallError(f'Failed to {verb} disk: {captured_output}')


class VMService(Service):

    @accepts(Dict(
        'vm_info',
        Str('diskimg', required=True),
        Str('zvol', required=True)
    ))
    @returns(Bool())
    @job(lock_queue_size=1, lock=lambda args: f"zvol_disk_image_{args[-1]['zvol']}")
    def import_disk_image(self, job, data):
        """
        Imports a specified disk image.

        Utilizes qemu-img with the auto-detect functionality to auto-convert
        any supported disk image format to RAW -> ZVOL

        As of this implementation it supports:

        - QCOW2
        - QED
        - RAW
        - VDI
        - VPC
        - VMDK

        `diskimg` is a required parameter for the incoming disk image
        `zvol` is the required target for the imported disk image
        """
        if not self.middleware.call_sync('zfs.dataset.query', [('id', '=', data['zvol'])]):
            raise CallError(f"zvol {data['zvol']} does not exist.", errno.ENOENT)
        if not os.path.exists(data['diskimg']):
            raise CallError('Disk Image does not exist.', errno.ENOENT)
        zvol_device_path = str(zvol_name_to_path(data['zvol']))
        if not os.path.exists(zvol_device_path):
            raise CallError('Zvol device does not exist.', errno.ENOENT)
        # qemu-img auto-detects the source format; only the output format is pinned
        _run_qemu_img_convert(
            job, self.logger,
            ['qemu-img', 'convert', '-p', '-O', 'raw', data['diskimg'], zvol_device_path],
            'Disk Import Progress', 'import',
        )
        return True

    @accepts(Dict(
        'vm_info',
        Str('format', required=True),
        Str('directory', required=True),
        Str('zvol', required=True)
    ))
    @returns(Bool())
    @job(lock_queue_size=1, lock=lambda args: f"zvol_disk_image_{args[-1]['zvol']}")
    def export_disk_image(self, job, data):
        """
        Exports a zvol to a formatted VM disk image.

        Utilizes qemu-img with the conversion functionality to export a zvol to
        any supported disk image format, from RAW -> ${OTHER}. The resulting file
        will be set to inherit the permissions of the target directory.

        As of this implementation it supports the following {format} options :

        - QCOW2
        - QED
        - RAW
        - VDI
        - VPC
        - VMDK

        `format` is a required parameter for the exported disk image
        `directory` is a required parameter for the export disk image
        `zvol` is the source for the disk image
        """
        if not self.middleware.call_sync('zfs.dataset.query', [('id', '=', data['zvol'])]):
            raise CallError(f"zvol {data['zvol']} does not exist.", errno.ENOENT)
        if not os.path.isdir(data['directory']):
            raise CallError(f"Export directory {data['directory']} does not exist.", errno.ENOENT)
        zvol_device_path = str(zvol_name_to_path(data['zvol']))
        if not os.path.exists(zvol_device_path):
            raise CallError('Zvol device does not exist.', errno.ENOENT)
        # Normalize once for BOTH validation and qemu-img: qemu-img's format
        # names are lowercase, so passing the raw user value (e.g. 'QCOW2')
        # would pass validation here but be rejected by qemu-img.
        disk_format = data['format'].lower()
        if disk_format not in VALID_DISK_FORMATS:
            raise CallError('Invalid disk format specified.', errno.ENOENT)
        # The exported image inherits ownership from the destination directory
        parent_stat = os.stat(data['directory'])
        targetfile = os.path.join(
            data['directory'], f"vmdisk-{os.path.basename(data['zvol'])}.{disk_format}"
        )
        _run_qemu_img_convert(
            job, self.logger,
            ['qemu-img', 'convert', '-p', '-f', 'raw', '-O', disk_format, zvol_device_path, targetfile],
            'Disk Export Progress', 'export',
        )
        os.chown(targetfile, parent_stat.st_uid, parent_stat.st_gid)
        return True
25,024 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/attachments.py | import collections
import os.path
from middlewared.common.attachment import FSAttachmentDelegate
from middlewared.common.ports import PortDelegate
from middlewared.plugins.zfs_.utils import zvol_path_to_name
from middlewared.service import private, Service
from .utils import ACTIVE_STATES
async def determine_recursive_search(recursive, device, child_datasets):
    """Decide whether callers must search recursively for affected devices.

    Recursive snapshot tasks always search recursively and zvol-backed DISK
    devices never require it. File-backed devices (RAW/CDROM) skip the
    recursive search only when their path lives on one of *child_datasets*:
    those are separate filesystems and are unaffected by the parent dataset's
    snapshot.
    """
    # TODO: Add unit tests for this please
    if recursive:
        return True
    if device['dtype'] == 'DISK':
        return False
    relative_path = device['attributes']['path'].removeprefix('/mnt/')
    # Walk prefixes of the path from longest to shortest; each prefix is a
    # candidate dataset name that might own the file
    candidate_datasets = (
        relative_path.rsplit('/', depth)[0]
        for depth in range(relative_path.count('/'))
    )
    return not any(ds in child_datasets for ds in candidate_datasets)
class VMService(Service):
    @private
    async def periodic_snapshot_task_begin(self, task_id):
        """Resolve a periodic snapshot task and report the VM devices it affects."""
        task = await self.middleware.call('pool.snapshottask.query', [['id', '=', task_id]], {'get': True})
        return await self.query_snapshot_begin(task['dataset'], task['recursive'])
    @private
    async def query_snapshot_begin(self, dataset, recursive):
        """Map each VM (keyed by the device's `vm` value) to the disk-backed
        devices living on `dataset` (or its children, when the search is
        recursive) that a snapshot of `dataset` would affect.
        """
        vms = collections.defaultdict(list)
        # Children of `dataset`; used to decide whether a file-backed device
        # actually lives on a different filesystem than the snapshotted one
        datasets = {
            d['id']: d for d in await self.middleware.call(
                'pool.dataset.query', [['id', '^', f'{dataset}/']], {'extra': {'properties': []}}
            )
        }
        # Skip VMs that are both inactive and have suspend_on_snapshot disabled
        to_ignore_vms = await self.get_vms_to_ignore_for_querying_attachments(True, [['suspend_on_snapshot', '=', False]])
        for device in await self.middleware.call(
            'vm.device.query', [
                ['dtype', 'in', ('DISK', 'RAW', 'CDROM')],
                ['vm', 'nin', to_ignore_vms],
            ]
        ):
            path = device['attributes'].get('path')
            if not path:
                continue
            elif path.startswith('/dev/zvol'):
                # Translate the zvol device node into its /mnt dataset path
                path = os.path.join('/mnt', zvol_path_to_name(path))
            dataset_path = os.path.join('/mnt', dataset)
            if await determine_recursive_search(recursive, device, datasets):
                if await self.middleware.call('filesystem.is_child', path, dataset_path):
                    vms[device['vm']].append(device)
            elif dataset_path == path:
                # Non-recursive: only an exact dataset match counts
                vms[device['vm']].append(device)
        return vms
    @private
    async def get_vms_to_ignore_for_querying_attachments(self, enabled, extra_filters=None):
        """Return {vm id: vm} for VMs excluded from attachment queries:
        inactive VMs when `enabled` is True, active VMs otherwise.
        `extra_filters` are ANDed onto the state filter.
        """
        extra_filters = extra_filters or []
        return {
            vm['id']: vm for vm in await self.middleware.call(
                'vm.query', [('status.state', 'nin' if enabled else 'in', ACTIVE_STATES)] + extra_filters
            )
        }
class VMFSAttachmentDelegate(FSAttachmentDelegate):
    """Lets the pool/dataset plugin discover and control VMs whose disk-backed
    devices live under a dataset path (used when locking/exporting datasets).
    """
    name = 'vm'
    title = 'VM'
    async def query(self, path, enabled, options=None):
        """Return [{'id', 'name'}] for VMs with a DISK/RAW/CDROM device under `path`.

        `enabled` selects which VM states are considered (see
        vm.get_vms_to_ignore_for_querying_attachments).
        """
        vms_attached = []
        ignored_vms = await self.middleware.call('vm.get_vms_to_ignore_for_querying_attachments', enabled)
        for device in await self.middleware.call('datastore.query', 'vm.device'):
            if (device['dtype'] not in ('DISK', 'RAW', 'CDROM')) or device['vm']['id'] in ignored_vms:
                continue
            disk = device['attributes'].get('path')
            if not disk:
                continue
            if disk.startswith('/dev/zvol'):
                # Translate the zvol device node into its /mnt dataset path
                disk = os.path.join('/mnt', zvol_path_to_name(disk))
            if await self.middleware.call('filesystem.is_child', disk, path):
                vm = {
                    'id': device['vm'].get('id'),
                    'name': device['vm'].get('name'),
                }
                # A VM with several matching devices is reported once
                if vm not in vms_attached:
                    vms_attached.append(vm)
        return vms_attached
    async def delete(self, attachments):
        # Best effort: stop each attached VM, logging (not raising) on failure
        for attachment in attachments:
            try:
                await self.middleware.call('vm.stop', attachment['id'])
            except Exception:
                self.middleware.logger.warning('Unable to vm.stop %r', attachment['id'])
    async def toggle(self, attachments, enabled):
        """Start (enabled=True) or stop (enabled=False) every attached VM, best effort."""
        for attachment in attachments:
            action = 'vm.start' if enabled else 'vm.stop'
            try:
                await self.middleware.call(action, attachment['id'])
            except Exception:
                self.middleware.logger.warning('Unable to %s %r', action, attachment['id'])
    async def stop(self, attachments):
        await self.toggle(attachments, False)
    async def start(self, attachments):
        await self.toggle(attachments, True)
class VMPortDelegate(PortDelegate):
    """Reports the TCP ports bound by VM DISPLAY devices to the port registry."""

    name = 'vm devices'
    namespace = 'vm.device'
    title = 'VM Device Service'

    async def get_ports(self):
        """Return one entry per DISPLAY device with its (bind, port) pairs."""
        vm_names = {vm['id']: vm['name'] for vm in await self.middleware.call('vm.query')}
        display_devices = await self.middleware.call('vm.device.query', [['dtype', '=', 'DISPLAY']])
        return [
            {
                'description': f'{vm_names[dev["vm"]]!r} VM',
                'ports': [
                    (dev['attributes']['bind'], dev['attributes']['port']),
                    (dev['attributes']['bind'], dev['attributes']['web_port']),
                ],
            }
            for dev in display_devices
        ]
async def setup(middleware):
    """Plugin entry point: register the VM delegates for dataset attachments and port usage."""
    middleware.create_task(
        middleware.call('pool.dataset.register_attachment_delegate', VMFSAttachmentDelegate(middleware))
    )
    await middleware.call('port.register_attachment_delegate', VMPortDelegate(middleware))
| 5,702 | Python | .py | 122 | 36.409836 | 122 | 0.59964 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,025 | info.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/info.py | import os
import re
import subprocess
from xml.etree import ElementTree as etree
from middlewared.schema import Bool, Dict, Int, returns, Str
from middlewared.service import accepts, private, Service
from middlewared.utils import run
from .connection import LibvirtConnectionMixin
from .utils import get_virsh_command_args
# Extracts the NASID count from `cpuid -l 0x8000000A` output on AMD CPUs
RE_AMD_NASID = re.compile(r'NASID:.*\((.*)\)')
# CPU vendor strings as they appear in `lscpu` output
RE_VENDOR_AMD = re.compile(r'AuthenticAMD')
RE_VENDOR_INTEL = re.compile(r'GenuineIntel')
class VMService(Service, LibvirtConnectionMixin):
    # Process-wide cache of libvirt CPU model names; populated lazily by
    # cpu_model_choices() and shared by all instances of this service
    CPU_MODEL_CHOICES = {}
    @accepts(roles=['VM_READ'])
    @returns(Bool())
    def supports_virtualization(self):
        """
        Returns "true" if system supports virtualization, "false" otherwise
        """
        return self._is_kvm_supported()
    @private
    async def license_active(self):
        """
        If this is HA capable hardware and has NOT been licensed to run VMs
        then this will return False. Otherwise this will return true.
        """
        can_run_vms = True
        if await self.middleware.call('system.is_ha_capable'):
            license_ = await self.middleware.call('system.license')
            can_run_vms = license_ is not None and 'VM' in license_['features']
        return can_run_vms
    @accepts(roles=['VM_READ'])
    @returns(Dict(
        Bool('supported', required=True),
        Str('error', null=True, required=True),
    ))
    def virtualization_details(self):
        """
        Retrieve details if virtualization is supported on the system and in case why it's not supported if it isn't.
        """
        return {
            'supported': self._is_kvm_supported(),
            'error': None if self._is_kvm_supported() else 'Your CPU does not support KVM extensions',
        }
    @accepts(roles=['VM_READ'])
    @returns(Int())
    async def maximum_supported_vcpus(self):
        """
        Returns maximum supported VCPU's
        """
        return 255
    @accepts(roles=['VM_READ'])
    @returns(Dict(
        'cpu_flags',
        Bool('intel_vmx', required=True),
        Bool('unrestricted_guest', required=True),
        Bool('amd_rvi', required=True),
        Bool('amd_asids', required=True),
    ))
    async def flags(self):
        """
        Returns a dictionary with CPU flags for the hypervisor.
        """
        flags = {
            'intel_vmx': False,
            'unrestricted_guest': False,
            'amd_rvi': False,
            'amd_asids': False,
        }
        supports_vm = await self.middleware.call('vm.supports_virtualization')
        if not supports_vm:
            return flags
        # Vendor is detected from lscpu output; all flags stay False on failure
        cp = await run(['lscpu'], check=False)
        if cp.returncode:
            self.middleware.logger.error('Failed to retrieve CPU details: %s', cp.stderr.decode())
            return flags
        if RE_VENDOR_INTEL.findall(cp.stdout.decode()):
            flags['intel_vmx'] = True
            # kvm_intel exposes whether unrestricted guest mode is enabled via sysfs
            unrestricted_guest_path = '/sys/module/kvm_intel/parameters/unrestricted_guest'
            if os.path.exists(unrestricted_guest_path):
                with open(unrestricted_guest_path, 'r') as f:
                    flags['unrestricted_guest'] = f.read().strip().lower() == 'y'
        elif RE_VENDOR_AMD.findall(cp.stdout.decode()):
            flags['amd_rvi'] = True
            # SVM leaf 0x8000000A reports the number of ASIDs; any zero count
            # (or no match at all, via the ['0'] fallback) means unsupported
            cp = await run(['cpuid', '-l', '0x8000000A'], check=False)
            if cp.returncode:
                self.middleware.logger.error('Failed to execute "cpuid -l 0x8000000A": %s', cp.stderr.decode())
            else:
                flags['amd_asids'] = all(v != '0' for v in (RE_AMD_NASID.findall(cp.stdout.decode()) or ['0']) if v)
        return flags
    @accepts(Int('id'))
    @returns(Str('console_device'))
    async def get_console(self, id_):
        """
        Get the console device from a given guest.
        """
        vm = await self.middleware.call('vm.get_instance', id_)
        return f'{vm["id"]}_{vm["name"]}'
    @accepts(roles=['VM_READ'])
    @returns(Dict(
        additional_attrs=True,
        example={
            '486': '486',
            'pentium': 'pentium',
        }
    ))
    def cpu_model_choices(self):
        """
        Retrieve CPU Model choices which can be used with a VM guest to emulate the CPU in the guest.
        """
        self.middleware.call_sync('vm.check_setup_libvirt')
        base_path = '/usr/share/libvirt/cpu_map'
        # Serve from the class-level cache once populated (or when libvirt's
        # cpu_map is absent, in which case the empty mapping is returned)
        if self.CPU_MODEL_CHOICES or not os.path.exists(base_path):
            return self.CPU_MODEL_CHOICES
        mapping = {}
        with open(os.path.join(base_path, 'index.xml'), 'r') as f:
            index_xml = etree.fromstring(f.read().strip())
        # Query virsh per architecture listed in cpu_map/index.xml;
        # libvirt's 'x86' arch is named 'x86_64' on the virsh side
        for arch in filter(lambda a: a.tag == 'arch' and a.get('name'), list(index_xml)):
            cp = subprocess.Popen(
                get_virsh_command_args() + ['cpu-models', arch.get('name') if arch.get('name') != 'x86' else 'x86_64'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            stdout = cp.communicate()[0]
            if cp.returncode:
                # Skip architectures virsh cannot report on
                continue
            mapping.update({m: m for m in filter(bool, stdout.decode().strip().split('\n'))})
        self.CPU_MODEL_CHOICES.update(mapping)
        return self.CPU_MODEL_CHOICES
| 5,250 | Python | .py | 129 | 31.782946 | 119 | 0.59902 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,026 | domain_xml.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/supervisor/domain_xml.py | import os
import shlex
from middlewared.plugins.vm.devices import CDROM, DISK, PCI, RAW, DISPLAY, USB
from middlewared.plugins.vm.numeric_set import parse_numeric_set
from middlewared.plugins.vm.utils import SYSTEM_NVRAM_FOLDER_PATH, get_vm_nvram_file_name
from middlewared.utils import Nid
from .utils import create_element
def domain_children(vm_data, context):
    """Return the ordered child elements of the libvirt <domain> XML for this VM.

    `context` supplies the instantiated device objects ('devices') and the CPU
    model mapping ('cpu_model_choices') consumed by the helpers below.
    """
    children = [
        create_element('name', attribute_dict={'text': f'{vm_data["id"]}_{vm_data["name"]}'}),
        create_element('uuid', attribute_dict={'text': vm_data['uuid']}),
        create_element('title', attribute_dict={'text': vm_data['name']}),
        create_element('description', attribute_dict={'text': vm_data['description']}),
        # OS/boot related xml - returns an iterable
        os_xml(vm_data),
        # CPU related xml
        *cpu_xml(vm_data, context),
        # Memory related xml
        *memory_xml(vm_data),
        # Add features
        features_xml(vm_data),
        # Clock offset
        clock_xml(vm_data),
        # Command line args
        commandline_xml(vm_data),
        # Devices
        devices_xml(vm_data, context),
    ]
    # Wire memory if PCI passthru device is configured
    # Implicit configuration for now.
    #
    # To avoid surprising side effects from implicit configuration, wiring of memory
    # should preferably be an explicit vm configuration option and trigger error
    # message if not selected when PCI passthru is configured.
    #
    if any(isinstance(device, PCI) for device in context['devices']):
        children.append(
            create_element(
                'memoryBacking', attribute_dict={
                    'children': [
                        create_element('locked'),
                    ]
                }
            )
        )
    return children
def clock_xml(vm_data):
    """Build the <clock> element; Hyper-V guests additionally get the hypervclock timer."""
    children = []
    if vm_data['hyperv_enlightenments']:
        children.append(create_element('timer', name='hypervclock', present='yes'))
    offset = 'localtime' if vm_data['time'] == 'LOCAL' else 'utc'
    return create_element('clock', attribute_dict={'children': children}, offset=offset)
def commandline_xml(vm_data):
    """Build the qemu-namespaced <commandline> element from the VM's extra arguments."""
    extra_args = shlex.split(vm_data['command_line_args'])
    arg_elements = [create_element('arg', value=arg) for arg in extra_args]
    return create_element(
        'commandline',
        xmlns='http://libvirt.org/schemas/domain/qemu/1.0',
        attribute_dict={'children': arg_elements},
    )
def cpu_xml(vm_data, context):
    """Build the <cpu>, <vcpu> and optional <cputune>/<numatune> elements.

    Total vCPUs exposed to the guest is sockets * cores * threads.
    """
    features = []
    if vm_data['cpu_mode'] == 'HOST-PASSTHROUGH':
        features.append(create_element('cache', mode='passthrough'))
        if vm_data['enable_cpu_topology_extension']:
            features.append(create_element('feature', policy='require', name='topoext'))
    cpu_nodes = [
        create_element(
            'cpu', attribute_dict={
                'children': [
                    create_element(
                        'topology', sockets=str(vm_data['vcpus']), cores=str(vm_data['cores']),
                        threads=str(vm_data['threads'])
                    ),
                ] + ([
                    create_element(
                        'model', fallback='forbid', attribute_dict={'text': vm_data['cpu_model']}
                    )
                    # Right now this is best effort for the domain to start with specified CPU Model and not fallback
                    # However if some features are missing in the host, qemu will right now still start the domain
                    # and mark them as missing. We should perhaps make this configurable in the future to control
                    # if domain should/should not be started
                ] if vm_data['cpu_mode'] == 'CUSTOM' and vm_data['cpu_model'] and context['cpu_model_choices'].get(
                    vm_data['cpu_model']
                ) else []) + features,
            }, mode=vm_data['cpu_mode'].lower(),
        ),
        # VCPU related xml
        create_element(
            'vcpu',
            attribute_dict={
                'text': str(vm_data['vcpus'] * vm_data['cores'] * vm_data['threads']),
            }, **({'cpuset': vm_data['cpuset']} if vm_data['cpuset'] else {}),
        )
    ]
    # Pin each guest vCPU to one host CPU from the configured cpuset
    if vm_data['pin_vcpus'] and vm_data['cpuset']:
        cpu_nodes.append(create_element('cputune', attribute_dict={
            'children': [
                create_element('vcpupin', vcpu=str(i), cpuset=str(cpu))
                for i, cpu in enumerate(parse_numeric_set(vm_data['cpuset']))
            ]
        }))
    # Restrict guest memory allocation to the given NUMA node set
    if vm_data['nodeset']:
        cpu_nodes.append(create_element(
            'numatune', attribute_dict={
                'children': [
                    create_element('memory', nodeset=vm_data['nodeset']),
                ]
            },
        ))
    return cpu_nodes
def devices_xml(vm_data, context):
    """Build the <devices> element from the instantiated device objects in `context`.

    Keeps independent counters for boot order, SCSI vs VIRTIO disk numbering
    and USB controller indexes.
    """
    boot_no = Nid(1)
    scsi_device_no = Nid(1)
    usb_controller_no = Nid(1)
    # nec-xhci is added by default for each domain by libvirt so we update our mapping accordingly
    usb_controllers = {'nec-xhci': 0}
    virtual_device_no = Nid(1)
    devices = []
    for device in context['devices']:
        if isinstance(device, (DISK, CDROM, RAW)):
            # VIRTIO and SCSI disks are numbered independently (separate buses)
            if device.data['attributes'].get('type') == 'VIRTIO':
                disk_no = virtual_device_no()
            else:
                disk_no = scsi_device_no()
            device_xml = device.xml(disk_number=disk_no, boot_number=boot_no())
        elif isinstance(device, USB):
            device_xml = []
            # Emit a <controller> element the first time each controller model is seen
            if device.controller_type not in usb_controllers:
                usb_controllers[device.controller_type] = usb_controller_no()
                device_xml.append(create_element(
                    'controller', type='usb', index=str(usb_controllers[device.controller_type]),
                    model=device.controller_type)
                )
            usb_device_xml = device.xml(controller_mapping=usb_controllers)
            if isinstance(usb_device_xml, (tuple, list)):
                device_xml.extend(usb_device_xml)
            else:
                device_xml.append(usb_device_xml)
        else:
            device_xml = device.xml()
        devices.extend(device_xml if isinstance(device_xml, (tuple, list)) else [device_xml])
    spice_server_available = display_device_available = False
    for device in filter(lambda d: isinstance(d, DISPLAY), context['devices']):
        display_device_available = True
        if device.is_spice_type():
            spice_server_available = True
            break
    if vm_data['ensure_display_device'] and not display_device_available:
        # We should add a video device if there is no display device configured because most by
        # default if not all headless servers like ubuntu etc require it to boot
        devices.append(create_element('video'))
    if spice_server_available:
        # We always add spicevmc channel device when a spice display device is available to allow users
        # to install guest agents for improved vm experience
        devices.append(create_element(
            'channel', type='spicevmc', attribute_dict={
                'children': [create_element('target', type='virtio', name='com.redhat.spice.0')]
            }
        ))
    if vm_data['trusted_platform_module']:
        # Emulated TPM 2.0 using the CRB model
        devices.append(create_element(
            'tpm', model='tpm-crb', attribute_dict={
                'children': [create_element('backend', type='emulator', version='2.0')]
            },
        ))
    # Virtio channel used by the QEMU guest agent
    devices.append(create_element('channel', type='unix', attribute_dict={
        'children': [create_element('target', type='virtio', name='org.qemu.guest_agent.0')]
    }))
    devices.append(create_element('serial', type='pty'))
    if vm_data['min_memory']:
        # memballoon device needs to be added if memory ballooning is enabled
        devices.append(create_element('memballoon', model='virtio', autodeflate='on'))
    return create_element('devices', attribute_dict={'children': devices})
def features_xml(vm_data):
    """Build the <features> element: acpi/apic/msrs always, plus optional
    KVM-hiding and Hyper-V enlightenments."""
    base_features = [
        create_element('acpi'),
        create_element('apic'),
        create_element('msrs', unknown='ignore'),
    ]
    optional_features = []
    if vm_data['hide_from_msr']:
        # Hide the KVM hypervisor signature from the guest
        optional_features.append(
            create_element('kvm', attribute_dict={'children': [create_element('hidden', state='on')]})
        )
    if vm_data['hyperv_enlightenments']:
        optional_features.append(get_hyperv_xml())
    return create_element(
        'features', attribute_dict={'children': base_features + optional_features}
    )
# Documentation for each enlightenment can be found from:
# https://github.com/qemu/qemu/blob/master/docs/system/i386/hyperv.rst
def get_hyperv_xml():
    """Return the <hyperv> element enabling the supported Hyper-V enlightenments."""
    return create_element(
        'hyperv', attribute_dict={
            'children': [
                create_element('relaxed', state='on'),
                create_element('vapic', state='on'),
                create_element('spinlocks', state='on', retries='8191'),
                create_element('reset', state='on'),
                create_element('frequencies', state='on'),
                # All enlightenments under vpindex depend on it.
                create_element('vpindex', state='on'),
                create_element('synic', state='on'),
                create_element('ipi', state='on'),
                create_element('tlbflush', state='on'),
                create_element('stimer', state='on')
            ],
        }
    )
def memory_xml(vm_data):
    """Return <memory> (maximum) and, when ballooning is on, <currentMemory> elements."""
    memory_xml_nodes = [create_element('memory', unit='M', attribute_dict={'text': str(vm_data['memory'])})]
    # Memory Ballooning - this will be memory which will always be allocated to the VM
    # If not specified, this defaults to `memory`
    if vm_data['min_memory']:
        memory_xml_nodes.append(
            create_element('currentMemory', unit='M', attribute_dict={'text': str(vm_data['min_memory'])})
        )
    return memory_xml_nodes
def os_xml(vm_data):
    """Build the <os> element: machine type plus UEFI loader/NVRAM when applicable."""
    children = [create_element(
        'type',
        # arch/machine attributes are passed only when set; the attribute key
        # is derived by stripping the '_type' suffix from the vm_data key
        attribute_dict={'text': 'hvm'}, **{
            k[:-5]: vm_data[k] for k in filter(lambda t: vm_data[t], ('arch_type', 'machine_type'))
        }
    )]
    if vm_data['bootloader'] == 'UEFI':
        children.extend([
            create_element(
                'loader', attribute_dict={'text': f'/usr/share/OVMF/{vm_data["bootloader_ovmf"]}'},
                readonly='yes', type='pflash',
            ),
            # Per-VM NVRAM file holding the UEFI variables
            create_element('nvram', attribute_dict={
                'text': os.path.join(SYSTEM_NVRAM_FOLDER_PATH, get_vm_nvram_file_name(vm_data)),
            })
        ])
    return create_element('os', attribute_dict={'children': children})
| 10,684 | Python | .py | 238 | 34.44958 | 117 | 0.585246 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,027 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/supervisor/utils.py | import enum
import libvirt
from middlewared.plugins.vm.utils import create_element # noqa
class DomainState(enum.Enum):
    # Mirrors libvirt's virDomainState constants so domain states can be
    # referenced by name (e.g. DomainState.RUNNING) instead of raw integers
    NOSTATE = libvirt.VIR_DOMAIN_NOSTATE
    RUNNING = libvirt.VIR_DOMAIN_RUNNING
    BLOCKED = libvirt.VIR_DOMAIN_BLOCKED
    PAUSED = libvirt.VIR_DOMAIN_PAUSED
    SHUTDOWN = libvirt.VIR_DOMAIN_SHUTDOWN
    SHUTOFF = libvirt.VIR_DOMAIN_SHUTOFF
    CRASHED = libvirt.VIR_DOMAIN_CRASHED
    PMSUSPENDED = libvirt.VIR_DOMAIN_PMSUSPENDED
| 460 | Python | .py | 12 | 34.416667 | 63 | 0.779775 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,028 | supervisor.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/supervisor/supervisor.py | import contextlib
import itertools
import libvirt
import os
import sys
import threading
import time
from xml.etree import ElementTree as etree
from middlewared.service import CallError
from middlewared.plugins.vm.connection import LibvirtConnectionMixin
from middlewared.plugins.vm.devices import CDROM, DISK, NIC, PCI, RAW, DISPLAY, USB # noqa
from middlewared.plugins.vm.utils import ACTIVE_STATES
from .domain_xml import domain_children
from .utils import create_element, DomainState
class VMSupervisor(LibvirtConnectionMixin):
    """Manages the libvirt domain backing a single VM: defines/undefines the
    domain XML, drives lifecycle transitions (start/stop/suspend/resume) and
    runs the per-device pre/post hooks around those transitions.
    """
    def __init__(self, vm_data, middleware=None):
        self.vm_data = vm_data
        self.middleware = middleware
        self.devices = []
        self._check_setup_connection()
        # libvirt domain names follow the '<db id>_<vm name>' convention
        self.libvirt_domain_name = f'{self.vm_data["id"]}_{self.vm_data["name"]}'
        self._domain = self.stop_devices_thread = None
        self.update_domain()
    @property
    def domain(self):
        # Always go through the health check so a stale handle gets refreshed
        return self.domain_health_check()
    def domain_health_check(self):
        try:
            self._domain.state()
        except (AttributeError, libvirt.libvirtError):
            # Handle is missing or dead - re-acquire it from libvirt
            self.update_domain(update_devices=False)
        return self._domain
    def update_domain(self, vm_data=None, update_devices=True):
        # This can be called to update domain to reflect any changes introduced to the VM
        if update_devices:
            self.update_vm_data(vm_data)
        try:
            self._domain = self.LIBVIRT_CONNECTION.lookupByName(self.libvirt_domain_name)
        except libvirt.libvirtError:
            self._domain = None
        else:
            if not self._domain.isActive():
                # We have a domain defined and it is not running
                self.undefine_domain(for_update=True)
        if not self._domain:
            # This ensures that when a domain has been renamed, we undefine the previous domain name - if object
            # persists in this case of VMSupervisor - else it's the users responsibility to take care of this case
            new_name = f'{self.vm_data["id"]}_{self.vm_data["name"]}'
            if new_name != self.libvirt_domain_name:
                # Carry the UEFI NVRAM file over to the renamed domain
                old_nvram_filename = f'/var/lib/libvirt/qemu/nvram/{self.libvirt_domain_name}_VARS.fd'
                with contextlib.suppress(FileNotFoundError):
                    os.rename(old_nvram_filename,
                              f'/var/lib/libvirt/qemu/nvram/{new_name}_VARS.fd')
                self.libvirt_domain_name = new_name
            self.__define_domain()
    def status(self):
        """Return {'state', 'pid', 'domain_state'} describing the domain."""
        domain = self.domain
        domain_state = DomainState(domain.state()[0])
        pid_path = os.path.join('/var/run/libvirt', 'qemu', f'{self.libvirt_domain_name}.pid')
        if domain.isActive():
            state = 'SUSPENDED' if domain_state == DomainState.PAUSED else 'RUNNING'
        else:
            state = 'STOPPED'
        data = {
            'state': state,
            'pid': None,
            'domain_state': domain_state.name,
        }
        if domain_state in (DomainState.PAUSED, DomainState.RUNNING):
            with contextlib.suppress(FileNotFoundError):
                # Do not make a stat call to check if file exists or not
                with open(pid_path, 'r') as f:
                    data['pid'] = int(f.read())
        return data
    def memory_usage(self):
        # We return this in bytes
        return self.domain.memoryStats().get('actual', 0) * 1024
    def __define_domain(self):
        if self._domain:
            raise CallError(f'{self.libvirt_domain_name} domain has already been defined')
        vm_xml = etree.tostring(self.construct_xml()).decode()
        if not self.LIBVIRT_CONNECTION.defineXML(vm_xml):
            raise CallError(f'Unable to define persistent domain for {self.libvirt_domain_name}')
        self._domain = self.LIBVIRT_CONNECTION.lookupByName(self.libvirt_domain_name)
    def undefine_domain(self, for_update=False):
        if self._domain.isActive():
            raise CallError(f'Domain {self.libvirt_domain_name} is active. Please stop it first')
        flags = 0
        if for_update:
            # Keep the NVRAM file so UEFI variables survive a re-define
            flags |= libvirt.VIR_DOMAIN_UNDEFINE_KEEP_NVRAM
        else:
            flags |= libvirt.VIR_DOMAIN_UNDEFINE_NVRAM
        self._domain.undefineFlags(flags)
        self._domain = None
    def __getattribute__(self, item):
        # Guard the lifecycle methods: refresh the domain handle first and
        # refuse to run them when no domain is defined at all
        retrieved_item = object.__getattribute__(self, item)
        if callable(retrieved_item) and item in ('start', 'stop', 'poweroff', 'undefine_domain', 'status'):
            self.domain_health_check()
            if not getattr(self, '_domain', None):
                raise RuntimeError('Domain attribute not defined, please re-instantiate the VM class')
        return retrieved_item
    def update_vm_data(self, vm_data=None):
        self.vm_data = vm_data or self.vm_data
        # Instantiate device wrappers by their dtype class name, honouring the
        # configured boot order (then db id) for deterministic ordering
        self.devices = [
            getattr(sys.modules[__name__], device['dtype'])(device, self.middleware)
            for device in sorted(self.vm_data['devices'], key=lambda x: (x['order'], x['id']))
        ]
    def unavailable_devices(self):
        return [d for d in self.devices if not d.is_available()]
    def vm_devices_context(self):
        # Snapshot of all VMs/devices, given to device hooks that need a global view
        return {
            'vms': self.middleware.call_sync('vm.query'),
            'vm_devices': self.middleware.call_sync('vm.device.query'),
        }
    def start(self, vm_data=None):
        """Start the domain, running device setup hooks with rollback on failure."""
        if self.domain.isActive():
            raise CallError(f'{self.libvirt_domain_name} domain is already active')
        self.update_vm_data(vm_data)
        errors = []
        context = self.vm_devices_context()
        # Phase 1: device-level setup; all devices are attempted, errors collected
        for device in self.devices:
            try:
                device.pre_start_vm_device_setup(context)
            except Exception as e:
                errors.append(str(e))
        if errors:
            errors = '\n'.join(errors)
            raise CallError(f'Failed setting up devices before VM start:\n{errors}')
        unavailable_devices = self.unavailable_devices()
        if unavailable_devices:
            raise CallError(
                f'VM will not start as {", ".join([str(d) for d in unavailable_devices])} device(s) are not available.'
            )
        successful = []
        errors = []
        # Phase 2: pre-start hooks; on the first failure, roll back the failing
        # device plus every previously successful one, then abort
        for device in self.devices:
            try:
                device.pre_start_vm()
            except Exception as e:
                errors.append(f'Failed to setup {device.data["dtype"]} device: {e}')
                for d in itertools.chain([device], successful):
                    try:
                        d.pre_start_vm_rollback()
                    except Exception as d_error:
                        errors.append(
                            f'Failed to rollback pre start changes for {d.data["dtype"]} device: {d_error}'
                        )
                break
            else:
                successful.append(device)
        if errors:
            raise CallError('\n'.join(errors))
        # Phase 3: boot the domain; roll back every device if booting fails
        try:
            self.update_domain(vm_data, update_devices=False)
            if self.domain.create() < 0:
                raise CallError(f'Failed to boot {self.vm_data["name"]} domain')
        except (libvirt.libvirtError, CallError) as e:
            errors = [str(e)]
            for device in self.devices:
                try:
                    device.pre_start_vm_rollback()
                except Exception as d_error:
                    errors.append(f'Failed to rollback pre start changes for {device.data["dtype"]} device: {d_error}')
            raise CallError('\n'.join(errors))
        # We initialize this when we are certain that the VM has indeed booted
        self.stop_devices_thread = threading.Thread(
            name=f'post_stop_devices_{self.libvirt_domain_name}', target=self.run_post_stop_actions
        )
        self.stop_devices_thread.start()
        errors = []
        # Phase 4: post-start hooks; errors are collected and raised together
        for device in self.devices:
            try:
                device.post_start_vm()
            except Exception as e:
                errors.append(f'Failed to execute post start actions for {device.data["dtype"]} device: {e}')
        else:
            # for/else: the loop never breaks, so this always runs after it
            if errors:
                raise CallError('\n'.join(errors))
    def _before_stopping_checks(self):
        if not self.domain.isActive():
            raise CallError(f'{self.libvirt_domain_name} domain is not active')
    def run_post_stop_actions(self):
        # Background thread body: poll until the domain leaves an active state,
        # then run every device's post-stop hook
        while self.status()['state'] in ACTIVE_STATES:
            time.sleep(5)
        errors = []
        context = self.vm_devices_context()
        for device in self.devices:
            try:
                device.post_stop_vm(context)
            except Exception as e:
                errors.append(f'Failed to execute post stop actions for {device.data["dtype"]} device: {e}')
        else:
            # for/else: the loop never breaks, so this always runs after it
            if errors:
                raise CallError('\n'.join(errors))
    def stop(self, shutdown_timeout=None):
        """Request a guest shutdown and wait up to `shutdown_timeout` seconds."""
        self._before_stopping_checks()
        self.domain.shutdown()
        shutdown_timeout = shutdown_timeout or self.vm_data['shutdown_timeout']
        # We wait for timeout seconds before initiating post stop activities for the vm
        # This is done because the shutdown call above is non-blocking
        while shutdown_timeout > 0 and self.status()['state'] == 'RUNNING':
            shutdown_timeout -= 5
            time.sleep(5)
    def poweroff(self):
        # Hard stop - equivalent to pulling the power cord
        self._before_stopping_checks()
        self.domain.destroy()
    def suspend(self):
        self._before_stopping_checks()
        self.domain.suspend()
    def _before_resuming_checks(self):
        if self.status()['state'] != 'SUSPENDED':
            raise CallError(f'{self.libvirt_domain_name!r} domain is not paused')
    def resume(self):
        self._before_resuming_checks()
        self.domain.resume()
    def get_domain_children(self):
        context = {
            'cpu_model_choices': self.middleware.call_sync('vm.cpu_model_choices'),
            'devices': self.devices,
        }
        return domain_children(self.vm_data, context)
    def construct_xml(self):
        """Return the full <domain> XML element for this VM."""
        return create_element(
            'domain', type='kvm', id=str(self.vm_data['id']), attribute_dict={'children': self.get_domain_children()}
        )
| 10,250 | Python | .py | 225 | 34.626667 | 119 | 0.603789 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,029 | device.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/device.py | from abc import ABC
from middlewared.validators import validate_schema
class Device(ABC):
    """Abstract base for VM device wrappers (DISK, NIC, PCI, ...).

    Subclasses implement xml()/is_available()/identity()/_validate() and may
    override the pre/post start/stop hooks, which are no-ops by default.
    """
    # middlewared schema describing this device type's 'attributes' payload
    schema = NotImplemented
    def __init__(self, data, middleware=None):
        # `data` is the raw vm.device record; `middleware` gives hooks API access
        self.data = data
        self.middleware = middleware
    def xml(self, *args, **kwargs):
        """Return the libvirt XML element(s) representing this device."""
        raise NotImplementedError
    def is_available(self):
        """Return True when the backing resource exists on this host."""
        raise NotImplementedError
    def pre_start_vm(self, *args, **kwargs):
        # Hook: runs before the domain is started; no-op by default
        pass
    def pre_start_vm_rollback(self, *args, **kwargs):
        # Hook: undoes pre_start_vm() changes when startup fails; no-op by default
        pass
    def post_start_vm(self, *args, **kwargs):
        # Hook: runs after the domain has started; no-op by default
        pass
    def post_stop_vm(self, *args, **kwargs):
        # Hook: runs after the domain has stopped; no-op by default
        pass
    def __str__(self):
        return f'{self.__class__.__name__} Device: {self.identity()}'
    def identity(self):
        """Return a short human-readable identifier for this device."""
        raise NotImplementedError
    def pre_start_vm_device_setup(self, *args, **kwargs):
        # Hook: device-level setup before any start work begins; no-op by default
        pass
    def validate(self, device, old=None, vm_instance=None, update=True):
        """Validate `device['attributes']` against `schema`, then run subclass
        checks via _validate(); raises if either stage accumulated errors.
        """
        verrors = validate_schema(list(self.schema.attrs.values()), device['attributes'])
        verrors.check()
        self._validate(device, verrors, old, vm_instance, update)
        verrors.check()
    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        # Subclass-specific validation; implementations append to `verrors`
        raise NotImplementedError
| 1,233 | Python | .py | 32 | 31.40625 | 89 | 0.652321 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,030 | pci.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/pci.py | import subprocess
from middlewared.service import CallError
from middlewared.schema import Dict, Str
from middlewared.utils import filter_list
from .device import Device
from .utils import ACTIVE_STATES, create_element, LIBVIRT_URI
class PCIBase(Device):
    """Behaviour shared by host-passthrough device types (PCI, USB)."""

    def is_available(self):
        return self.get_details()['available']

    def in_use_by_vm(self, vms, vm_devices):
        """True when any VM consuming this device is currently active."""
        return any(
            vm['status']['state'] in ACTIVE_STATES
            for vm in self.get_vms_using_device(vms, vm_devices)
        )

    def get_vms_using_device(self, vms, vm_devices):
        """Return the subset of `vms` that have a device entry matching this one."""
        matching = filter_list(vm_devices, self.vm_device_filters())
        vm_ids = [entry['vm'] for entry in matching]
        return filter_list(vms, [['id', 'in', vm_ids]])

    def vm_device_filters(self):
        raise NotImplementedError()

    def pre_start_vm_device_setup(self, context):
        # A passthrough device can only be owned by a single running VM.
        if self.in_use_by_vm(context['vms'], context['vm_devices']):
            raise CallError(f'{self.data["dtype"]} device is already being used by another active VM')
class PCI(PCIBase):
    """PCI passthrough device: hands a host PCI device over to the guest."""

    schema = Dict(
        'attributes',
        Str('pptdev', required=True, empty=False),
    )

    def vm_device_filters(self):
        return [['attributes.pptdev', '=', self.passthru_device()], ['dtype', '=', 'PCI']]

    def _nodedev_command(self, command, error_verb):
        # detach_device/reattach_device previously duplicated this virsh
        # invocation and error handling; factored into one helper.
        cp = subprocess.Popen(
            ['virsh', '-c', LIBVIRT_URI, command, self.passthru_device()],
            stderr=subprocess.PIPE, stdout=subprocess.DEVNULL
        )
        stderr = cp.communicate()[1]
        if cp.returncode:
            raise CallError(f'Unable to {error_verb} {self.passthru_device()} PCI device: {stderr.decode()}')

    def detach_device(self):
        # Unbind the device from its host driver so qemu can claim it.
        self._nodedev_command('nodedev-detach', 'detach')

    def reattach_device(self):
        # Give the device back to its host driver.
        self._nodedev_command('nodedev-reattach', 're-attach')

    def pre_start_vm_device_setup(self, *args, **kwargs):
        super().pre_start_vm_device_setup(*args, **kwargs)
        device = self.get_details()
        # Not available but not errored means the host still holds the
        # device: detach it so it can be assigned to the guest.
        if not device['error'] and not device['available']:
            self.detach_device()

    def identity(self):
        return str(self.passthru_device())

    def passthru_device(self):
        return str(self.data['attributes']['pptdev'])

    def post_stop_vm(self, context):
        # Safe to re-attach only when no other active VM still uses the device.
        if not self.get_details()['error'] and not self.in_use_by_vm(context['vms'], context['vm_devices']):
            try:
                self.reattach_device()
            except CallError:
                # Best effort: failing to reattach must not fail VM shutdown.
                self.middleware.logger.error('Failed to re-attach %s device', self.passthru_device(), exc_info=True)

    def get_details(self):
        return self.middleware.call_sync('vm.device.passthrough_device', self.passthru_device())

    def xml(self, *args, **kwargs):
        """Return the libvirt <hostdev> element addressing this PCI device."""
        address_info = {
            k: hex(int(v)) for k, v in self.get_details()['capability'].items()
            if k in ('domain', 'bus', 'slot', 'function')
        }
        return create_element(
            'hostdev', mode='subsystem', type='pci', managed='yes', attribute_dict={
                'children': [
                    create_element('source', attribute_dict={'children': [create_element('address', **address_info)]}),
                ]
            }
        )

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        pptdev = device['attributes'].get('pptdev')
        device_details = self.middleware.call_sync('vm.device.passthrough_device', pptdev)
        # NOTE: error paths below previously read 'attribute.pptdev'; corrected
        # to 'attributes.pptdev' to match the schema field being validated.
        if device_details['error']:
            verrors.add(
                'attributes.pptdev',
                f'Not a valid choice. The PCI device is not available for passthru: {device_details["error"]}'
            )
        elif device_details['critical']:
            verrors.add(
                'attributes.pptdev',
                f'{device_details["controller_type"]!r} based PCI devices are critical for system function '
                'and cannot be used for PCI passthrough'
            )
        if self.middleware.call_sync('system.is_ha_capable'):
            verrors.add('attributes.pptdev', 'HA capable systems do not support PCI passthrough')
        if not self.middleware.call_sync('vm.device.iommu_enabled'):
            verrors.add('attributes.pptdev', 'IOMMU support is required.')
        if old and vm_instance and vm_instance['status']['state'] in ACTIVE_STATES and old[
            'attributes'
        ].get('pptdev') != pptdev:
            verrors.add(
                'attributes.pptdev',
                'Changing PCI device is not allowed while the VM is active.'
            )
| 4,767 | Python | .py | 97 | 39.247423 | 119 | 0.61555 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,031 | cdrom.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/cdrom.py | import os
from middlewared.plugins.boot import BOOT_POOL_NAME
from middlewared.schema import Dict, File
from middlewared.service import CallError
from middlewared.utils.zfs import query_imported_fast_impl
from middlewared.validators import check_path_resides_within_volume_sync, Match
from .device import Device
from .utils import create_element, disk_from_number, LIBVIRT_USER
class CDROM(Device):
    """Virtual CD-ROM drive backed by an ISO file living under /mnt."""

    schema = Dict(
        'attributes',
        File(
            'path', required=True, validators=[
                Match(
                    r'^/mnt/[^{}]*$',
                    explanation='Path must not contain "{", "}" characters, and it should start with "/mnt/"'
                ),
            ], empty=False
        ),
    )

    def identity(self):
        # The ISO path uniquely identifies this device.
        return self.data['attributes']['path']

    def is_available(self):
        return os.path.exists(self.identity())

    def xml(self, *args, **kwargs):
        """Return the libvirt <disk device="cdrom"> element for this ISO."""
        disk_number = kwargs.pop('disk_number')
        return create_element(
            'disk', type='file', device='cdrom', attribute_dict={
                'children': [
                    create_element('driver', name='qemu', type='raw'),
                    create_element('source', file=self.data['attributes']['path']),
                    # CD-ROMs are always exposed on the SATA bus as sdX.
                    create_element('target', dev=f'sd{disk_from_number(disk_number)}', bus='sata'),
                    create_element('boot', order=str(kwargs.pop('boot_number'))),
                ]
            }
        )

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        """Validate the ISO path: it must live on an imported (non-boot) pool,
        not clash with another device of the VM, and be readable by libvirt.
        """
        path = device['attributes']['path']
        check_path_resides_within_volume_sync(
            verrors, 'attributes.path', path, [
                i['name'] for i in query_imported_fast_impl().values() if i['name'] != BOOT_POOL_NAME
            ]
        )
        if not self.middleware.call_sync('vm.device.disk_uniqueness_integrity_check', device, vm_instance):
            verrors.add(
                'attributes.path',
                f'{vm_instance["name"]} has "{self.identity()}" already configured'
            )
        if not verrors:
            # We would like to check now if libvirt will actually be able to read the iso file
            # How this works is that if libvirt user is not able to read the file, libvirt automatically changes
            # ownership of the iso file to the libvirt user so that it is able to read however there are cases where
            # even this can fail with perms like 000 or maybe parent path(s) not allowing access.
            # To mitigate this, we can do the following:
            # 1) See if owner of the file is libvirt user
            # 2) If it's not libvirt user:
            # a) Check if libvirt user can access the file
            # b) Change ownership of the file to libvirt user as libvirt would eventually do
            # 3) Check if libvirt user can access the file
            libvirt_user = self.middleware.call_sync('user.get_user_obj', {"username": LIBVIRT_USER})
            current_owner = os.stat(path)
            is_valid = False
            if current_owner.st_uid != libvirt_user['pw_uid']:
                if self.middleware.call_sync('filesystem.can_access_as_user', LIBVIRT_USER, path, {'read': True}):
                    is_valid = True
                else:
                    # Mimic libvirt's own behaviour so the access re-check
                    # below reflects what will happen at VM start time.
                    os.chown(path, libvirt_user['pw_uid'], libvirt_user['pw_gid'])
            if not is_valid:
                try:
                    self.middleware.call_sync(
                        'filesystem.check_path_execute', path, 'USER', libvirt_user['pw_uid'], False
                    )
                except CallError as e:
                    verrors.add('attributes.path', e.errmsg)
                if not self.middleware.call_sync(
                    'filesystem.can_access_as_user', LIBVIRT_USER, path, {'read': True}
                ):
                    verrors.add(
                        'attributes.path',
                        f'{LIBVIRT_USER!r} user cannot read from {path!r} path. Please ensure correct '
                        'permissions are specified.'
                    )
                    # Now that we know libvirt user would not be able to read the file in any case,
                    # let's rollback the chown change we did
                    os.chown(path, current_owner.st_uid, current_owner.st_gid)
25,032 | usb.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/usb.py | from middlewared.schema import Dict, Str
from middlewared.validators import Match
from .pci import PCIBase
from .utils import create_element
USB_CONTROLLER_CHOICES = [
'piix3-uhci', 'piix4-uhci', 'ehci', 'ich9-ehci1',
'vt82c686b-uhci', 'pci-ohci', 'nec-xhci', 'qemu-xhci',
]
class USB(PCIBase):
    """USB passthrough device.

    The device can be addressed either by a USB port name ('device') or by
    vendor/product ids ('usb'); exactly one of the two must be supplied.
    """

    schema = Dict(
        'attributes',
        Dict(
            'usb',
            Str(
                'vendor_id', empty=False, required=True, validators=[Match(r'^0x.*')],
                description='Vendor id must start with "0x" prefix e.g 0x0451'
            ),
            Str(
                'product_id', empty=False, required=True, validators=[Match(r'^0x.*')],
                description='Product id must start with "0x" prefix e.g 0x16a8'
            ),
            default=None,
            null=True,
        ),
        Str('controller_type', empty=False, default='nec-xhci', enum=USB_CONTROLLER_CHOICES),
        Str('device', empty=False, null=True, default=None),
    )

    @property
    def usb_device(self):
        # USB port identifier, or None when vendor/product ids are used instead.
        return self.data['attributes']['device']

    @property
    def controller_type(self):
        return self.data['attributes']['controller_type']

    @property
    def usb_details(self):
        return self.data['attributes']['usb']

    def identity(self):
        return self.usb_device or f'{self.usb_details["product_id"]}--{self.usb_details["vendor_id"]}'

    def vm_device_filters(self):
        if self.usb_device:
            return [['attributes.device', '=', self.usb_device], ['dtype', '=', 'USB']]
        else:
            return [['attributes.usb', '=', self.usb_details], ['dtype', '=', 'USB']]

    def get_details(self):
        usb_device = self.usb_device
        if not usb_device and self.usb_details:
            # Resolve vendor/product ids to the port currently holding the device.
            usb_device = self.middleware.call_sync('vm.device.get_usb_port_from_usb_details', self.usb_details)
        if usb_device:
            return self.middleware.call_sync('vm.device.usb_passthrough_device', usb_device)
        else:
            return {
                **self.middleware.call_sync('vm.device.get_basic_usb_passthrough_device_data'),
                'error': 'Could not find matching device as no usb device has been specified',
            }

    def xml(self, *args, **kwargs):
        """Return the libvirt <hostdev> element, or [] when the device is absent."""
        controller_mapping = kwargs.pop('controller_mapping')
        details = self.get_details()['capability']
        if self.is_available():
            return create_element(
                'hostdev', mode='subsystem', type='usb', managed='yes', attribute_dict={
                    'children': [
                        create_element('source', attribute_dict={'children': [
                            create_element('vendor', id=details['vendor_id']),
                            create_element('product', id=details['product_id']),
                            create_element('address', bus=details['bus'], device=details['device']),
                        ]}),
                        create_element('address', type='usb', bus=str(controller_mapping[self.controller_type])),
                    ]
                }
            )
        else:
            # Device unplugged: contribute no XML rather than failing VM start.
            return []

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        if device['attributes']['device'] and device['attributes']['usb']:
            verrors.add(
                'attributes.usb',
                'Either device must be specified or USB details but not both'
            )
        elif not device['attributes']['device'] and not device['attributes']['usb']:
            verrors.add(
                'attributes.device',
                'Either device or attributes.usb must be specified'
            )
        if self.middleware.call_sync('system.is_ha_capable'):
            verrors.add('attributes.usb', 'HA capable systems do not support USB passthrough.')
        if verrors:
            return
        if device['attributes']['device']:
            self._validate_usb_port(device, verrors)
        else:
            self._validate_usb_details(device, verrors)

    def _validate_usb_details(self, device, verrors):
        usb_details = device['attributes']['usb']
        if not self.middleware.call_sync('vm.device.get_usb_port_from_usb_details', usb_details):
            verrors.add(
                'attributes.usb',
                'Unable to locate USB, please confirm its present in a USB port'
            )

    def _validate_usb_port(self, device, verrors):
        usb_device = device['attributes']['device']
        device_details = self.middleware.call_sync('vm.device.usb_passthrough_device', usb_device)
        if device_details.get('error'):
            # Fixed: error path was 'attribute.device', which does not match the
            # 'attributes.device' field path used by every other validation here.
            verrors.add(
                'attributes.device',
                f'Not a valid choice. The device is not available for USB passthrough: {device_details["error"]}'
            )
25,033 | display.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/display.py | import psutil
import subprocess
from urllib.parse import urlencode, quote_plus
from middlewared.schema import Bool, Dict, Int, Password, Str, ValidationErrors
from middlewared.validators import Range
from .device import Device
from .utils import create_element, NGINX_PREFIX
class DISPLAY(Device):
    """SPICE display device plus the websockify proxy that exposes it over HTTP."""

    RESOLUTION_ENUM = [
        '1920x1200', '1920x1080', '1600x1200', '1600x900',
        '1400x1050', '1280x1024', '1280x720',
        '1024x768', '800x600', '640x480',
    ]

    schema = Dict(
        'attributes',
        Str('resolution', enum=RESOLUTION_ENUM, default='1024x768'),
        Int('port', default=None, null=True, validators=[Range(min_=5900, max_=65535)]),
        Int('web_port', default=None, null=True, validators=[Range(min_=5900, max_=65535)]),
        Str('bind', default='127.0.0.1'),
        Bool('wait', default=False),
        Password('password', required=True, null=False, empty=False),
        Bool('web', default=True),
        Str('type', default='SPICE', enum=['SPICE']),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Handle to the websockify subprocess started in post_start_vm().
        self.web_process = None

    def identity(self):
        data = self.data['attributes']
        return f'{data["bind"]}:{data["port"]}'

    def is_spice_type(self):
        return self.data['attributes']['type'] == 'SPICE'

    def web_uri(self, host, protocol='http'):
        """Return the spice-html5 auto-connect URL served behind nginx."""
        # NOTE(review): `path` appears both inside the query string and in the
        # URL path itself — looks intentional (nginx prefix routing) but confirm.
        path = self.get_webui_info()['path'][1:]
        params = {'path': path, 'autoconnect': 1}
        get_params = f'?{urlencode(params, quote_via=quote_plus)}'
        return f'{protocol}://{host}/{path}spice_auto.html{get_params}'

    def is_available(self):
        bind_ip_available = self.data['attributes']['bind'] in self.middleware.call_sync('vm.device.bind_choices')
        return bind_ip_available and not self.validate_port_attrs(self.data)

    def resolution(self):
        return self.data['attributes']['resolution']

    def xml(self, *args, **kwargs):
        """Return the (graphics, usb-controller, tablet-input, video) elements."""
        # FIXME: Resolution is not respected when we have more then 1 display device as we are not able to bind
        # video element to a graphic element
        attrs = self.data['attributes']
        return create_element(
            'graphics', type='spice', port=str(self.data['attributes']['port']),
            attribute_dict={
                'children': [
                    create_element('listen', type='address', address=self.data['attributes']['bind']),
                ]
            }, **({} if not attrs['password'] else {'passwd': attrs['password']})
        ), create_element(
            'controller', type='usb', model='nec-xhci'
        ), create_element('input', type='tablet', bus='usb'), create_element('video', attribute_dict={
            'children': [
                create_element('model', type='qxl', attribute_dict={
                    'children': [create_element(
                        'resolution', x=self.resolution().split('x')[0], y=self.resolution().split('x')[-1]
                    )]
                })
            ]
        })

    def get_start_attrs(self):
        # websockify listens on web_bind and forwards to the SPICE server_addr.
        port = self.data['attributes']['port']
        bind = self.data['attributes']['bind']
        web_port = self.data['attributes']['web_port']
        return {
            'web_bind': f':{web_port}' if bind == '0.0.0.0' else f'{bind}:{web_port}',
            'server_addr': f'{bind}:{port}'
        }

    def post_start_vm(self, *args, **kwargs):
        # Start the websockify proxy serving the spice-html5 client.
        start_args = self.get_start_attrs()
        self.web_process = subprocess.Popen(
            [
                'websockify', '--web', '/usr/share/spice-html5/',
                '--wrap-mode=ignore', start_args['web_bind'], start_args['server_addr']
            ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
        )

    def post_stop_vm(self, *args, **kwargs):
        # Tear down the websockify proxy if it is still running.
        if self.web_process and psutil.pid_exists(self.web_process.pid):
            self.middleware.call_sync('service.terminate_process', self.web_process.pid)
        self.web_process = None

    def get_webui_info(self):
        return {
            'id': self.data['id'],
            'path': f'{NGINX_PREFIX}/{self.data["id"]}/',
            'redirect_uri': f'{self.data["attributes"]["bind"]}:'
                            f'{self.data["attributes"]["web_port"]}',
        }

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        if vm_instance:
            if not update:
                vm_instance['devices'].append(device)
            self.middleware.call_sync('vm.device.validate_display_devices', verrors, vm_instance)
        verrors = self.validate_port_attrs(device, verrors)
        if device['attributes']['bind'] not in self.middleware.call_sync('vm.device.bind_choices'):
            verrors.add('attributes.bind', 'Requested bind address is not valid')

    def validate_port_attrs(self, device, verrors=None):
        """Validate port/web_port; assigns free ports in-place when unset."""
        verrors = ValidationErrors() if verrors is None else verrors
        display_devices_ports = self.middleware.call_sync(
            'vm.all_used_display_device_ports', [['id', '!=', device.get('id')]]
        )
        new_ports = list((self.middleware.call_sync('vm.port_wizard')).values())
        dev_attrs = device['attributes']
        # Ports this device already claims must not be handed out again below.
        for port in filter(lambda p: p in new_ports, (dev_attrs.get('port'), dev_attrs.get('web_port'))):
            new_ports.remove(port)
        for key in ('port', 'web_port'):
            if device['attributes'].get(key):
                if dev_attrs[key] in display_devices_ports:
                    verrors.add(
                        f'attributes.{key}',
                        f'Specified display port({dev_attrs[key]}) is already in use by another Display device'
                    )
                else:
                    verrors.extend(self.middleware.call_sync(
                        'port.validate_port', f'attributes.{key}', dev_attrs[key], dev_attrs['bind'], 'vm.device'
                    ))
            else:
                # Side effect: auto-assign the next free wizard port.
                device['attributes'][key] = new_ports.pop(0)
        return verrors
25,034 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/utils.py | import string
from middlewared.plugins.vm.utils import ACTIVE_STATES, create_element, LIBVIRT_URI, LIBVIRT_USER, NGINX_PREFIX # noqa
def disk_from_number(number):
    """Map a 1-based disk index to a drive-letter suffix in bijective base-26.

    1 -> 'a', 26 -> 'z', 27 -> 'aa', 52 -> 'az', ... (spreadsheet-column style).
    Returns '' for numbers <= 0.
    """
    suffix = ''
    while number > 0:
        # Shift to 0-based so that 26 maps to 'z' rather than rolling over.
        number, rem = divmod(number - 1, 26)
        suffix = string.ascii_lowercase[rem] + suffix
    return suffix
| 451 | Python | .py | 13 | 28.153846 | 118 | 0.619816 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,035 | __init__.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/__init__.py | from .cdrom import CDROM
from .nic import NIC
from .pci import PCI
from .storage_devices import DISK, RAW
from .display import DISPLAY
from .usb import USB
__all__ = ['CDROM', 'DEVICES', 'DISK', 'NIC', 'PCI', 'RAW', 'DISPLAY', 'USB']

# Registry mapping each device class name (the `dtype` used in VM device
# records) to its implementing class.
DEVICES = {cls.__name__: cls for cls in (CDROM, DISK, NIC, PCI, RAW, DISPLAY, USB)}
| 368 | Python | .py | 12 | 28.083333 | 77 | 0.674221 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,036 | nic.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/nic.py | import random
from middlewared.plugins.interface.netif import netif
from middlewared.schema import Bool, Dict, Str
from middlewared.service import CallError
from middlewared.validators import MACAddr
from .device import Device
from .utils import create_element
class NIC(Device):
    """Virtual network interface attached to a host NIC or bridge."""

    schema = Dict(
        'attributes',
        Bool('trust_guest_rx_filters', default=False),
        Str('type', enum=['E1000', 'VIRTIO'], default='E1000'),
        Str('nic_attach', default=None, null=True),
        Str('mac', default=None, null=True, validators=[MACAddr(separator=':')]),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bridge = None
        self.bridge_created = None
        self.nic_attach = None

    def identity(self):
        # Fall back to the interface carrying the default IPv4 route when no
        # explicit attachment is configured.
        configured = self.data['attributes'].get('nic_attach')
        return configured or netif.RoutingTable().default_route_ipv4.interface

    def is_available(self):
        return self.identity() in netif.list_interfaces()

    @staticmethod
    def random_mac():
        """Generate a locally administered MAC in the 00:a0:98 OUI."""
        octets = [
            0x00, 0xa0, 0x98,
            random.randint(0x00, 0x7f),
            random.randint(0x00, 0xff),
            random.randint(0x00, 0xff),
        ]
        return ':'.join(f'{octet:02x}' for octet in octets)

    def setup_nic_attach(self):
        """Resolve and bring up the host interface; stores it on self.nic_attach."""
        nic_attach = self.data['attributes'].get('nic_attach')
        interfaces = netif.list_interfaces()
        if nic_attach and nic_attach not in interfaces:
            raise CallError(f'{nic_attach} not found.')
        if not nic_attach:
            try:
                nic_attach = netif.RoutingTable().default_route_ipv4.interface
                nic = netif.get_interface(nic_attach)
            except Exception as e:
                raise CallError(f'Unable to retrieve default interface: {e}')
        else:
            nic = netif.get_interface(nic_attach)
        if netif.InterfaceFlags.UP not in nic.flags:
            nic.up()
        self.nic_attach = nic.name

    def xml_children(self):
        nic_type = 'virtio' if self.data['attributes']['type'] == 'VIRTIO' else 'e1000'
        mac = self.data['attributes'].get('mac') or self.random_mac()
        return [
            create_element('model', type=nic_type),
            create_element('mac', address=mac),
        ]

    def xml(self, *args, **kwargs):
        """Return the libvirt <interface> element (bridge or macvtap mode)."""
        self.setup_nic_attach()
        if self.nic_attach.startswith('br'):
            source = create_element('source', bridge=self.nic_attach)
            return create_element(
                'interface', type='bridge', attribute_dict={
                    'children': [source] + self.xml_children()
                }
            )
        source = create_element('source', dev=self.nic_attach, mode='bridge')
        return create_element(
            'interface', type='direct',
            trustGuestRxFilters='yes' if self.data['attributes']['trust_guest_rx_filters'] else 'no',
            attribute_dict={
                'children': [source] + self.xml_children()
            }
        )

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        attrs = device['attributes']
        selected = attrs.get('nic_attach')
        if selected:
            if selected not in self.middleware.call_sync('vm.device.nic_attach_choices'):
                verrors.add('attributes.nic_attach', 'Not a valid choice.')
            elif selected.startswith('br') and attrs['trust_guest_rx_filters']:
                verrors.add(
                    'attributes.trust_guest_rx_filters',
                    'This can only be set when "nic_attach" is not a bridge device'
                )
        if attrs['trust_guest_rx_filters'] and attrs['type'] == 'E1000':
            verrors.add(
                'attributes.trust_guest_rx_filters',
                'This can only be set when "type" of NIC device is "VIRTIO"'
            )
25,037 | storage_devices.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/devices/storage_devices.py | import errno
import os
from middlewared.plugins.zfs_.utils import zvol_name_to_path, zvol_path_to_name
from middlewared.plugins.zfs_.validation_utils import check_zvol_in_boot_pool_using_path
from middlewared.schema import Bool, Dict, Int, Str
from middlewared.validators import Match
from .device import Device
from .utils import create_element, disk_from_number
IOTYPE_CHOICES = ['NATIVE', 'THREADS', 'IO_URING']
class StorageDevice(Device):
    """Common base for disk-like devices (zvol-backed DISK, file-backed RAW)."""

    # libvirt <disk type=...>: 'block' or 'file'; set by subclasses.
    TYPE = NotImplemented

    def identity(self):
        return self.data['attributes']['path']

    def is_available(self):
        return os.path.exists(self.identity())

    def xml(self, *args, **kwargs):
        """Return the libvirt <disk> element for this device."""
        disk_number = kwargs.pop('disk_number')
        virtio = self.data['attributes']['type'] == 'VIRTIO'
        logical_sectorsize = self.data['attributes']['logical_sectorsize']
        physical_sectorsize = self.data['attributes']['physical_sectorsize']
        iotype = self.data['attributes']['iotype']
        return create_element(
            'disk', type=self.TYPE, device='disk', attribute_dict={
                'children': [
                    create_element('driver', name='qemu', type='raw', cache='none', io=iotype.lower(), discard='unmap'),
                    self.create_source_element(),
                    # VIRTIO disks appear as vdX, SATA (AHCI) disks as sdX.
                    create_element(
                        'target', bus='sata' if not virtio else 'virtio',
                        dev=f'{"vd" if virtio else "sd"}{disk_from_number(disk_number)}'
                    ),
                    create_element('boot', order=str(kwargs.pop('boot_number'))),
                    # <blockio> is emitted only when a logical sector size is set;
                    # physical size is included only alongside a logical one.
                    *([] if not logical_sectorsize else [create_element(
                        'blockio', logical_block_size=str(logical_sectorsize), **({} if not physical_sectorsize else {
                            'physical_block_size': str(physical_sectorsize)
                        })
                    )]),
                ]
            }
        )

    def create_source_element(self):
        # Subclasses supply the <source> element (file= vs dev=).
        raise NotImplementedError

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        if not self.middleware.call_sync('vm.device.disk_uniqueness_integrity_check', device, vm_instance):
            verrors.add(
                'attributes.path',
                f'{vm_instance["name"]} has "{self.identity()}" already configured'
            )
        if device['attributes'].get('physical_sectorsize') and not device['attributes'].get('logical_sectorsize'):
            verrors.add(
                'attributes.logical_sectorsize',
                'This field must be provided when physical_sectorsize is specified.'
            )
class RAW(StorageDevice):
    """Disk device backed by a raw file on the host filesystem."""

    TYPE = 'file'

    schema = Dict(
        'attributes',
        Str(
            'path', required=True, empty=False,
            validators=[Match(r'^[^{}]*$', explanation='Path should not contain "{", "}" characters')],
        ),
        Str('type', enum=['AHCI', 'VIRTIO'], default='AHCI'),
        Bool('exists'),
        Bool('boot', default=False),
        Int('size', default=None, null=True),
        Int('logical_sectorsize', enum=[None, 512, 4096], default=None, null=True),
        Int('physical_sectorsize', enum=[None, 512, 4096], default=None, null=True),
        Str('iotype', enum=IOTYPE_CHOICES, default='THREADS'),
    )

    def create_source_element(self):
        # A raw device is backed by an ordinary file.
        return create_element('source', file=self.data['attributes']['path'])

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        attrs = device['attributes']
        path = attrs['path']
        exists = attrs.get('exists', True)
        if exists:
            if not os.path.exists(path):
                verrors.add('attributes.path', 'Path must exist when "exists" is set.')
        else:
            # The file will be created, so a size is mandatory.
            if os.path.exists(path):
                verrors.add('attributes.path', 'Path must not exist when "exists" is unset.')
            elif not attrs.get('size'):
                verrors.add('attributes.size', 'Please provide a valid size for the raw file.')
        if old and old['attributes'].get('size') != attrs.get('size') and not attrs.get('size'):
            verrors.add('attributes.size', 'Please provide a valid size for the raw file.')
        self.middleware.call_sync('vm.device.validate_path_field', verrors, 'attributes.path', path)
        super()._validate(device, verrors, old, vm_instance, update)
class DISK(StorageDevice):
    """Disk device backed by a ZFS zvol (exposed via /dev/zvol/...)."""

    TYPE = 'block'

    schema = Dict(
        'attributes',
        Str('path'),
        Str('type', enum=['AHCI', 'VIRTIO'], default='AHCI'),
        Bool('create_zvol'),
        Str('zvol_name'),
        Int('zvol_volsize'),
        Int('logical_sectorsize', enum=[None, 512, 4096], default=None, null=True),
        Int('physical_sectorsize', enum=[None, 512, 4096], default=None, null=True),
        Str('iotype', enum=IOTYPE_CHOICES, default='THREADS'),
    )

    def create_source_element(self):
        return create_element('source', dev=self.data['attributes']['path'])

    def _validate(self, device, verrors, old=None, vm_instance=None, update=True):
        """Validate either a zvol-to-be-created (create_zvol) or an existing
        zvol path; the two modes are mutually exclusive.
        """
        create_zvol = device['attributes'].get('create_zvol')
        path = device['attributes'].get('path')
        if create_zvol:
            for attr in ('zvol_name', 'zvol_volsize'):
                if not device['attributes'].get(attr):
                    verrors.add(f'attributes.{attr}', 'This field is required.')
            if device['attributes'].get('path'):
                verrors.add('attributes.path', 'Must not be specified when creating zvol')
            verrors.check()

            # Add normalized path for the zvol
            device['attributes']['path'] = zvol_name_to_path(device['attributes']['zvol_name'])
            if zvol := self.middleware.call_sync(
                'pool.dataset.query', [['id', '=', device['attributes']['zvol_name']]]
            ):
                verrors.add('attributes.zvol_name', f'{zvol[0]["id"]!r} already exists.')
            # The parent dataset must already exist for the zvol to be created.
            parentzvol = device['attributes']['zvol_name'].rsplit('/', 1)[0]
            if parentzvol and not self.middleware.call_sync('pool.dataset.query', [('id', '=', parentzvol)]):
                verrors.add(
                    'attributes.zvol_name',
                    f'Parent dataset {parentzvol} does not exist.', errno.ENOENT
                )
        else:
            for attr in filter(lambda k: device['attributes'].get(k), ('zvol_name', 'zvol_volsize')):
                verrors.add(f'attributes.{attr}', 'This field should not be specified when "create_zvol" is unset.')

            if not path:
                verrors.add('attributes.path', 'Disk path is required.')
            elif not path.startswith('/dev/zvol/'):
                verrors.add('attributes.path', 'Disk path must start with "/dev/zvol/"')
            elif check_zvol_in_boot_pool_using_path(path):
                verrors.add('attributes.path', 'Disk residing in boot pool cannot be consumed and is not supported')
            else:
                # Confirm the path maps to a real ZFS VOLUME dataset.
                zvol = self.middleware.call_sync(
                    'zfs.dataset.query', [['id', '=', zvol_path_to_name(path)]], {'extra': {'properties': []}}
                )
                if not zvol:
                    verrors.add('attributes.path', 'Zvol referenced by path does not exist', errno.ENOENT)
                elif zvol[0]['type'] != 'VOLUME':
                    verrors.add('attributes.path', 'Path specified does not reference to a VOLUME')

        super()._validate(device, verrors, old, vm_instance, update)
| 7,619 | Python | .py | 141 | 42.198582 | 120 | 0.588955 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,038 | boot_loader.py | truenas_middleware/src/middlewared/middlewared/plugins/boot_/boot_loader.py | import os
import shutil
import tempfile
from middlewared.service import Service, private
from middlewared.utils import run
class BootService(Service):

    @private
    async def install_loader(self, dev):
        """Install the GRUB boot loader (BIOS and, where the partition layout
        has an ESP, EFI) onto boot device `dev`.
        """
        legacy_schema = await self.middleware.call('boot.legacy_schema', dev)
        # On the EFI-only legacy layout the ESP is the first partition;
        # otherwise partition 1 is the BIOS boot partition and 2 is the ESP.
        if legacy_schema == 'EFI_ONLY':
            efi_partition_number = 1
        else:
            efi_partition_number = 2

        await run('grub-install', '--target=i386-pc', f'/dev/{dev}')
        if legacy_schema == 'BIOS_ONLY':
            # No EFI partition exists on this layout; nothing more to do.
            return

        partition = await self.middleware.call('disk.get_partition_for_disk', dev, efi_partition_number)
        await run('mkdosfs', '-F', '32', '-s', '1', '-n', 'EFI', f'/dev/{partition}')

        with tempfile.TemporaryDirectory() as tmpdirname:
            efi_dir = os.path.join(tmpdirname, 'efi')
            os.makedirs(efi_dir)
            await run('mount', '-t', 'vfat', f'/dev/{partition}', efi_dir)
            try:
                grub_cmd = [
                    'grub-install', '--target=x86_64-efi', f'--efi-directory={efi_dir}',
                    '--bootloader-id=debian', '--recheck', '--no-floppy',
                ]
                if not os.path.exists('/sys/firmware/efi'):
                    # Running system booted via BIOS: don't try to touch EFI NVRAM.
                    grub_cmd.append('--no-nvram')
                await run(*grub_cmd)

                mounted_efi_dir = os.path.join(efi_dir, 'EFI')
                os.makedirs(os.path.join(mounted_efi_dir, 'boot'), exist_ok=True)
                # Copy to the removable-media fallback path so firmware can boot
                # the disk without an NVRAM entry.
                shutil.copy(
                    os.path.join(mounted_efi_dir, 'debian/grubx64.efi'),
                    os.path.join(mounted_efi_dir, 'boot/bootx64.efi')
                )
            finally:
                # Fix: previously the unmount was skipped if grub-install or the
                # copy failed, leaving the ESP mounted inside the temporary
                # directory and breaking its cleanup.
                await run('umount', efi_dir)
25,039 | format.py | truenas_middleware/src/middlewared/middlewared/plugins/boot_/format.py | from middlewared.schema import accepts, Dict, Int, Str
from middlewared.service import CallError, private, Service
from middlewared.utils import run
class BootService(Service):
    @accepts(
        Str('dev'),
        Dict(
            'options',
            Int('size'),
            Str('legacy_schema', enum=[None, 'BIOS_ONLY', 'EFI_ONLY'], null=True, default=None),
        )
    )
    @private
    async def format(self, dev, options):
        """
        Format a given disk `dev` using the appropriate partition layout.

        `options.size` caps the size (bytes) of the ZFS boot partition;
        when absent, the partition consumes the remaining disk space.
        `options.legacy_schema` recreates an older BIOS-only or EFI-only
        layout; `None` (default) creates the current BIOS+EFI layout.

        Raises:
            CallError: if the wipe fails, the disk is unknown, the disk is
                too small for the requested partitions, or `sgdisk` fails.
        """
        job = await self.middleware.call('disk.wipe', dev, 'QUICK')
        await job.wait()
        if job.error:
            raise CallError(job.error)
        disk_details = await self.middleware.call('device.get_disk', dev)
        if not disk_details:
            raise CallError(f'Details for {dev} not found.')
        commands = []
        # (label, size-in-bytes) pairs used only for the space check below
        partitions = []
        if options['legacy_schema'] == 'BIOS_ONLY':
            partitions.extend([
                ('BIOS boot partition', 524288),  # 512 KiB
            ])
        elif options['legacy_schema'] == 'EFI_ONLY':
            partitions.extend([
                ('EFI System', 272629760),  # 260 MiB
            ])
        else:
            partitions.extend([
                ('BIOS boot partition', 1048576),  # We allot 1MiB to bios boot partition
                ('EFI System', 536870912)  # We allot 512MiB for EFI partition
            ])
        if options.get('size'):
            partitions.append(('Solaris /usr & Mac ZFS', options['size']))
        # 73 sectors are reserved by Linux for GPT tables and
        # our 4096 bytes alignment offset for the boot disk
        partitions.append((
            'GPT partition table', 73 * disk_details['sectorsize']
        ))
        total_partition_size = sum(size for _, size in partitions)
        if disk_details['size'] < total_partition_size:
            # build a human-readable layout description for the error message
            partitions = [
                '%s: %s blocks' % (p[0], '{:,}'.format(p[1] // disk_details['sectorsize'])) for p in partitions
            ]
            partitions.append(
                'total of %s blocks' % '{:,}'.format(total_partition_size // disk_details['sectorsize'])
            )
            disk_blocks = '{:,}'.format(disk_details["blocks"])
            raise CallError(
                f'The new device ({dev}, {disk_details["size"] / (1024 ** 3)} GB, {disk_blocks} blocks) '
                f'does not have enough space to hold the required new partitions ({", ".join(partitions)}). '
                'New mirrored devices might require more space than existing devices due to changes in the '
                'booting procedure.'
            )
        # sgdisk size spec for the ZFS partition; 0 means "rest of the disk"
        zfs_part_size = f'+{options["size"] // 1024}K' if options.get('size') else 0
        if options['legacy_schema']:
            if options['legacy_schema'] == 'BIOS_ONLY':
                commands.extend((
                    ['sgdisk', f'-a{4096 // disk_details["sectorsize"]}', '-n1:0:+512K', '-t1:EF02', f'/dev/{dev}'],
                ))
            elif options['legacy_schema'] == 'EFI_ONLY':
                commands.extend((
                    ['sgdisk', f'-a{4096 // disk_details["sectorsize"]}', '-n1:0:+260M', '-t1:EF00', f'/dev/{dev}'],
                ))
            # Creating standard-size partitions first leads to better alignment and more compact disk usage
            # and can help to fit larger data partition.
            commands.extend([
                ['sgdisk', f'-n2:0:{zfs_part_size}', '-t2:BF01', f'/dev/{dev}'],
            ])
        else:
            commands.extend((
                ['sgdisk', f'-a{4096 // disk_details["sectorsize"]}', '-n1:0:+1024K', '-t1:EF02', f'/dev/{dev}'],
                ['sgdisk', '-n2:0:+524288K', '-t2:EF00', f'/dev/{dev}'],
            ))
            # Creating standard-size partitions first leads to better alignment and more compact disk usage
            # and can help to fit larger data partition.
            commands.extend([
                ['sgdisk', f'-n3:0:{zfs_part_size}', '-t3:BF01', f'/dev/{dev}']
            ])
        for command in commands:
            p = await run(*command, check=False)
            if p.returncode != 0:
                raise CallError(
                    '{} failed:\n{}{}'.format(' '.join(command), p.stdout.decode('utf-8'), p.stderr.decode('utf-8'))
                )
        await self.middleware.call('device.settle_udev_events')
| 4,419 | Python | .py | 93 | 35.344086 | 116 | 0.538355 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,040 | device_info.py | truenas_middleware/src/middlewared/middlewared/plugins/device_/device_info.py | import re
import pyudev
import libsgio
from middlewared.plugins.disk_.disk_info import get_partition_size_info
from middlewared.schema import Dict, returns
from middlewared.service import Service, accepts, private
from middlewared.utils.disks import DISKS_TO_IGNORE, get_disk_names, get_disk_serial_from_block_device, safe_retrieval
from middlewared.utils.functools_ import cache
from middlewared.utils.gpu import get_gpus
from middlewared.utils.serial import serial_port_choices
# matches per-controller NVMe namespace node names (e.g. nvme0c0n1) which
# get_disks() skips -- presumably multipath-private devices; TODO confirm
RE_NVME_PRIV = re.compile(r'nvme[0-9]+c')
# sysfs device-path shape for block devices exposed via an iSCSI session
ISCSI_DEV_PATH = re.compile(
    r'/devices/platform/host[0-9]+/session[0-9]+/target[0-9]+:[0-9]+:[0-9]+/[0-9]+:[0-9]+:[0-9]+:[0-9]+/block/.*'
)
def is_iscsi_device(dev):
    """Return True if the specified pyudev device is iSCSI based."""
    # The implementation may change at a later date
    return bool(ISCSI_DEV_PATH.match(dev.device_path))
class DeviceService(Service):
    # device paths for which a rotation-rate ioctl failure was already
    # logged; prevents repeating the same error on every poll
    DISK_ROTATION_ERROR_LOG_CACHE = set()
    @private
    @cache
    def host_type(self):
        """Return (and memoize) the DMI system product name (e.g. 'QEMU')."""
        return self.middleware.call_sync('system.dmidecode_info')['system-product-name']
    @private
    def get_serials(self):
        """Return the available serial port choices."""
        return serial_port_choices()
    @private
    def get_disk_serial(self, dev):
        """Return the serial number for pyudev block device `dev`."""
        return get_disk_serial_from_block_device(dev)
    @private
    def get_disk_names(self):
        """
        This endpoint serves almost exclusively to be called in our
        reporting plugin. It just needs the block device names
        (sda/nvme0n1/pmem0/etc) and so this will very quickly enumerate
        that information.
        NOTE: The return of this method should match the keys retrieved
        when running `self.get_disks`.
        """
        return get_disk_names()
    @private
    def get_disks(self, get_partitions=False, serial_only=False):
        """Enumerate all relevant block disks.

        Ignored device names, private NVMe controller nodes and iSCSI-backed
        disks are skipped. Returns a dict keyed by device name; values are
        serial strings when `serial_only` is set, else full detail dicts.
        """
        ctx = pyudev.Context()
        disks = {}
        for dev in ctx.list_devices(subsystem='block', DEVTYPE='disk'):
            if dev.sys_name.startswith(DISKS_TO_IGNORE) or RE_NVME_PRIV.match(dev.sys_name):
                continue
            if is_iscsi_device(dev):
                continue
            try:
                if serial_only:
                    disks[dev.sys_name] = self.get_disk_serial(dev)
                else:
                    disks[dev.sys_name] = self.get_disk_details(ctx, dev, get_partitions)
            except Exception:
                # one problematic disk must not abort enumeration of the rest
                self.logger.debug('Failed to retrieve disk details for %s', dev.sys_name, exc_info=True)
        return disks
    @private
    def get_disk_partitions(self, dev):
        """Return a list of partition detail dicts for pyudev disk `dev`."""
        parts = []
        keys = tuple('ID_PART_ENTRY_' + i for i in ('TYPE', 'UUID', 'NUMBER', 'SIZE'))
        parent = dev.sys_name
        # only consider children exposing all required partition properties
        for i in filter(lambda x: all(x.get(k) for k in keys), dev.children):
            part_num = int(i['ID_PART_ENTRY_NUMBER'])
            part_name = self.middleware.call_sync('disk.get_partition_for_disk', parent, part_num)
            pinfo = get_partition_size_info(parent, int(i['ID_PART_ENTRY_OFFSET']), int(i['ID_PART_ENTRY_SIZE']))
            part = {
                'name': part_name,
                'id': part_name,
                'path': f'/dev/{parent}',
                'disk': parent,
                'fs_label': i.get('ID_FS_LABEL'),
                'partition_type': i['ID_PART_ENTRY_TYPE'],
                'partition_number': part_num,
                'partition_uuid': i['ID_PART_ENTRY_UUID'],
                'start_sector': pinfo.start_sector,
                'end_sector': pinfo.end_sector,
                'start': pinfo.start_byte,
                'end': pinfo.end_byte,
                'size': pinfo.total_bytes,
                'encrypted_provider': None,
            }
            for attr in filter(lambda x: x.startswith('holders/md'), i.attributes.available_attributes):
                # looks like `holders/md123`
                part['encrypted_provider'] = f'/dev/{attr.split("/", 1)[1].strip()}'
                break
            parts.append(part)
        return parts
    @private
    def get_disk_details(self, ctx, dev, get_partitions=False):
        """Build the full detail dict for pyudev disk `dev`."""
        blocks = self.safe_retrieval(dev.attributes, 'size', None, asint=True)
        ident = serial = self.get_disk_serial(dev)
        model = descr = self.safe_retrieval(dev.properties, 'ID_MODEL', None)
        vendor = self.safe_retrieval(dev.properties, 'ID_VENDOR', None)
        is_nvme = dev.sys_name.startswith('nvme') or (vendor and vendor.lower().strip() == 'nvme')
        driver = self.safe_retrieval(dev.parent.properties, 'DRIVER', '') if not is_nvme else 'nvme'
        sectorsize = self.safe_retrieval(dev.attributes, 'queue/logical_block_size', None, asint=True)
        size = mediasize = None
        if blocks:
            # sysfs `size` is counted in 512-byte units regardless of the
            # device's logical block size
            size = mediasize = blocks * 512
        disk = {
            'name': dev.sys_name,
            'sectorsize': sectorsize,
            'number': dev.device_number,
            'subsystem': self.safe_retrieval(dev.parent.properties, 'SUBSYSTEM', ''),
            'driver': driver,
            'hctl': self.safe_retrieval(dev.parent.properties, 'DEVPATH', '').split('/')[-1],
            'size': size,
            'mediasize': mediasize,
            'vendor': vendor,
            'ident': ident,
            'serial': serial,
            'model': model,
            'descr': descr,
            'lunid': self.safe_retrieval(dev.properties, 'ID_WWN', '').removeprefix('0x').removeprefix('eui.') or None,
            'bus': self.safe_retrieval(dev.properties, 'ID_BUS', 'UNKNOWN').upper(),
            'type': 'UNKNOWN',
            'blocks': blocks,
            'serial_lunid': None,
            'rotationrate': None,
            'stripesize': None,  # remove this? (not used)
            'parts': [],
        }
        if get_partitions:
            disk['parts'] = self.get_disk_partitions(dev)
        if self.safe_retrieval(dev.attributes, 'queue/rotational', None) == '1':
            disk['type'] = 'HDD'
            disk['rotationrate'] = self._get_rotation_rate(f'/dev/{dev.sys_name}')
        else:
            disk['type'] = 'SSD'
            disk['rotationrate'] = None
        if disk['serial'] and disk['lunid']:
            disk['serial_lunid'] = f'{disk["serial"]}_{disk["lunid"]}'
        disk['dif'] = self.is_dif_formatted(ctx, {'subsystem': disk['subsystem'], 'hctl': disk['hctl']})
        return disk
    @private
    def is_dif_formatted(self, ctx, info):
        """
        DIF is a feature added to the SCSI Standard. It adds 8 bytes to the end of each sector on disk.
        It increases the size of the commonly-used 512-byte disk block from 512 to 520 bytes. The extra bytes comprise
        the Data Integrity Field (DIF). The basic idea is that the HBA will calculate a checksum value for the data
        block on writes, and store it in the DIF. The storage device will confirm the checksum on receive, and store
        the data plus checksum. On a read, the checksum will be checked by the storage device and by the receiving HBA.
        The Data Integrity Extension (DIX) allows this check to move up the stack: the application calculates the
        checksum and passes it to the HBA, to be appended to the 512 byte data block. This provides a full end-to-end
        data integrity check.
        With support from the HBA, this means checksums will be computed/verified by the HBA for every block. This is
        redundant and a waste of bus bandwidth with ZFS. These disks should be reformatted to use a normal sector size
        without protection information before a pool can be created.
        """
        dif = False
        if (info['subsystem'] != 'scsi') or (info['hctl'].count(':') != 3):
            # only check scsi devices
            return dif
        try:
            dev = pyudev.Devices.from_path(ctx, f'/sys/class/scsi_disk/{info["hctl"]}')
        except pyudev.DeviceNotFoundAtPathError:
            return dif
        except Exception:
            # logging this is painful because it'll spam so
            # ignore it for now...
            return dif
        else:
            # 0 == disabled, > 0 == enabled
            return bool(self.safe_retrieval(dev.attributes, 'protection_type', 0, asint=True))
    @private
    def safe_retrieval(self, prop, key, default, asint=False):
        """Thin wrapper around the module-level `safe_retrieval` helper."""
        return safe_retrieval(prop, key, default, asint)
    @private
    def get_disk(self, name, get_partitions=False, serial_only=False):
        """Return details for a single disk `name`, or None if not found."""
        context = pyudev.Context()
        try:
            block_device = pyudev.Devices.from_name(context, 'block', name)
            if serial_only:
                return {'serial': self.get_disk_serial(block_device)}
            else:
                return self.get_disk_details(context, block_device, get_partitions)
        except pyudev.DeviceNotFoundByNameError:
            return
        except Exception:
            self.logger.debug('Failed to retrieve disk details for %s', name, exc_info=True)
    def _get_type_and_rotation_rate(self, disk_data, device_path):
        """Classify a disk as HDD/SSD and determine its rotation rate.

        NOTE(review): `self.HOST_TYPE` is not defined anywhere in this file
        (only the `host_type()` method above); confirm this attribute is set
        elsewhere, or whether this method is dead code -- `get_disk_details`
        calls `_get_rotation_rate` directly.
        """
        if disk_data['rota']:
            if self.HOST_TYPE == 'QEMU':
                # qemu/kvm guests do not support necessary ioctl for
                # retrieving rotational rate
                type_ = 'HDD'
                rotation_rate = None
            else:
                rotation_rate = self._get_rotation_rate(device_path)
                if rotation_rate:
                    type_ = 'HDD'
                else:
                    # Treat rotational devices without rotation rate as SSDs
                    # (some USB bridges report SSDs as rotational devices, see
                    # https://jira.ixsystems.com/browse/NAS-112230)
                    type_ = 'SSD'
        else:
            type_ = 'SSD'
            rotation_rate = None
        return type_, rotation_rate
    def _get_rotation_rate(self, device_path):
        """Return the reported rotation rate (RPM) via SG_IO, or None."""
        try:
            disk = libsgio.SCSIDevice(device_path)
            rotation_rate = disk.rotation_rate()
        except Exception:
            # log the ioctl failure only once per device path
            if device_path not in self.DISK_ROTATION_ERROR_LOG_CACHE:
                self.DISK_ROTATION_ERROR_LOG_CACHE.add(device_path)
                self.logger.error('Ioctl failed while retrieving rotational rate for disk %s', device_path)
            return
        else:
            self.DISK_ROTATION_ERROR_LOG_CACHE.discard(device_path)
        if rotation_rate in (0, 1):
            # 0 = not reported
            # 1 = SSD
            return
        return rotation_rate
    @private
    def get_gpus(self):
        """Return detected GPUs, flagging those available to the host
        (i.e. not isolated for passthrough)."""
        gpus = get_gpus()
        to_isolate_gpus = self.middleware.call_sync('system.advanced.config')['isolated_gpu_pci_ids']
        for gpu in gpus:
            gpu['available_to_host'] = gpu['addr']['pci_slot'] not in to_isolate_gpus
        return gpus
| 10,761 | Python | .py | 225 | 36.991111 | 119 | 0.598857 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,041 | vrrp_events.py | truenas_middleware/src/middlewared/middlewared/plugins/device_/vrrp_events.py | from asyncio import sleep as asyncio_sleep
from dataclasses import dataclass
from collections import deque
from logging import getLogger
from math import floor
from os import mkfifo
from threading import Thread, Event
from time import sleep, time
from middlewared.service import Service
from middlewared.utils.prctl import set_name
LOGGER = getLogger('failover') # so logs show up in /var/log/failover.log
@dataclass
class VrrpObjs:
    # NOTE(review): none of these attributes carry annotations, so @dataclass
    # generates no fields here; they behave as plain shared class attributes,
    # which matches how the module uses them (always via `VrrpObjs.<attr>`).
    fifo_thread = None
    event_thread = None
    event_queue = deque(maxlen=1)  # only the most recent event is retained
    non_crit_ifaces = set()
class VrrpThreadService(Service):
    """Control plane for the shared VRRP fifo/event worker threads."""

    class Config:
        cli_private = True
        private = True

    def pause_events(self):
        """Pause both worker threads, if they are currently running."""
        for worker in (VrrpObjs.fifo_thread, VrrpObjs.event_thread):
            if worker is not None and worker.is_alive():
                worker.pause()

    def unpause_events(self):
        """Resume both worker threads, if they are currently running."""
        for worker in (VrrpObjs.fifo_thread, VrrpObjs.event_thread):
            if worker is not None and worker.is_alive():
                worker.unpause()

    def set_non_crit_ifaces(self):
        """Refresh the fifo thread's set of non-critical interface names."""
        if VrrpObjs.fifo_thread is None:
            return
        rows = self.middleware.call_sync('datastore.query', 'network.interfaces')
        VrrpObjs.fifo_thread.non_crit_ifaces = {
            row['int_interface'] for row in rows if not row['int_critical']
        }
class VrrpEventThread(Thread):
    """Consumes normalized VRRP events from the shared (maxlen=1) queue and
    debounces them before dispatching to the `vrrp.fifo` middleware hook."""
    def __init__(self, **kwargs):
        super(VrrpEventThread, self).__init__()
        self.middleware = kwargs.get('middleware')
        self.event_queue = VrrpObjs.event_queue
        self.shutdown_event = Event()
        self.pause_event = Event()
        # extra slack (secs) added on top of the configured failover timeout
        self.grace_period = 0.5
        self.user_provided_timeout = kwargs.get('timeout') or 2
        # longest we wait before acting on a single event
        self.max_wait = self.user_provided_timeout + self.grace_period
        # NOTE(review): `settle_time` is not referenced anywhere in this
        # class -- confirm external use or whether it can be removed
        self.settle_time = (self.max_wait / 2) + self.grace_period
        self.max_rapid_settle_time = 5
        # back-off applied once when events arrive in rapid succession
        self.rapid_event_settle_time = min(2 * self.user_provided_timeout, self.max_rapid_settle_time)
    def shutdown(self):
        self.shutdown_event.set()
    def pause(self):
        self.pause_event.set()
    def unpause(self):
        self.pause_event.clear()
    # NOTE(review): this property pair is a plain passthrough around the
    # name-mangled `self.__upt` and adds no validation
    @property
    def user_provided_timeout(self):
        return self.__upt
    @user_provided_timeout.setter
    def user_provided_timeout(self, value):
        self.__upt = value
    def run(self):
        set_name('vrrp_event_thread')
        LOGGER.info('vrrp event thread started')
        last_event, backoff = None, False
        while not self.shutdown_event.is_set():
            if self.pause_event.is_set():
                # A BACKUP event has to migrate all the VIPs
                # off of the controller and the only way to
                # (quickly) do that is to restart the vrrp service.
                # However, restarting the VRRP service triggers
                # more BACKUP events for the other interfaces
                # so we will pause this thread while we become
                # the backup controller and then unpause after
                last_event = None
                self.event_queue.clear()
                sleep(0.2)
                # NOTE(review): no `continue` here -- control falls through to
                # the read below, which raises IndexError on the just-cleared
                # queue and loops anyway; confirm this is intentional
            try:
                this_event = self.event_queue[-1]
            except IndexError:
                # loop is started but we've received no events
                sleep(0.2)
                continue
            if last_event is None:
                # first event (in the loop) so sleep `max_wait`
                # before we act upon it
                last_event = this_event
                sleep(self.max_wait)
                continue
            # These are the primary scenarios for which we need to handle
            # 1. receive 1 event within `max_wait` period
            # 2. receive 2 events with the most recent event being within
            #   the timeframe of `max_wait`
            # 3. receive 2 events with the most recent event being greater
            #   than the `max_wait` timeframe
            # 4. receive 2+ events with the most recent event being less
            #   than the `max_wait` timeframe (i.e. rapid events)
            # The first 3 scenarios listed above are easy enough to handle
            # because we send those messages as-is to be processed. The
            # last scenario is the situation for which we need to try and
            # have a "settle" time. If we continue to receive a rapid
            # succesion of events, then we'll log a message and ignore the
            # event since it will wreak havoc on the HA system.
            time_diff_floor = floor((this_event['time'] - last_event['time']))
            max_wait_floor = floor(self.max_wait)
            if last_event == this_event or time_diff_floor > max_wait_floor:
                # scenario #1 and scenario #3 listed above
                last_event = None
                backoff = False
                self.event_queue.pop()
                self.middleware.call_hook_sync('vrrp.fifo', data=this_event)
            elif time_diff_floor == max_wait_floor:
                # scenario #2 listed above
                # NOTE:
                # The events looke something like this:
                # RECEIVED: 'INSTANCE "eno1_v4" BACKUP 254\n' at time: 1701967219.244696
                # RECEIVED: 'INSTANCE "eno1_v4" MASTER 254\n' at time: 1701967221.2902775
                # In the messages above, the time difference is ~2seconds which is the default
                # timeout for not receiving a MASTER advertisement before VRRP takes over. So
                # we'll send this event down the pipe.
                last_event = None
                backoff = False
                self.event_queue.pop()
                self.middleware.call_hook_sync('vrrp.fifo', data=this_event)
            elif time_diff_floor < max_wait_floor:
                # scenario #4 listed above
                # NOTE:
                # The events could look like this:
                # RECEIVED: 'INSTANCE "eno1_v4" BACKUP 254\n' at time: 1701967219.244696
                # RECEIVED: 'INSTANCE "eno1_v4" MASTER 254\n' at time: 1701967220.2902775
                # This happens when both controllers of an HA system start near simultaneously
                # (i.e. power-outage event most often) OR it could be happening because of an
                # external networking problem. Either way, the VRRP service will send adverts
                # but the moment the MASTER controller is determined, it'll send that advert
                # and (while testing in-house), it is _always_ less than the default advert
                # timeout (max_wait). We obviously can't ignore that event because doing so
                # would prevent the HA system from coming up properly (no zpools, no fenced)
                if not backoff:
                    backoff = True
                    last_event = this_event
                    sleep(self.rapid_event_settle_time)
                else:
                    last_event = None
                    backoff = False
                    self.event_queue.pop()
                    LOGGER.warning('Detected rapid succession of failover events: (%r)', this_event)
            else:
                LOGGER.warning('Unhandled failover event. last_event: %r, this_event: %r', last_event, this_event)
                last_event = None
                backoff = False
                self.event_queue.pop()
class VrrpFifoThread(Thread):
    """Reads raw VRRP state-transition lines from a named fifo, parses them
    and pushes normalized events onto the shared (maxlen=1) event queue
    consumed by `VrrpEventThread`."""

    def __init__(self, *args, **kwargs):
        super(VrrpFifoThread, self).__init__()
        self._retry_timeout = 2  # timeout in seconds before retrying to connect to FIFO
        self._vrrp_file = '/var/run/vrrpd.fifo'
        self.pause_event = Event()
        self.middleware = kwargs.get('middleware')
        # events for these interfaces are ignored (not critical for failover)
        self.non_crit_ifaces = kwargs.get('non_crit_ifaces') or VrrpObjs.non_crit_ifaces
        self.event_queue = VrrpObjs.event_queue
        # sentinel line written by shutdown() to make run() exit cleanly
        self.shutdown_line = '--SHUTDOWN--'

    def shutdown(self):
        """Unblock run() by writing the sentinel line into the fifo."""
        with open(self._vrrp_file, 'w') as f:
            f.write(f'{self.shutdown_line}\n')

    def pause(self):
        self.pause_event.set()

    def unpause(self):
        self.pause_event.clear()

    def create_fifo(self):
        """Create the named fifo; an already-existing fifo is fine."""
        try:
            mkfifo(self._vrrp_file)
        except FileExistsError:
            pass

    def format_fifo_msg(self, msg):
        """Validate and parse a raw fifo message.

        `msg` is expected to be ``{'event': <str>, 'time': <float>}``.
        Returns ``{'ifname', 'event', 'time'}`` for MASTER/BACKUP/FAULT
        transitions (FAULT is normalized to BACKUP), otherwise None.
        """
        # NOTE: these checks must short-circuit. The previous implementation
        # evaluated them eagerly inside any((...)), so a non-dict raised
        # AttributeError and a dict without an 'event' key raised KeyError
        # before validation could log and reject the message.
        if (
            not isinstance(msg, dict)
            or not msg.get('event')
            or len(msg['event'].split()) != 4
            or not msg.get('time')
        ):
            LOGGER.error('Ignoring unexpected VRRP event message: %r', msg)
            return
        try:
            info = msg['event'].split()
            ifname = info[1].split('_')[0].strip('"')  # interface
            event = info[2]  # the state that is being transititoned to
        except Exception:
            LOGGER.error('Failed parsing vrrp message', exc_info=True)
            return
        else:
            if event not in ('MASTER', 'BACKUP', 'FAULT'):
                return
            if event == 'FAULT':
                # a FAULT message is sent when iface goes down
                event = 'BACKUP'
            return {'ifname': ifname, 'event': event, 'time': msg['time']}

    def run(self):
        """Open the fifo and forward parsed events to the shared queue."""
        set_name('vrrp_fifo_thread')
        try:
            self.create_fifo()
        except Exception:
            LOGGER.error('FATAL: Unable to create VRRP fifo.', exc_info=True)
            return
        # NOTE(review): `log_it` is never reset after a successful connection,
        # so reconnection failures after the first one are silent -- confirm
        log_it = True
        while True:
            try:
                with open(self._vrrp_file) as f:
                    LOGGER.info('vrrp fifo connection established')
                    for line in f:
                        if self.pause_event.is_set():
                            continue
                        event = line.strip()
                        if event == self.shutdown_line:
                            return
                        formatted = self.format_fifo_msg({'event': event, 'time': time()})
                        if not formatted:
                            continue
                        elif formatted['ifname'] in self.non_crit_ifaces:
                            LOGGER.debug(
                                'Received an event (%r) for a non-critical interface, ignoring.',
                                formatted
                            )
                            continue
                        self.event_queue.append(formatted)
            except Exception:
                if log_it:
                    LOGGER.warning(
                        'vrrp fifo connection not established, retrying every %d seconds',
                        self._retry_timeout,
                        exc_info=True
                    )
                    log_it = False
                sleep(self._retry_timeout)
async def _start_stop_vrrp_threads(middleware):
    """Start (or stop) the VRRP fifo/event worker threads based on whether
    this system is licensed for HA; waits for system readiness first."""
    while not await middleware.call('system.ready'):
        await asyncio_sleep(0.2)
    licensed = await middleware.call('failover.licensed')
    if not licensed:
        # maybe the system is being downgraded to non-HA
        # (this is rare but still need to handle it) or
        # system is being restarted/shutdown etc
        if VrrpObjs.fifo_thread is not None and VrrpObjs.fifo_thread.is_alive():
            await middleware.run_in_thread(VrrpObjs.fifo_thread.shutdown)
            VrrpObjs.fifo_thread = None
        if VrrpObjs.event_thread is not None and VrrpObjs.event_thread.is_alive():
            await middleware.run_in_thread(VrrpObjs.event_thread.shutdown)
            VrrpObjs.event_thread = None
        VrrpObjs.event_queue.clear()
    else:
        # if this is a system that is being licensed for HA for the
        # first time (without being rebooted) then we need to make
        # sure we start these threads
        timeout = (await middleware.call('failover.config'))['timeout']
        # non-critical interfaces whose events the fifo thread should ignore
        nci = set(
            i['int_interface'] for i in await middleware.call('datastore.query', 'network.interfaces')
            if not i['int_critical']
        )
        if VrrpObjs.fifo_thread is None or not VrrpObjs.fifo_thread.is_alive():
            VrrpObjs.fifo_thread = VrrpFifoThread(middleware=middleware, non_crit_ifaces=nci)
            VrrpObjs.fifo_thread.start()
        if VrrpObjs.event_thread is None or not VrrpObjs.event_thread.is_alive():
            VrrpObjs.event_thread = VrrpEventThread(middleware=middleware, timeout=timeout)
            VrrpObjs.event_thread.start()
async def _post_license_update(middleware, *args, **kwargs):
    # re-evaluate whether the VRRP worker threads should run whenever
    # a new license is applied
    await _start_stop_vrrp_threads(middleware)
async def setup(middleware):
    # evaluate thread start/stop in the background (it blocks on system.ready)
    middleware.create_task(_start_stop_vrrp_threads(middleware))
    # re-check whenever the system license changes
    middleware.register_hook('system.post_license_update', _post_license_update)
| 13,027 | Python | .py | 272 | 35.205882 | 114 | 0.582153 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,042 | udev_events.py | truenas_middleware/src/middlewared/middlewared/plugins/device_/udev_events.py | import pyudev
import subprocess
import time
from middlewared.service import private, Service
from middlewared.utils import run
from middlewared.utils.threading import start_daemon_thread
class DeviceService(Service):

    async def _udevadm(self, args, error_fmt):
        # Run udevadm with stdout discarded; log stderr on a nonzero exit.
        proc = await run(['udevadm', *args], stdout=subprocess.DEVNULL, check=False)
        if proc.returncode != 0:
            self.middleware.logger.error(error_fmt, proc.stderr.decode())

    @private
    async def settle_udev_events(self):
        """Block until the udev event queue has been fully processed."""
        await self._udevadm(['settle'], 'Failed to settle udev events: %s')

    @private
    async def trigger_udev_events(self, device):
        """Ask udev to re-emit events for `device`."""
        await self._udevadm(['trigger', device], 'Failed to trigger udev events: %s')
def udev_events(middleware):
    """Poll udev forever, dispatching each block/dlm/net event to the
    corresponding `udev.<subsystem>` middleware hook."""
    # 256 MiB receive buffer: for large quantity disk systems (100's or more)
    recv_buffer_bytes = 268435456
    while True:
        # We always want to keep polling udev, let's log what error we are
        # seeing and fix them as we come across them
        try:
            ctx = pyudev.Context()
            monitor = pyudev.Monitor.from_netlink(ctx)
            monitor.set_receive_buffer_size(recv_buffer_bytes)
            for subsystem in ('block', 'dlm', 'net'):
                monitor.filter_by(subsystem=subsystem)
            for device in iter(monitor.poll, None):
                payload = dict(device)
                payload['SYS_NAME'] = device.sys_name
                middleware.call_hook_sync(f'udev.{device.subsystem}', data=payload)
        except Exception:
            middleware.logger.error('Polling udev failed', exc_info=True)
            time.sleep(10)
def setup(middleware):
    # run the udev polling loop for the life of the process in a daemon thread
    start_daemon_thread(target=udev_events, args=(middleware,))
| 1,758 | Python | .py | 38 | 37.763158 | 98 | 0.660432 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,043 | enums.py | truenas_middleware/src/middlewared/middlewared/plugins/jbof/enums.py | import enum
class Transport(enum.Enum):
    """Supported data-plane transports for JBOF attachment."""
    NVME_ROCE = 'NVMe/ROCE'
    NVME_TCP = 'NVMe/TCP'

    @classmethod
    def choices(cls):
        """Return all transport values (for schema/enum validation).

        Declared as a classmethod so it also works when accessed through a
        member (the previous zero-argument function raised TypeError there).
        """
        return [member.value for member in cls]
class AddressMechanism(enum.Enum):
    """Mechanisms for assigning data-plane IP addresses."""
    STATIC = 'static'
    STATIC_SET = 'static_set'
    DHCP = 'dhcp'

    @classmethod
    def choices(cls):
        """Return all mechanism values (for schema/enum validation).

        Declared as a classmethod so it also works when accessed through a
        member (the previous zero-argument function raised TypeError there).
        """
        return [member.value for member in cls]
class ManagementProtocol(enum.Enum):
    """Supported out-of-band management protocols for JBOFs."""
    REDFISH = 'redfish'

    @classmethod
    def choices(cls):
        """Return all protocol values (for schema/enum validation).

        Declared as a classmethod so it also works when accessed through a
        member (the previous zero-argument function raised TypeError there).
        """
        return [member.value for member in cls]
| 474 | Python | .py | 16 | 24.3125 | 52 | 0.670379 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,044 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/jbof/crud.py | import asyncio
import subprocess
import time
import middlewared.sqlalchemy as sa
from middlewared.plugins.jbof.redfish import (InvalidCredentialsError,
RedfishClient)
from middlewared.schema import (Bool, Dict, Int, IPAddr, List, Password, Patch,
Str, accepts, returns)
from middlewared.service import (CallError, CRUDService, ValidationErrors, job,
private)
from middlewared.utils.license import LICENSE_ADDHW_MAPPING
from middlewared.validators import Netmask, Range
from .functions import (decode_static_ip, get_sys_class_nvme,
initiator_ip_from_jbof_static_ip, initiator_static_ip,
jbof_static_ip, jbof_static_ip_from_initiator_ip,
static_ip_netmask_int, static_ip_netmask_str,
static_mtu)
class JBOFModel(sa.Model):
    __tablename__ = 'storage_jbof'
    id = sa.Column(sa.Integer(), primary_key=True)
    jbof_description = sa.Column(sa.String(120), nullable=True)
    # When performing static (code-based) assignment of data-plane IPs, we
    # want each JBOD to have a deterministic unique index that counts up from
    # zero (without any gaps, which rules out using the id). This will not be
    # part of the public API.
    jbof_index = sa.Column(sa.Integer(), unique=True)
    # Redfish identity of the shelf; used to detect duplicates on create
    jbof_uuid = sa.Column(sa.Text(), nullable=False, unique=True)
    # Redfish management endpoints/credentials; the second IP is optional
    jbof_mgmt_ip1 = sa.Column(sa.String(45), nullable=False)
    jbof_mgmt_ip2 = sa.Column(sa.String(45))
    jbof_mgmt_username = sa.Column(sa.String(120))
    # stored encrypted at rest
    jbof_mgmt_password = sa.Column(sa.EncryptedText())
class JBOFService(CRUDService):
class Config:
service = 'jbof'
datastore = 'storage.jbof'
datastore_prefix = "jbof_"
cli_private = True
role_prefix = 'JBOF'
ENTRY = Dict(
'jbof_entry',
Int('id', required=True),
Str('description'),
# Redfish
IPAddr('mgmt_ip1', required=True),
IPAddr('mgmt_ip2', required=False),
Str('mgmt_username', required=True),
Password('mgmt_password', required=True),
)
# Number of seconds we need to wait for a ES24N to start responding on
# a newly configured IP
JBOF_CONFIG_DELAY_SECS = 20
@private
async def add_index(self, data):
"""Add a private unique index (0-255) to the entry if not already present."""
if 'index' not in data:
index = await self.middleware.call('jbof.next_index')
if index is not None:
data['index'] = index
else:
raise CallError('Could not generate an index.')
return data
@private
async def validate(self, data, schema_name, old=None):
verrors = ValidationErrors()
# Check license
license_count = await self.middleware.call("jbof.licensed")
if license_count == 0:
verrors.add(f"{schema_name}.mgmt_ip1", "This feature is not licensed")
else:
if old is None:
# We're adding a new JBOF - have we exceeded the license?
count = await self.middleware.call('jbof.query', [], {'count': True})
if count >= license_count:
verrors.add(f"{schema_name}.mgmt_ip1",
f"Already configured the number of licensed emclosures: {license_count}")
# Ensure redfish connects to mgmt1 (incl login)
mgmt_ip1 = data.get('mgmt_ip1')
if not RedfishClient.is_redfish(mgmt_ip1):
verrors.add(f"{schema_name}.mgmt_ip1", "Not a redfish management interface")
else:
redfish1 = RedfishClient(f'https://{mgmt_ip1}')
try:
redfish1.login(data['mgmt_username'], data['mgmt_password'])
RedfishClient.cache_set(mgmt_ip1, redfish1)
except InvalidCredentialsError:
verrors.add(f"{schema_name}.mgmt_username", "Invalid username or password")
# If mgmt_ip2 was supplied, ensure it matches to the same system as mgmt_ip1
mgmt_ip2 = data.get('mgmt_ip2')
if mgmt_ip2:
if not RedfishClient.is_redfish(mgmt_ip2):
verrors.add(f"{schema_name}.mgmt_ip2", "Not a redfish management interface")
else:
redfish2 = RedfishClient(f'https://{mgmt_ip2}')
if redfish1.product != redfish2.product:
verrors.add(f"{schema_name}.mgmt_ip2", "Product does not match other IP address.")
if redfish1.uuid != redfish2.uuid:
verrors.add(f"{schema_name}.mgmt_ip2", "UUID does not match other IP address.")
# When adding a new JBOF - do we have a UUID clash?
if old is None:
existing_uuids = [d['uuid'] for d in (await self.middleware.call('jbof.query', [], {'select': ['uuid']}))]
if redfish1.uuid in existing_uuids:
verrors.add(f"{schema_name}.mgmt_ip1", "Supplied JBOF already in database (UUID)")
else:
# Inject UUID
data['uuid'] = redfish1.uuid
await self.add_index(data)
return verrors, data
@accepts(
Patch(
'jbof_entry', 'jbof_create',
('rm', {'name': 'id'}),
register=True
)
)
async def do_create(self, data):
"""
Create a new JBOF.
This will use the supplied Redfish credentials to configure the data plane on
the expansion shelf for direct connection to ROCE capable network cards on
the TrueNAS head unit.
`description` Optional description of the JBOF.
`mgmt_ip1` IP of 1st Redfish management interface.
`mgmt_ip2` Optional IP of 2nd Redfish management interface.
`mgmt_username` Redfish administrative username.
`mgmt_password` Redfish administrative password.
"""
verrors, data = await self.validate(data, 'jbof_create')
verrors.check()
mgmt_ip = data['mgmt_ip1']
shelf_index = data['index']
# Everything looks good so far. Attempt to hardwire the dataplane.
try:
await self.middleware.call('jbof.hardwire_dataplane', mgmt_ip, shelf_index, 'jbof_create.mgmt_ip1', verrors)
if verrors:
await self.middleware.call('jbof.unwire_dataplane', mgmt_ip, shelf_index)
except Exception as e:
self.logger.error('Failed to add JBOF', exc_info=True)
# Try a cleanup
try:
await self.middleware.call('jbof.unwire_dataplane', mgmt_ip, shelf_index)
except Exception:
pass
verrors.add('jbof_create.mgmt_ip1', f'Failed to add JBOF: {e}')
verrors.check()
# If the caller just supplied mgmt_ip1, let's fetch mgmt_ip2 to store in the DB
if data.get('mgmt_ip2') in ['', None]:
try:
if ip := await self.middleware.call('jbof.alt_mgmt_ip', mgmt_ip):
data['mgmt_ip2'] = ip
self.logger.info('Detected additional JBOF mgmt IP %r', ip)
else:
self.logger.warning('Unable to determine additional JBOF mgmt IP')
except Exception:
self.logger.warning('Unable to detect additional JBOF mgmt IP', exc_info=True)
data['id'] = await self.middleware.call(
'datastore.insert', self._config.datastore, data,
{'prefix': self._config.datastore_prefix})
return await self.get_instance(data['id'])
@accepts(
Int('id', required=True),
Patch(
'jbof_create', 'jbof_update',
('attr', {'update': True})
)
)
async def do_update(self, id_, data):
"""
Update JBOF of `id`
"""
old = await self.get_instance(id_)
new = old.copy()
new.update(data)
verrors, data = await self.validate(new, 'jbof_update', old)
verrors.check()
if old['uuid'] != new['uuid']:
self.logger.debug('Changed UUID of JBOF from %s to %s', old['uuid'], new['uuid'])
await self.middleware.call('jbof.unwire_dataplane', old['mgmt_ip1'], old['index'])
await self.middleware.call('jbof.hardwire_dataplane', new['mgmt_ip1'], new['index'],
'jbof_update.mgmt_ip1', verrors)
await self.middleware.call(
'datastore.update', self._config.datastore, id_, new,
{'prefix': self._config.datastore_prefix})
return await self.get_instance(id_)
@accepts(Int('id'), Bool('force', default=False))
async def do_delete(self, id_, force):
"""
Delete a JBOF by ID.
"""
# Will make a best-effort un tear down existing connections / wiring
# To do that we first need to fetch the config.
data = await self.get_instance(id_)
try:
await self.middleware.run_in_thread(self.ensure_redfish_client_cached, data)
except Exception as e:
if force:
# If we have lost communication with the redfish interface for any reason
# we might still want to proceed with removing the JBOF, even without tearing
# down the shelf configuration. However, we wil still want to undo the
# host configuration.
self.logger.debug('Unable to ensure redfish client for JBOF %r. Forcing.', data['id'])
else:
raise e
try:
await self.middleware.call('jbof.unwire_dataplane', data['mgmt_ip1'], data['index'])
except Exception:
self.logger.debug('Unable to unwire JBOF @%r', data['mgmt_ip1'])
await self.middleware.call('alert.oneshot_create', 'JBOFTearDownFailure', None)
# Now delete the entry
response = await self.middleware.call('datastore.delete', self._config.datastore, id_)
return response
@accepts()
async def reapply_config(self):
    """
    Reapply the JBOF configuration to attached JBOFs.

    If an IOM is replaced in a JBOF, then it is expected to be configured to have
    the same redfish IP, user & password as was previously the case.

    This API can then be called to configure each JBOF with the expected data-plane
    IP configuration, and then attach NVMe drives.

    :raises ValidationErrors: if drives could not be attached.
    """
    verrors = ValidationErrors()
    await self.middleware.call('jbof.hardwire_shelves')
    await self.middleware.call('jbof.attach_drives', 'jbof.reapply_config', verrors)
    verrors.check()
@private
def get_mgmt_ips(self, mgmt_ip):
    """Return all management IPs reported by the redfish service at `mgmt_ip`."""
    redfish = RedfishClient.cache_get(mgmt_ip)
    return redfish.mgmt_ips()
@private
def alt_mgmt_ip(self, mgmt_ip):
    """Return an alternate management IP for the JBOF reachable at `mgmt_ip`.

    Prefer an alternate IP that answers redfish right now; otherwise fall
    back to the first alternate (connectivity may be restored later).
    Returns None if no alternate IPs exist.
    """
    # BUGFIX: the original kept this as a `filter` object — the subsequent
    # `len()` call raised TypeError (and the iterator was already exhausted
    # by the for-loop) — and it never returned the documented fallback IP.
    other_mgmt_ips = [ip for ip in self.get_mgmt_ips(mgmt_ip) if ip != mgmt_ip]
    for ip in other_mgmt_ips:
        if RedfishClient.is_redfish(ip):
            return ip
    # If unable to talk to one, pick the first one. Maybe connectivity will be restored later.
    if other_mgmt_ips:
        self.logger.info('Unable to validate connectivity to alternate JBOF mgmt IP %r', other_mgmt_ips[0])
        return other_mgmt_ips[0]
@private
def ensure_redfish_client_cached(self, data):
    """Synchronous function to ensure we have a redfish client in cache.

    :param data: dict with at least 'mgmt_ip1' and optionally
        'mgmt_username' / 'mgmt_password'.
    :return: the cached (or newly created and cached) RedfishClient.
    """
    mgmt_ip = data['mgmt_ip1']
    username = data.get('mgmt_username')
    password = data.get('mgmt_password')
    try:
        return RedfishClient.cache_get(mgmt_ip)
    except KeyError:
        # This could take a while to login, etc ... hence synchronous wrapper.
        redfish = RedfishClient(f'https://{mgmt_ip}', username, password)
        RedfishClient.cache_set(mgmt_ip, redfish)
        return redfish
@accepts(roles=['JBOF_READ'])
@returns(Int())
async def licensed(self):
    """Return a count of the number of JBOF units licensed.

    Returns 0 when there is no license, or when the license was issued for
    a different chassis serial number.
    """
    result = 0
    # Do we have a license at all?
    license_ = await self.middleware.call('system.license')
    if not license_:
        return result
    # check if this node's system serial matches the serial in the license
    local_serial = (await self.middleware.call('system.dmidecode_info'))['system-serial-number']
    if local_serial not in (license_['system_serial'], license_['system_serial_ha']):
        return result
    # Check to see if we're licensed to attach a JBOF
    if license_['addhw']:
        for quantity, code in license_['addhw']:
            if code not in LICENSE_ADDHW_MAPPING:
                self.logger.warning('Unknown additional hardware code %d', code)
                continue
            name = LICENSE_ADDHW_MAPPING[code]
            # Only ES24N shelves count towards the JBOF license total.
            if name == 'ES24N':
                result += quantity
    return result
@private
@accepts(
    Int('id', required=True),
    Str('iom', enum=['IOM1', 'IOM2'], required=True),
    Dict(
        'iom_network',
        Bool('dhcp'),
        Str('fqdn'),
        Str('hostname'),
        List('ipv4_static_addresses', items=[Dict(
            'ipv4_static_address',
            IPAddr('address', v6=False),
            Str('netmask', validators=[Netmask(ipv6=False, prefix_length=False)]),
            IPAddr('gateway', v6=False))], default=None),
        List('ipv6_static_addresses', items=[Dict(
            'ipv6_static_address',
            IPAddr('address', v4=False),
            Int('prefixlen', validators=[Range(min_=1, max_=64)]))], default=None),
        List('nameservers', items=[IPAddr('nameserver')], default=None),
    ),
    Int('ethindex', default=1),
    Bool('force', default=False),
    Bool('check', default=True),
)
def set_mgmt_ip(self, id_, iom, data, ethindex, force, check):
    """Change the management IP for a particular IOM.

    :param id_: id of the JBOF whose IOM is being reconfigured.
    :param iom: 'IOM1' or 'IOM2'.
    :param data: network settings to apply (DHCP, FQDN, static addresses, ...).
    :param ethindex: ethernet interface index on the IOM (default 1).
    :param force: skip the safety check requiring we talk via the *other* IOM.
    :param check: when True, verify the change took effect and update the
        stored mgmt IP; when False, only update the database with the new
        static IP (DHCP is not permitted in that case).
    :raises CallError: on invalid arguments, communication failure, or when
        verification fails (the original settings are then restored).
    """
    # Fetch the existing JBOF config
    config = self.get_instance__sync(id_)
    config_mgmt_ips = set([config['mgmt_ip1'], config['mgmt_ip2']])
    redfish = self.ensure_redfish_client_cached(config)
    old_iom_mgmt_ips = set(redfish.iom_mgmt_ips(iom))
    if not check:
        # Without a post-change check we can only handle a single static IP
        # and must know which stored mgmt IP it replaces.
        if data.get('dhcp'):
            raise CallError('Can not bypass check when setting DHCP')
        try:
            new_static_ip = data.get('ipv4_static_addresses', [])[0]['address']
        except Exception:
            raise CallError('Can not determine new static IP')
        if config['mgmt_ip1'] in old_iom_mgmt_ips:
            ip_to_update = 'mgmt_ip1'
        elif config['mgmt_ip2'] in old_iom_mgmt_ips:
            ip_to_update = 'mgmt_ip2'
        else:
            raise CallError('Can not determine whether updating mgmt_ip1 or mgmt_ip2')
    if not force:
        # Do we need to switch redfish to the other IOM
        if redfish.mgmt_ip() in old_iom_mgmt_ips:
            other_iom = 'IOM2' if iom == 'IOM1' else 'IOM1'
            for mgmt_ip in redfish.iom_mgmt_ips(other_iom):
                if mgmt_ip in config_mgmt_ips:
                    redfish = self.ensure_redfish_client_cached({'mgmt_ip1': mgmt_ip,
                                                                 'mgmt_username': config['mgmt_username'],
                                                                 'mgmt_password': config['mgmt_password']})
                    break
        if redfish.mgmt_ip() in redfish.iom_mgmt_ips(iom):
            raise CallError('Can not modify IOM network config thru same IOM')
    # Read the existing config via redfish
    uri = f'/redfish/v1/Managers/{iom}/EthernetInterfaces/{ethindex}'
    r = redfish.get(uri)
    if not r.ok:
        # BUGFIX: this message was missing its f-string prefix, so the
        # literal '{iom}/{ethindex}' was reported instead of the values.
        raise CallError(f'Unable to read existing network configuration of {iom}/{ethindex}')
    orig_net_config = r.json()
    # Build the new settings (newdata) while remembering the corresponding
    # original values (olddata) so they can be restored on failure.
    newdata = {}
    olddata = {}
    if (dhcp := data.get('dhcp')) is not None:
        newdata.update({'DHCPv4': {'DHCPEnabled': dhcp}})
        olddata.update({'DHCPv4': orig_net_config['DHCPv4']})
    if (fqdn := data.get('fqdn')) is not None:
        newdata.update({'FQDN': fqdn})
        olddata.update({'FQDN': orig_net_config['FQDN']})
    if (hostname := data.get('hostname')) is not None:
        newdata.update({'HostName': hostname})
        olddata.update({'HostName': orig_net_config['HostName']})
    if (ipv4_static_addresses := data.get('ipv4_static_addresses')) is not None:
        newitems = []
        for item in ipv4_static_addresses:
            newitems.append({'Address': item['address'], 'Gateway': item['gateway'], 'SubnetMask': item['netmask']})
        newdata.update({'IPv4StaticAddresses': newitems})
        olddata.update({'IPv4StaticAddresses': orig_net_config['IPv4StaticAddresses']})
    if (ipv6_static_addresses := data.get('ipv6_static_addresses')) is not None:
        newitems = []
        for item in ipv6_static_addresses:
            newitems.append({'Address': item['address'], 'PrefixLength': item['prefixlen']})
        newdata.update({'IPv6StaticAddresses': newitems})
        olddata.update({'IPv6StaticAddresses': orig_net_config['IPv6StaticAddresses']})
    if (nameservers := data.get('nameservers')) is not None:
        newdata.update({'NameServers': nameservers})
        olddata.update({'NameServers': orig_net_config['NameServers']})
    try:
        removed_active = False
        added_active = False
        redfish.post(uri, data=newdata)
        # Give a few seconds for the changes to take effect
        time.sleep(10)
        if check:
            new_iom_mgmt_ips = set(redfish.iom_mgmt_ips(iom))
            if old_iom_mgmt_ips != new_iom_mgmt_ips:
                # IPs have changed.
                # 1. Was the IP that changed one of the stored mgmt_ips
                for removed_ip in old_iom_mgmt_ips - new_iom_mgmt_ips:
                    if removed_ip in config_mgmt_ips:
                        removed_active = True
                        break
                if removed_active:
                    # 2. Ensure a newly-added IP answers redfish before
                    # committing it to the database.
                    for added_ip in new_iom_mgmt_ips - old_iom_mgmt_ips:
                        if RedfishClient.is_redfish(added_ip):
                            added_active = True
                            break
                    if not added_active:
                        raise CallError(f'Unable to access redfish IP on {iom}')
                    # Update the config to reflect the new IP
                    if removed_ip == config['mgmt_ip1']:
                        self.middleware.call_sync(
                            'jbof.update', config['id'], {'mgmt_ip1': added_ip}
                        )
                    else:
                        self.middleware.call_sync(
                            'jbof.update', config['id'], {'mgmt_ip2': added_ip}
                        )
            else:
                # IPs did not change, still want to test connectivity
                for ip in config_mgmt_ips:
                    if ip in old_iom_mgmt_ips:
                        if not RedfishClient.is_redfish(ip):
                            raise CallError(f'Unable to access redfish IP {ip}')
        else:
            # check is False ... don't attempt to communicate with the new IP
            # just update the database.
            new = config.copy()
            new.update({ip_to_update: new_static_ip})
            self.middleware.call_sync(
                'datastore.update', self._config.datastore, config['id'], new,
                {'prefix': self._config.datastore_prefix}
            )
    except Exception as e:
        # Best-effort rollback of the interface settings before re-raising.
        self.logger.error(f'Unable to modify mgmt ip for {iom}/{ethindex}', exc_info=True)
        try:
            redfish.post(uri, data=olddata)
        except Exception:
            self.logger.error(f'Unable to restore original mgmt ip for {iom}/{ethindex}', exc_info=True)
        raise e
@private
async def next_index(self):
    """Return the lowest unused JBOF index in [0, 255], or None if all are taken."""
    rows = await self.middleware.call('jbof.query', [], {'select': ['index']})
    taken = {row['index'] for row in rows}
    return next((candidate for candidate in range(256) if candidate not in taken), None)
@private
async def hardwire_dataplane(self, mgmt_ip, shelf_index, schema, verrors):
    """Hardwire the dataplane interfaces of the specified JBOF.

    Configure the data plane network interfaces on the JBOF to
    previously determined subnets.

    Then attempt to connect using all the available RDMA capable
    interfaces, and finally attach the NVMe drives if no validation
    errors were recorded.
    """
    await self.middleware.call('jbof.hardwire_shelf', mgmt_ip, shelf_index)
    await self.middleware.call('jbof.hardwire_host', mgmt_ip, shelf_index, schema, verrors)
    if not verrors:
        await self.middleware.call('jbof.attach_drives', schema, verrors)
@private
def fabric_interface_choices(self, mgmt_ip):
    """Return the redfish URIs of the JBOF's data-plane (fabric) ethernet interfaces."""
    redfish = RedfishClient.cache_get(mgmt_ip)
    return redfish.fabric_ethernet_interfaces()
@private
def fabric_interface_macs(self, mgmt_ip):
    """Return a dict keyed by IP address where the value is the corresponding MAC address."""
    client = RedfishClient.cache_get(mgmt_ip)
    ip_to_mac = {}
    for uri in self.fabric_interface_choices(mgmt_ip):
        iface = client.get_uri(uri)
        mac = iface['MACAddress']
        ip_to_mac.update({entry['Address']: mac for entry in iface['IPv4Addresses']})
    return ip_to_mac
@private
def hardwire_shelf(self, mgmt_ip, shelf_index):
    """Apply the precomputed static data-plane IPs/MTU to every fabric
    interface on the shelf, then wait for previously-up links to recover.
    """
    redfish = RedfishClient.cache_get(mgmt_ip)
    shelf_interfaces = redfish.fabric_ethernet_interfaces()

    # Let's record the link status for each interface
    up_before = set()
    for uri in shelf_interfaces:
        status = redfish.link_status(uri)
        if status == 'LinkUp':
            up_before.add(uri)

    # Modify all the interfaces
    for (eth_index, uri) in enumerate(shelf_interfaces):
        address = jbof_static_ip(shelf_index, eth_index)
        redfish.configure_fabric_interface(uri, address, static_ip_netmask_str(address), mtusize=static_mtu())

    # Wait for all previously up interfaces to come up again
    # (polled once per second, bounded by JBOF_CONFIG_DELAY_SECS).
    up_after = set()
    retries = 0
    while retries < JBOFService.JBOF_CONFIG_DELAY_SECS and up_before - up_after:
        for uri in up_before:
            if uri not in up_after:
                status = redfish.link_status(uri)
                if status == 'LinkUp':
                    up_after.add(uri)
        time.sleep(1)
        retries += 1

    if up_before - up_after:
        self.logger.debug('Timed-out waiting for interfaces to come up')
        # Allow this to continue as we still might manage to ping it.
    else:
        self.logger.debug('Configured JBOF #%r', shelf_index)
@private
async def hardwire_shelves(self):
    """Apply the expected datapath IPs to all configured shelves (in parallel)."""
    jbofs = await self.middleware.call('jbof.query')
    if jbofs:
        # gather with return_exceptions so one failing shelf does not stop the others.
        exceptions = await asyncio.gather(
            *[self.middleware.call('jbof.hardwire_shelf', jbof['mgmt_ip1'], jbof['index']) for jbof in jbofs],
            return_exceptions=True
        )
        failures = []
        for jbof, exc in zip(jbofs, exceptions):
            if isinstance(exc, Exception):
                failures.append(str(exc))
            else:
                self.logger.info('Successfully hardwired JBOF %r (index %r)', jbof['description'], jbof['index'])
        if failures:
            self.logger.error(f'Failure hardwiring JBOFs: {", ".join(failures)}')
@private
def unwire_shelf(self, mgmt_ip):
    """Reset every fabric interface on the shelf to 0.0.0.0/255.255.255.0, MTU 1500."""
    redfish = RedfishClient.cache_get(mgmt_ip)
    for uri in redfish.fabric_ethernet_interfaces():
        # NOTE(review): the positional True presumably enables DHCP / clears the
        # static config — confirm against configure_fabric_interface's signature.
        redfish.configure_fabric_interface(uri, '0.0.0.0', '255.255.255.0', True, mtusize=1500)
@private
async def hardwire_host(self, mgmt_ip, shelf_index, schema, verrors):
    """Discover which direct links exist to the specified expansion shelf.

    Validates that the shelf carries exactly the expected hardwired IPs,
    then wires each node (both nodes on HA). Exactly one usable link per
    node is required — zero or more than one is reported via `verrors`
    (multipath is not currently supported).
    """
    # See how many interfaces are available on the expansion shelf
    shelf_ip_to_mac = await self.middleware.call('jbof.fabric_interface_macs', mgmt_ip)

    # Setup a dict with the expected IP pairs
    shelf_ip_to_host_ip = {}
    for idx, _ in enumerate((await self.middleware.call('jbof.fabric_interface_choices', mgmt_ip))):
        shelf_ip_to_host_ip[jbof_static_ip(shelf_index, idx)] = initiator_static_ip(shelf_index, idx)

    # Let's check that we have the expected hardwired IPs on the shelf
    if set(shelf_ip_to_mac) != set(shelf_ip_to_host_ip):
        # This should not happen
        verrors.add(schema, 'JBOF does not have expected IPs.'
                    f'Expected: {shelf_ip_to_host_ip}, has: {shelf_ip_to_mac}')
        return

    if await self.middleware.call('failover.licensed'):
        # HA system
        if not await self.middleware.call('failover.remote_connected'):
            verrors.add(schema, 'Unable to contact remote controller')
            return
        this_node = await self.middleware.call('failover.node')
        if this_node == 'MANUAL':
            verrors.add(schema, 'Unable to determine this controllers position in chassis')
            return
        connected_shelf_ips = []
        # Wire both nodes concurrently.
        results = await asyncio.gather(
            *[self.hardwire_node(node, shelf_index, shelf_ip_to_mac) for node in ('A', 'B')]
        )
        for (node, connected_shelf_ips) in zip(('A', 'B'), results):
            if not connected_shelf_ips:
                # Failed to connect any IPs => error
                verrors.add(schema, f'Unable to communicate with the expansion shelf (node {node})')
                return
            elif len(connected_shelf_ips) > 1:
                # Too many connections exist (currently do not support multipath)
                verrors.add(schema, f'Too many connections wired to the expansion shelf (node {node})')
                return
            self.logger.debug('Configured node %r: %r', node, connected_shelf_ips)
    else:
        connected_shelf_ips = await self.hardwire_node('', shelf_index, shelf_ip_to_mac)
        if not connected_shelf_ips:
            # Failed to connect any IPs => error
            verrors.add(schema, 'Unable to communicate with the expansion shelf')
            return
        elif len(connected_shelf_ips) > 1:
            # Too many connections exist (currently do not support multipath)
            verrors.add(schema, 'Too many connections wired to the expansion shelf')
            return
        self.logger.debug('Configured node: %r', connected_shelf_ips)
@private
async def hardwire_node(self, node, shelf_index, shelf_ip_to_mac, skip_ips=None):
    """Wire this node's RDMA-capable interfaces to the shelf's data-plane IPs.

    Validates (pings) any previously configured RDMA interfaces for this
    shelf, removes those that no longer connect, then tries remaining
    RDMA links for each still-unconnected shelf IP.

    :param node: 'A'/'B' on HA systems, '' on non-HA.
    :param shelf_index: index of the expansion shelf.
    :param shelf_ip_to_mac: dict mapping shelf data-plane IP -> MAC address.
    :param skip_ips: shelf IPs to leave alone (default: none).
        NOTE: was a mutable default ([]) — replaced with the None idiom.
    :return: list of shelf IPs we can communicate with.
    """
    skip_ips = [] if skip_ips is None else skip_ips
    localnode = not node or node == await self.middleware.call('failover.node')
    # Next see what RDMA-capable links are available on the host
    # Also setup a map for frequent use below
    if localnode:
        links = await self.middleware.call('rdma.get_link_choices')
    else:
        try:
            links = await self.middleware.call('failover.call_remote', 'rdma.get_link_choices')
        except CallError as e:
            if e.errno != CallError.ENOMETHOD:
                raise
            # Remote node does not (yet) implement the API — skip it.
            self.logger.warning('Cannot hardwire remote node')
            return []

    # First check to see if any interfaces that were previously configured
    # for this shelf are no longer applicable (they might have been moved to
    # a different port on the JBOF).
    connected_shelf_ips = set()
    dirty = False
    configured_interfaces = await self.middleware.call('rdma.interface.query')
    configured_interface_names = [interface['ifname'] for interface in configured_interfaces
                                  if interface['node'] == node]
    for interface in configured_interfaces:
        if node and node != interface['node']:
            continue
        host_ip = interface['address']
        shelf_ip = jbof_static_ip_from_initiator_ip(host_ip)
        value = decode_static_ip(host_ip)
        if value and value[0] == shelf_index:
            # This is supposed to be connected to our shelf. Check connectivity.
            if await self.middleware.call('rdma.interface.ping', node, interface['ifname'],
                                          shelf_ip, shelf_ip_to_mac[shelf_ip]):
                # This config looks good, keep it.
                connected_shelf_ips.add(shelf_ip)
                if node:
                    self.logger.info(f'Validated existing link on node {node}: {host_ip} -> {shelf_ip}')
                else:
                    self.logger.info(f'Validated existing link: {host_ip} -> {shelf_ip}')
            else:
                self.logger.info('Removing RDMA interface that cannot connect to JBOF')
                await self.middleware.call('rdma.interface.delete', interface['id'])
                dirty = True

    for shelf_ip in shelf_ip_to_mac:
        if shelf_ip in connected_shelf_ips or shelf_ip in skip_ips:
            continue
        # Try each remaining interface
        if dirty:
            # Interfaces were added/removed above; refresh our view.
            configured_interfaces = await self.middleware.call('rdma.interface.query')
            configured_interface_names = [interface['ifname'] for interface in configured_interfaces
                                          if interface['node'] == node]
            dirty = False
        for link in links:
            ifname = link['rdma']
            if ifname not in configured_interface_names:
                host_ip = initiator_ip_from_jbof_static_ip(shelf_ip)
                payload = {
                    'ifname': ifname,
                    'address': host_ip,
                    'prefixlen': static_ip_netmask_int(),
                    'mtu': static_mtu(),
                    'check': {'ping_ip': shelf_ip,
                              'ping_mac': shelf_ip_to_mac[shelf_ip]}
                }
                if node:
                    payload['node'] = node
                if await self.middleware.call('rdma.interface.create', payload):
                    dirty = True
                    connected_shelf_ips.add(shelf_ip)
                    # break out of the ifname loop
                    if node:
                        self.logger.info(f'Created link on node {node}: {host_ip} -> {shelf_ip}')
                    else:
                        self.logger.info(f'Created link: {host_ip} -> {shelf_ip}')
                    break
    return list(connected_shelf_ips)
@private
async def attach_drives(self, schema, verrors):
    """Attach drives from all configured JBOF expansion shelves.

    On HA systems both nodes are attached in parallel; validation
    problems are reported via `verrors` under `schema`.
    """
    if await self.middleware.call('failover.licensed'):
        # HA system
        if not await self.middleware.call('failover.remote_connected'):
            verrors.add(schema, 'Unable to contact remote controller')
            return
        this_node = await self.middleware.call('failover.node')
        if this_node == 'MANUAL':
            verrors.add(schema, 'Unable to determine this controllers position in chassis')
            return
        await asyncio.gather(*[self.attach_drives_to_node(node) for node in ('A', 'B')])
    else:
        await self.attach_drives_to_node('')
@private
async def attach_drives_to_node(self, node):
    """Connect NVMe disks over every configured RDMA interface belonging to *node*.

    Runs locally when *node* is this controller (or ''); otherwise invokes
    the remote controller, silently skipping it if it does not implement
    jbof.nvme_connect (ENOMETHOD).
    """
    localnode = not node or node == await self.middleware.call('failover.node')
    for interface in await self.middleware.call('rdma.interface.query'):
        if interface['node'] != node:
            continue
        jbof_ip = jbof_static_ip_from_initiator_ip(interface['address'])
        if localnode:
            await self.middleware.call('jbof.nvme_connect', jbof_ip)
        else:
            try:
                await self.middleware.call('failover.call_remote', 'jbof.nvme_connect', [jbof_ip])
            except CallError as e:
                if e.errno != CallError.ENOMETHOD:
                    raise
@private
def nvme_connect(self, ip, nr_io_queues=16):
    """Run `nvme connect-all` over RDMA against *ip*; raise CallError on failure."""
    cmd = ['nvme', 'connect-all', '-t', 'rdma', '-a', ip, '--persistent', '-i', f'{nr_io_queues}']
    proc = subprocess.run(cmd, capture_output=True)
    if proc.returncode:
        # Prefer stderr; fall back to stdout, then a placeholder.
        error = (proc.stderr or proc.stdout).decode() or 'No error message reported'
        self.logger.debug('Failed to execute command: %r with error: %r', " ".join(cmd), error)
        raise CallError(f'Failed connect NVMe disks: {error}')
    return True
@private
def nvme_disconnect(self, ips):
    """Iterate through all nvme devices that have a transport protocol
    of RDMA and disconnect from this host"""
    wanted = list(ips)
    nqns = [
        info['subsysnqn']
        for info in get_sys_class_nvme().values()
        if info['transport_protocol'] == 'rdma' and info['transport_address'] in wanted
    ]
    if len(nqns) > 0:
        self.logger.debug('Disconnecting %r NQNs', len(nqns))
        proc = subprocess.run(['nvme', 'disconnect', '-n', ','.join(nqns)], capture_output=True)
        if proc.returncode:
            error = (proc.stderr or proc.stdout).decode() or 'No error message reported'
            raise CallError(f'Failed disconnect NVMe disks: {error}')
@private
async def shelf_interface_count(self, mgmt_ip):
    """Return the number of data-plane interfaces on the shelf (best effort)."""
    try:
        return len(await self.middleware.call('jbof.fabric_interface_choices', mgmt_ip))
    except Exception:
        # Really only expect 4, but we'll over-estimate for now, as we check them anyway
        return 6
@private
async def unwire_host(self, mgmt_ip, shelf_index):
    """Unwire the host side of the dataplane for the specified JBOF:
    disconnect NVMe subsystems (both controllers on HA) and delete the
    matching RDMA interfaces.
    """
    possible_host_ips = []
    possible_shelf_ips = []
    shelf_interface_count = await self.shelf_interface_count(mgmt_ip)

    # If shelf_interface_count is e.g. 4 then we want to iterate over [0,1,2,3]
    for eth_index in range(shelf_interface_count):
        possible_host_ips.append(initiator_static_ip(shelf_index, eth_index))
        possible_shelf_ips.append(jbof_static_ip(shelf_index, eth_index))

    # Disconnect NVMe disks
    if await self.middleware.call('failover.licensed'):
        # HA system
        try:
            await asyncio.gather(
                self.middleware.call('jbof.nvme_disconnect', possible_shelf_ips),
                self.middleware.call('failover.call_remote', 'jbof.nvme_disconnect', [possible_shelf_ips])
            )
        except CallError as e:
            if e.errno != CallError.ENOMETHOD:
                raise
            # If other controller is not updated to include this method then nothing to tear down
    else:
        await self.middleware.call('jbof.nvme_disconnect', possible_shelf_ips)

    # Disconnect interfaces
    for interface in await self.middleware.call('rdma.interface.query', [['address', 'in', possible_host_ips]]):
        await self.middleware.call('rdma.interface.delete', interface['id'])
@private
async def unwire_dataplane(self, mgmt_ip, shelf_index):
    """Unwire the dataplane of the specified JBOF: first the host side,
    then the shelf's fabric interfaces themselves.
    """
    await self.middleware.call('jbof.unwire_host', mgmt_ip, shelf_index)
    await self.middleware.call('jbof.unwire_shelf', mgmt_ip)
@private
async def configure(self):
    """Bring up all configured RDMA interfaces and NVMe-connect each one's JBOF peer."""
    interfaces = await self.middleware.call('rdma.interface.configure')
    for interface in interfaces:
        jbof_ip = jbof_static_ip_from_initiator_ip(interface['address'])
        await self.middleware.call('jbof.nvme_connect', jbof_ip)
@private
async def configure_jbof(self, node, shelf_index):
    """Bring up a particular previously-configured JBOF on this node.

    Configures the node's RDMA interfaces for the shelf and then NVMe
    connects each JBOF data-plane IP, with a small retry loop.
    """
    possible_host_ips = []
    jbof_ips = []
    # Seeing as we're just going to be using possible_host_ips to filter a DB query,
    # don't bother checking with the JBOF for its shelf_interface_count ... just
    # assume a ridiculously high number (12) instead, so we iterate over [0..11]
    shelf_interface_count = 12
    for eth_index in range(shelf_interface_count):
        possible_host_ips.append(initiator_static_ip(shelf_index, eth_index))

    # First bring up the interfaces on this host
    interfaces = await self.middleware.call('rdma.interface.query', [['address', 'in', possible_host_ips],
                                                                     ['node', '=', node]])
    for interface in interfaces:
        await self.middleware.call('rdma.interface.local_configure_interface',
                                   interface['ifname'],
                                   interface['address'],
                                   interface['prefixlen'],
                                   interface['mtu'])
        jbof_ips.append(jbof_static_ip_from_initiator_ip(interface['address']))

    # Next do the NVMe connect
    # Include some retry code, but expect it won't get used.
    retries = 5
    while retries:
        retries -= 1
        # Note that we iterate over a COPY of jbof_ips so that we can remove items
        for jbof_ip in jbof_ips[:]:
            try:
                await self.middleware.call('jbof.nvme_connect', jbof_ip)
                jbof_ips.remove(jbof_ip)
                self.logger.debug(f'Connected NVMe/RoCE: {jbof_ip}')
            except CallError:
                if retries:
                    self.logger.info(f'Failed to connect to {jbof_ip}, will retry')
                    await asyncio.sleep(1)
                else:
                    # Retries exhausted: propagate the failure.
                    raise
        if not jbof_ips:
            return
@private
@job(lock='configure_job')
async def configure_job(self, job, reload_fenced=False):
    """Bring up any previously configured JBOF NVMe/RoCE configuration.

    Each JBOF will be brought up in parallel.

    Result will be a dict with keys 'failed' (boolean) and 'message' (str).
    """
    job.set_progress(0, 'Configure RDMA interfaces')
    failed = False
    if await self.middleware.call('failover.licensed'):
        node = await self.middleware.call('failover.node')
    else:
        node = ''
    jbofs = await self.middleware.call('jbof.query')
    if not jbofs:
        err = 'No JBOFs need to be configured'
        job.set_progress(100, err)
        return {'failed': failed, 'message': err}

    # Bring up the JBOFs in parallel.
    exceptions = await asyncio.gather(
        *[self.configure_jbof(node, jbof['index']) for jbof in jbofs],
        return_exceptions=True
    )
    failures = []
    for exc in exceptions:
        if isinstance(exc, Exception):
            failures.append(str(exc))

    # Report progress so far.
    # Reserve the final 10% for the fenced reload when one was requested.
    if reload_fenced:
        percent_available = 90
    else:
        percent_available = 100
    if failures:
        # We know all_count is > 0 because of the return above.
        all_count = len(jbofs)
        fail_count = len(failures)
        percent = (percent_available * (all_count - fail_count)) // all_count
        err = f'Failure connecting {fail_count} JBOFs: {", ".join(failures)}'
        self.logger.error(err)
        job.set_progress(percent, err)
        failed = True
    else:
        percent = percent_available
        err = 'Completed boot-time bring up of NVMe/RoCE'
        job.set_progress(percent, err)

    # Reload fenced if requested
    if reload_fenced and (await self.middleware.call('failover.fenced.run_info'))['running']:
        try:
            await self.middleware.call('failover.fenced.signal', {'reload': True})
            self.logger.debug('Reloaded fenced')
            job.set_progress(percent + 10, err + ', reloaded fenced')
        except Exception:
            self.logger.error('Unhandled exception reloading fenced', exc_info=True)
            job.set_progress(percent, err + ', failed to reload fenced')
            failed = True
    else:
        # NOTE(review): when reload_fenced is False, percent is already 100
        # and this reports percent + 10 — confirm set_progress tolerates >100.
        job.set_progress(percent + 10, err)

    # This gets returned as the job.result
    return {'failed': failed, 'message': err}
async def _clear_reboot_alerts(middleware, event_type, args):
    """system.ready handler: drop any stale JBOF tear-down alert from a previous boot."""
    await middleware.call('alert.oneshot_delete', 'JBOFTearDownFailure', None)
async def setup(middleware):
    """Plugin setup: initialize the redfish client layer and register the
    boot-time alert cleanup handler.
    """
    RedfishClient.setup()
    # Deliberately do NOT handle the case where the system is already
    # ready, as we only want the following to occur after a boot, not
    # on a middlewared restart.
    middleware.event_subscribe("system.ready", _clear_reboot_alerts)
25,045 | functions.py | truenas_middleware/src/middlewared/middlewared/plugins/jbof/functions.py | import ipaddress
from pathlib import Path
from middlewared.utils.disks import RE_IS_PART
def jbof_static_ip(shelf_index, eth_index):
    """Hardwired link-local IP for the JBOF side of a point-to-point /30 link."""
    third_octet = 20 + shelf_index
    fourth_octet = (eth_index << 2) + 1
    return f'169.254.{third_octet}.{fourth_octet}'
def initiator_static_ip(shelf_index, eth_index):
    """Hardwired link-local IP for the host (initiator) side of a point-to-point /30 link."""
    third_octet = 20 + shelf_index
    fourth_octet = (eth_index << 2) + 2
    return f'169.254.{third_octet}.{fourth_octet}'
def static_ip_netmask_int():
    """Prefix length used for each hardwired point-to-point subnet (/30)."""
    return 30
def static_ip_netmask_str(ip='169.254.20.0'):
    """Dotted-quad netmask of the hardwired /30 subnet containing *ip*."""
    network = ipaddress.IPv4Network(f'{ip}/{static_ip_netmask_int()}', strict=False)
    return str(network.netmask)
def static_mtu():
    """MTU applied to the hardwired data-plane interfaces."""
    return 5000
def decode_static_ip(ip):
    """Decode a static IP.

    Returns a tuple of (shelf_index, eth_index), or None when *ip* is not a
    valid hardwired 169.254.2x.y address produced by jbof_static_ip /
    initiator_static_ip.
    """
    try:
        ipaddress.ip_address(ip)
    except ValueError:
        return None
    if not ip.startswith('169.254.'):
        return None
    octets = [int(part) for part in ip.split('.')]
    if octets[2] < 20:
        return None
    return (octets[2] - 20, octets[3] >> 2)
def jbof_static_ip_from_initiator_ip(ip):
    """Given an initiator-side static IP, return its JBOF-side peer IP (or None)."""
    decoded = decode_static_ip(ip)
    if decoded is None:
        return None
    return jbof_static_ip(*decoded)
def initiator_ip_from_jbof_static_ip(ip):
    """Given a JBOF-side static IP, return its initiator-side peer IP (or None)."""
    decoded = decode_static_ip(ip)
    if decoded is None:
        return None
    return initiator_static_ip(*decoded)
def get_sys_class_nvme():
    """Parse /sys/class/nvme into a dict keyed by controller name (e.g. 'nvme1').

    Each value carries model/serial/subsysnqn/transport details plus the
    controller's namespaces and partitions; RDMA controllers additionally get
    'hostnqn' and have 'transport_address' reduced to the bare traddr value.
    """
    result = dict()
    for ctrl in Path('/sys/class/nvme').iterdir():
        if not ctrl.is_dir():
            continue
        entry = {
            'model': (ctrl / 'model').read_text().strip(),
            'serial': (ctrl / 'serial').read_text().strip(),
            'subsysnqn': (ctrl / 'subsysnqn').read_text().strip(),
            'transport_address': (ctrl / 'address').read_text().strip(),
            'transport_protocol': (ctrl / 'transport').read_text().strip(),
            'state': (ctrl / 'state').read_text().strip(),
        }
        if entry['transport_protocol'] == 'rdma':
            entry['hostnqn'] = (ctrl / 'hostnqn').read_text().strip()
            # 'address' looks like 'traddr=a.b.c.d,trsvcid=...' — keep just the IP.
            entry['transport_address'] = entry['transport_address'].split('=')[1].split(',')[0].strip()
        namespaces = list()
        partitions = list()
        ns_prefix = f'{ctrl.name}n'
        for ns in ctrl.iterdir():
            # nvme1n1/n2/n3 etc
            if not (ns.is_dir() and ns.name.startswith(ns_prefix)):
                continue
            namespaces.append(ns.name)
            for part in ns.iterdir():
                # nvme1n1p1/p2/p3 etc
                if RE_IS_PART.search(part.name):
                    partitions.append(part.name)
        entry['namespaces'] = namespaces
        entry['partitions'] = partitions
        result[ctrl.name] = entry
    return result
| 2,534 | Python | .py | 60 | 34.116667 | 117 | 0.589555 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,046 | client.py | truenas_middleware/src/middlewared/middlewared/plugins/jbof/redfish/client.py | import asyncio
import enum
import errno
import json
import logging
import socket
from urllib.parse import urlencode
import aiohttp
import requests
from middlewared.service import CallError
from middlewared.utils import MIDDLEWARE_RUN_DIR, filter_list
from truenas_api_client import Client
from urllib3.exceptions import InsecureRequestWarning
# Default timeout (seconds) applied to redfish HTTP requests.
DEFAULT_REDFISH_TIMEOUT_SECS = 10
# Default headers used for redfish JSON requests.
HEADER = {'Content-Type': 'application/json', 'Vary': 'accept'}
# Redfish service root path.
REDFISH_ROOT_PATH = '/redfish/v1'
# JSON key holding a resource's URI in redfish payloads.
ODATA_ID = '@odata.id'
LOGGER = logging.getLogger(__name__)
# Fallback sessions-collection URI when the service root does not advertise one.
REDFISH_SESSIONS = '/redfish/v1/SessionService/Sessions'
class InvalidCredentialsError(Exception):
    """Raised when a redfish session login is rejected."""
    pass
class AuthMethod(enum.Enum):
    """Supported redfish authentication mechanisms."""
    BASIC = 'basic'
    SESSION = 'session'

    # BUGFIX: these helpers were undecorated functions inside the Enum body.
    # Class-level access (AuthMethod.choices()) worked, but instance-level
    # access (AuthMethod.BASIC.choices()) bound the member as an argument and
    # broke. @staticmethod keeps existing call sites working either way.
    @staticmethod
    def choices():
        """Return the string values of all supported auth methods."""
        return [x.value for x in AuthMethod]

    @staticmethod
    def authtype_to_enum(authtype):
        """Coerce *authtype* (member or its string value) to an AuthMethod.

        :raises ValueError: for anything else.
        """
        if authtype in (AuthMethod.BASIC, AuthMethod.BASIC.value):
            return AuthMethod.BASIC
        elif authtype in (AuthMethod.SESSION, AuthMethod.SESSION.value):
            return AuthMethod.SESSION
        raise ValueError('Invalid auth method', authtype)
class AbstractRedfishClient:
    """Small base class exposing common accessors over a redfish service root."""

    @property
    def uuid(self):
        """UUID advertised by the redfish service root."""
        return self.root['UUID']

    @property
    def product(self):
        """Product name advertised by the redfish service root."""
        return self.root['Product']

    def _members(self, data):
        """Map each member's trailing URI component to its full '@odata.id' URI."""
        uris = (member['@odata.id'] for member in data['Members'])
        return {uri.rsplit('/', 1)[-1]: uri for uri in uris}
class RedfishClient(AbstractRedfishClient):
client_cache = {}
def __init__(
    self,
    base_url,
    username=None,
    password=None,
    authtype=AuthMethod.BASIC,
    default_prefix=REDFISH_ROOT_PATH,
    verify=False,
    timeout=DEFAULT_REDFISH_TIMEOUT_SECS
):
    """Create a redfish client for *base_url*.

    Fetches the service root immediately and, when both *username* and
    *password* are supplied, logs in right away.

    :param base_url: e.g. 'https://1.2.3.4' (trailing slash stripped).
    :param authtype: AuthMethod member or its string value.
    :param verify: TLS certificate verification (off by default).
    """
    self.log_requests = False
    self.base_url = base_url.rstrip('/')
    self.username = username
    self.password = password
    self.authtype = AuthMethod.authtype_to_enum(authtype)
    self.prefix = default_prefix
    self.verify = verify
    # Auth state: populated by login() depending on authtype.
    self.auth = None
    self.auth_token = None
    self.session_key = None
    self.authorization_key = None
    self.session_location = None
    self.timeout = timeout
    # Per-instance response cache used by the *_cached* accessors.
    self.cache = {}
    self.root = self.get_root_object()
    try:
        self.login_url = self.root['Links']['Sessions']['@odata.id']
    except KeyError:
        # Service root does not advertise a sessions collection; use the default.
        self.login_url = REDFISH_SESSIONS
    if username and password:
        self.login()
@classmethod
def setup(cls):
    """One-time process setup: requests are made with verify=False, so
    suppress the resulting urllib3 warnings."""
    # Silence InsecureRequestWarning: Unverified HTTPS request is being made to host
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
@classmethod
def ping(cls, mgmt_ip, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """GET the redfish root at *mgmt_ip*; return the parsed JSON document,
    or None on HTTP failure or non-JSON response."""
    r = requests.get(f'https://{mgmt_ip.rstrip("/")}{REDFISH_ROOT_PATH}', verify=False, timeout=timeout)
    if r.ok:
        try:
            return r.json()
        except requests.exceptions.JSONDecodeError:
            pass
@classmethod
def is_redfish(cls, mgmt_ip, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Truthy when *mgmt_ip* answers with a redfish root document.

    Note: returns None (falsy) when ping() yields no document, False on
    timeout, else the boolean membership test.
    """
    try:
        data = cls.ping(mgmt_ip, timeout)
        return data and 'RedfishVersion' in data
    except requests.exceptions.Timeout:
        LOGGER.debug('Timed out querying redfish host %r', mgmt_ip)
        return False
@classmethod
def cache_set(cls, key, value):
    """Store a RedfishClient in the process-wide cache, keyed by mgmt IP."""
    cls.client_cache[key] = value
@classmethod
def cache_get(cls, mgmt_ip, jbof_query=None):
    """Return the cached client for *mgmt_ip*, creating one on a cache miss.

    On a miss the JBOF credentials are looked up (via *jbof_query* if
    supplied, else through a middleware client) and a new client is built
    and cached. Returns None when no matching JBOF row is found.
    """
    try:
        return cls.client_cache[mgmt_ip]
    except KeyError:
        redfish, jbofs = None, list()
        # Match the JBOF row by either of its management IPs.
        filters, options = [['OR', [['mgmt_ip1', '=', mgmt_ip], ['mgmt_ip2', '=', mgmt_ip]]]], dict()
        if jbof_query is not None:
            jbofs = jbof_query
        else:
            with Client(f'ws+unix://{MIDDLEWARE_RUN_DIR}/middlewared-internal.sock', py_exceptions=True) as c:
                jbofs = c.call('jbof.query')
        for jbof in filter_list(jbofs, filters, options):
            redfish = RedfishClient(f'https://{mgmt_ip}', jbof['mgmt_username'], jbof['mgmt_password'])
            RedfishClient.cache_set(mgmt_ip, redfish)
        return redfish
def _cached_fetch(self, cache_key, uri, use_cached=True):
    """Fetch *uri*, reduce it to a name->URI member map, cache under *cache_key*.

    NOTE(review): on an HTTP failure nothing is cached and None is returned
    implicitly — callers that immediately subscript the result would then
    raise TypeError; confirm whether _make_request raises on error instead.
    """
    if use_cached and cache_key in self.cache:
        return self.cache[cache_key]
    r = self.get(uri)
    if r.ok:
        self.cache[cache_key] = self._members(r.json())
        return self.cache[cache_key]
def chassis(self, use_cached=True):
    """Member map (name -> URI) of the redfish /Chassis collection."""
    return self._cached_fetch('chassis', '/Chassis', use_cached)
def managers(self, use_cached=True):
    """Member map (IOM name -> URI) of the redfish /Managers collection."""
    return self._cached_fetch('managers', '/Managers', use_cached)
def mgmt_ethernet_interfaces(self, iom, use_cached=True):
    """Member map of the management ethernet interfaces on the given IOM."""
    uri = f'{self.managers()[iom]}/EthernetInterfaces'
    return self._cached_fetch(f'{iom}/mgmt_ethernet_interfaces', uri, use_cached)
def mgmt_ip(self):
    """Resolve and return the IP of the host component of base_url."""
    return socket.gethostbyname(self.base_url.split('/')[-1])
def iom_eth_mgmt_ips(self, eth_uri):
    """Return the IPv4 addresses currently configured on *eth_uri*.

    Always bypasses the cache — management IPs can change.
    """
    netdata = self.get_uri(eth_uri, False)
    return [
        entry['Address']
        for entry in netdata.get('IPv4Addresses', [])
        if entry.get('Address')
    ]
def iom_mgmt_ips(self, iom):
    """Return all management IPv4 addresses across the IOM's ethernet interfaces."""
    addresses = []
    # Always re-read — management IPs can change.
    for eth_uri in self.mgmt_ethernet_interfaces(iom).values():
        addresses += self.iom_eth_mgmt_ips(eth_uri)
    return addresses
def mgmt_ips(self):
    """Return management IPv4 addresses for every IOM on this JBOF."""
    return [ip for iom in self.managers() for ip in self.iom_mgmt_ips(iom)]
def network_device_functions(self, iom, use_cached=True):
    """Member map of the IOM's network device functions (data-plane ports)."""
    return self._cached_fetch(
        f'{iom}/network_device_functions',
        f'/Chassis/{iom}/NetworkAdapters/1/NetworkDeviceFunctions', use_cached
    )
def fabric_ethernet_interfaces(self, use_cached=True):
    """Return a sorted list of fabric EthernetInterface URIs across all IOMs."""
    return sorted(
        f'{ndf_uri}/EthernetInterfaces/1'
        for iom in self.managers(use_cached)
        for ndf_uri in self.network_device_functions(iom, use_cached).values()
    )
def get_uri(self, uri, use_cached=True):
    """GET `uri` and return the decoded JSON body, memoized under the URI itself.

    NOTE(review): if the request fails and nothing was cached the final
    lookup raises KeyError; if `use_cached` is False and the request fails,
    a previously-cached (stale) value is returned.
    """
    if use_cached and uri in self.cache:
        return self.cache[uri]
    r = self.get(uri)
    if r.ok:
        self.cache[uri] = r.json()
    return self.cache[uri]

def get_root_object(self):
    """Fetch and return the Redfish service root document (never cached)."""
    return self.get(self.prefix).json()
def login(self, username=None, password=None, authtype=None):
    """Authenticate against the Redfish endpoint.

    Falls back to the credentials/auth type supplied at construction when
    arguments are omitted.  BASIC auth is verified with a probe request and
    the credential tuple remembered for subsequent calls; SESSION auth
    POSTs to the login URL and stores the returned X-Auth-Token, session Id
    and Location.

    Raises InvalidCredentialsError on bad credentials and ValueError for an
    unknown auth type.
    """
    self.username = username if username else self.username
    self.password = password if password else self.password
    if authtype:
        self.authtype = AuthMethod.authtype_to_enum(authtype)
    if self.authtype == AuthMethod.BASIC:
        # Probe with the supplied credentials; a 401 raises
        # InvalidCredentialsError from _make_request.
        self.get(self.login_url, auth=(self.username, self.password))
        # No exception thrown ...
        self.auth = (self.username, self.password)
    elif self.authtype == AuthMethod.SESSION:
        data = {'UserName': self.username, 'Password': self.password}
        resp = self.post(self.login_url, data=data)
        self.resp = resp
        if not resp.ok:
            raise InvalidCredentialsError('Could not authenticate credentials supplied')
        self.auth_token = resp.headers.get('X-Auth-Token')
        if self.auth_token:
            self.session_id = resp.json()['Id']
            self.session_location = resp.headers.get('Location')
    else:
        raise ValueError('Invalid auth supplied:', authtype)
def logout(self):
    """Invalidate the current authentication state.

    BASIC auth simply drops the stored credential tuple; SESSION auth also
    DELETEs the server-side session.  Username/password are cleared in
    both cases.
    """
    if self.authtype == AuthMethod.BASIC:
        self.auth = None
    elif self.authtype == AuthMethod.SESSION:
        self.delete(self.session_location)
        self.auth_token = self.session_id = self.session_location = None
    self.username = None
    self.password = None
# Thin HTTP verb wrappers; all real work happens in _make_request.
def get(self, url, **kwargs):
    return self._make_request('get', url, **kwargs)

def post(self, url, **kwargs):
    return self._make_request('post', url, **kwargs)

def put(self, url, **kwargs):
    return self._make_request('put', url, **kwargs)

def delete(self, url, **kwargs):
    return self._make_request('delete', url, **kwargs)
def _make_request(self, method, url, **kwargs):
    """
    Send the API request and return the `requests.Response`.

    `method`: one of 'get', 'post', 'put', 'delete'.
    `url`: the endpoint alone (i.e. /Chassis/IOM1/NetworkAdapters), a
        prefixed endpoint (/redfish/v1/...), or a full https:// URL; the
        base URL and/or prefix is prepended as needed.
    `kwargs['data']`: payload to send with the request; dict/list payloads
        are JSON-encoded, bytes are sent raw as octet-stream, anything else
        is form-urlencoded.
    `kwargs['auth']` / `kwargs['timeout']` / `kwargs['headers']`: optional
        per-request overrides of the client defaults.

    Raises ValueError for an unknown method and InvalidCredentialsError
    when the server answers 401.
    """
    # Explicit dispatch table keeps the supported verbs obvious and
    # preserves the ValueError contract for anything else.
    try:
        req = {
            'get': requests.get,
            'post': requests.post,
            'put': requests.put,
            'delete': requests.delete,
        }[method]
    except KeyError:
        raise ValueError(f'Invalid request type: {method}') from None

    if not url.startswith('https://'):
        if url.startswith(self.prefix):
            url = f'{self.base_url}{url}'
        else:
            url = f'{self.base_url}{self.prefix}{url}'

    auth = kwargs.get('auth', self.auth)
    timeout = kwargs.get('timeout', self.timeout)
    payload = kwargs.get('data', {})
    headers = kwargs.get('headers', {})
    if self.log_requests:
        LOGGER.debug('%r %r %r', method.upper(), url, payload)

    if payload:
        if isinstance(payload, (dict, list)):
            if headers.get('Content-Type', None) == 'multipart/form-data':
                # See python-redfish-library on how to handle if ever necessary
                raise ValueError('Currently do not support this content-type')
            headers['Content-Type'] = 'application/json'
            payload = json.dumps(payload)
        elif isinstance(payload, bytes):
            # Raw bytes pass through untouched.
            headers['Content-Type'] = 'application/octet-stream'
        else:
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
            payload = urlencode(payload)

    if self.authtype == AuthMethod.BASIC:
        r = req(url, auth=auth, verify=self.verify, headers=headers, data=payload, timeout=timeout)
    else:
        if self.auth_token:
            headers.update({'X-Auth-Token': self.auth_token})
        r = req(url, verify=self.verify, headers=headers, data=payload, timeout=timeout)
    if r.status_code == 401:
        raise InvalidCredentialsError('HTTP 401 Unauthorized returned: Invalid credentials supplied')
    return r
def configure_fabric_interface(
    self,
    uri,
    address,
    subnet_mask,
    dhcp_enabled=False,
    gateway='0.0.0.0',
    mtusize=5000,
    enabled=True
):
    """POST a static IPv4 configuration (address/mask/gateway/MTU) to the
    EthernetInterface at `uri` and return the response."""
    return self.post(uri, data={
        'DHCPv4': {'DHCPEnabled': dhcp_enabled},
        'IPv4StaticAddresses': [{'Address': address, 'Gateway': gateway, 'SubnetMask': subnet_mask}],
        'MTUSize': mtusize,
        'InterfaceEnabled': enabled,
    })

def link_status(self, uri):
    """Return the 'LinkStatus' value of the resource at `uri`, or None when
    the request fails (implicit return)."""
    r = self.get(uri)
    if r.ok:
        return r.json().get('LinkStatus')
class AsyncRedfishClient(AbstractRedfishClient):
"""
Asynchronous Redfish client which supports multipath.
Various instantiation mechanisms are available, including `cache_get` where
objects will be cached for re-use.
"""
# Cache where objects will be stored, keyed by JBOF UUID.
client_cache = {}
def __init__(
    self,
    base_urls,
    username=None,
    password=None,
    authtype=AuthMethod.BASIC,
    default_prefix=REDFISH_ROOT_PATH,
    timeout=DEFAULT_REDFISH_TIMEOUT_SECS
):
    """Initialize state only; use the async `create` classmethod factory,
    which also fetches the service root and derives the login URL."""
    self.log_requests = False
    # One base URL per management path; trailing slashes normalized away.
    self.base_urls = [base_url.rstrip('/') for base_url in base_urls]
    self.username = username
    self.password = password
    self.authtype = AuthMethod.authtype_to_enum(authtype)
    self.prefix = default_prefix
    self.auth = None
    self.auth_token = None
    self.session_key = None
    self.authorization_key = None
    # base_url -> session Location header (used for SESSION-auth logout)
    self.session_location = {}
    self.timeout = aiohttp.ClientTimeout(total=timeout)
    # Response/member cache keyed by URI or logical name
    self.cache = {}
    self.root = None
    # base_url -> live aiohttp.ClientSession
    self._sessions = {}
    # Implement a little value cache
    self._attributes = {}
    # base_urls whose failures were already logged (avoids log spam)
    self.logged = set()
def _add_session(self, base_url, session):
    """Add a session corresponding to the base_url"""
    self._sessions[base_url] = session

async def _del_session(self, base_url, session):
    """This is called when a session is no longer responding"""
    if session:
        # Don't attempt to logout. The session is kaput
        await session.close()
    # Forget the path so sessions() offers it as a login candidate again.
    if base_url in self._sessions:
        del self._sessions[base_url]
def sessions(self):
    """Yield (base_url, session) pairs, live sessions before dead ones.

    Paths with no established session are yielded with session=None so the
    caller can attempt a fresh login on them.
    """
    live = set(self._sessions.keys())
    for base_url in live:
        yield base_url, self._sessions[base_url]
    for base_url in set(self.base_urls) - live:
        yield base_url, None
def get_attribute(self, name, defval=None):
    """Retrieve some data associated with the client (or `defval`)."""
    return self._attributes.get(name, defval)

def set_attribute(self, name, value):
    """Associate some arbitrary caller-defined data with the client."""
    self._attributes[name] = value
@classmethod
async def create(cls,
                 base_urls,
                 username=None,
                 password=None,
                 authtype=AuthMethod.BASIC,
                 default_prefix=REDFISH_ROOT_PATH,
                 timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Async factory to create an AsyncRedfishClient object.

    Fetches the service root up-front and derives the sessions (login) URL
    from it, falling back to the standard REDFISH_SESSIONS path.
    """
    self = cls(base_urls, username, password, authtype, default_prefix, timeout)
    self.root = await self.get_root_object()
    try:
        self.login_url = self.root['Links']['Sessions']['@odata.id']
    except KeyError:
        self.login_url = REDFISH_SESSIONS
    return self

@classmethod
def cache_set(cls, key, value):
    """Store a client in the class-level cache under `key` (a JBOF UUID)."""
    cls.client_cache[key] = value

@classmethod
def cache_unset(cls, key):
    """Remove the cached client for `key`; raises KeyError when absent."""
    del cls.client_cache[key]
@classmethod
async def cache_get(cls, uuid, jbof_query=None):
    """Fetch AsyncRedfishClient object from cache, creating if necessary.

    On a cache miss the JBOF record is looked up (from `jbof_query` when
    supplied, otherwise via an internal middleware call), a client is built
    from its management IPs/credentials and cached under the UUID.
    Returns None when no JBOF matches the UUID.
    """
    try:
        return cls.client_cache[uuid]
    except KeyError:
        redfish, jbofs = None, list()
        filters, options = [['uuid', '=', uuid]], dict()
        if jbof_query is not None:
            jbofs = jbof_query
        else:
            with Client(f'ws+unix://{MIDDLEWARE_RUN_DIR}/middlewared-internal.sock', py_exceptions=True) as c:
                jbofs = c.call('jbof.query', filters)
        # filter_list re-applies the filter in case `jbof_query` was unfiltered.
        for jbof in filter_list(jbofs, filters, options):
            base_urls = []
            for key in ['mgmt_ip1', 'mgmt_ip2']:
                if mgmt_ip := jbof.get(key):
                    base_urls.append(f'https://{mgmt_ip}')
            redfish = await cls.create(base_urls, jbof['mgmt_username'], jbof['mgmt_password'])
            cls.cache_set(uuid, redfish)
        return redfish
async def _login(self, base_url, username=None, password=None, authtype=None):
    """Login using the specified path and credentials.

    A short-lived session validates the credentials; on success a fresh,
    persistent aiohttp.ClientSession carrying the auth (BasicAuth or
    X-Auth-Token header) is created, registered in the session map and
    returned.

    Raises InvalidCredentialsError on authentication failure and ValueError
    for an unknown auth type.
    """
    self.username = username if username else self.username
    self.password = password if password else self.password
    if authtype:
        self.authtype = AuthMethod.authtype_to_enum(authtype)
    async with aiohttp.ClientSession(base_url, timeout=self.timeout) as session:
        if self.authtype == AuthMethod.BASIC:
            auth = aiohttp.BasicAuth(self.username, self.password)
            async with session.get(self.login_url, ssl=False, auth=auth) as response:
                if not response.ok:
                    raise InvalidCredentialsError('Could not authenticate credentials supplied')
            newsession = aiohttp.ClientSession(base_url, timeout=self.timeout, raise_for_status=True, auth=auth)
        elif self.authtype == AuthMethod.SESSION:
            data = {'UserName': self.username, 'Password': self.password}
            async with session.post(self.login_url, ssl=False, json=data) as response:
                if not response.ok:
                    raise InvalidCredentialsError('Could not authenticate credentials supplied')
                auth_token = response.headers.get('X-Auth-Token')
                if auth_token:
                    # Save the Location for logout purposes
                    self.session_location[base_url] = response.headers.get('Location')
                    newsession = aiohttp.ClientSession(base_url, timeout=self.timeout, raise_for_status=True, headers={'X-Auth-Token': auth_token})
                # NOTE(review): if the server returns no X-Auth-Token,
                # `newsession` is unbound and the code below raises
                # NameError — confirm this path cannot occur.
        else:
            raise ValueError('Invalid auth supplied:', authtype)
    # Save the session for reuse
    self._add_session(base_url, newsession)
    # Clear any silenced exception logging
    try:
        self.logged.remove(base_url)
    except KeyError:
        pass
    return newsession
async def _logout(self, base_url, session):
    """Tear down authentication for one path.

    SESSION auth DELETEs the server-side session using the Location saved
    at login time; BASIC auth just clears the stored credential tuple.
    """
    if self.authtype == AuthMethod.BASIC:
        self.auth = None
    elif self.authtype == AuthMethod.SESSION:
        if location := self.session_location.get(base_url):
            async with session.delete(location, ssl=False) as response:
                if response.ok:
                    try:
                        del self.session_location[base_url]
                    except KeyError:
                        # Should not occur, but protect in case parallel calls
                        pass
async def get_root_object(self):
    """Fetch the root object.

    Tries each base URL in turn with a throw-away session.  A timeout on
    the *last* URL raises CallError(ETIMEDOUT); any other failure moves on
    to the next URL.  Raises CallError(EBADMSG) when every path failed.
    """
    # We're not going to try to be clever about which base_urls are used for this.
    base_url_count = len(self.base_urls)
    for index, base_url in enumerate(self.base_urls, 1):
        try:
            async with aiohttp.ClientSession(base_url, timeout=self.timeout, raise_for_status=True) as session:
                async with session.get(self.prefix, ssl=False) as response:
                    return await response.json()
        except asyncio.TimeoutError:
            if index == base_url_count:
                raise CallError('Connection timed out', errno.ETIMEDOUT)
            else:
                continue
        except Exception:
            continue
    raise CallError('Failed to obtain root object', errno.EBADMSG)
async def get(self, uri):
    """GET `uri` (prefix added if missing) and return the decoded JSON body.

    Iterates the available paths — healthy sessions first — logging in on
    paths without one.  A path that times out or errors is dropped from
    the session map and the next is tried.  Raises CallError(EBADMSG) when
    every path failed.
    """
    if not uri.startswith(self.prefix):
        uri = f'{self.prefix}{uri}'
    # Iterate over the available paths
    for base_url, session in self.sessions():
        try:
            if not session:
                session = await self._login(base_url)
            async with session.get(uri, ssl=False) as response:
                if response.ok:
                    return await response.json()
        except asyncio.TimeoutError:
            LOGGER.debug('Timed out GET %r: %r', base_url, uri)
            await self._del_session(base_url, session)
            continue
        except Exception:
            # Only log the first failure per path to avoid log spam.
            if base_url not in self.logged:
                LOGGER.debug('Failed GET %r: %r', base_url, uri, exc_info=True)
                self.logged.add(base_url)
            await self._del_session(base_url, session)
            continue
    raise CallError(f'Failed to GET {uri}:', errno.EBADMSG)
async def post(self, uri, **kwargs):
    """POST `kwargs['data']` as JSON to `uri` (prefix added if missing).

    Same multipath/retry behavior as `get`; raises CallError(EBADMSG) when
    every path failed.
    """
    if not uri.startswith(self.prefix):
        uri = f'{self.prefix}{uri}'
    payload = kwargs.get('data', {})
    # Iterate over the available paths
    for base_url, session in self.sessions():
        try:
            if not session:
                session = await self._login(base_url)
            async with session.post(uri, ssl=False, json=payload) as response:
                if response.ok:
                    return await response.json()
        except asyncio.TimeoutError:
            LOGGER.debug('Timed out POST %r: %r', base_url, uri)
            await self._del_session(base_url, session)
            continue
        except Exception:
            # Only log the first failure per path to avoid log spam.
            if base_url not in self.logged:
                LOGGER.debug('Failed POST %r: %r', base_url, uri, exc_info=True)
                self.logged.add(base_url)
            await self._del_session(base_url, session)
            continue
    raise CallError(f'Failed to POST {uri}:', errno.EBADMSG)
async def close(self):
    """Log out of and close every open session, then clear the session map."""
    for base_url, session in self._sessions.items():
        await self._logout(base_url, session)
        await session.close()
    self._sessions = {}

async def _cached_fetch(self, cache_key, uri, use_cached=True):
    """GET `uri`, reduce to collection members and memoize under `cache_key`.

    NOTE(review): a falsy response body with nothing previously cached
    makes the final lookup raise KeyError.
    """
    if use_cached and cache_key in self.cache:
        return self.cache[cache_key]
    r = await self.get(uri)
    if r:
        self.cache[cache_key] = self._members(r)
    return self.cache[cache_key]
async def chassis(self, use_cached=True):
    """Return the /Chassis collection members, optionally cached."""
    return await self._cached_fetch('chassis', '/Chassis', use_cached)

async def managers(self, use_cached=True):
    """Return the /Managers collection members, optionally cached."""
    return await self._cached_fetch('managers', '/Managers', use_cached)
| 22,674 | Python | .py | 516 | 32.813953 | 143 | 0.593747 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,047 | target.py | truenas_middleware/src/middlewared/middlewared/plugins/jbof/redfish/target.py | import requests
import urllib3
from middlewared.service import Service
DEFAULT_REDFISH_TIMEOUT_SECS = 10
HEADER = {'Content-Type': 'application/json', 'Vary': 'accept'}
REDFISH_ROOT_PATH = '/redfish/v1'
class JBOFRedfishService(Service):
class Config:
namespace = 'jbof.redfish'
private = True
def ping(self, mgmt_ip, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """GET the Redfish service root at `mgmt_ip` and return the decoded
    JSON document, or None when the response is not valid JSON."""
    # REDFISH_ROOT_PATH already begins with '/'; the previous f-string
    # inserted an extra slash (https://ip//redfish/v1).
    base_url = f'https://{mgmt_ip}{REDFISH_ROOT_PATH}'
    r = requests.get(base_url, verify=False, timeout=timeout)
    try:
        return r.json()
    except requests.exceptions.JSONDecodeError:
        return None
def is_redfish(self, mgmt_ip, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Return True when `mgmt_ip` answers the Redfish root with a document
    containing 'RedfishVersion'; False on timeout or non-Redfish answer."""
    try:
        data = self.ping(mgmt_ip, timeout)
        if data:
            return "RedfishVersion" in data
    except requests.exceptions.Timeout:
        # Only timeouts are swallowed; other request errors propagate.
        self.logger.debug('Failed to query redfish host %s', mgmt_ip)
    return False
def _members(self, mgmt_ip, mgmt_username, mgmt_password, path, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Return the trailing name component of each collection member under `path`."""
    reply = self.make_request(mgmt_ip, 'get', path,
                              username=mgmt_username, password=mgmt_password, timeout=timeout)
    return [
        member['@odata.id'].rsplit('/', 1)[-1]
        for member in reply.get('Members', [])
        if member['@odata.id'].startswith(path)
    ]
def managers(self, mgmt_ip, mgmt_username, mgmt_password, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Return a list containing the names of the IO Managers."""
    return self._members(mgmt_ip, mgmt_username, mgmt_password, f'{REDFISH_ROOT_PATH}/Managers', timeout)

def ethernet_interfaces(self, mgmt_ip, mgmt_username, mgmt_password, manager, timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Return a list containing the names of the EthernetInterfaces in an IO Manager."""
    return self._members(mgmt_ip, mgmt_username, mgmt_password,
                         f'{REDFISH_ROOT_PATH}/Managers/{manager}/EthernetInterfaces', timeout)

def ethernet_interface(self, mgmt_ip, mgmt_username, mgmt_password, manager, interface,
                       timeout=DEFAULT_REDFISH_TIMEOUT_SECS):
    """Return the decoded JSON object describing one EthernetInterface of an IO Manager."""
    return self.make_request(mgmt_ip, 'get',
                             f'{REDFISH_ROOT_PATH}/Managers/{manager}/EthernetInterfaces/{interface}',
                             username=mgmt_username,
                             password=mgmt_password,
                             timeout=timeout)
def make_request(self, mgmt_ip, _type, url, **kwargs):
    """
    Send the API request to the Redfish endpoint at `mgmt_ip` and return
    the decoded JSON response body.

    `_type`: String representing what type of http request to make
        (i.e. get, put, post, delete)
    `url`: String representing the api endpoint to send the https
        request. Can provide the endpoint by itself
        (i.e. /Chassis/IOM1/NetworkAdapters) and the correct
        prefix will be added or you can provide the full url to the
        endpoint (i.e. http://ip-here/redfish/v1/endpoint-here)
    `kwargs['data']`: Dict representing the "payload" to send along
        with the http request.
    `kwargs['auth']` or `kwargs['username']`/`kwargs['password']`:
        credentials (defaults to Admin with an empty password).
    `kwargs['timeout']`: per-request timeout in seconds.
    """
    # Explicit dispatch table; unknown verbs keep raising ValueError.
    try:
        req = {
            'get': requests.get,
            'post': requests.post,
            'put': requests.put,
            'delete': requests.delete,
        }[_type]
    except KeyError:
        raise ValueError(f'Invalid request type: {_type}') from None
    if not url.startswith('https://'):
        if url.startswith(REDFISH_ROOT_PATH):
            url = f'https://{mgmt_ip}{url}'
        else:
            url = f'https://{mgmt_ip}{REDFISH_ROOT_PATH}{url}'
    auth = kwargs.get('auth', None)
    if not auth:
        auth = (kwargs.get('username', 'Admin'), kwargs.get('password', ''))
    # Previously the `timeout` kwarg was accepted (and threaded through by
    # every caller) but never forwarded, so a request could hang forever.
    timeout = kwargs.get('timeout', DEFAULT_REDFISH_TIMEOUT_SECS)
    return req(url, auth=auth, verify=False, data=kwargs.get('data', {}), timeout=timeout).json()
async def setup(middleware):
    """Plugin setup hook: suppress urllib3's warning for the unverified
    (verify=False) HTTPS requests this service makes to JBOF hardware."""
    # Silence InsecureRequestWarning: Unverified HTTPS request is being made to host
    requests.packages.urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
| 4,471 | Python | .py | 84 | 41.392857 | 120 | 0.61232 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,048 | utils_snmp_user.py | truenas_middleware/src/middlewared/middlewared/plugins/snmp_/utils_snmp_user.py | import os
import subprocess
from cryptography.fernet import Fernet
from logging import getLogger
from middlewared.service import CallError
from middlewared.utils.crypto import generate_string
LOGGER = getLogger(__name__)
class SNMPSystem():
    """Module-level holder for the internal SNMPv3 'system' user state."""
    # This is an snmpd auto-generated file. We use it to create the SNMPv3 users.
    PRIV_CONF = '/var/lib/snmp/snmpd.conf'
    # SNMP System User authentication: Fernet-encrypted password blob
    PRIV_KEY = None
    # SNMP System User info ('key' holds the Fernet key protecting PRIV_KEY)
    SYSTEM_USER = {
        'name': 'snmpSystemUser', 'auth_type': 'SHA', 'key': None, 'size': 0
    }
def _get_authuser_secret():
    """
    Get the auth user saved secret.
    Internal helper function for use by this module.
    Return decoded string ('' when no key or no secret is registered).
    """
    secret = ""
    if not SNMPSystem.SYSTEM_USER['key']:
        # No system user key registered
        LOGGER.debug("No system user key is registered")
        return secret
    if SNMPSystem.PRIV_KEY:
        # Decrypt the stored blob with the class-held Fernet key.
        secret = Fernet(SNMPSystem.SYSTEM_USER['key']).decrypt(SNMPSystem.PRIV_KEY).decode()
    return secret
def _set_authuser_secret(secret):
    """
    Save the auth user secret.
    Internal helper function for use by this module.
    INPUT: ascii string (not encoded)
    """
    # Encrypt with the system user's Fernet key and stash on the class.
    SNMPSystem.PRIV_KEY = Fernet(SNMPSystem.SYSTEM_USER['key']).encrypt(secret.encode())  # noqa: (F841, assigned but not used)
    return
def _add_system_user():
    """
    Add the v3 system user.
    For internal use by this module.
    NOTES: SNMP must be stopped before calling.
           The private config file is assumed to be in a regenerated state with no v3 users
    """
    # Fresh Fernet key plus a random 32-char password for the system user.
    SNMPSystem.SYSTEM_USER['key'] = Fernet.generate_key()
    auth_pwd = generate_string(32)
    priv_config = {
        'v3_username': SNMPSystem.SYSTEM_USER['name'],
        'v3_authtype': SNMPSystem.SYSTEM_USER['auth_type'],
        'v3_password': f"{auth_pwd}"
    }
    add_snmp_user(priv_config)
    # Remember the password (encrypted) for later snmpusm/snmpwalk calls.
    _set_authuser_secret(auth_pwd)
def add_snmp_user(snmp):
    """
    Build the createUser message and add it to the private config file.
    NOTE: The SNMP daemon should be stopped before calling this routine and
          the new user will be available after starting SNMP.
    """
    # The private config file must exist, i.e. SNMP must have been started at least once
    if not os.path.exists(SNMPSystem.PRIV_CONF):
        return
    # Build the 'createUser' message: auth proto first, then optional privacy proto
    create_v3_user = f"createUser {snmp['v3_username']} "
    user_pwd = snmp['v3_password']
    create_v3_user += f'{snmp["v3_authtype"]} "{user_pwd}" '
    if snmp.get('v3_privproto'):
        user_phrase = snmp['v3_privpassphrase']
        create_v3_user += f'{snmp["v3_privproto"]} "{user_phrase}" '
    create_v3_user += '\n'
    # Example: createUser newPrivUser MD5 "abcd1234" DES "abcd1234"
    with open(SNMPSystem.PRIV_CONF, 'a') as f:
        f.write(create_v3_user)
def delete_snmp_user(user):
    """
    Delete the SNMPv3 user
    RETURN: stdout message
    NOTE: SNMP must be running for this call to succeed.
    Raises CallError when the system-user secret is unavailable.
    """
    if pwd := _get_authuser_secret():
        # snmpusm -v3 -l authPriv -u JoeUser -a MD5 -A "abcd1234" -x AES -X "A pass phrase" localhost delete JoeUser
        cmd = [
            'snmpusm', '-v3', '-u', f'{SNMPSystem.SYSTEM_USER["name"]}',
            '-l', 'authNoPriv', '-a', f'{SNMPSystem.SYSTEM_USER["auth_type"]}', '-A', f'{pwd}',
            'localhost', 'delete', user
        ]
        # This call will timeout if SNMP is not running
        # NOTE(review): the CompletedProcess is discarded, so command
        # failures are silent — confirm that is intended.
        subprocess.run(cmd, capture_output=True)
    else:
        raise CallError
def get_users_cmd():
    """Return the snmpwalk argv list that enumerates SNMPv3 users, or []
    when the system-user secret is unavailable."""
    pwd = _get_authuser_secret()
    if not pwd:
        LOGGER.debug("Unable to get authuser secret")
        return []
    # snmpwalk -v3 -u ixAuthUser -l authNoPriv -a MD5 -A "abcd1234" localhost iso.3.6.1.6.3.15.1.2.2.1.3
    return [
        'snmpwalk', '-v3', '-u', f'{SNMPSystem.SYSTEM_USER["name"]}',
        '-l', 'authNoPriv', '-a', f'{SNMPSystem.SYSTEM_USER["auth_type"]}', '-A', f'{pwd}',
        'localhost', 'iso.3.6.1.6.3.15.1.2.2.1.3'
    ]
| 4,059 | Python | .py | 101 | 34.009901 | 127 | 0.64664 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,049 | m_series_bios.py | truenas_middleware/src/middlewared/middlewared/plugins/hardware/m_series_bios.py | import datetime
from middlewared.service import Service
from middlewared.utils.functools_ import cache
class MseriesBiosService(Service):
    """Detect whether an M-series platform runs a BIOS older than the
    minimum qualified release for its chassis model."""

    class Config:
        private = True
        namespace = 'mseries.bios'

    @cache
    def is_old_version(self):
        """Return True when the BIOS release date predates the minimum for
        this chassis, False when current; implicit None for non-M-series
        chassis or when no release date is available."""
        chassis = self.middleware.call_sync("truenas.get_chassis_hardware")
        # Minimum qualified BIOS release date per chassis model.
        bios_dates = {
            "TRUENAS-M40": datetime.date(2020, 2, 20),
            "TRUENAS-M50": datetime.date(2020, 12, 3),
            "TRUENAS-M60": datetime.date(2020, 12, 3),
        }
        min_bios_date = next((v for k, v in bios_dates.items() if chassis.startswith(k)), None)
        if min_bios_date and (bios := self.middleware.call_sync("system.dmidecode_info")["bios-release-date"]):
            # assumes dmidecode_info yields 'bios-release-date' as a
            # datetime.date (a raw string would make `<` raise) — TODO confirm
            return bios < min_bios_date
| 782 | Python | .py | 18 | 35.722222 | 111 | 0.649539 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,050 | mem_info.py | truenas_middleware/src/middlewared/middlewared/plugins/hardware/mem_info.py | from pathlib import Path
from middlewared.service import Service
from middlewared.schema import accepts, returns, Dict
class HardwareMemoryService(Service):
class Config:
namespace = 'hardware.memory'
cli_namespace = 'system.hardware.memory'
@accepts()
@returns(Dict('mem_ctrl', additional_attrs=True))
def error_info(self):
    """Return EDAC corrected/uncorrected error counters per memory
    controller and per DIMM/rank; empty dict when EDAC is unavailable."""
    results = {}
    mc_path = Path('/sys/devices/system/edac/mc')
    if not mc_path.exists():
        return results
    dimm_or_rank = 'dimm'
    mc_idx = 0
    for mc in filter(lambda x: x.is_dir() and x.name.startswith('mc'), mc_path.iterdir()):
        mc_info = {mc.name: {}}
        if mc_idx == 0 and not (mc / f'{dimm_or_rank}{mc_idx}').exists():
            # AMD systems use "rank" as top-level dir while Intel uses dimm
            dimm_or_rank = 'rank'
        # top-level memory controller information
        for key, _file in (
            ('corrected_errors', 'ce_count'),
            ('uncorrected_errors', 'ue_count'),
            ('corrected_errors_with_no_dimm_info', 'ce_noinfo_count'),
            ('uncorrected_errors_with_no_dimm_info', 'ue_noinfo_count'),
        ):
            try:
                value = int((mc / _file).read_text().strip())
            except (FileNotFoundError, ValueError):
                # Counter file missing or unparsable: report as unknown.
                value = None
            mc_info[mc.name].update({key: value})
        # specific dimm module memory information
        for dimm in filter(lambda x: x.is_dir() and x.name.startswith(dimm_or_rank), mc.iterdir()):
            # looks like /sys/devices/edac/mc0/dimm(or rank){0/1/2}
            mc_info[mc.name][dimm.name] = {}
            for key, _file in (
                ('corrected_errors', 'dimm_ce_count'),
                ('uncorrected_errors', 'dimm_ue_count'),
            ):
                try:
                    value = int((dimm / _file).read_text().strip())
                except (FileNotFoundError, ValueError):
                    value = None
                mc_info[mc.name][dimm.name].update({key: value})
        mc_idx += 1
        results.update(mc_info)
    return results
| 2,265 | Python | .py | 49 | 32.653061 | 103 | 0.529946 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,051 | m_series_nvdimm.py | truenas_middleware/src/middlewared/middlewared/plugins/hardware/m_series_nvdimm.py | import glob
import re
import subprocess
from middlewared.service import Service
class MseriesNvdimmService(Service):
class Config:
private = True
namespace = 'mseries.nvdimm'
def run_ixnvdimm(self, nvmem_dev):
    """Run ixnvdimm against `nvmem_dev` twice and return a tuple of
    (full status output, raw SPECREV register output); stderr is folded
    into stdout for both invocations."""
    out = subprocess.run(
        ["ixnvdimm", nvmem_dev],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        encoding="utf-8",
        errors="ignore",
    ).stdout
    specrev = subprocess.run(
        ['ixnvdimm', '-r', nvmem_dev, 'SPECREV'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        encoding="utf-8",
        errors="ignore",
    ).stdout
    return out, specrev
def get_running_firmware_vers_and_detect_old_bios(self, output):
    """Parse the running firmware slot/version from ixnvdimm output.

    Returns {'running_firmware': 'X.Y' or None, 'old_bios': bool}; an
    output lacking slot information leaves old_bios True.
    """
    result = {'running_firmware': None, 'old_bios': True}
    if m := re.search(r"selected: [0-9]+ running: ([0-9]+)", output):
        running_slot = int(m.group(1))
        # The slotN line encodes the version as two adjacent digits, e.g. '26' -> 2.6
        if m := re.search(rf"slot{running_slot}: ([0-9])([0-9])", output):
            result['running_firmware'] = f"{m.group(1)}.{m.group(2)}"
            result['old_bios'] = False
    return result
def get_module_health(self, output):
    """Extract the 'Module Health' value from ixnvdimm output, or None when absent."""
    match = re.search(r"Module Health:[^\n]+", output)
    if match:
        return match.group().split("Module Health: ")[-1].strip()
def vendor_info(self, output):
    """Look up static vendor/part metadata for the NVDIMM described by
    ixnvdimm `output`.

    The vendor and subvendor id lines are parsed out of the output and
    combined into a key of the form
    '<vendor>_<device>_<rev>_<subvendor>_<subdevice>_<subrev>'; unknown
    hardware yields the all-None 'unknown' entry.
    """
    mapping = {
        '0x2c80_0x4e32_0x31_0x3480_0x4131_0x01': {
            'vendor': '0x2c80', 'device': '0x4e32', 'rev_id': '0x31',
            'subvendor': '0x3480', 'subdevice': '0x4131', 'subrev_id': '0x01',
            'part_num': '18ASF2G72PF12G6V21AB',
            'size': '16GB', 'clock_speed': '2666MHz',
            'qualified_firmware': ['2.6'],
            'recommended_firmware': '2.6',
        },
        '0x2c80_0x4e36_0x31_0x3480_0x4231_0x02': {
            'vendor': '0x2c80', 'device': '0x4e36', 'rev_id': '0x31',
            'subvendor': '0x3480', 'subdevice': '0x4231', 'subrev_id': '0x02',
            'part_num': '18ASF2G72PF12G9WP1AB',
            'size': '16GB', 'clock_speed': '2933MHz',
            'qualified_firmware': ['2.2'],
            'recommended_firmware': '2.2',
        },
        '0x2c80_0x4e33_0x31_0x3480_0x4231_0x01': {
            'vendor': '0x2c80', 'device': '0x4e33', 'rev_id': '0x31',
            'subvendor': '0x3480', 'subdevice': '0x4231', 'subrev_id': '0x01',
            'part_num': '36ASS4G72PF12G9PR1AB',
            'size': '32GB', 'clock_speed': '2933MHz',
            'qualified_firmware': ['2.4'],
            'recommended_firmware': '2.4',
        },
        '0xce01_0x4e38_0x33_0xc180_0x4331_0x01': {
            'vendor': '0xce01', 'device': '0x4e38', 'rev_id': '0x33',
            'subvendor': '0xc180', 'subdevice': '0x4331', 'subrev_id': '0x01',
            'part_num': 'AGIGA8811-016ACA',
            'size': '16GB', 'clock_speed': '2933MHz',
            'qualified_firmware': ['0.8'],
            'recommended_firmware': '0.8',
        },
        '0xce01_0x4e42_0x31_0xc180_0x4331_0x01': {
            'vendor': '0xce01', 'device': '0x4e42', 'rev_id': '0x31',
            'subvendor': '0xc180', 'subdevice': '0x4331', 'subrev_id': '0x01',
            'part_num': 'AGIGA8811-016BCA',
            'size': '16GB', 'clock_speed': '2933MHz',
            'qualified_firmware': ['3.0'],
            'recommended_firmware': '3.0',
        },
        '0xce01_0x4e39_0x34_0xc180_0x4331_0x01': {
            'vendor': '0xce01', 'device': '0x4e39', 'rev_id': '0x34',
            'subvendor': '0xc180', 'subdevice': '0x4331', 'subrev_id': '0x01',
            'part_num': 'AGIGA8811-032ACA',
            'size': '32GB', 'clock_speed': '2933MHz',
            'qualified_firmware': ['0.8'],
            'recommended_firmware': '0.8',
        },
        'unknown': {
            'vendor': None, 'device': None, 'rev_id': None,
            'subvendor': None, 'subdevice': None, 'subrev_id': None,
            'part_num': None,
            'size': None, 'clock_speed': None,
            'qualified_firmware': [],
            'recommended_firmware': None,
        }
    }
    result = mapping['unknown']
    vend_key = subvend_key = None
    if (match := re.search(r'vendor: (?P<v>\w+) device: (?P<d>\w+) revision: (?P<r>\w+)', output)):
        vend_key = '_'.join([f'0x{v}' for v in match.groupdict().values()])
    if (match := re.search(r'subvendor: (?P<v>\w+) subdevice: (?P<d>\w+) subrevision: (?P<r>\w+)', output)):
        subvend_key = '_'.join([f'0x{v}' for v in match.groupdict().values()])
    if all((vend_key, subvend_key)):
        result = mapping.get(f'{vend_key}_{subvend_key}', mapping['unknown'])
    return result
def health_info(self, output):
    """Parse the health-related fields from ixnvdimm output.

    Bitfield entries ('<bit> <flag,flag,...>') become {bit: [flags]} dicts;
    scalar entries keep the first whitespace-separated token of the value.
    Missing fields retain their defaults ({} or None).
    """
    result = {
        'critical_health_info': {},
        'nvm_health_info': {},
        'nvm_error_threshold_status': {},
        'nvm_warning_threshold_status': {},
        'nvm_lifetime': None,
        'nvm_temperature': None,
        'es_lifetime': None,
        'es_temperature': None,
    }
    # key -> label whose value is a '<bit> <comma,separated,flags>' pair
    bitfield_fields = (
        ('critical_health_info', r'Critical Health Info: (.*)'),
        ('nvm_health_info', r'Module Health: (.*)'),
        ('nvm_error_threshold_status', r'Error Threshold Status: (.*)'),
        ('nvm_warning_threshold_status', r'Warning Threshold Status: (.*)'),
    )
    for key, pattern in bitfield_fields:
        if m := re.search(pattern, output):
            bit, vals = m.group(1).split(' ', 1)
            result[key][bit] = [i for i in vals.lstrip('<').rstrip('>').split(',') if i]
    # key -> label whose value's first token is kept verbatim
    scalar_fields = (
        ('nvm_lifetime', r'NVM Lifetime: (.*)'),
        ('nvm_temperature', r'Module Current Temperature: (.*)'),
        ('es_lifetime', r'ES Lifetime Percentage: (.*)'),
        ('es_temperature', r'ES Current Temperature: (.*)'),
    )
    for key, pattern in scalar_fields:
        if m := re.search(pattern, output):
            result[key] = m.group(1).split(' ', 1)[0]
    return result
def state_flags(self, nmem):
    """Return the NFIT state flags for the given nmem device path
    (empty list when the sysfs node is missing or unreadable)."""
    try:
        with open(f'/sys/bus/nd/devices/{nmem.removeprefix("/dev/")}/nfit/flags') as f:
            state_flags = f.read().strip().split()
    except Exception:
        # Best-effort: some platforms lack this sysfs node entirely.
        state_flags = []
    return state_flags
def info(self):
    """Return a list of dicts describing each NVDIMM on an M-series system.

    Non-M-series platforms get an empty list.  Any unexpected failure is
    logged and the entries collected so far are returned.
    """
    results = []
    sys = ("TRUENAS-M40", "TRUENAS-M50", "TRUENAS-M60")
    if not self.middleware.call_sync("truenas.get_chassis_hardware").startswith(sys):
        return results
    try:
        for nmem in glob.glob("/dev/nmem*"):
            output, specrev = self.run_ixnvdimm(nmem)
            info = {
                # removeprefix handles multi-digit device numbers
                # (/dev/nmem10); indexing a single character did not.
                'index': int(nmem.removeprefix('/dev/nmem')),
                'dev': nmem.removeprefix('/dev/'),
                'dev_path': nmem,
                'specrev': int(specrev.strip()),
                'state_flags': self.state_flags(nmem),
            }
            info.update(self.health_info(output))
            info.update(self.vendor_info(output))
            info.update(self.get_running_firmware_vers_and_detect_old_bios(output))
            results.append(info)
    except Exception:
        self.logger.error("Unhandled exception obtaining nvdimm info", exc_info=True)
    # Always return a list: previously the exception path fell through and
    # implicitly returned None, which callers iterating the result trip over.
    return results
| 8,094 | Python | .py | 165 | 36.121212 | 115 | 0.50822 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,052 | cpu.py | truenas_middleware/src/middlewared/middlewared/plugins/hardware/cpu.py | from pathlib import Path
from middlewared.service import Service
from middlewared.service_exception import ValidationError
from middlewared.schema import accepts, returns, Dict, Str
from middlewared.utils.functools_ import cache
class HardwareCpuService(Service):
    """Query and configure the CPU frequency-scaling governor via sysfs."""

    class Config:
        namespace = 'hardware.cpu'
        cli_namespace = 'system.hardware.cpu'

    @accepts()
    @returns(Dict('governor', additional_attrs=True))
    @cache
    def available_governors(self):
        """Return available cpu governors as a {name: name} dict, cached
        for the process lifetime; empty dict when unsupported."""
        try:
            with open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors') as f:
                return {i: i for i in f.read().split()}
        except FileNotFoundError:
            # doesn't support changing governor
            return dict()

    @accepts()
    @returns(Str('governor'))
    def current_governor(self):
        """Returns currently set cpu governor (implicit None when unsupported)"""
        try:
            with open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor') as f:
                return f.read().strip()
        except FileNotFoundError:
            # doesn't support changing governor
            return

    @accepts(Str('governor', required=True))
    @returns()
    def set_governor(self, governor):
        """Set the cpu governor to `governor` on all cpus.

        Raises ValidationError when governor changing is unsupported or
        `governor` is not an available choice; no-op when already active.
        """
        curr_gov = self.current_governor()
        if curr_gov is None:
            raise ValidationError('hardware.cpu.governor', 'Changing cpu governor is not supported')
        elif curr_gov == governor:
            # current governor is already set to what is being requested
            return
        elif governor not in self.available_governors():
            raise ValidationError('hardware.cpu.governor', f'{governor} is not available')
        for i in Path('/sys/devices/system/cpu').iterdir():
            if i.is_dir() and i.name.startswith('cpu'):
                cpug = (i / 'cpufreq/scaling_governor')
                # Some cpu dirs (e.g. cpuidle) have no cpufreq node; skip them.
                if cpug.exists():
                    cpug.write_text(governor)
| 2,028 | Python | .py | 47 | 34.06383 | 100 | 0.641156 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,053 | rdma.py | truenas_middleware/src/middlewared/middlewared/plugins/rdma/rdma.py | import json
import subprocess
from pathlib import Path
from .constants import RDMAprotocols
from middlewared.api import api_method
from middlewared.api.current import (
RdmaLinkConfigArgs, RdmaLinkConfigResult,
RdmaCardConfigArgs, RdmaCardConfigResult,
RdmaCapableServicesArgs, RdmaCapableServicesResult
)
from middlewared.service import Service, private
from middlewared.service_exception import CallError
from middlewared.utils.functools_ import cache
from middlewared.plugins.rdma.interface import RDMAInterfaceService # noqa (just import to start the service)
PRODUCT_NAME_PREFIX = 'Product Name: '
SERIAL_NUMBER_PREFIX = '[SN] Serial number: '
PART_NUMBER_PREFIX = '[PN] Part number: '
class RDMAService(Service):
    """Enumerate and describe RDMA-capable hardware (links and cards) on the system."""

    class Config:
        private = True

    @private
    def get_pci_vpd(self, pci_addr):
        """Return VPD (vital product data) for the PCI device at `pci_addr`.

        Parses `lspci -vv -s <pci_addr>` output and returns a dict containing
        whichever of the keys 'product', 'serial' and 'part' were found; keys
        whose line is absent from the output are omitted.

        Raises:
            CallError: if lspci exits non-zero.
        """
        lspci_cmd = ['lspci', '-vv', '-s', pci_addr]
        ret = subprocess.run(lspci_cmd, capture_output=True)
        if ret.returncode:
            # Prefer stderr for the failure text, fall back to stdout.
            error = ret.stderr.decode() if ret.stderr else ret.stdout.decode()
            if not error:
                error = 'No error message reported'
            self.logger.debug('Failed to execute command: %r with error: %r', " ".join(lspci_cmd), error)
            raise CallError(f'Failed to determine serial number/product: {error}')
        result = {}
        for line in ret.stdout.decode().split('\n'):
            sline = line.strip()
            if sline.startswith(PRODUCT_NAME_PREFIX):
                result['product'] = sline[len(PRODUCT_NAME_PREFIX):]
            elif sline.startswith(SERIAL_NUMBER_PREFIX):
                result['serial'] = sline[len(SERIAL_NUMBER_PREFIX):]
            elif sline.startswith(PART_NUMBER_PREFIX):
                result['part'] = sline[len(PART_NUMBER_PREFIX):]
        return result

    @api_method(RdmaLinkConfigArgs, RdmaLinkConfigResult, private=True)
    async def get_link_choices(self, all):
        """Return a list containing dictionaries with keys 'rdma' and 'netdev'.
        Unless all is set to True, configured interfaces will be excluded."""
        all_links = await self.middleware.call('rdma._get_link_choices')
        if all:
            return all_links
        # Exclude links whose netdev is already configured as a regular interface.
        existing = await self.middleware.call('interface.get_configured_interfaces')
        return list(filter(lambda x: x['netdev'] not in existing, all_links))

    @private
    @cache
    def _get_link_choices(self):
        """Return a list containing dictionaries with keys 'rdma' and 'netdev'.
        Since these are just the hardware present in the system, we cache the result."""
        self.logger.info('Fetching RDMA link netdev choices')
        link_cmd = ['rdma', '-j', 'link']
        ret = subprocess.run(link_cmd, capture_output=True)
        if ret.returncode:
            error = ret.stderr.decode() if ret.stderr else ret.stdout.decode()
            if not error:
                error = 'No error message reported'
            self.logger.debug('Failed to execute command: %r with error: %r', " ".join(link_cmd), error)
            raise CallError(f'Failed to determine RDMA links: {error}')
        result = []
        # Only links bound to a network device are useful to callers.
        for link in json.loads(ret.stdout.decode()):
            if 'netdev' in link:
                result.append({'rdma': link['ifname'], 'netdev': link['netdev']})
        return result

    @api_method(RdmaCardConfigArgs, RdmaCardConfigResult)
    @cache
    def get_card_choices(self):
        """Return a list containing details about each RDMA card. Dual cards
        will contain two RDMA links."""
        self.logger.info('Fetching RDMA card choices')
        links = self.middleware.call_sync('rdma.get_link_choices')
        # Group links by card identity (part number + serial number).
        grouper = {}
        for link in links:
            rdma = link["rdma"]
            # The sysfs entry is a symlink into the owning PCI device's tree.
            p = Path(f'/sys/class/infiniband/{rdma}')
            if not p.is_symlink():
                # Should never happen
                self.logger.debug(f'Not a symlink: {p}')
                continue
            pci_addr = p.readlink().parent.parent.name
            if ':' not in pci_addr:
                # Should never happen
                self.logger.debug(f'{rdma} symlink {p} does not yield a PCI address: {pci_addr}')
                continue
            vpd = self.middleware.call_sync('rdma.get_pci_vpd', pci_addr)
            serial = vpd.get('serial')
            if not serial:
                # Should never happen
                self.logger.debug(f'Could not find serial number for {rdma} / {pci_addr}')
                continue
            part_number = vpd.get('part', '')
            # We'll use part_number:serial as the key, just in case we had different
            # device types with the same serial number (unlikely)
            key = f'{part_number}:{serial}'
            if key not in grouper:
                grouper[key] = {'serial': serial,
                                'product': vpd.get('product', ''),
                                'part_number': part_number,
                                'links': [link]}
            else:
                grouper[key]['links'].append(link)
        # Now that we have finished processing, generate a name that can be used
        # to store in the database. We will concatenate the rdma names in each
        # card.
        for k, v in grouper.items():
            names = [link['rdma'] for link in v['links']]
            v['name'] = ':'.join(sorted(names))
        return list(grouper.values())

    @api_method(RdmaCapableServicesArgs, RdmaCapableServicesResult)
    async def capable_services(self):
        """Return the list of services that may use RDMA on this system.

        Only enterprise, non-MINI hardware with at least one RDMA link
        qualifies; currently only NFS is reported.
        """
        result = []
        is_ent = await self.middleware.call('system.is_enterprise')
        if is_ent and 'MINI' not in await self.middleware.call('truenas.get_chassis_hardware'):
            if await self.middleware.call('rdma.get_link_choices', True):
                result.append(RDMAprotocols.NFS.value)
        return result
| 5,884 | Python | .py | 120 | 38.6 | 110 | 0.618824 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,054 | constants.py | truenas_middleware/src/middlewared/middlewared/plugins/rdma/constants.py | import enum
class RDMAprotocols(enum.Enum):
    """Protocols for which RDMA acceleration may be offered."""

    NFS = 'NFS'
    ISER = 'iSER'

    @classmethod
    def values(cls):
        """Return the string values of all members, in definition order.

        Declared as a classmethod (the original was a bare function with no
        ``self``/``cls``, which worked when called off the class but raised
        TypeError when called from a member instance).
        """
        return [member.value for member in cls]
| 147 | Python | .py | 6 | 19.666667 | 47 | 0.652174 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,055 | crud.py | truenas_middleware/src/middlewared/middlewared/plugins/rdma/interface/crud.py | import middlewared.sqlalchemy as sa
from middlewared.schema import (Dict, Int, IPAddr, Patch, Str,
accepts)
from middlewared.service import CallError, CRUDService
from pyroute2 import NDB
from pyroute2.ndb.transaction import CheckProcessException
class ConnectionChecker:
    """Transaction step that verifies an interface can reach a peer by ping.

    Instances are pushed onto a pyroute2 NDB transaction; ``commit`` runs when
    the transaction executes and raises to make the transaction roll back.
    """

    def __init__(self, middleware, ifname, ip, mac=None):
        self.middleware = middleware
        self.ifname = ifname
        self.ip = ip
        self.mac = mac

    def commit(self):
        """Raise CheckProcessException if the peer does not answer the ping."""
        reachable = self.middleware.call_sync(
            'rdma.interface.local_ping', self.ifname, self.ip, self.mac
        )
        if not reachable:
            raise CheckProcessException('CheckProcess failed')
class RDMAInterfaceModel(sa.Model):
    """Persisted IP configuration of an RDMA interface, keyed per failover node."""

    __tablename__ = 'rdma_interface'
    # At most one row per (node, ifname) pair.
    __table_args__ = (sa.UniqueConstraint('rdmaif_node', 'rdmaif_ifname'),)

    id = sa.Column(sa.Integer(), primary_key=True, autoincrement=True)
    # Failover node identifier; empty string on non-HA systems (see
    # RDMAInterfaceService.configure).
    rdmaif_node = sa.Column(sa.String(120), nullable=False)
    rdmaif_ifname = sa.Column(sa.String(120), nullable=False)
    # 45 chars accommodates a full-length IPv6 textual address.
    rdmaif_address = sa.Column(sa.String(45), nullable=False)
    rdmaif_prefixlen = sa.Column(sa.Integer(), nullable=False)
    rdmaif_mtu = sa.Column(sa.Integer(), nullable=False)
class RDMAInterfaceService(CRUDService):
    """CRUD service that manages IP configuration of RDMA interfaces.

    Writes are applied both to the live system (locally or on the remote
    failover node) and, on success, persisted to the datastore.
    """

    class Config:
        namespace = 'rdma.interface'
        private = True
        cli_private = True
        datastore = 'rdma.interface'
        datastore_prefix = "rdmaif_"

    ENTRY = Dict(
        'rdma_interface_entry',
        Str('id', required=True),
        Str('node', default=''),
        Str('ifname', required=True),
        IPAddr('address', required=True),
        Int('prefixlen', required=True),
        Int('mtu', default=5000),
    )

    async def compress(self, data):
        # Strip the transient 'check' key so it is never written to the datastore.
        if 'check' in data:
            del data['check']

    @accepts(
        Patch(
            'rdma_interface_entry', 'rdma_interface_create',
            ('rm', {'name': 'id'}),
            ('add', Dict('check',
                         Str('ping_ip'),
                         Str('ping_mac'))),
        )
    )
    async def do_create(self, data):
        """Apply the interface config live; persist it only if that succeeds.

        Returns the created entry, or None when live configuration failed.
        """
        result = await self.middleware.call('rdma.interface.configure_interface',
                                            data['node'], data['ifname'], data['address'],
                                            data['prefixlen'], data['mtu'], data.get('check'))
        if result:
            await self.compress(data)
            data['id'] = await self.middleware.call(
                'datastore.insert', self._config.datastore, data,
                {'prefix': self._config.datastore_prefix})
            return await self.get_instance(data['id'])
        else:
            return None

    @accepts(
        Int('id', required=True),
        Patch(
            'rdma_interface_entry', 'rdma_interface_update',
            ('add', Dict('check',
                         Str('ping_ip'),
                         Str('ping_mac'))),
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id_, data):
        """
        Update RDMA interface of `id`
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)

        if old['node'] != new['node'] or old['ifname'] != new['ifname']:
            # NOTE(review): this deconfigures using data['node']/data['ifname']
            # (the *new* values), not old['node']/old['ifname'], and will raise
            # KeyError if the update payload omits either key - confirm intent.
            await self.middleware.call('rdma.interface.configure_interface', data['node'], data['ifname'], None)

        result = await self.middleware.call('rdma.interface.configure_interface',
                                            new['node'], new['ifname'], new['address'],
                                            new['prefixlen'], new['mtu'], new.get('check'))
        if result:
            await self.compress(new)
            await self.middleware.call(
                'datastore.update', self._config.datastore, id_, new,
                {'prefix': self._config.datastore_prefix})
            return await self.get_instance(id_)
        else:
            raise CallError("Failed to update active RDMA interface configuration")

    @accepts(Int('id'))
    async def do_delete(self, id_):
        """
        Delete a RDMA interface by ID.
        """
        data = await self.get_instance(id_)
        # Attempt to remove the live configuration
        result = await self.middleware.call('rdma.interface.configure_interface', data['node'], data['ifname'], None)
        if not result:
            self.logger.warn("Failed to delete active RDMA interface configuration")
        # Now delete the entry
        return await self.middleware.call('datastore.delete', self._config.datastore, id_)

    async def configure_interface(self, node, ifname, address, prefixlen=None, mtu=None, check=None):
        """Dispatch interface configuration to this node or the failover peer.

        `node` empty (or matching the local node) means configure locally;
        otherwise the call is forwarded to the remote failover node.
        """
        if not node or node == await self.middleware.call('failover.node'):
            # Local
            return await self.middleware.call('rdma.interface.local_configure_interface',
                                              ifname, address, prefixlen, mtu, check)
        else:
            # Remote
            try:
                return await self.middleware.call('failover.call_remote', 'rdma.interface.local_configure_interface',
                                                  [ifname, address, prefixlen, mtu, check])
            except CallError as e:
                if e.errno != CallError.ENOMETHOD:
                    raise
                # Peer is running an older middleware without this method.
                self.logger.warning('Failed to configure remote RDMA interface')
                return False

    def local_configure_interface(self, ifname, address, prefixlen=None, mtu=None, check=None):
        """Configure the interface.

        Applies address/mtu changes via a pyroute2 NDB transaction; an
        `address` of None removes all addresses and downs the link. When
        `check` is supplied, a ConnectionChecker is pushed so a failed ping
        rolls the whole transaction back. Returns True on success.
        """
        netdev = self.middleware.call_sync('rdma.interface.ifname_to_netdev', ifname)
        if not netdev:
            self.logger.error('Could not find netdev associated with %s', ifname)
            return False
        try:
            with NDB(log='off') as ndb:
                with ndb.interfaces[netdev] as dev:
                    with ndb.begin() as ctx:
                        # First we will check to see if any change is required
                        dirty = True
                        if address:
                            for addr in dev.ipaddr:
                                if address == addr['address'] and prefixlen == addr['prefixlen']:
                                    dirty = False
                        if mtu and not dirty:
                            dirty = mtu != dev['mtu']
                        # Now reconfigure if necessary
                        if dirty:
                            # Drop every existing address before adding the new one.
                            for addr in dev.ipaddr:
                                ctx.push(dev.del_ip(address=addr['address'],
                                                    prefixlen=addr['prefixlen'],
                                                    family=addr['family']))
                            if mtu:
                                dev['mtu'] = mtu
                            if address:
                                ctx.push(dev.add_ip(address=address, prefixlen=prefixlen).set('state', 'up'))
                        else:
                            # not dirty
                            if dev['state'] != 'up':
                                ctx.push(dev.set('state', 'up'))
                        if check:
                            # Failed ping raises CheckProcessException -> rollback.
                            ctx.push(ConnectionChecker(self.middleware,
                                                       ifname,
                                                       check['ping_ip'],
                                                       check.get('ping_mac')))
            if dirty and not address:
                # Addresses were removed and none added: take the link down.
                with NDB(log='off') as ndb:
                    with ndb.interfaces[netdev] as dev:
                        dev.set('state', 'down')
            if check:
                self.logger.info(f'Validated communication of {netdev} with IP {check["ping_ip"]}')
            return True
        except CheckProcessException:
            # Only raised by ConnectionChecker, so `check` is set here.
            self.logger.info(f'Failed to validate communication of {netdev} with IP {check["ping_ip"]}')
        except Exception:
            self.logger.error('Failed to configure RDMA interface', exc_info=True)
        return False

    async def ping(self, node, ifname, ip, mac=None):
        """Ping `ip` from `ifname`, locally or on the remote failover node.

        Returns True when the peer answered (and, if `mac` is given, its MAC
        matched).
        """
        if not node or node == await self.middleware.call('failover.node'):
            # Local
            result = await self.middleware.call('rdma.interface.local_ping', ifname, ip, mac)
        else:
            # Remote
            try:
                result = await self.middleware.call('failover.call_remote',
                                                    'rdma.interface.local_ping', [ifname, ip, mac])
            except CallError as e:
                if e.errno != CallError.ENOMETHOD:
                    raise
                # Peer is running an older middleware without this method.
                self.logger.warning('Failed to ping from remote RDMA interface')
                return False
        return result

    async def ifname_to_netdev(self, ifname):
        """Map an RDMA link name to its network device name (None if unknown)."""
        links = await self.middleware.call('rdma.get_link_choices')
        for link in links:
            if link['rdma'] == ifname:
                return link['netdev']

    async def local_ping(self, ifname, ip, mac=None):
        """Ping `ip` out of `ifname`; optionally verify the peer's MAC via ARP."""
        netdev = await self.middleware.call('rdma.interface.ifname_to_netdev', ifname)
        if not await self.middleware.call('core.ping_remote', {'hostname': ip,
                                                              'timeout': 1,
                                                              'count': 4,
                                                              'interface': netdev,
                                                              'interval': '0.1'}):
            # Common case no logging needed
            return False
        if mac:
            macs = await self.middleware.call('core.arp', {'ip': ip, 'interface': netdev})
            if ip not in macs:
                # If we can ping it, we should be able to get the MAC address
                self.middleware.logger.debug('Could not obtain arp info for IP: %s', ip)
                return False
            if macs[ip] != mac:
                # MAC address mismatch
                self.middleware.logger.debug('MAC address mismatch for IP %s', ip)
                return False
        return True

    async def internal_interfaces(self, all=False):
        """Return the netdev names to be treated as internal interfaces."""
        # We must fetch all link choices. If we did not there would
        # be a circular call chain between interface and rdma
        links = await self.middleware.call('rdma._get_link_choices')
        ifname_to_netdev = {}
        for link in links:
            ifname_to_netdev[link['rdma']] = link['netdev']
        if all:
            # Treat all RDMA interfaces as internal
            return list(ifname_to_netdev.values())
        else:
            # Otherwise we only treat used RDMA interfaces as internal
            result = set()
            interfaces = await self.query()
            for interface in interfaces:
                result.add(ifname_to_netdev[interface['ifname']])
            return list(result)

    async def configure(self):
        """Apply all persisted interface configs belonging to this node.

        On non-HA systems entries are stored with node == ''.
        """
        if await self.middleware.call('failover.licensed'):
            node = await self.middleware.call('failover.node')
        else:
            node = ''
        interfaces = await self.middleware.call('rdma.interface.query', [['node', '=', node]])
        for interface in interfaces:
            await self.middleware.call('rdma.interface.local_configure_interface',
                                       interface['ifname'],
                                       interface['address'],
                                       interface['prefixlen'],
                                       interface['mtu'])
        return interfaces
25,056 | config.py | truenas_middleware/src/middlewared/middlewared/plugins/system_advanced/config.py | import os
import re
import tempfile
import textwrap
import warnings
from copy import deepcopy
import middlewared.sqlalchemy as sa
from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, Password, returns, Str
from middlewared.service import ConfigService, private, no_auth_required, ValidationErrors
from middlewared.validators import Range
from middlewared.utils import run
class SystemAdvancedModel(sa.Model):
    """Database model backing the single `system.advanced` settings row."""

    __tablename__ = 'system_advanced'

    id = sa.Column(sa.Integer(), primary_key=True)
    adv_consolemenu = sa.Column(sa.Boolean(), default=False)
    adv_serialconsole = sa.Column(sa.Boolean(), default=False)
    adv_serialport = sa.Column(sa.String(120), default='ttyS0')
    adv_serialspeed = sa.Column(sa.String(120), default='9600')
    adv_powerdaemon = sa.Column(sa.Boolean(), default=False)
    adv_overprovision = sa.Column(sa.Integer(), nullable=True, default=None)
    adv_traceback = sa.Column(sa.Boolean(), default=True)
    adv_advancedmode = sa.Column(sa.Boolean(), default=False)
    adv_autotune = sa.Column(sa.Boolean(), default=False)
    adv_debugkernel = sa.Column(sa.Boolean(), default=False)
    adv_uploadcrash = sa.Column(sa.Boolean(), default=True)
    adv_anonstats = sa.Column(sa.Boolean(), default=True)
    adv_anonstats_token = sa.Column(sa.Text())
    adv_motd = sa.Column(sa.Text(), default='Welcome')
    adv_login_banner = sa.Column(sa.Text(), default='')
    # Day-of-interval for boot pool scrubs (see service.restart 'cron' on change).
    adv_boot_scrub = sa.Column(sa.Integer(), default=7)
    adv_fqdn_syslog = sa.Column(sa.Boolean(), default=False)
    # Global self-encrypting drive (SED) credentials; password stored encrypted.
    adv_sed_user = sa.Column(sa.String(120), default='user')
    adv_sed_passwd = sa.Column(sa.EncryptedText(), default='')
    # Remote syslog configuration.
    adv_sysloglevel = sa.Column(sa.String(120), default='f_info')
    adv_syslogserver = sa.Column(sa.String(120), default='')
    adv_syslog_transport = sa.Column(sa.String(12), default='UDP')
    adv_syslog_tls_certificate_id = sa.Column(sa.ForeignKey('system_certificate.id'), index=True, nullable=True)
    adv_syslog_tls_certificate_authority_id = sa.Column(
        sa.ForeignKey('system_certificateauthority.id'), index=True, nullable=True
    )
    adv_syslog_audit = sa.Column(sa.Boolean(), default=False)
    # KMIP identifier used when the SED password is stored in KMIP.
    adv_kmip_uid = sa.Column(sa.String(255), nullable=True, default=None)
    adv_kdump_enabled = sa.Column(sa.Boolean(), default=False)
    adv_isolated_gpu_pci_ids = sa.Column(sa.JSON(), default=[])
    adv_kernel_extra_options = sa.Column(sa.Text(), default='', nullable=False)
class SystemAdvancedService(ConfigService):
    """Config service exposing and updating the `system.advanced` settings."""

    class Config:
        datastore = 'system.advanced'
        datastore_prefix = 'adv_'
        datastore_extend = 'system.advanced.system_advanced_extend'
        namespace = 'system.advanced'
        cli_namespace = 'system.advanced'
        role_prefix = 'SYSTEM_ADVANCED'

    ENTRY = Dict(
        'system_advanced_entry',
        Bool('advancedmode', required=True),
        Bool('autotune', required=True),
        Bool('kdump_enabled', required=True),
        Int('boot_scrub', validators=[Range(min_=1)], required=True),
        Bool('consolemenu', required=True),
        Bool('consolemsg', required=True),
        Bool('debugkernel', required=True),
        Bool('fqdn_syslog', required=True),
        Str('motd', required=True),
        Str('login_banner', required=True, max_length=4096),
        Bool('powerdaemon', required=True),
        Bool('serialconsole', required=True),
        Str('serialport', required=True),
        Str('anonstats_token', required=True),
        Str('serialspeed', enum=['9600', '19200', '38400', '57600', '115200'], required=True),
        Int('overprovision', validators=[Range(min_=0)], null=True, required=True),
        Bool('traceback', required=True),
        Bool('uploadcrash', required=True),
        Bool('anonstats', required=True),
        Str('sed_user', enum=['USER', 'MASTER'], required=True),
        Str('sysloglevel', enum=[
            'F_EMERG', 'F_ALERT', 'F_CRIT', 'F_ERR', 'F_WARNING', 'F_NOTICE', 'F_INFO', 'F_DEBUG',
        ], required=True),
        Str('syslogserver'),
        Str('syslog_transport', enum=['UDP', 'TCP', 'TLS'], required=True),
        Int('syslog_tls_certificate', null=True, required=True),
        Int('syslog_tls_certificate_authority', null=True, required=True),
        Bool('syslog_audit'),
        List('isolated_gpu_pci_ids', items=[Str('pci_id')], required=True),
        Str('kernel_extra_options', required=True),
        Int('id', required=True),
    )

    @private
    async def system_advanced_extend(self, data):
        """Normalize the raw datastore row for API consumers.

        Sources the deprecated `consolemsg` from system.general, upper-cases
        `sed_user`, flattens FK dicts to their ids and strips the secret
        fields `sed_passwd` and `kmip_uid`.
        """
        data['consolemsg'] = (await self.middleware.call('system.general.config'))['ui_consolemsg']
        if data.get('sed_user'):
            data['sed_user'] = data.get('sed_user').upper()
        for k in filter(lambda k: data[k], ['syslog_tls_certificate_authority', 'syslog_tls_certificate']):
            data[k] = data[k]['id']
        data.pop('sed_passwd')
        data.pop('kmip_uid')
        return data

    async def __validate_fields(self, schema, data):
        """Validate an update payload; returns (verrors, possibly-mutated data)."""
        verrors = ValidationErrors()

        serial_choice = data.get('serialport')
        if data.get('serialconsole'):
            if not serial_choice:
                verrors.add(
                    f'{schema}.serialport',
                    'Please specify a serial port when serial console option is checked'
                )
            elif serial_choice not in await self.middleware.call('system.advanced.serial_port_choices'):
                verrors.add(
                    f'{schema}.serialport',
                    'Serial port specified has not been identified by the system'
                )

        # The serial console port must not collide with the UPS service port.
        ups_port = (await self.middleware.call('ups.config'))['port']
        if not verrors and os.path.join('/dev', serial_choice or '') == ups_port:
            verrors.add(
                f'{schema}.serialport',
                'Serial port must be different then the port specified for UPS Service'
            )

        syslog_server = data.get('syslogserver')
        if syslog_server:
            match = re.match(r"^\[?[\w\.\-\:\%]+\]?(\:\d+)?$", syslog_server)
            if not match:
                verrors.add(
                    f'{schema}.syslogserver',
                    'Invalid syslog server format'
                )
            elif ']:' in syslog_server or (':' in syslog_server and not ']' in syslog_server):
                # NOTE(review): a value like 'host:' (trailing colon, no digits)
                # passes the regex above but makes int('') raise ValueError here
                # instead of producing a validation error - confirm.
                port = int(syslog_server.split(':')[-1])
                if port < 0 or port > 65535:
                    verrors.add(
                        f'{schema}.syslogserver',
                        'Port must be in the range of 0 to 65535.'
                    )

        if data['syslog_transport'] == 'TLS':
            if not data['syslog_tls_certificate_authority']:
                verrors.add(
                    f'{schema}.syslog_tls_certificate_authority', 'This is required when using TLS as syslog transport'
                )
            ca_cert = await self.middleware.call(
                'certificateauthority.query', [['id', '=', data['syslog_tls_certificate_authority']]]
            )
            if not ca_cert:
                verrors.add(f'{schema}.syslog_tls_certificate_authority', 'Unable to locate specified CA')
            elif ca_cert[0]['revoked']:
                verrors.add(f'{schema}.syslog_tls_certificate_authority', 'Specified CA has been revoked')

            if data['syslog_tls_certificate']:
                verrors.extend(await self.middleware.call(
                    'certificate.cert_services_validation', data['syslog_tls_certificate'],
                    f'{schema}.syslog_tls_certificate', False
                ))
        elif data['syslog_tls_certificate_authority'] or data['syslog_tls_certificate']:
            # Certificates only make sense for TLS transport; reset them otherwise.
            data['syslog_tls_certificate_authority'] = data['syslog_tls_certificate'] = None

        for invalid_char in ('\n', '"'):
            if invalid_char in data['kernel_extra_options']:
                # NOTE(review): these three kernel_extra_options adds omit the
                # f'{schema}.' prefix used by every other add here - confirm.
                verrors.add('kernel_extra_options', f'{invalid_char!r} is an invalid character and not allowed')

        # Dry-run the options through grub's own parser to catch syntax errors.
        with tempfile.NamedTemporaryFile(mode="w+", encoding="utf-8") as f:
            f.write(textwrap.dedent(f"""\
                menuentry 'TrueNAS' {{
                    linux /boot/vmlinuz {data['kernel_extra_options']}
                }}
            """))
            f.flush()

            result = await run(["grub-script-check", f.name], check=False)
            if result.returncode != 0:
                verrors.add('kernel_extra_options', 'Invalid syntax')

        invalid_param = 'systemd.unified_cgroup_hierarchy'
        if invalid_param in data['kernel_extra_options']:
            # TODO: we don't normalize values being passed into us which
            # allows a comical amount of potential foot-shooting
            verrors.add('kernel_extra_options', f'Modifying {invalid_param!r} is not allowed')

        return verrors, data

    @accepts(
        Patch(
            'system_advanced_entry', 'system_advanced_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'anonstats_token'}),
            ('rm', {'name': 'isolated_gpu_pci_ids'}),
            ('add', Password('sed_passwd')),
            ('attr', {'update': True}),
        ),
        audit='System advanced update'
    )
    async def do_update(self, data):
        """
        Update System Advanced Service Configuration.

        `consolemenu` should be disabled if the menu at console is not desired. It will default to standard login
        in the console if disabled.

        `autotune` when enabled executes autotune script which attempts to optimize the system based on the installed
        hardware.

        When `syslogserver` is defined, logs of `sysloglevel` or above are sent. If syslog_audit is also set
        then the remote syslog server will also receive audit messages.

        `consolemsg` is a deprecated attribute and will be removed in further releases. Please, use `consolemsg`
        attribute in the `system.general` plugin.
        """
        consolemsg = None
        if 'consolemsg' in data:
            consolemsg = data.pop('consolemsg')
            warnings.warn("`consolemsg` has been deprecated and moved to `system.general`", DeprecationWarning)

        config_data = await self.config()
        config_data['sed_passwd'] = await self.sed_global_password()
        config_data.pop('consolemsg')
        original_data = deepcopy(config_data)
        config_data.update(data)
        verrors, config_data = await self.__validate_fields('advanced_settings_update', config_data)
        verrors.check()

        if config_data != original_data:
            # sed_user is stored lower-case in the datastore.
            if original_data.get('sed_user'):
                original_data['sed_user'] = original_data['sed_user'].lower()
            if config_data.get('sed_user'):
                config_data['sed_user'] = config_data['sed_user'].lower()
            if not config_data['sed_passwd'] and config_data['sed_passwd'] != original_data['sed_passwd']:
                # We want to make sure kmip uid is None in this case
                adv_config = await self.middleware.call('datastore.config', self._config.datastore)
                self.middleware.create_task(
                    self.middleware.call('kmip.reset_sed_global_password', adv_config['adv_kmip_uid'])
                )
                config_data['kmip_uid'] = None

            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                config_data['id'],
                config_data,
                {'prefix': self._config.datastore_prefix}
            )

            # Propagate individual changes to the affected services/files.
            if original_data['boot_scrub'] != config_data['boot_scrub']:
                await self.middleware.call('service.restart', 'cron')

            generate_grub = original_data['kernel_extra_options'] != config_data['kernel_extra_options']

            if original_data['motd'] != config_data['motd']:
                await self.middleware.call('etc.generate', 'motd')

            if original_data['login_banner'] != config_data['login_banner']:
                await self.middleware.call('service.reload', 'ssh')

            if original_data['powerdaemon'] != config_data['powerdaemon']:
                await self.middleware.call('service.restart', 'powerd')

            if original_data['fqdn_syslog'] != config_data['fqdn_syslog']:
                await self.middleware.call('service.restart', 'syslogd')

            if (
                original_data['sysloglevel'].lower() != config_data['sysloglevel'].lower() or
                original_data['syslogserver'] != config_data['syslogserver'] or
                original_data['syslog_transport'] != config_data['syslog_transport'] or
                original_data['syslog_tls_certificate'] != config_data['syslog_tls_certificate'] or
                original_data['syslog_audit'] != config_data['syslog_audit'] or
                original_data['syslog_tls_certificate_authority'] != config_data['syslog_tls_certificate_authority']
            ):
                await self.middleware.call('service.restart', 'syslogd')

            if config_data['sed_passwd'] and original_data['sed_passwd'] != config_data['sed_passwd']:
                await self.middleware.call('kmip.sync_sed_keys')

            if config_data['kdump_enabled'] != original_data['kdump_enabled']:
                # kdump changes require a reboot to take effect. So just generating the kdump config
                # should be enough
                await self.middleware.call('etc.generate', 'kdump')
                generate_grub = True

            if original_data['debugkernel'] != config_data['debugkernel']:
                generate_grub = True

            await self.middleware.call('system.advanced.configure_tty', original_data, config_data, generate_grub)

        if consolemsg is not None:
            await self.middleware.call('system.general.update', {'ui_consolemsg': consolemsg})

        return await self.config()

    @accepts(roles=['SYSTEM_ADVANCED_READ'])
    @returns(Bool('sed_global_password_is_set'))
    async def sed_global_password_is_set(self):
        """Returns a boolean identifying whether or not a global
        SED password has been set"""
        return bool(await self.sed_global_password())

    @accepts(roles=['SYSTEM_ADVANCED_READ'])
    @returns(Password('sed_global_password'))
    async def sed_global_password(self):
        """Returns configured global SED password in clear-text if one
        is configured, otherwise an empty string"""
        passwd = (await self.middleware.call(
            'datastore.config', 'system.advanced', {'prefix': self._config.datastore_prefix}
        ))['sed_passwd']
        # Fall back to KMIP-stored password when none is in the datastore.
        return passwd if passwd else await self.middleware.call('kmip.sed_global_password')

    @no_auth_required
    @accepts()
    @returns(Str())
    def login_banner(self):
        """Returns user set login banner"""
        return self.middleware.call_sync('datastore.config', 'system.advanced')['adv_login_banner']
25,057 | serial.py | truenas_middleware/src/middlewared/middlewared/plugins/system_advanced/serial.py | from middlewared.schema import accepts, Dict, returns
from middlewared.service import private, Service
from middlewared.utils import run
class SystemAdvancedService(Service):
    """Serial console helpers for the `system.advanced` namespace."""

    class Config:
        namespace = 'system.advanced'
        cli_namespace = 'system.advanced'

    @accepts()
    @returns(Dict('serial_port_choices', additional_attrs=True))
    async def serial_port_choices(self):
        """
        Get available choices for `serialport`.
        """
        ports = {e['name']: e['name'] for e in await self.middleware.call('device.get_info', {'type': 'SERIAL'})}
        if not ports or (await self.middleware.call('system.advanced.config'))['serialport'] == 'ttyS0':
            # We should always add ttyS0 if ports is false or current value is the default one in db
            # i.e ttyS0
            ports['ttyS0'] = 'ttyS0'
        return ports

    @private
    async def configure_tty(self, old, new, generate_grub=False):
        """Apply serial/console getty changes between `old` and `new` config.

        Enables/disables and (re)starts the appropriate serial-getty@ units
        and regenerates grub config when required.
        """
        # Any of these changing means the getty units must be restarted.
        restart_ttys = any(old[k] != new[k] for k in ('serialconsole', 'serialspeed', 'serialport'))

        if old['serialconsole'] != new['serialconsole']:
            if old['serialport'] == new['serialport']:
                # Same port: just flip the unit's enabled state.
                action = 'enable' if new['serialconsole'] else 'disable'
                cp = await run(
                    ['systemctl', action, f'serial-getty@{old["serialport"]}.service'], check=False
                )
                if cp.returncode:
                    self.logger.error('Failed to %r serialconsole: %r', action, cp.stderr.decode())

        if old['serialport'] != new['serialport']:
            # Port changed: disable and stop the old unit, enable the new one if desired.
            for command in [
                ['systemctl', 'disable', f'serial-getty@{old["serialport"]}.service'],
                ['systemctl', 'stop', f'serial-getty@{old["serialport"]}.service'],
            ] + (
                [['systemctl', 'enable', f'serial-getty@{new["serialport"]}.service']] if new['serialconsole'] else []
            ):
                cp = await run(command, check=False)
                if cp.returncode:
                    self.logger.error(
                        'Failed to %r %r serialport service: %r', command[1], command[2], cp.stderr.decode()
                    )

        if restart_ttys or old['consolemenu'] != new['consolemenu']:
            serial_action = 'restart' if new['serialconsole'] else 'stop'
            cp = await run(['systemctl', serial_action, f'serial-getty@{new["serialport"]}.service'], check=False)
            if cp.returncode:
                self.middleware.logger.error(
                    'Failed to %r %r serial port: %r', serial_action, new['serialport'], cp.stderr.decode()
                )

        if old['consolemenu'] != new['consolemenu']:
            # Restart the VGA console getty so the menu change takes effect.
            cp = await run(['systemctl', 'restart', 'getty@tty1.service'], check=False)
            if cp.returncode:
                self.middleware.logger.error('Failed to restart tty service: %r', cp.stderr.decode())

        if generate_grub or restart_ttys:
            await self.middleware.call('etc.generate', 'grub')
25,058 | gpu.py | truenas_middleware/src/middlewared/middlewared/plugins/system_advanced/gpu.py | from middlewared.schema import accepts, Dict, List, returns, Str
from middlewared.service import private, Service, ValidationErrors
from middlewared.utils.gpu import get_gpus
class SystemAdvancedService(Service):
    """GPU isolation helpers for the `system.advanced` namespace."""

    class Config:
        namespace = 'system.advanced'
        cli_namespace = 'system.advanced'

    @accepts(roles=['SYSTEM_ADVANCED_READ'])
    @returns(Dict(additional_attrs=True))
    def get_gpu_pci_choices(self):
        """
        This endpoint gives all the gpu pci ids/slots that can be isolated.
        """
        isolated = self.middleware.call_sync('system.advanced.config')['isolated_gpu_pci_ids']
        choices = {}
        for gpu in get_gpus():
            if gpu['uses_system_critical_devices']:
                # GPUs sharing system-critical devices cannot be isolated.
                continue
            slot = gpu['addr']['pci_slot']
            choices[f'{gpu["description"]} [{slot}]'] = slot
        # Keep previously configured slots selectable even if no longer detected.
        for slot in isolated:
            if slot not in choices.values():
                choices[f'Unknown {slot!r} slot'] = slot
        return choices

    @accepts(List('isolated_gpu_pci_ids', items=[Str('pci_id')], required=True), roles=['SYSTEM_ADVANCED_WRITE'])
    @returns()
    async def update_gpu_pci_ids(self, isolated_gpu_pci_ids):
        """
        `isolated_gpu_pci_ids` is a list of PCI ids which are isolated from host system.
        """
        verrors = ValidationErrors()
        if isolated_gpu_pci_ids:
            verrors = await self.validate_gpu_pci_ids(isolated_gpu_pci_ids, verrors, 'gpu_settings')

        ha_capable = await self.middleware.call('system.is_ha_capable')
        if ha_capable and isolated_gpu_pci_ids:
            verrors.add(
                'gpu_settings.isolated_gpu_pci_ids',
                'HA capable systems do not support PCI passthrough'
            )

        verrors.check()

        advanced_config = await self.middleware.call('system.advanced.config')
        await self.middleware.call(
            'datastore.update',
            'system.advanced',
            advanced_config['id'],
            {'isolated_gpu_pci_ids': isolated_gpu_pci_ids},
            {'prefix': 'adv_'}
        )
        # Isolation takes effect through the initramfs, so rebuild it.
        await self.middleware.call('boot.update_initramfs')

    @private
    async def validate_gpu_pci_ids(self, isolated_gpu_pci_ids, verrors, schema):
        """Populate `verrors` with any problems in the requested isolation list."""
        gpus = await self.middleware.call('device.get_gpus')
        available = {gpu['addr']['pci_slot'] for gpu in gpus}
        critical_gpus = {gpu['addr']['pci_slot'] for gpu in gpus if gpu['uses_system_critical_devices']}

        provided = set(isolated_gpu_pci_ids)
        not_available = provided - available
        cannot_isolate = provided & critical_gpus
        if not_available:
            verrors.add(
                f'{schema}.isolated_gpu_pci_ids',
                f'{", ".join(not_available)} GPU pci slot(s) are not available or a GPU is not configured.'
            )

        if cannot_isolate:
            verrors.add(
                f'{schema}.isolated_gpu_pci_ids',
                f'{", ".join(cannot_isolate)} GPU pci slot(s) consists of devices '
                'which cannot be isolated from host.'
            )

        if not (available - provided):
            # At least one GPU must remain attached to the host.
            verrors.add(
                f'{schema}.isolated_gpu_pci_ids',
                'A minimum of 1 GPU is required for the host to ensure it functions as desired.'
            )

        return verrors
25,059 | syslog.py | truenas_middleware/src/middlewared/middlewared/plugins/system_advanced/syslog.py | from middlewared.schema import accepts, Dict, returns
from middlewared.service import Service
class SystemAdvancedService(Service):
    """Syslog TLS certificate choice helpers for the `system.advanced` namespace."""

    class Config:
        namespace = 'system.advanced'
        cli_namespace = 'system.advanced'

    @accepts()
    @returns(Dict(
        additional_attrs=True,
        title='Syslog Certificate Choices',
    ))
    async def syslog_certificate_choices(self):
        """
        Return choices of certificates which can be used for `syslog_tls_certificate`.
        """
        certs = await self.middleware.call('certificate.query', [('cert_type_CSR', '=', False)])
        return {cert['id']: cert['name'] for cert in certs}

    @accepts()
    @returns(Dict(
        additional_attrs=True,
        title='Syslog Certificate Authority Choices',
    ))
    async def syslog_certificate_authority_choices(self):
        """
        Return choices of certificate authorities which can be used for `syslog_tls_certificate_authority`.
        """
        authorities = await self.middleware.call('certificateauthority.query', [['revoked', '=', False]])
        return {ca['id']: ca['name'] for ca in authorities}
| 1,149 | Python | .py | 32 | 28.125 | 107 | 0.626799 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,060 | device.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/device.py | import usb.core
from middlewared.service import CallError, Service
from middlewared.api import api_method
from middlewared.api.current import (
VirtDeviceUSBChoicesArgs, VirtDeviceUSBChoicesResult,
VirtDeviceGPUChoicesArgs, VirtDeviceGPUChoicesResult,
VirtDeviceDiskChoicesArgs, VirtDeviceDiskChoicesResult,
)
class VirtDeviceService(Service):

    class Config:
        namespace = 'virt.device'
        cli_namespace = 'virt.device'

    @api_method(VirtDeviceUSBChoicesArgs, VirtDeviceUSBChoicesResult, roles=['VIRT_INSTANCE_READ'])
    def usb_choices(self):
        """
        Provide choices for USB devices.
        """
        # Key is a stable identifier derived from the device's bus/address.
        return {
            f'usb_{dev.bus}_{dev.address}': {
                'vendor_id': format(dev.idVendor, '04x'),
                'product_id': format(dev.idProduct, '04x'),
                'bus': dev.bus,
                'dev': dev.address,
                'product': dev.product,
                'manufacturer': dev.manufacturer,
            }
            for dev in usb.core.find(find_all=True)
        }

    @api_method(VirtDeviceGPUChoicesArgs, VirtDeviceGPUChoicesResult, roles=['VIRT_INSTANCE_READ'])
    async def gpu_choices(self, instance_type, gpu_type):
        """
        Provide choices for GPU devices.
        """
        if gpu_type != 'PHYSICAL':
            raise CallError('Only PHYSICAL type is supported for now.')
        if instance_type != 'CONTAINER':
            raise CallError('Only CONTAINER supported for now.')

        choices = {}
        for gpu in await self.middleware.call('device.get_gpus'):
            # GPUs tied to system critical devices (or unavailable ones) cannot be passed through.
            if not gpu['available_to_host'] or gpu['uses_system_critical_devices']:
                continue
            choices[gpu['addr']['pci_slot']] = {
                'bus': gpu['addr']['bus'],
                'slot': gpu['addr']['slot'],
                'description': gpu['description'],
                'vendor': gpu['vendor'],
            }
        return choices

    @api_method(VirtDeviceDiskChoicesArgs, VirtDeviceDiskChoicesResult, roles=['VIRT_INSTANCE_READ'])
    async def disk_choices(self):
        """
        Returns disk (zvol) choices for device type "DISK".
        """
        # Only unlocked, writable zvols that are either unattached or already
        # attached to a virt instance are eligible.
        zvols = await self.middleware.call(
            'zfs.dataset.unlocked_zvols_fast', [
                ['OR', [['attachment', '=', None], ['attachment.method', '=', 'virt.instance.query']]],
                ['ro', '=', False],
            ],
            {}, ['ATTACHMENT', 'RO']
        )
        return {zvol['path']: zvol['name'] for zvol in zvols}
25,061 | global.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/global.py | from typing import TYPE_CHECKING
import subprocess
import middlewared.sqlalchemy as sa
from middlewared.api import api_method
from middlewared.api.current import (
VirtGlobalEntry,
VirtGlobalUpdateArgs, VirtGlobalUpdateResult,
VirtGlobalBridgeChoicesArgs, VirtGlobalBridgeChoicesResult,
VirtGlobalPoolChoicesArgs, VirtGlobalPoolChoicesResult,
VirtGlobalGetNetworkArgs, VirtGlobalGetNetworkResult,
)
from middlewared.service import job, private
from middlewared.service import ConfigService, ValidationErrors
from middlewared.service_exception import CallError
from middlewared.utils import run
from middlewared.plugins.boot import BOOT_POOL_NAME_VALID
from .utils import Status, incus_call
if TYPE_CHECKING:
from middlewared.main import Middleware
# Incus state directory on the host; wiped by `reset` to force a fresh start.
INCUS_PATH = '/var/lib/incus'
# Name of the bridge incus creates/manages when the user has not chosen one.
INCUS_BRIDGE = 'incusbr0'

# Sentinel values exposed in bridge/pool choices, mapped back to None in `validate`.
BRIDGE_AUTO = '[AUTO]'
POOL_DISABLED = '[DISABLED]'
class NoPoolConfigured(Exception):
    """Raised internally by setup when no pool is configured for virtualization."""
    pass
class LockedDataset(Exception):
    """Raised internally by setup when the virt dataset is encrypted and locked."""
    pass
class VirtGlobalModel(sa.Model):
    __tablename__ = 'virt_global'

    id = sa.Column(sa.Integer(), primary_key=True)
    # ZFS pool backing the incus storage pool; NULL disables virtualization.
    pool = sa.Column(sa.String(120), nullable=True)
    # User-chosen bridge interface; NULL means incus manages its own bridge.
    bridge = sa.Column(sa.String(120), nullable=True)
    # Last auto-selected v4/v6 networks, persisted so they survive upgrades.
    v4_network = sa.Column(sa.String(120), nullable=True)
    v6_network = sa.Column(sa.String(120), nullable=True)
class VirtGlobalService(ConfigService):
    """Global configuration and lifecycle management for the incus-backed virtualization service."""

    class Config:
        datastore = 'virt_global'
        datastore_extend = 'virt.global.extend'
        namespace = 'virt.global'
        cli_namespace = 'virt.global'
        role_prefix = 'VIRT_GLOBAL'
        entry = VirtGlobalEntry

    @private
    async def extend(self, data):
        """Add the computed `dataset` and cached `state` fields to the config row."""
        if data['pool']:
            data['dataset'] = f'{data["pool"]}/.ix-virt'
        else:
            data['dataset'] = None
        try:
            data['state'] = await self.middleware.call('cache.get', 'VIRT_STATE')
        except KeyError:
            # Cache entry only exists once `setup` has run; before that we are initializing.
            data['state'] = Status.INITIALIZING.value
        return data

    @private
    async def validate(self, new: dict, schema_name: str, verrors: ValidationErrors):
        """Validate bridge/pool choices, mapping sentinel values back to None in `new`."""
        bridge = new['bridge'] or BRIDGE_AUTO
        if bridge not in await self.bridge_choices():
            verrors.add(f'{schema_name}.bridge', 'Invalid bridge')
        if bridge == BRIDGE_AUTO:
            new['bridge'] = None

        pool = new['pool'] or POOL_DISABLED
        if pool not in await self.pool_choices():
            verrors.add(f'{schema_name}.pool', 'Invalid pool')
        if pool == POOL_DISABLED:
            new['pool'] = None

    @api_method(VirtGlobalUpdateArgs, VirtGlobalUpdateResult)
    @job()
    async def do_update(self, job, data):
        """
        Update global virtualization settings.

        `pool` which pool to use to store instances.
        None will disable the service.

        `bridge` which bridge interface to use by default.
        None means it will automatically create one.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        await self.validate(new, 'virt_global_update', verrors)
        verrors.check()

        # `state` and `dataset` are computed by `extend` and not stored in the database.
        new.pop('state')
        new.pop('dataset')

        await self.middleware.call(
            'datastore.update', self._config.datastore,
            new['id'], new,
        )

        # Re-run setup so the new pool/bridge takes effect immediately.
        # (Renamed local so it does not shadow this method's `job` parameter.)
        setup_job = await self.middleware.call('virt.global.setup')
        await setup_job.wait(raise_error=True)

        return await self.config()

    @api_method(VirtGlobalBridgeChoicesArgs, VirtGlobalBridgeChoicesResult, roles=['VIRT_GLOBAL_READ'])
    async def bridge_choices(self):
        """
        Bridge choices for virtualization purposes.

        Empty means it will be managed/created automatically.
        """
        choices = {BRIDGE_AUTO: 'Automatic'}
        # We do not allow custom bridge on HA because it might have bridge STP issues
        # causing failover problems.
        if not await self.middleware.call('failover.licensed'):
            choices.update({
                i['name']: i['name']
                for i in await self.middleware.call('interface.query', [['type', '=', 'BRIDGE']])
            })
        return choices

    @api_method(VirtGlobalPoolChoicesArgs, VirtGlobalPoolChoicesResult, roles=['VIRT_GLOBAL_READ'])
    async def pool_choices(self):
        """
        Pool choices for virtualization purposes.
        """
        pools = {POOL_DISABLED: '[Disabled]'}
        for p in (await self.middleware.call('zfs.pool.query_imported_fast')).values():
            # Boot pools and locked pools are never eligible.
            if p['name'] in BOOT_POOL_NAME_VALID:
                continue
            ds = await self.middleware.call(
                'pool.dataset.get_instance_quick', p['name'], {'encryption': True},
            )
            if not ds['locked']:
                pools[p['name']] = p['name']
        return pools

    @private
    async def internal_interfaces(self):
        """Interfaces created/managed by virt and hidden from general networking."""
        return [INCUS_BRIDGE]

    @private
    async def check_initialized(self, config=None):
        """Raise CallError unless virtualization has finished initializing."""
        if config is None:
            config = await self.config()
        if config['state'] != Status.INITIALIZED.value:
            raise CallError('Virtualization not initialized.')

    @private
    async def get_default_profile(self):
        """Return the incus `default` profile metadata."""
        result = await incus_call('1.0/profiles/default', 'get')
        if result.get('status_code') != 200:
            raise CallError(result.get('error'))
        return result['metadata']

    @api_method(VirtGlobalGetNetworkArgs, VirtGlobalGetNetworkResult, roles=['VIRT_GLOBAL_READ'])
    async def get_network(self, name):
        """
        Details for the given network.
        """
        await self.check_initialized()
        result = await incus_call(f'1.0/networks/{name}', 'get')
        if result.get('status_code') != 200:
            raise CallError(result.get('error'))
        data = result['metadata']
        return {
            'type': data['type'].upper(),
            'managed': data['managed'],
            'ipv4_address': data['config']['ipv4.address'],
            'ipv4_nat': data['config']['ipv4.nat'],
            'ipv6_address': data['config']['ipv6.address'],
            'ipv6_nat': data['config']['ipv6.nat'],
        }

    @private
    @job()
    async def setup(self, job):
        """
        Sets up incus through their API.
        Will create necessary storage datasets if required.

        Maintains the VIRT_STATE cache entry reflecting the outcome.
        """
        try:
            await self.middleware.call('cache.put', 'VIRT_STATE', Status.INITIALIZING.value)
            await self._setup_impl(job)
        except NoPoolConfigured:
            await self.middleware.call('cache.put', 'VIRT_STATE', Status.NO_POOL.value)
        except LockedDataset:
            await self.middleware.call('cache.put', 'VIRT_STATE', Status.LOCKED.value)
        except Exception:
            await self.middleware.call('cache.put', 'VIRT_STATE', Status.ERROR.value)
            raise
        else:
            await self.middleware.call('cache.put', 'VIRT_STATE', Status.INITIALIZED.value)

    async def _setup_impl(self, job):
        """Actual setup work: dataset, storage pool, bridge and default profile."""
        config = await self.config()

        if not config['pool']:
            # Virtualization disabled; tear down any running service.
            if await self.middleware.call('service.started', 'incus'):
                reset_job = await self.middleware.call('virt.global.reset')
                await reset_job.wait(raise_error=True)
            self.logger.debug('No pool set for virtualization, skipping.')
            raise NoPoolConfigured()
        else:
            await self.middleware.call('service.start', 'incus')

        try:
            ds = await self.middleware.call(
                'zfs.dataset.get_instance', config['dataset'], {
                    'extra': {
                        'retrieve_children': False,
                        'user_properties': False,
                        'properties': ['encryption', 'keystatus'],
                    }
                },
            )
        except Exception:
            # Treat a lookup failure as "dataset does not exist" and create it below.
            ds = None

        if not ds:
            await self.middleware.call('zfs.dataset.create', {
                'name': config['dataset'],
                'properties': {
                    'aclmode': 'discard',
                    'acltype': 'posix',
                    'exec': 'on',
                    'casesensitivity': 'sensitive',
                    'atime': 'off',
                },
            })
        else:
            if ds['encrypted'] and not ds['key_loaded']:
                self.logger.info('Dataset %r not unlocked, skipping virt setup.', ds['name'])
                raise LockedDataset()

        # Decide whether incus must (re-)import its storage pool from our dataset.
        import_storage = True
        storage = await incus_call('1.0/storage-pools/default', 'get')
        if storage['type'] != 'error':
            if storage['metadata']['config']['source'] == config['dataset']:
                self.logger.debug('Storage pool for virt already configured.')
                import_storage = False
            else:
                # Storage pool points at a different dataset (e.g. pool changed): wipe and redo.
                reset_job = await self.middleware.call('virt.global.reset', True)
                await reset_job.wait(raise_error=True)

        # If no bridge interface has been set, use incus managed
        if not config['bridge']:
            result = await incus_call(f'1.0/networks/{INCUS_BRIDGE}', 'get')
            # Create INCUS_BRIDGE if it doesn't exist
            if result.get('status') != 'Success':
                # Reuse v4/v6 network from database if there is one
                result = await incus_call('1.0/networks', 'post', {'json': {
                    'config': {
                        'ipv4.address': config['v4_network'] or 'auto',
                        'ipv4.nat': 'true',
                        'ipv6.address': config['v6_network'] or 'auto',
                        'ipv6.nat': 'true',
                    },
                    'description': '',
                    'name': INCUS_BRIDGE,
                    'type': 'bridge',
                }})
                if result.get('status_code') != 200:
                    raise CallError(result.get('error'))

                result = await incus_call(f'1.0/networks/{INCUS_BRIDGE}', 'get')
                if result.get('status_code') != 200:
                    raise CallError(result.get('error'))

                # Update automatically selected networks into our database
                # so it can persist upgrades.
                await self.middleware.call('datastore.update', 'virt_global', config['id'], {
                    'v4_network': result['metadata']['config']['ipv4.address'],
                    'v6_network': result['metadata']['config']['ipv6.address'],
                })

            nic = {
                'name': 'eth0',
                'network': INCUS_BRIDGE,
                'type': 'nic',
            }
        else:
            nic = {
                'name': 'eth0',
                'type': 'nic',
                'nictype': 'bridged',
                'parent': config['bridge'],
            }

        if import_storage:
            payload = {
                'pools': [{
                    'config': {'source': config['dataset']},
                    'description': '',
                    'name': 'default',
                    'driver': 'zfs',
                }],
            }
            result = await incus_call('internal/recover/validate', 'post', {'json': payload})
            if result.get('status') == 'Success':
                if result['metadata']['DependencyErrors']:
                    # Fixed typo in user-facing message (was "depedencies").
                    raise CallError('Missing dependencies: ' + ', '.join(result['metadata']['DependencyErrors']))
                result = await incus_call('internal/recover/import', 'post', {'json': payload})
                if result.get('status') != 'Success':
                    raise CallError(result.get('error'))
            else:
                raise CallError('Invalid storage')

        result = await incus_call('1.0/profiles/default', 'put', {'json': {
            'config': {},
            'description': 'Default TrueNAS profile',
            'devices': {
                'root': {
                    'path': '/',
                    'pool': 'default',
                    'type': 'disk',
                },
                'eth0': nic,
            },
        }})
        if result.get('status') != 'Success':
            raise CallError(result.get('error'))

        # If storage was imported we need to restart incus service so instances
        # with autostart can be started
        if import_storage:
            await self.middleware.call('service.restart', 'incus')

    @private
    @job()
    async def reset(self, job, start: bool = False):
        """Stop all instances, stop incus and wipe its on-disk state; optionally restart it."""
        config = await self.config()

        if await self.middleware.call('service.started', 'incus'):
            # Stop running instances
            params = [
                [i['id'], {'force': True, 'timeout': 10}]
                for i in await self.middleware.call(
                    'virt.instance.query', [('status', '=', 'RUNNING')]
                )
            ]
            # (Renamed local so it does not shadow this method's `job` parameter.)
            stop_job = await self.middleware.call('core.bulk', 'virt.instance.stop', params, 'Stopping instances')
            await stop_job.wait()
            if await self.middleware.call('virt.instance.query', [('status', '=', 'RUNNING')]):
                raise CallError('Failed to stop instances')

            await self.middleware.call('service.stop', 'incus')
        if await self.middleware.call('service.started', 'incus'):
            raise CallError('Failed to stop virtualization service')

        if not config['bridge']:
            # Make sure we delete in case it exists
            try:
                await run(['ip', 'link', 'show', INCUS_BRIDGE], check=True)
            except subprocess.CalledProcessError:
                pass
            else:
                await run(['ip', 'link', 'delete', INCUS_BRIDGE], check=True)

        # Have incus start fresh
        # Use subprocess because shutil.rmtree will traverse filesystems
        # and we do have instances datasets that might be mounted beneath
        await run(f'rm -rf --one-file-system {INCUS_PATH}/*', shell=True, check=True)

        if start and not await self.middleware.call('service.start', 'incus'):
            raise CallError('Failed to start virtualization service')
async def _event_system_ready(middleware: 'Middleware', event_type, args):
    """Kick off virt setup once the system is ready (non-HA systems only)."""
    if await middleware.call('failover.licensed'):
        return
    middleware.create_task(middleware.call('virt.global.setup'))
async def setup(middleware: 'Middleware'):
    """Subscribe to system.ready and run setup immediately if the system already is."""
    middleware.event_subscribe('system.ready', _event_system_ready)

    # Should only happen if middlewared crashes or during development
    licensed = await middleware.call('failover.licensed')
    system_ready = await middleware.call('system.ready')
    if system_ready and not licensed:
        await middleware.call('virt.global.setup')
| 14,865 | Python | .py | 342 | 32.040936 | 112 | 0.570945 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,062 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/utils.py | import asyncio
import aiohttp
import enum
from collections.abc import Callable
from .websocket import IncusWS
from middlewared.service import CallError
# Incus admin API is only reachable over its local unix socket; the
# HTTP_URI host is a placeholder required by aiohttp's URL parsing.
SOCKET = '/var/lib/incus/unix.socket'
HTTP_URI = 'http://unix.socket'
class Status(enum.Enum):
    """Lifecycle states of the virtualization subsystem (cached as VIRT_STATE)."""
    INITIALIZING = 'INITIALIZING'
    INITIALIZED = 'INITIALIZED'
    NO_POOL = 'NO_POOL'
    LOCKED = 'LOCKED'
    ERROR = 'ERROR'
async def incus_call(path: str, method: str, request_kwargs: dict = None):
    """Perform one HTTP request against the incus unix socket and return the parsed JSON body."""
    async with aiohttp.UnixConnector(path=SOCKET) as connector:
        async with aiohttp.ClientSession(connector=connector) as session:
            handler = getattr(session, method)
            response = await handler(f'{HTTP_URI}/{path}', **(request_kwargs or {}))
            return await response.json()
async def incus_call_and_wait(
    path: str, method: str, request_kwargs: dict = None,
    running_cb: Callable[[dict], None] = None, timeout: int = 300,
):
    """
    Start an asynchronous incus operation and wait for it to complete.

    `running_cb`, if given, is awaited for every 'Running' progress event.
    Raises CallError on operation failure or after `timeout` seconds.
    Returns the operation result metadata on success.
    """
    result = await incus_call(path, method, request_kwargs)
    if result.get('type') == 'error':
        raise CallError(result['error'])

    async def callback(data):
        # Translate incus operation events into the (status, payload)
        # tuples understood by IncusWS.wait(); None means "keep waiting".
        if data['metadata']['status'] == 'Failure':
            return ('ERROR', data['metadata']['err'])
        if data['metadata']['status'] == 'Success':
            return ('SUCCESS', data['metadata']['metadata'])
        if data['metadata']['status'] == 'Running':
            if running_cb:
                await running_cb(data)
            return ('RUNNING', None)

    # Follow the operation (identified by its id) on the incus event websocket.
    task = asyncio.ensure_future(IncusWS().wait(result['metadata']['id'], callback))
    try:
        await asyncio.wait_for(task, timeout)
    except asyncio.TimeoutError:
        raise CallError('Timed out')
    return task.result()
| 1,693 | Python | .py | 42 | 33.880952 | 84 | 0.650397 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,063 | instance_device.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/instance_device.py | import errno
from typing import Any
from middlewared.service import (
CallError, Service, ValidationErrors, private
)
from middlewared.api import api_method
from middlewared.api.current import (
VirtInstanceDeviceListArgs, VirtInstanceDeviceListResult,
VirtInstanceDeviceAddArgs, VirtInstanceDeviceAddResult,
VirtInstanceDeviceDeleteArgs, VirtInstanceDeviceDeleteResult,
)
from .utils import incus_call_and_wait
class VirtInstanceDeviceService(Service):
class Config:
namespace = 'virt.instance'
cli_namespace = 'virt.instance'
@api_method(VirtInstanceDeviceListArgs, VirtInstanceDeviceListResult, roles=['VIRT_INSTANCE_READ'])
async def device_list(self, id):
"""
List all devices associated to an instance.
"""
instance = await self.middleware.call('virt.instance.get_instance', id, {'extra': {'raw': True}})
# Grab devices from default profile (e.g. nic and disk)
profile = await self.middleware.call('virt.global.get_default_profile')
# Flag devices from the profile as readonly, cannot be modified only overridden
raw_devices = profile['devices']
for v in raw_devices.values():
v['readonly'] = True
raw_devices.update(instance['raw']['devices'])
devices = []
for k, v in raw_devices.items():
if (device := await self.incus_to_device(k, v)) is not None:
devices.append(device)
return devices
@private
async def incus_to_device(self, name: str, incus: dict[str, Any]):
device = {'name': name, 'readonly': incus.get('readonly') or False}
def unsupported():
self.logger.trace('Proxy device not supported by API, skipping.')
return None
match incus['type']:
case 'disk':
device['dev_type'] = 'DISK'
device['source'] = incus.get('source')
device['destination'] = incus.get('path')
case 'nic':
device['dev_type'] = 'NIC'
device['network'] = incus.get('network')
case 'proxy':
device['dev_type'] = 'PROXY'
# For now follow docker lead for simplification
# only allowing to bind on host (host -> container)
if incus.get('bind') == 'instance':
return unsupported()
proto, addr, ports = incus['listen'].split(':')
if proto == 'unix' or '-' in ports or ',' in ports:
return unsupported()
device['source_proto'] = proto.upper()
device['source_port'] = int(ports)
proto, addr, ports = incus['connect'].split(':')
if proto == 'unix' or '-' in ports or ',' in ports:
return unsupported()
device['dest_proto'] = proto.upper()
device['dest_port'] = int(ports)
case 'tpm':
device['dev_type'] = 'TPM'
device['path'] = incus.get('path')
device['pathrm'] = incus.get('pathrm')
case 'usb':
device['dev_type'] = 'USB'
if incus.get('busnum') is not None:
device['bus'] = int(incus['busnum'])
if incus.get('devnum') is not None:
device['dev'] = int(incus['devnum'])
if incus.get('productid') is not None:
device['product_id'] = incus['productid']
if incus.get('vendorid') is not None:
device['vendir_id'] = incus['vendorid']
case 'gpu':
device['dev_type'] = 'USB'
device['id'] = incus['id']
device['gpu_type'] = incus['gputype'].upper()
match incus['gputype']:
case 'physical':
pass
case 'mdev':
pass
case 'mig':
device['mig_uuid'] = incus['mig.uuid']
case 'sriov':
pass
case _:
return unsupported()
return device
@private
async def device_to_incus(self, instance_type: str, device: dict[str, Any]) -> dict[str, Any]:
new = {}
match device['dev_type']:
case 'DISK':
new['type'] = 'disk'
source = device.get('source') or ''
if not source.startswith(('/dev/zvol/', '/mnt/')):
raise CallError('Only pool paths are allowed.')
new['source'] = device['source']
if source.startswith('/mnt/'):
if source.startswith('/mnt/.ix-apps'):
raise CallError('Invalid source')
if not device.get('destination'):
raise CallError('Destination is required for filesystem paths.')
if instance_type == 'VM':
raise CallError('Destination is not valid for VM')
new['path'] = device['destination']
case 'NIC':
new['type'] = 'nic'
new['network'] = device['network']
case 'PROXY':
new['type'] = 'proxy'
# For now follow docker lead for simplification
# only allowing to bind on host (host -> container)
new['bind'] = 'host'
new['listen'] = f'{device["source_proto"].lower()}:0.0.0.0:{device["source_port"]}'
new['connect'] = f'{device["dest_proto"].lower()}:0.0.0.0:{device["dest_port"]}'
case 'USB':
new['type'] = 'usb'
if device.get('bus') is not None:
new['busnum'] = str(device['bus'])
if device.get('dev') is not None:
new['devnum'] = str(device['dev'])
if device.get('product_id') is not None:
new['productid'] = device['product_id']
if device.get('vendor_id') is not None:
new['vendorid'] = device['vendor_id']
case 'TPM':
new['type'] = 'tpm'
if device.get('path'):
if instance_type == 'VM':
raise CallError('Path is not valid for VM')
new['path'] = device['path']
elif instance_type == 'CONTAINER':
new['path'] = '/dev/tpm0'
if device.get('pathrm'):
if instance_type == 'VM':
raise CallError('Pathrm is not valid for VM')
new['pathrm'] = device['pathrm']
elif instance_type == 'CONTAINER':
new['pathrm'] = '/dev/tpmrm0'
case 'GPU':
new['type'] = 'gpu'
# new['id'] = device['id']
match device['gpu_type']:
case 'PHYSICAL':
new['gputype'] = 'physical'
new['pci'] = device['pci']
case 'MDEV':
new['gputype'] = 'mdev'
case 'MIG':
new['gputype'] = 'mig'
if not device.get('mig_uuid'):
raise CallError('UUID is required for MIG')
new['mig.uuid'] = device['mig_uuid']
case 'SRIOV':
new['gputype'] = 'sriov'
case _:
raise Exception('Invalid device type')
return new
async def __generate_device_name(self, device_names: list[str], device_type: str) -> str:
name = device_type.lower()
i = 0
while True:
new_name = f'{name}{i}'
if new_name not in device_names:
name = new_name
break
i += 1
return name
async def __validate_device(self, device, schema, verrors: ValidationErrors):
match device['dev_type']:
case 'PROXY':
verror = await self.middleware.call('port.validate_port', schema, device['source_port'])
verrors.extend(verror)
case 'DISK':
if device['source'] and device['source'].startswith('/dev/zvol/'):
if device['source'] not in await self.middleware.call('virt.device.disk_choices'):
verrors.add(schema, 'Invalid ZVOL choice.')
@api_method(VirtInstanceDeviceAddArgs, VirtInstanceDeviceAddResult, roles=['VIRT_INSTANCE_WRITE'])
async def device_add(self, id, device):
"""
Add a device to an instance.
"""
instance = await self.middleware.call('virt.instance.get_instance', id, {'extra': {'raw': True}})
data = instance['raw']
if device['name'] is None:
device['name'] = await self.__generate_device_name(data['devices'].keys(), device['dev_type'])
verrors = ValidationErrors()
await self.__validate_device(device, 'virt_device_add', verrors)
verrors.check()
data['devices'][device['name']] = await self.device_to_incus(instance['type'], device)
await incus_call_and_wait(f'1.0/instances/{id}', 'put', {'json': data})
return True
@api_method(VirtInstanceDeviceDeleteArgs, VirtInstanceDeviceDeleteResult, roles=['VIRT_INSTANCE_DELETE'])
async def device_delete(self, id, device):
"""
Delete a device from an instance.
"""
instance = await self.middleware.call('virt.instance.get_instance', id, {'extra': {'raw': True}})
data = instance['raw']
if device not in data['devices']:
raise CallError('Device not found.', errno.ENOENT)
data['devices'].pop(device)
await incus_call_and_wait(f'1.0/instances/{id}', 'put', {'json': data})
return True
| 10,039 | Python | .py | 211 | 33.099526 | 109 | 0.515306 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,064 | websocket.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/websocket.py | import asyncio
from collections.abc import Callable
from collections import defaultdict
from typing import TYPE_CHECKING
import aiohttp
import logging
from middlewared.service import CallError
if TYPE_CHECKING:
from middlewared.main import Middleware
logger = logging.getLogger(__name__)
SOCKET = '/var/lib/incus/unix.socket'
class Singleton(type):
    """Metaclass making every class that uses it hold exactly one instance."""

    instance = None

    def __call__(cls, *args, **kwargs):
        # First call constructs and caches; later calls return the cached object.
        if cls.instance is None:
            cls.instance = super().__call__(*args, **kwargs)
        return cls.instance
class IncusWS(object, metaclass=Singleton):
    """
    Singleton listener for the incus events websocket.

    Dispatches 'operation' events to waiters registered via `wait` and
    re-emits the 'Instance agent started' log event as a middleware event.
    """

    def __init__(self, middleware):
        self.middleware = middleware
        # Per-operation-id queues of received events.
        self._incoming = defaultdict(list)
        # Per-operation-id asyncio.Events used to wake waiters.
        self._waiters = defaultdict(list)
        self._task = None

    async def run(self):
        """Reconnect loop: keep the websocket alive, retrying every second on failure."""
        while True:
            try:
                await self._run_impl()
            except aiohttp.client_exceptions.UnixClientConnectorError as e:
                logger.warning('Failed to connect to incus socket: %r', e)
            except Exception:
                logger.warning('Incus websocket failure', exc_info=True)
            await asyncio.sleep(1)

    async def _run_impl(self):
        """Single websocket session: consume incus events until disconnect."""
        async with aiohttp.UnixConnector(path=SOCKET) as conn:
            async with aiohttp.ClientSession(connector=conn) as session:
                async with session.ws_connect('ws://unix.socket/1.0/events') as ws:
                    async for msg in ws:
                        if msg.type != aiohttp.WSMsgType.TEXT:
                            continue
                        data = msg.json()

                        match data['type']:
                            case 'operation':
                                # Queue the event for its operation id and wake all waiters.
                                if 'metadata' in data and 'id' in data['metadata']:
                                    self._incoming[data['metadata']['id']].append(data)
                                    for i in self._waiters[data['metadata']['id']]:
                                        i.set()
                            case 'logging':
                                if data['metadata']['message'] == 'Instance agent started':
                                    self.middleware.send_event(
                                        'virt.instance.agent_running',
                                        'CHANGED',
                                        id=data['metadata']['context']['instance'],
                                    )

    async def wait(self, id: str, callback: Callable[[str], None]):
        # NOTE(review): `callback` is actually awaited with the event dict and
        # returns an optional (status, payload) tuple — annotation looks stale.
        """
        Wait on events for operation `id`, feeding each to `callback` until it
        reports SUCCESS (returns payload) or ERROR (raises CallError).
        """
        event = asyncio.Event()
        self._waiters[id].append(event)

        try:
            while True:
                # Sleep until run() signals that new events arrived for this id.
                if not self._incoming[id]:
                    await event.wait()
                    event.clear()

                for i in list(self._incoming[id]):
                    self._incoming[id].remove(i)
                    # None means "not a terminal/progress event" — keep waiting.
                    if (result := await callback(i)) is None:
                        continue
                    status, data = result
                    match status:
                        case 'SUCCESS':
                            return data
                        case 'ERROR':
                            raise CallError(data)
                        case 'RUNNING':
                            pass
                        case _:
                            raise CallError(f'Unknown status: {status}')
        finally:
            self._waiters[id].remove(event)

    async def start(self):
        """Start the background listener task (idempotent)."""
        if not self._task:
            self._task = asyncio.ensure_future(self.run())

    async def stop(self):
        """Cancel the background listener task, if running."""
        if self._task:
            self._task.cancel()
            self._task = None
async def __event_system_shutdown(middleware, event_type, args):
    """Tear down the incus event listener when the system is going down."""
    ws = IncusWS()
    await ws.stop()
async def setup(middleware: 'Middleware'):
    """Register the agent-running event, create the websocket singleton and hook shutdown."""
    middleware.event_register(
        'virt.instance.agent_running', 'Agent is running on guest.', roles=['VIRT_INSTANCE_READ'],
    )
    # Instantiating here binds `middleware` into the singleton for later IncusWS() calls.
    IncusWS(middleware)
    middleware.event_subscribe('system.shutdown', __event_system_shutdown)
| 4,023 | Python | .py | 92 | 28.391304 | 98 | 0.515865 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,065 | instance.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/instance.py | import aiohttp
from middlewared.service import (
CRUDService, ValidationErrors, filterable, job, private
)
from middlewared.utils import filter_list
from middlewared.api import api_method
from middlewared.api.current import (
VirtInstanceEntry,
VirtInstanceCreateArgs, VirtInstanceCreateResult,
VirtInstanceUpdateArgs, VirtInstanceUpdateResult,
VirtInstanceDeleteArgs, VirtInstanceDeleteResult,
VirtInstanceStartArgs, VirtInstanceStartResult,
VirtInstanceStopArgs, VirtInstanceStopResult,
VirtInstanceRestartArgs, VirtInstanceRestartResult,
VirtInstanceImageChoicesArgs, VirtInstanceImageChoicesResult,
)
from .utils import incus_call, incus_call_and_wait
# Image repository backing the 'LINUX_CONTAINERS' remote, plus its simplestreams index.
LC_IMAGES_SERVER = 'https://images.linuxcontainers.org'
LC_IMAGES_JSON = f'{LC_IMAGES_SERVER}/streams/v1/images.json'
class VirtInstanceService(CRUDService):
    class Config:
        # Service wiring: middleware/CLI namespaces, API entry model and RBAC role prefix.
        namespace = 'virt.instance'
        cli_namespace = 'virt.instance'
        entry = VirtInstanceEntry
        role_prefix = 'VIRT_INSTANCE'
    @filterable
    async def query(self, filters, options):
        """
        Query all instances with `query-filters` and `query-options`.

        Returns an empty list while virtualization is not initialized.
        """
        config = await self.middleware.call('virt.global.config')
        if config['state'] != 'INITIALIZED':
            return []
        results = (await incus_call('1.0/instances?filter=&recursion=2', 'get'))['metadata']
        entries = []
        for i in results:
            # If entry has no config or state its probably in an unknown state, skip it
            if not i.get('config') or not i.get('state'):
                continue
            entry = {
                'id': i['name'],
                'name': i['name'],
                'type': 'CONTAINER' if i['type'] == 'container' else 'VM',
                'status': i['state']['status'].upper(),
                'cpu': i['config'].get('limits.cpu'),
                'autostart': i['config'].get('boot.autostart') or False,
                'environment': {},
                'aliases': [],
            }
            # NOTE(review): assumes options always contains 'extra' (presumably
            # normalized by @filterable) — confirm.
            if options.get('extra').get('raw'):
                entry['raw'] = i

            if memory := i['config'].get('limits.memory'):
                # Handle all units? e.g. changes done through CLI
                if memory.endswith('MiB'):
                    memory = int(memory[:-3]) * 1024 * 1024
                else:
                    memory = None
            entry['memory'] = memory

            # 'environment.' prefix is stripped (12 characters) to recover the variable name.
            for k, v in i['config'].items():
                if not k.startswith('environment.'):
                    continue
                entry['environment'][k[12:]] = v
            entries.append(entry)

            # entry is appended above, then mutated in place here — the list holds
            # a reference, so the aliases still end up in the result.
            for v in (i['state']['network'] or {}).values():
                for address in v['addresses']:
                    if address['scope'] != 'global':
                        continue
                    entry['aliases'].append({
                        'type': address['family'].upper(),
                        'address': address['address'],
                        'netmask': int(address['netmask']),
                    })

        return filter_list(entries, filters, options)
@private
async def validate(self, new, schema_name, verrors, old=None):
if not old and await self.query([('name', '=', new['name'])]):
verrors.add(f'{schema_name}.name', f'Name {new["name"]!r} already exists')
# Do not validate image_choices because its an expansive operation, just fail on creation
def __data_to_config(self, data):
config = {}
if data.get('environment'):
for k, v in data['environment'].items():
config[f'environment.{k}'] = v
if data.get('cpu'):
config['limits.cpu'] = data['cpu']
if data.get('memory'):
config['limits.memory'] = str(int(data['memory'] / 1024 / 1024)) + 'MiB'
if data.get('autostart') is not None:
config['boot.autostart'] = str(data['autostart']).lower()
return config
@api_method(VirtInstanceImageChoicesArgs, VirtInstanceImageChoicesResult, roles=['VIRT_INSTANCE_READ'])
async def image_choices(self, data):
"""
Provice choices for instance image from a remote repository.
"""
choices = {}
if data['remote'] == 'LINUX_CONTAINERS':
url = LC_IMAGES_JSON
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
data = await resp.json()
for v in data['products'].values():
alias = v['aliases'].split(',', 1)[0]
choices[alias] = {
'label': f'{v["os"]} {v["release"]} ({v["arch"]}, {v["variant"]})',
'os': v['os'],
'release': v['release'],
'arch': v['arch'],
'variant': v['variant'],
}
return choices
    @api_method(VirtInstanceCreateArgs, VirtInstanceCreateResult)
    @job()
    async def do_create(self, job, data):
        """
        Create a new virtualized instance.
        """
        await self.middleware.call('virt.global.check_initialized')
        verrors = ValidationErrors()
        await self.validate(data, 'virt_instance_create', verrors)

        devices = {}
        for i in (data['devices'] or []):
            # NOTE(review): this invokes 'virt.instance.validate' for each *device*
            # payload; it looks like a device-level validator was intended — confirm.
            await self.middleware.call('virt.instance.validate', i, 'virt_instance_create', verrors)
            devices[i['name']] = await self.middleware.call('virt.instance.device_to_incus', data['instance_type'], i)

        verrors.check()

        async def running_cb(data):
            # Forward incus download/unpack progress messages to the job.
            if 'metadata' in data['metadata'] and (metadata := data['metadata']['metadata']):
                if 'download_progress' in metadata:
                    job.set_progress(None, metadata['download_progress'])
                if 'create_instance_from_image_unpack_progress' in metadata:
                    job.set_progress(None, metadata['create_instance_from_image_unpack_progress'])

        if data['remote'] == 'LINUX_CONTAINERS':
            url = LC_IMAGES_SERVER

        source = {
            'type': 'image',
        }
        # Prefer an already-downloaded image (lookup by alias); otherwise pull
        # from the remote image server via simplestreams.
        result = await incus_call(f'1.0/images/{data["image"]}', 'get')
        if result['status_code'] == 200:
            source['fingerprint'] = result['metadata']['fingerprint']
        else:
            source.update({
                'server': url,
                'protocol': 'simplestreams',
                'mode': 'pull',
                'alias': data['image'],
            })

        await incus_call_and_wait('1.0/instances', 'post', {'json': {
            'name': data['name'],
            'ephemeral': False,
            'config': self.__data_to_config(data),
            'devices': devices,
            'source': source,
            'type': 'container' if data['instance_type'] == 'CONTAINER' else 'virtual-machine',
            'start': True,
        }}, running_cb)

        return await self.middleware.call('virt.instance.get_instance', data['name'])
@api_method(VirtInstanceUpdateArgs, VirtInstanceUpdateResult)
@job()
async def do_update(self, job, id, data):
"""
Update instance.
"""
await self.middleware.call('virt.global.check_initialized')
instance = await self.middleware.call('virt.instance.get_instance', id, {'extra': {'raw': True}})
verrors = ValidationErrors()
await self.validate(data, 'virt_instance_create', verrors, old=instance)
verrors.check()
instance['raw']['config'].update(self.__data_to_config(data))
await incus_call_and_wait(f'1.0/instances/{id}', 'put', {'json': instance['raw']})
return await self.middleware.call('virt.instance.get_instance', id)
@api_method(VirtInstanceDeleteArgs, VirtInstanceDeleteResult)
@job()
async def do_delete(self, job, id):
"""
Delete an instance.
"""
await self.middleware.call('virt.global.check_initialized')
instance = await self.middleware.call('virt.instance.get_instance', id)
if instance['status'] == 'RUNNING':
await incus_call_and_wait(f'1.0/instances/{id}/state', 'put', {'json': {
'action': 'stop',
'timeout': -1,
'force': True,
}})
await incus_call_and_wait(f'1.0/instances/{id}', 'delete')
return True
    @api_method(VirtInstanceStartArgs, VirtInstanceStartResult, roles=['VIRT_INSTANCE_WRITE'])
    @job()
    async def start(self, job, id):
        """
        Start an instance.
        """
        await self.middleware.call('virt.global.check_initialized')
        # Delegate to incus and block until the state-change operation completes.
        await incus_call_and_wait(f'1.0/instances/{id}/state', 'put', {'json': {
            'action': 'start',
        }})
        return True
    @api_method(VirtInstanceStopArgs, VirtInstanceStopResult, roles=['VIRT_INSTANCE_WRITE'])
    @job()
    async def stop(self, job, id, data):
        """
        Stop an instance.

        Timeout is how long it should wait for the instance to shutdown cleanly.
        """
        await self.middleware.call('virt.global.check_initialized')
        # `timeout`/`force` come straight from the caller; incus handles the semantics.
        await incus_call_and_wait(f'1.0/instances/{id}/state', 'put', {'json': {
            'action': 'stop',
            'timeout': data['timeout'],
            'force': data['force'],
        }})
        return True
@api_method(VirtInstanceRestartArgs, VirtInstanceRestartResult, roles=['VIRT_INSTANCE_WRITE'])
@job()
async def restart(self, job, id, data):
"""
Restart an instance.
Timeout is how long it should wait for the instance to shutdown cleanly.
"""
await self.middleware.call('virt.global.check_initialized')
instance = await self.middleware.call('virt.instance.get_instance', id)
if instance['state'] == 'RUNNING':
await incus_call_and_wait(f'1.0/instances/{id}/state', 'put', {'json': {
'action': 'stop',
'timeout': data['timeout'],
'force': data['force'],
}})
await incus_call_and_wait(f'1.0/instances/{id}/state', 'put', {'json': {
'action': 'start',
}})
return True
| 10,298 | Python | .py | 230 | 33.556522 | 118 | 0.566853 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,066 | stats.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/stats.py | import time
from middlewared.event import EventSource
from middlewared.schema import Dict, Str
from middlewared.plugins.reporting.realtime_reporting.cgroup import get_cgroup_stats
class VirtInstacesMetricsEventSource(EventSource):

    ACCEPTS = Dict(
        Str('id'),
    )

    def run_sync(self):
        """Emit cgroup metrics for the given instance every 2 seconds until cancelled."""
        instance_id = self.arg['id']
        while not self._cancel_sync.is_set():
            # TODO: code duplication with reporting.realtime
            # Fetch the most recent netdata snapshot, retrying once on failure.
            netdata_metrics = None
            attempts_left = 2
            while True:
                try:
                    netdata_metrics = self.middleware.call_sync('netdata.get_all_metrics')
                except Exception:
                    attempts_left -= 1
                    if attempts_left <= 0:
                        raise
                    time.sleep(0.5)
                else:
                    break

            fields = {}
            if netdata_metrics:
                fields.update(get_cgroup_stats(netdata_metrics, [instance_id])[instance_id])
                fields['error'] = False
                fields['errname'] = None
            else:
                fields['error'] = True
                fields['errname'] = 'FAILED_TO_CONNECT'

            self.send_event('ADDED', fields=fields)
            time.sleep(2)
async def setup(middleware):
    # Expose the per-instance metrics stream; consumers subscribe with {'id': <instance id>}.
    middleware.register_event_source(
        'virt.instance.metrics', VirtInstacesMetricsEventSource, roles=['VIRT_INSTANCE_READ']
    )
| 1,550 | Python | .py | 39 | 27.512821 | 93 | 0.567802 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,067 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/attachments.py | from typing import TYPE_CHECKING
from middlewared.common.attachment import FSAttachmentDelegate
from middlewared.common.ports import PortDelegate
if TYPE_CHECKING:
from middlewared.main import Middleware
class VirtFSAttachmentDelegate(FSAttachmentDelegate):

    name = 'virt'
    title = 'Virtualization'

    async def query(self, path, enabled, options=None):
        """Return virt instances tied to `path` (every instance when `path` is the virt pool itself)."""
        config = await self.middleware.call('virt.global.config')
        pool_mountpoint = f'/mnt/{config["pool"]}'
        instances = []
        for instance in await self.middleware.call('virt.instance.query'):
            if path == pool_mountpoint:
                # All instances live on the virt pool, so every one is affected.
                instances.append({'id': instance['id'], 'name': instance['name']})
                continue
            for device in await self.middleware.call('virt.instance.device_list', instance['id']):
                if device['dev_type'] != 'DISK' or device['source'] is None:
                    continue
                if await self.middleware.call('filesystem.is_child', device['source'], path):
                    instances.append({'id': instance['id'], 'name': instance['name']})
                    break
        return instances

    async def delete(self, attachments):
        """Detach virt from its pool when any attachment exists."""
        if not attachments:
            return
        job = await self.middleware.call('virt.global.update', {'pool': ''})
        await job.wait(raise_error=True)

    async def toggle(self, attachments, enabled):
        """Start or stop each attached instance; failures are logged, not raised."""
        action = 'start' if enabled else 'stop'
        for attachment in attachments:
            try:
                job = await self.middleware.call(f'virt.instance.{action}', attachment['id'])
                await job.wait(raise_error=True)
            except Exception as e:
                self.middleware.logger.warning('Unable to %s %r: %s', action, attachment['id'], e)

    async def stop(self, attachments):
        await self.toggle(attachments, False)

    async def start(self, attachments):
        await self.toggle(attachments, True)
class VirtPortDelegate(PortDelegate):

    name = 'virt devices'
    namespace = 'virt.device'
    title = 'Virtualization Device'

    async def get_ports(self):
        """Report source ports consumed by PROXY devices, grouped per instance."""
        ports = []
        for instance in await self.middleware.call('virt.instance.query'):
            instance_ports = [
                (host, device['source_port'])
                for device in await self.middleware.call('virt.instance.device_list', instance['id'])
                if device['dev_type'] == 'PROXY'
                # Proxy devices listen on both IPv4 and IPv6 wildcards.
                for host in ('0.0.0.0', '::')
            ]
            if instance_ports:
                ports.append({
                    'description': f'{instance["id"]!r} instance',
                    'ports': instance_ports,
                })
        return ports
async def setup(middleware: 'Middleware'):
    """Register the virt filesystem-attachment and port delegates."""
    # Dataset delegate registration does not need to block middleware startup.
    middleware.create_task(
        middleware.call('pool.dataset.register_attachment_delegate', VirtFSAttachmentDelegate(middleware))
    )
    await middleware.call('port.register_attachment_delegate', VirtPortDelegate(middleware))
| 3,198 | Python | .py | 73 | 31.90411 | 98 | 0.581537 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,068 | client.py | truenas_middleware/src/middlewared/middlewared/plugins/apps_images/client.py | import aiohttp
import aiohttp.client_exceptions
import async_timeout
import asyncio
import urllib.parse
from middlewared.service import CallError
from .utils import (
DEFAULT_DOCKER_REGISTRY, DOCKER_AUTH_SERVICE, DOCKER_AUTH_HEADER, DOCKER_AUTH_URL, DOCKER_CONTENT_DIGEST_HEADER,
DOCKER_MANIFEST_LIST_SCHEMA_V2, DOCKER_MANIFEST_SCHEMA_V1, DOCKER_MANIFEST_SCHEMA_V2, DOCKER_RATELIMIT_URL,
parse_auth_header, parse_digest_from_schema,
)
class ContainerRegistryClientMixin:

    @staticmethod
    async def _api_call(url, options=None, headers=None, mode='get'):
        """
        Perform a GET/HEAD request against `url`.

        Returns a dict with:
          * `response`: parsed JSON body (empty dict when unavailable)
          * `response_obj`: the aiohttp response object (or None)
          * `error`: None on success; a `ClientResponseError` instance for HTTP
            errors (so callers can inspect `status`/`headers`), otherwise an
            error string.
        """
        options = options or {}
        timeout = options.get('timeout', 15)
        assert mode in ('get', 'head')
        response = {'error': None, 'response': {}, 'response_obj': None}
        try:
            async with async_timeout.timeout(timeout):
                async with aiohttp.ClientSession(
                    raise_for_status=True, trust_env=True,
                ) as session:
                    req = await getattr(session, mode)(url, headers=headers)
                    response['response_obj'] = req
                    if req.status != 200:
                        response['error'] = f'Received response code {req.status}' + (
                            f' ({req.content})' if req.content else ''
                        )
                    else:
                        try:
                            # Bug fix: read the body while the session is still open;
                            # previously this happened after the session context had
                            # exited and the connection was released.
                            response['response'] = await req.json()
                        except aiohttp.client_exceptions.ContentTypeError as e:
                            # quay.io registry returns malformed content type header which aiohttp fails to parse
                            # even though the content returned by registry is valid json
                            response['error'] = f'Unable to parse response: {e}'
        except asyncio.TimeoutError:
            response['error'] = f'Unable to connect with {url} in {timeout} seconds.'
        except aiohttp.ClientResponseError as e:
            # Bug fix: keep the exception object instead of `str(e)` so that
            # `_get_manifest_response` can detect a 401 and refresh the bearer
            # token; the previous string made that isinstance check dead code.
            # f-strings elsewhere still render it as its message.
            response['error'] = e
        return response

    async def _get_token(self, scope, auth_url=DOCKER_AUTH_URL, service=DOCKER_AUTH_SERVICE):
        """Retrieve a bearer token for `scope` from the registry's auth endpoint."""
        query_params = urllib.parse.urlencode({
            'service': service,
            'scope': scope,
        })
        response = await self._api_call(f'{auth_url}?{query_params}')
        if response['error']:
            raise CallError(f'Unable to retrieve token for {scope!r}: {response["error"]}')
        return response['response']['token']

    async def _get_manifest_response(self, registry, image, tag, headers, mode, raise_error):
        """
        Fetch the manifest for image:tag, transparently performing the
        token-auth dance when the registry answers 401.
        """
        manifest_url = f'https://{registry}/v2/{image}/manifests/{tag}'
        # 1) try getting manifest
        response = await self._api_call(manifest_url, headers=headers, mode=mode)
        if (error := response['error']) and isinstance(error, aiohttp.ClientResponseError):
            if error.status == 401:
                # 2) try to get token from manifest api call's response headers
                auth_data = parse_auth_header(error.headers[DOCKER_AUTH_HEADER])
                headers['Authorization'] = f'Bearer {await self._get_token(**auth_data)}'
                # 3) Redo the manifest call with updated token
                response = await self._api_call(manifest_url, headers=headers, mode=mode)
        if raise_error and response['error']:
            raise CallError(
                f'Unable to retrieve latest image digest for registry={registry} '
                f'image={image} tag={tag}: {response["error"]}'
            )
        return response

    async def get_manifest_call_headers(self, registry, image, headers):
        """Pre-fetch a bearer token for Docker Hub; other registries go through the 401 retry."""
        if registry == DEFAULT_DOCKER_REGISTRY:
            headers['Authorization'] = f'Bearer {await self._get_token(scope=f"repository:{image}:pull")}'
        return headers

    async def _get_repo_digest(self, registry, image, tag):
        """
        Return digests reported by the registry for image:tag - digests parsed from
        the manifest body plus the `Docker-Content-Digest` response header value
        (which may be None when the registry does not send that header).
        """
        response = await self._get_manifest_response(
            registry, image, tag, await self.get_manifest_call_headers(registry, image, {
                'Accept': (f'{DOCKER_MANIFEST_SCHEMA_V2}, '
                           f'{DOCKER_MANIFEST_LIST_SCHEMA_V2}, '
                           f'{DOCKER_MANIFEST_SCHEMA_V1}')
            }), 'get', True
        )
        digests = parse_digest_from_schema(response)
        digests.append(response['response_obj'].headers.get(DOCKER_CONTENT_DIGEST_HEADER))
        return digests

    async def get_docker_hub_rate_limit_preview(self):
        """HEAD Docker Hub's rate-limit preview endpoint; limits are carried in response headers."""
        return await self._api_call(
            url=DOCKER_RATELIMIT_URL,
            headers={'Authorization': f'Bearer {await self._get_token(scope="repository:ratelimitpreview/test:pull")}'},
            mode='head'
        )
| 4,747 | Python | .py | 91 | 40.67033 | 120 | 0.61512 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,069 | update_alerts.py | truenas_middleware/src/middlewared/middlewared/plugins/apps_images/update_alerts.py | import contextlib
import logging
from collections import defaultdict
from middlewared.service import CallError, Service
from .client import ContainerRegistryClientMixin
from .utils import normalize_reference
logger = logging.getLogger('docker_image')
class ContainerImagesService(Service, ContainerRegistryClientMixin):

    class Config:
        namespace = 'app.image.op'
        private = True

    # Maps image tag -> whether an update is available; unqueried tags default to False.
    IMAGE_CACHE = defaultdict(lambda: False)

    async def get_update_cache(self, normalized=False):
        """Return the update cache, optionally keyed by normalized complete tags."""
        return {
            normalize_reference(i)['complete_tag']: v for i, v in self.IMAGE_CACHE.items()
        } if normalized else self.IMAGE_CACHE

    def normalize_reference(self, reference: str) -> dict:
        return normalize_reference(reference=reference)

    async def check_update(self):
        """Refresh the update cache for every tag of every local image."""
        images = await self.middleware.call('app.image.query')
        for image in images:
            for tag in image['repo_tags']:
                try:
                    await self.check_update_for_image(tag, image)
                except CallError as e:
                    logger.error(str(e))

    async def retrieve_digest(self, reference: str):
        """Return registry digests for `reference`, or [] if the lookup fails."""
        repo_digests = []
        parsed_reference = self.normalize_reference(reference=reference)
        with contextlib.suppress(CallError):
            repo_digests = await self._get_repo_digest(
                parsed_reference['registry'],
                parsed_reference['image'],
                parsed_reference['tag'],
            )
        return repo_digests

    async def check_update_for_image(self, tag, image_details):
        # Dangling images have no meaningful tag to query upstream.
        if not image_details['dangling']:
            parsed_reference = self.normalize_reference(tag)
            self.IMAGE_CACHE[tag] = await self.compare_id_digests(
                image_details,
                parsed_reference['registry'],
                parsed_reference['image'],
                parsed_reference['tag']
            )

    async def clear_update_flag_for_tag(self, tag):
        self.IMAGE_CACHE[tag] = False

    async def compare_id_digests(self, image_details, registry, image_str, tag_str):
        """
        Return True when an update is available, i.e. none of the locally
        stored digests match any digest the registry currently serves.

        Rewritten from a generator expression that shadowed `digest` as both
        the registry digest list and the inner loop variable (it only worked
        because a genexp evaluates its outermost iterable eagerly); this is the
        same comparison with explicit names.
        """
        registry_digests = await self._get_repo_digest(registry, image_str, tag_str)
        if not image_details['repo_digests']:
            return False
        # Local digests look like '<image>@sha256:...' - compare only the digest part.
        local_digests = {repo_digest.split('@', 1)[-1] for repo_digest in image_details['repo_digests']}
        return local_digests.isdisjoint(registry_digests)

    async def remove_from_cache(self, image):
        for tag in image['repo_tags']:
            self.IMAGE_CACHE.pop(tag, None)
| 2,648 | Python | .py | 60 | 34.25 | 90 | 0.635939 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,070 | images.py | truenas_middleware/src/middlewared/middlewared/plugins/apps_images/images.py | from middlewared.plugins.apps.ix_apps.docker.images import delete_image, list_images, pull_image
from middlewared.schema import accepts, Bool, Dict, Int, List, returns, Str
from middlewared.service import CRUDService, filterable, job
from middlewared.utils import filter_list
from .utils import parse_tags
class AppImageService(CRUDService):

    class Config:
        cli_namespace = 'app.image'
        namespace = 'app.image'
        role_prefix = 'APPS'

    ENTRY = Dict(
        'app_image_entry',
        Str('id'),
        List('repo_tags', items=[Str('repo_tag')]),
        List('repo_digests', items=[Str('repo_digest')]),
        Int('size'),
        Bool('dangling'),
        Bool('update_available'),
        Str('created'),
        Str('author'),
        Str('comment'),
        List(
            'parsed_repo_tags', items=[Dict(
                'parsed_repo_tag',
                Str('image'),
                Str('tag'),
                Str('registry'),
                Str('complete_tag'),
                additional_attrs=True,
            )]
        ),
        additional_attrs=True,
    )

    @filterable
    def query(self, filters, options):
        """
        Query all docker images with `query-filters` and `query-options`.

        `query-options.extra.parse_tags` is a boolean which when set will have normalized tags to be retrieved.
        """
        if not self.middleware.call_sync('docker.state.validate', False):
            return filter_list([], filters, options)

        update_cache = self.middleware.call_sync('app.image.op.get_update_cache')
        parse_all_tags = options['extra'].get('parse_tags')
        images = []
        for image in list_images():
            entry = {
                'id': image.get('id'),
                'repo_tags': image.get('repo_tags', []),
                'repo_digests': image.get('repo_digests', []),
                'size': image.get('size'),
                'created': image.get('created'),
                'author': image.get('author'),
                'comment': image.get('comment'),
            }
            # Docker reports untagged layers as a single '<none>:<none>' tag.
            entry['dangling'] = len(entry['repo_tags']) == 1 and entry['repo_tags'][0] == '<none>:<none>'
            entry['update_available'] = any(update_cache[tag] for tag in entry['repo_tags'])
            if parse_all_tags:
                entry['parsed_repo_tags'] = parse_tags(entry['repo_tags'])
            images.append(entry)

        return filter_list(images, filters, options)

    @accepts(
        Dict(
            'image_pull',
            Dict(
                'auth_config',
                Str('username', required=True),
                Str('password', required=True, max_length=4096),
                default=None,
                null=True,
            ),
            Str('image', required=True),
        ), roles=['APPS_WRITE']
    )
    @returns()
    @job()
    def pull(self, job, data):
        """
        `image` is the name of the image to pull. Format for the name is "registry/repo/image:v1.2.3" where
        registry may be omitted and it will default to docker registry in this case. It can or cannot contain
        the tag - this will be passed as is to docker so this should be analogous to what `docker pull` expects.

        `auth_config` should be specified if image to be retrieved is under a private repository.
        """
        def callback(entry):
            nonlocal job
            # Sanity checks in case we come across some weird registry payload.
            if not isinstance(entry, dict):
                return
            if any(k not in entry for k in ('progressDetail', 'status')):
                return
            status = entry['status'].lower().strip()
            if status not in ('pull complete', 'downloading'):
                return
            if status == 'pull complete':
                job.set_progress(95, 'Image downloaded, doing post processing')
                return
            progress = entry['progressDetail']
            if not isinstance(progress, dict):
                return
            if any(k not in progress for k in ('current', 'total')):
                return
            if progress['current'] > progress['total']:
                return
            job.set_progress((progress['current'] / progress['total']) * 90, 'Pulling image')

        self.middleware.call_sync('docker.state.validate')
        auth_config = data['auth_config'] or {}
        image_tag = data['image']
        pull_image(image_tag, callback, auth_config.get('username'), auth_config.get('password'))
        job.set_progress(100, f'{image_tag!r} image pulled successfully')

    @accepts(
        Str('image_id'),
        Dict(
            'options',
            Bool('force', default=False),
        )
    )
    def do_delete(self, image_id, options):
        """
        Delete docker image `image_id`.

        `options.force` when set will force delete the image regardless of the state of containers and should
        be used cautiously.
        """
        self.middleware.call_sync('docker.state.validate')
        image = self.get_instance__sync(image_id)
        delete_image(image_id, options['force'])
        # Drop any cached update flags for the tags this image carried.
        self.middleware.call_sync('app.image.op.remove_from_cache', image)
        return True
| 5,137 | Python | .py | 119 | 32.478992 | 115 | 0.573485 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,071 | utils.py | truenas_middleware/src/middlewared/middlewared/plugins/apps_images/utils.py | import re
from collections import defaultdict
from middlewared.service import CallError
# Default values
DEFAULT_DOCKER_REGISTRY = 'registry-1.docker.io'
DEFAULT_DOCKER_REPO = 'library'
DEFAULT_DOCKER_TAG = 'latest'
DOCKER_CONTENT_DIGEST_HEADER = 'Docker-Content-Digest'
# Taken from OCI: https://github.com/opencontainers/go-digest/blob/master/digest.go#L63
DIGEST_RE = r'[a-z0-9]+(?:[.+_-])*:[a-zA-Z0-9=_-]+'
DOCKER_AUTH_HEADER = 'WWW-Authenticate'
DOCKER_AUTH_URL = 'https://auth.docker.io/token'
DOCKER_AUTH_SERVICE = 'registry.docker.io'
DOCKER_MANIFEST_SCHEMA_V1 = 'application/vnd.docker.distribution.manifest.v1+json'
DOCKER_MANIFEST_SCHEMA_V2 = 'application/vnd.docker.distribution.manifest.v2+json'
DOCKER_MANIFEST_LIST_SCHEMA_V2 = 'application/vnd.docker.distribution.manifest.list.v2+json'
DOCKER_RATELIMIT_URL = 'https://registry-1.docker.io/v2/ratelimitpreview/test/manifests/latest'
def parse_digest_from_schema(response: dict) -> list[str]:
    """
    Parses out the digest according to schemas specs:
    https://docs.docker.com/registry/spec/manifest-v2-1/

    Returns an empty list for unrecognised media types (e.g. schema v1)
    instead of the previous implicit None, so callers can always treat the
    result as a list (they call `.append(...)` on it).
    """
    media_type = response['response']['mediaType']
    if media_type == DOCKER_MANIFEST_SCHEMA_V2:
        digest_value = response['response']['config']['digest']
        # The digest is normally a string; be defensive if a registry returns a list.
        return [digest_value] if isinstance(digest_value, str) else digest_value
    elif media_type == DOCKER_MANIFEST_LIST_SCHEMA_V2:
        # One digest per architecture in a multi-arch manifest list.
        if manifests := response['response']['manifests']:
            return [digest['digest'] for digest in manifests]
        return []
    # Fix: previously this fell off the end and returned None despite the
    # `-> list[str]` annotation, crashing callers for schema v1 manifests.
    return []
def parse_auth_header(header: str) -> dict[str, str]:
    """
    Parses header in format below:
    'Bearer realm="https://ghcr.io/token",service="ghcr.io",scope="redis:pull"'

    Returns:
    {
        'auth_url': 'https://ghcr.io/token',
        'service': 'ghcr.io',
        'scope': 'redis:pull'
    }
    """
    adapter = {
        'realm': 'auth_url',
        'service': 'service',
        'scope': 'scope',
    }
    results = {}
    parts = header.split()
    if len(parts) > 1:
        # NOTE(review): this still assumes values contain no ',' - confirm
        # against the registries we talk to.
        for part in parts[1].split(','):
            # Fix: split on the first '=' only, so values that themselves
            # contain '=' (e.g. realm URLs with query parameters) are kept
            # intact instead of being silently dropped.
            key_value = part.split('=', 1)
            if len(key_value) == 2 and key_value[0] in adapter:
                results[adapter[key_value[0]]] = key_value[1].strip('"')
    return results
def normalize_reference(reference: str) -> dict:
    """
    Parses the reference for image, tag and repository.

    Most of the logic has been used from docker engine to make sure we follow the same rules/practices
    for normalising the image name / tag.

    Raises CallError when a reference containing '@' does not match the OCI
    digest grammar (DIGEST_RE).
    """
    # This needs to be done as containerd automatically adds docker.io as a registery which can't be queried by us
    # when checking for update alerts as registry-1.docker.io is the one used to actually query that information
    reference = reference.removeprefix('docker.io/')
    # The first path component is a registry only if it looks like a hostname
    # (contains '.' or ':') or is 'localhost'; otherwise default to Docker Hub.
    registry_idx = reference.find('/')
    if registry_idx == -1 or (not any(c in reference[:registry_idx] for c in ('.', ':')) and reference[:registry_idx] != 'localhost'):
        registry, tagged_image = DEFAULT_DOCKER_REGISTRY, reference
    else:
        registry, tagged_image = reference[:registry_idx], reference[registry_idx + 1:]

    # Bare images like 'redis' live under the default 'library' repository.
    if '/' not in tagged_image:
        tagged_image = f'{DEFAULT_DOCKER_REPO}/{tagged_image}'

    # if image is not tagged, use default value.
    if ':' not in tagged_image:
        tagged_image += f':{DEFAULT_DOCKER_TAG}'

    # At this point, tag should be included already – we just need to see whether this
    # tag is named or digested and respond accordingly.
    ref_is_digest = False
    if '@' in tagged_image:
        matches = re.findall(DIGEST_RE, tagged_image)
        if not matches:
            raise CallError(f'Invalid reference format: {tagged_image}')

        # The digest (e.g. 'sha256:...') is the last grammar match; slice the
        # image name off everything before the '@' separator.
        tag = matches[-1]
        tag_pos = tagged_image.find(tag)
        image = tagged_image[:tag_pos - 1].rsplit(':', 1)[0]
        sep = '@'
        ref_is_digest = True
    elif ':' in tagged_image:
        image, tag = tagged_image.rsplit(':', 1)
        sep = ':'

    # NOTE(review): `image`/`tag`/`sep` rely on one of the two branches above
    # always running; that holds because ':latest' is appended when both '@'
    # and ':' are absent - confirm if the normalisation above ever changes.
    return {
        'reference': reference,
        'image': image,
        'tag': tag,
        'registry': registry,
        'complete_tag': f'{registry}/{image}{sep}{tag}',
        'reference_is_digest': ref_is_digest,
    }
def get_chart_releases_consuming_image(
    image_names: list | set, chart_releases: list, get_mapping: bool = False
) -> dict | list:
    """
    Return which chart releases use any of `image_names`.

    With `get_mapping=True`, returns {release_name: [matching references]};
    otherwise a list of release names.
    """
    consumers = defaultdict(list) if get_mapping else set()
    wanted = {parsed['complete_tag']: parsed for parsed in map(normalize_reference, image_names)}
    for chart_release in chart_releases:
        release_name = chart_release['name']
        for image in chart_release['resources']['container_images']:
            parsed = normalize_reference(image)
            match = wanted.get(parsed['complete_tag'])
            if match is None or match['tag'] != parsed['tag']:
                continue
            if get_mapping:
                consumers[release_name].append(parsed['reference'])
            else:
                consumers.add(release_name)
    return consumers if get_mapping else list(consumers)
def parse_tags(references: list[str]) -> list[dict[str, str]]:
    """Normalize each image reference into its parsed components."""
    return list(map(normalize_reference, references))
def normalize_docker_limits_header(headers: dict) -> dict:
    """
    Translate Docker Hub's 'ratelimit-limit' / 'ratelimit-remaining' headers
    (format '<pulls>;w=<window seconds>') into a flat dict of ints.
    """
    required = ('ratelimit-limit', 'ratelimit-remaining')
    if any(key not in headers for key in required):
        return {'error': 'Unable to retrieve rate limit information from registry'}

    total_pull_limit, total_time_limit = headers['ratelimit-limit'].split(';w=')
    remaining_pull_limit, remaining_time_limit = headers['ratelimit-remaining'].split(';w=')
    return {
        'total_pull_limit': int(total_pull_limit),
        'total_time_limit_in_secs': int(total_time_limit),
        'remaining_pull_limit': int(remaining_pull_limit),
        'remaining_time_limit_in_secs': int(remaining_time_limit),
        'error': None,
    }
| 6,049 | Python | .py | 126 | 41.166667 | 134 | 0.66039 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,072 | dockerhub_ratelimit.py | truenas_middleware/src/middlewared/middlewared/plugins/apps_images/dockerhub_ratelimit.py | from middlewared.schema import Dict, Int, returns, Str
from middlewared.service import accepts, Service
from .client import ContainerRegistryClientMixin
from .utils import normalize_docker_limits_header
class ContainerImagesService(Service):

    class Config:
        namespace = 'app.image'

    @accepts(roles=['APPS_READ'])
    @returns(Dict(
        Int('total_pull_limit', null=True, description='Total pull limit for Docker Hub registry'),
        Int(
            'total_time_limit_in_secs', null=True,
            description='Total time limit in seconds for Docker Hub registry before the limit renews'
        ),
        Int('remaining_pull_limit', null=True, description='Remaining pull limit for Docker Hub registry'),
        Int(
            'remaining_time_limit_in_secs', null=True,
            description='Remaining time limit in seconds for Docker Hub registry for the '
                        'current pull limit to be renewed'
        ),
        Str('error', null=True),
    ))
    async def dockerhub_rate_limit(self):
        """
        Returns the current rate limit information for Docker Hub registry.

        Please refer to https://docs.docker.com/docker-hub/download-rate-limit/ for more information.
        """
        preview = await ContainerRegistryClientMixin().get_docker_hub_rate_limit_preview()
        response_obj = preview.get('response_obj')
        if response_obj and hasattr(response_obj, 'headers'):
            # Limits are reported via response headers on the HEAD request.
            return normalize_docker_limits_header(response_obj.headers)
        return {
            'error': 'Unable to retrieve rate limit information from registry',
        }
| 1,644 | Python | .py | 33 | 41.181818 | 107 | 0.676856 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,073 | wait_on_disks.py | truenas_middleware/src/middlewared/middlewared/scripts/wait_on_disks.py | #!/usr/bin/python3
from collections import deque
from time import time, sleep
from sys import exit
from pyudev import Context, Monitor, MonitorObserver
# Holds the most recent partition events; `main` only reads the newest entry
# (DQ[-1]) to detect whether events are still arriving.
DQ = deque(maxlen=2)


def callback(dev):
    # pyudev callback: record block-device events that carry a partition UUID.
    if uuid := dev.get('ID_PART_ENTRY_UUID'):
        DQ.append({'time': time(), 'name': dev.sys_name, 'uuid': uuid, 'action': dev.action})
def get_observer():
    """Build a pyudev observer that feeds block-subsystem events into `callback`."""
    context = Context()
    monitor = Monitor.from_netlink(context)
    monitor.filter_by(subsystem='block')
    return MonitorObserver(monitor, callback=callback)
def main(max_wait=600.0, interval=5.0):
    """
    Block until disk events stop arriving (or `max_wait` elapses).

    `max_wait`: float representing the total time (in seconds) we should block
        and wait for disk events.
    `interval`: float representing the time we sleep between each iteration to
        allow new disk events to come in.
    """
    observer = get_observer()
    observer.start()  # events are collected on a background thread
    previous_event = {}
    remaining = max_wait
    while remaining > 0:
        remaining -= round(interval, 2)
        sleep(interval)
        try:
            latest_event = DQ[-1]
        except IndexError:
            # no events received at all
            break
        if latest_event == previous_event:
            # nothing new arrived during the last interval - disks have settled
            break
        previous_event = latest_event
    observer.send_stop()  # clean up background thread (non-blocking)
if __name__ == '__main__':
    try:
        main()
    finally:
        # exit(0) in the finally block supersedes any propagating exception,
        # so this helper never reports failure to its caller.
        exit(0)  # always exit success (for now)
| 1,473 | Python | .py | 44 | 26.431818 | 93 | 0.616254 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,074 | setup_cgroups.py | truenas_middleware/src/middlewared/middlewared/scripts/setup_cgroups.py | #!/usr/bin/python3
import contextlib
import os
CGROUP_ROOT_PATH = '/sys/fs/cgroup'
CGROUP_AVAILABLE_CONTROLLERS_PATH = os.path.join(CGROUP_ROOT_PATH, 'cgroup.subtree_control')
def get_available_controllers_for_consumption() -> set:
    """Return controllers currently enabled in the root cgroup's subtree_control file."""
    try:
        with open(CGROUP_AVAILABLE_CONTROLLERS_PATH, 'r') as fh:
            contents = fh.read()
    except FileNotFoundError:
        raise Exception(
            'Unable to determine cgroup controllers which are available for consumption as '
            f'{CGROUP_AVAILABLE_CONTROLLERS_PATH!r} does not exist'
        )
    return set(contents.split())
def update_available_controllers_for_consumption(to_add_controllers: set) -> set:
    # This will try to update available controllers for consumption and return the current state
    # regardless of the update failing
    with contextlib.suppress(FileNotFoundError, OSError):
        payload = ' '.join(f'+{controller}' for controller in to_add_controllers)
        with open(CGROUP_AVAILABLE_CONTROLLERS_PATH, 'w') as fh:
            fh.write(payload)
    return get_available_controllers_for_consumption()
def main():
    """Ensure the cgroup controllers required by apps are enabled for consumption."""
    # Logic copied over from kubernetes
    # https://github.com/kubernetes/kubernetes/blob/08fbe92fa76d35048b4b4891b41fc6912e689cc7/
    # pkg/kubelet/cm/cgroup_manager_linux.go#L238
    # FIXME: See if this is now required for docker
    supported_controllers = {'cpu', 'cpuset', 'memory', 'hugetlb', 'pids'}
    system_supported_controllers_path = os.path.join(CGROUP_ROOT_PATH, 'cgroup.controllers')
    try:
        with open(system_supported_controllers_path, 'r') as fh:
            available_controllers = set(fh.read().split())
    except FileNotFoundError:
        raise Exception(
            'Unable to determine available cgroup controllers as '
            f'{system_supported_controllers_path!r} does not exist'
        )

    # Take the controllers which are both required and supported by the system, check
    # whether they are consumable, try enabling any that are not, then verify.
    needed_controllers = supported_controllers & available_controllers
    consumable = get_available_controllers_for_consumption()
    missing_controllers = needed_controllers - consumable
    if missing_controllers:
        consumable = update_available_controllers_for_consumption(missing_controllers)
        missing_controllers = needed_controllers - consumable

    if missing_controllers:
        raise Exception(
            f'Missing {", ".join(missing_controllers)!r} cgroup controller(s) '
            'which are required for apps to function'
        )
if __name__ == '__main__':
    # Exits non-zero (via the Exceptions raised in main) when required
    # controllers cannot be enabled.
    main()
| 2,859 | Python | .py | 52 | 48.173077 | 113 | 0.716792 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,075 | sedhelper.py | truenas_middleware/src/middlewared/middlewared/scripts/sedhelper.py | #!/usr/bin/env python
import argparse
from concurrent.futures import ThreadPoolExecutor
import errno
from truenas_api_client import Client, ClientException
# A particular Kioxia PM6 drive has shown us that it takes longer than
# 60 seconds to unlock/setup SED. Specifically, the "sedutil-cli --initialSetup"
# command took ~135 seconds to complete. Note the other PM6 drives took < 30 secs.
# (60 secs is default websocket timeout so bump this to 300 secs (5mins) to be safe)
# Used as `call_timeout` for every middleware Client created in this script.
TIMEOUT = 300
def setup(password, disk=None):
    """Run initial SED setup on all (or one) non-boot disks and report per-disk results."""

    def sed_setup(client, disk_name, password):
        rv = client.call('disk.sed_initial_setup', disk_name, password)
        outcome_messages = {
            'SUCCESS': f'{disk_name}\t\t[\033[92mOK\x1B[0m]',
            'SETUP_FAILED': f'{disk_name}\t\t[\033[91mSETUP FAILED\x1B[0m]',
            'LOCKING_DISABLED': f'{disk_name}\t\t[\033[91mLOCKING DISABLED\x1B[0m]',
        }
        # ACCESS_GRANTED / NO_SED are intentionally silent per-disk.
        if rv in outcome_messages:
            print(outcome_messages[rv])
        return disk_name, rv

    with Client(call_timeout=TIMEOUT) as c:
        disk_filter = [('name', '=', disk)] if disk else []
        disks = c.call('disk.query', disk_filter, {'extra': {'passwords': True}})
        boot_disks = c.call('boot.get_disks')
        disks = [d for d in disks if d['name'] not in boot_disks]
        if not disks:
            print(f'Disk {disk} not found')
            return

        global_sed_password = c.call('system.advanced.sed_global_password')
        if global_sed_password != password and not (disk and disks[0]['passwd'] == password):
            print('Given password does not match saved one')
            return

        outcomes = set()
        with ThreadPoolExecutor(max_workers=12) as executor:
            for _, rv in executor.map(lambda d: sed_setup(c, d['name'], password), disks):
                outcomes.add(rv)

        # Nothing actionable happened - explain why.
        if not outcomes & {'SUCCESS', 'SETUP_FAILED', 'LOCKING_DISABLED'}:
            if 'NO_SED' in outcomes and 'ACCESS_GRANTED' not in outcomes:
                print('No SED disks were found in the system')
            else:
                print('No new SED disks detected')
def unlock():
    """Ask the middleware to unlock all SED disks, reporting success or failure."""
    with Client(call_timeout=TIMEOUT) as c:
        try:
            c.call('disk.sed_unlock_all')
        except ClientException as e:
            if e.errno != errno.EACCES:
                raise
            print('SED disks failed to unlocked')
        else:
            print('All SED disks unlocked')
def main():
    """CLI entry point: dispatch to `setup` or `unlock` based on the sub-command."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help='sub-command help', dest='action')
    setup_parser = subparsers.add_parser('setup', help='Setup new SED disks')
    setup_parser.add_argument('--disk', help='Perform action only on specified disk')
    setup_parser.add_argument('password', help='Password to use on new disks')
    subparsers.add_parser('unlock', help='Unlock SED disks')

    args = parser.parse_args()
    if args.action == 'setup':
        setup(args.password, disk=args.disk)
    elif args.action == 'unlock':
        unlock()
    else:
        parser.print_help()
main()
| 3,412 | Python | .py | 79 | 34.075949 | 97 | 0.605072 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,076 | vendor_service.py | truenas_middleware/src/middlewared/middlewared/scripts/vendor_service.py | import hashlib
import subprocess
import sys
from middlewared.plugins.system_vendor.vendor import get_vendor
from middlewared.utils.vendor import Vendors
def load_envvars(envvars_file: str) -> dict:
    """Parse NAME=VALUE pairs from *envvars_file*.

    Blank lines and lines starting with ``#`` are skipped; a value may itself
    contain further ``=`` characters.  Returns an empty dict when the file is
    unreadable or when any remaining line lacks an ``=`` separator (the parse
    is all-or-nothing, matching the original behaviour).
    """
    try:
        with open(envvars_file, "r") as fh:
            entries = (
                stripped.split("=", 1)
                for raw in fh
                if (stripped := raw.strip()) and not stripped.startswith("#")
            )
            # A line without "=" yields a 1-element list, which raises
            # ValueError on unpacking and is caught below.
            return {name: value for name, value in entries}
    except (OSError, ValueError):
        return {}
def get_hostid() -> str | None:
    """Return the SHA-256 hex digest of /etc/hostid, or None if it cannot be read."""
    try:
        with open("/etc/hostid", "rb") as fh:
            digest = hashlib.file_digest(fh, "sha256")
    except Exception:
        # Best-effort: a missing/unreadable hostid simply yields None.
        return None
    return digest.hexdigest()
def start_hexos_websocat():
    """Launch a transient systemd service running websocat for HexOS.

    Bridges the local middleware websocket to the HexOS cloud API, identifying
    this machine by a hash of /etc/hostid and its outbound IPv4 address.
    Silently does nothing if /etc/hostid cannot be hashed.
    """
    url = "wss://api.hexos.com"
    # Optional extra environment for the service, one NAME=VALUE per line.
    envvars_file = "/etc/default/websocat"
    envvars = load_envvars(envvars_file)
    systemd_opts = (
        "--unit=websocat",
        "--description=websocat daemon for HexOS",
        "--property=Restart=always",
        "--property=RestartSec=10",
        "--uid=www-data",
        f"--setenv=URL={url}",
        *[f"--setenv={name}={value}" for name, value in envvars.items()]
    )
    wsocat_path = "/usr/local/libexec/wsocat"
    wsocat_opts = (
        "--buffer-size 1048576",
        "--ping-interval 30",
        "--ping-timeout 60",
        "--exit-on-eof",
        "--text",
    )
    local_server = "ws://127.0.0.1:6000/websocket"
    hostid_hash = get_hostid()
    if hostid_hash is None:
        return
    # Determine the source IPv4 address used to reach the internet
    # (the "src" field of the route to 8.8.8.8).
    ip_output = subprocess.check_output("ip -o -4 route get 8.8.8.8", shell=True, text=True)
    ip_address = ip_output.partition("src")[-1].split()[0]
    remote_server = f"{url}/server/{hostid_hash}/{ip_address}"

    # Start a transient service; the command string is interpreted by bash,
    # so the space-containing option strings above are split as intended.
    subprocess.run([
        "systemd-run",
        *systemd_opts,
        "/bin/bash",
        "-c",
        " ".join([wsocat_path, *wsocat_opts, local_server, remote_server])
    ])
def main():
    """Start vendor-specific services; currently only HexOS needs one."""
    if get_vendor() == Vendors.HEXOS:
        start_hexos_websocat()


if __name__ == "__main__":
    try:
        main()
    finally:
        sys.exit(0)  # Never fail
| 2,088 | Python | .py | 65 | 25.138462 | 92 | 0.583749 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
#!/usr/bin/python3
import os
import shutil
import sqlite3
import subprocess
from middlewared.utils.db import query_config_table
from middlewared.utils.rootfs import ReadonlyRootfsManager
FIPS_MODULE_FILE = '/usr/lib/ssl/fipsmodule.cnf'
OPENSSL_CONFIG_FILE = '/etc/ssl/openssl.cnf'
BASE_OPENSSL_CONFIG_FILE = '/conf/base/etc/ssl/openssl.cnf'
OPENSSL_FIPS_FILE = '/etc/ssl/openssl_fips.cnf'
def validate_system_state() -> None:
    """Raise if any OpenSSL/FIPS configuration file is missing from disk."""
    required = (FIPS_MODULE_FILE, OPENSSL_CONFIG_FILE, OPENSSL_FIPS_FILE, BASE_OPENSSL_CONFIG_FILE)
    missing = next((p for p in required if not os.path.exists(p)), None)
    if missing is not None:
        raise Exception(f'{missing!r} does not exist')
def modify_openssl_config(enable_fips: bool) -> None:
    """Reset openssl.cnf from the pristine base copy; when *enable_fips* is
    true, append an ``.include`` directive pulling in the FIPS configuration."""
    shutil.copyfile(BASE_OPENSSL_CONFIG_FILE, OPENSSL_CONFIG_FILE)
    if not enable_fips:
        return
    include_directive = f'\n.include {OPENSSL_FIPS_FILE}\n'
    with open(OPENSSL_CONFIG_FILE, 'a') as config:
        config.write(include_directive)
def configure_fips(enable_fips: bool) -> None:
    """(Re)generate the OpenSSL FIPS module config when enabling FIPS, then
    rewrite openssl.cnf to match the requested state."""
    if enable_fips:
        cmd = [
            'openssl', 'fipsinstall', '-out', FIPS_MODULE_FILE,
            '-module', '/usr/lib/x86_64-linux-gnu/ossl-modules/fips.so',
        ]
        # Equivalent to subprocess.check_call: raises on non-zero exit.
        subprocess.run(
            cmd, check=True, timeout=30,
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
        )
    modify_openssl_config(enable_fips)
def main() -> None:
    """Apply the persisted FIPS setting to the (temporarily writable) root filesystem."""
    validate_system_state()
    try:
        security_settings = query_config_table('system_security')
    except (sqlite3.OperationalError, IndexError):
        # This is for the case when users are upgrading and in that case table will not exist
        # so we should always disable fips as a default because users might not be able to ssh
        # into the system
        security_settings = {'enable_fips': False}

    with ReadonlyRootfsManager('/') as readonly_rootfs:
        readonly_rootfs.make_writeable()
        configure_fips(security_settings['enable_fips'])


if __name__ == '__main__':
    main()
| 1,869 | Python | .py | 41 | 39.902439 | 101 | 0.701435 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
# -*- coding=utf-8 -*-
import contextlib
import logging
import os
import re
import socket
import subprocess
import time
from truenas_api_client import Client
logger = logging.getLogger(__name__)
"""The point of this script is so that the development team can ask a user
to run this script from the command line during the unfortunate event the
main middleware process "hangs". (We've had past situations where asyncio
main event loop hung up). The idea is to at least allow user to get us some
information so we can try and deduce what might be going on."""
def main():
    """Poll the middleware until a connection times out, then dump its core."""
    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(message)s")

    interval = 10
    logger.info("Probing middleware accessibility with %d seconds interval", interval)
    hung = False
    while not hung:
        try:
            # Opening (and immediately closing) a client connection is the probe.
            with Client():
                pass
        except socket.timeout:
            hung = True
        else:
            time.sleep(interval)
    logger.info("Caught timeout, dumping core")
    dump_core()
def dump_core():
    """Attach gdb to the running middlewared process, generate a core file
    in the current directory, and gzip it for submission to the dev team.

    Raises if systemctl/gdb/gzip fail or if the MainPID cannot be parsed.
    """
    middlewared_pid = int(
        re.match(
            # NOTE(review): assumes "MainPID=<n>" is at the very start of the
            # systemctl output, which holds since only that property is requested.
            r"MainPID=([0-9]+)",
            subprocess.check_output(
                "systemctl show --property MainPID middlewared".split(),
                encoding="utf-8",
            ),
        ).group(1),
    )
    logger.info("middlewared PID: %d", middlewared_pid)

    # gdb's generate-core-file writes core.<pid> into the working directory;
    # remove any stale file from a previous run first.
    core_file = f"core.{middlewared_pid}"
    with contextlib.suppress(FileNotFoundError):
        os.unlink(core_file)

    subprocess.run(
        ["gdb", "-p", str(middlewared_pid), "-batch", "-ex", "generate-core-file"],
        check=True,
    )

    logger.info("Compressing core file %r", core_file)
    subprocess.run(["gzip", core_file], check=True)

    logger.info("%r is ready!", f"{core_file}.gz")
# Script entry point.
if __name__ == "__main__":
    main()
| 1,802 | Python | .py | 52 | 28.269231 | 86 | 0.642693 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import argparse
import logging
import requests
import subprocess
import sys
import tempfile
class MiddlewareGDB(object):
    """Attach gdb to the running middlewared process and capture Python-level
    backtraces of every thread, saving them locally and best-effort uploading
    them to a pastebin for the development team."""

    def __init__(self):
        # NOTE(review): this logger is created but never used below — confirm
        # whether it can be dropped or should replace the print() calls.
        self.logger = logging.getLogger('middlewaregdb')

    def install_dbg(self):
        """Ensure the Python debug packages needed by gdb's py-bt are installed."""
        packages = ['python3-dbg', 'python3-dev']
        for p in packages:
            try:
                # dpkg -L fails when the package is not installed.
                subprocess.run(['dpkg', '-L', p], capture_output=True, check=True)
            except subprocess.CalledProcessError:
                subprocess.run(['apt', 'install', '-y', p], check=True)

    def run_gdb(self):
        """Run gdb against middlewared and return paths of the collected output.

        Returns a dict with key 'local' (temp file path) and, when the upload
        succeeds, 'remote' (pastebin URL).
        """
        # -o picks the oldest matching process, i.e. the main daemon.
        proc = subprocess.Popen(
            ['pgrep', '-o', '-f', 'middlewared'],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        try:
            middlewared_pid = int(proc.communicate()[0].split()[0].strip())
        except Exception:
            print('Failed to find middlewared process', file=sys.stderr)
            sys.exit(1)

        proc = subprocess.Popen([
            'gdb',
            '-p', str(middlewared_pid),
            '-batch',
            '-ex', 'thread apply all py-list',
            '-ex', 'thread apply all py-bt',
        ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = proc.communicate()[0]

        outfile = tempfile.NamedTemporaryFile(delete=False)
        outfile.write(output)

        rv = {'local': outfile.name}

        # NOTE(review): ix.io appears to be defunct; the upload likely fails
        # and is silently skipped — confirm and consider removing/replacing.
        try:
            r = requests.post('http://ix.io', {
                'f:1': output,
            })
            rv['remote'] = r.text.strip()
        except Exception:
            pass

        return rv

    def main(self):
        """CLI entry point: install debug deps, run gdb, print result locations."""
        parser = argparse.ArgumentParser()
        parser.parse_args()

        print('Making sure debug packages are installed')
        self.install_dbg()
        print('Running gdb')
        rv = self.run_gdb()
        if 'local' in rv:
            print(f'Local output: {rv["local"]}')
        if 'remote' in rv:
            print(f'Remote output: {rv["remote"]}')
def main():
    """Module-level entry point wrapping :class:`MiddlewareGDB`."""
    MiddlewareGDB().main()


if __name__ == '__main__':
    main()
| 2,042 | Python | .py | 60 | 24.766667 | 82 | 0.555951 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
class BaseRcloneRemote:
    # Base class for rclone cloud-sync providers.  Subclasses override the
    # NotImplemented attributes and whichever hooks they need.
    name = NotImplemented  # internal provider identifier (e.g. "S3")
    title = NotImplemented  # human-readable provider name

    buckets = False  # provider organizes data into buckets/containers
    bucket_title = "Bucket"  # UI label for the bucket concept
    can_create_bucket = False  # provider implements create_bucket()
    custom_list_buckets = False  # provider implements its own list_buckets()

    readonly = False  # provider only supports pulling data

    fast_list = False  # rclone --fast-list is supported

    rclone_type = NotImplemented  # value for rclone's "type" config key

    credentials_schema = NotImplemented  # schema for per-credential attributes
    credentials_oauth = False  # credentials are obtained via OAuth flow
    credentials_oauth_name = None  # OAuth provider name override
    refresh_credentials = []  # credential attribute names refreshed by OAuth

    task_schema = []  # schema for per-task attributes

    extra_methods = []

    restic = False  # provider can back a restic repository

    def __init__(self, middleware):
        self.middleware = middleware

    async def create_bucket(self, credentials, name):
        """Create bucket *name*; only for providers with can_create_bucket."""
        raise NotImplementedError

    async def list_buckets(self, credentials):
        """List buckets; only for providers with custom_list_buckets."""
        raise NotImplementedError

    async def validate_task_basic(self, task, credentials, verrors):
        """Cheap validation hook; add errors to *verrors*."""
        pass

    async def validate_task_full(self, task, credentials, verrors):
        """Thorough (possibly remote) validation hook; add errors to *verrors*."""
        pass

    async def get_credentials_extra(self, credentials):
        """Extra rclone config derived from *credentials*."""
        return {}

    async def get_task_extra(self, task):
        """Extra rclone config derived from *task*."""
        return {}

    async def get_task_extra_args(self, task):
        """Extra rclone command-line arguments for *task*."""
        return []

    async def cleanup(self, task, config):
        """Undo any temporary state created by get_credentials_extra/get_task_extra."""
        pass

    def get_restic_config(self, task):
        """Return (url, env) for restic; only for providers with restic = True."""
        raise NotImplementedError
| 1,210 | Python | .py | 37 | 26.243243 | 68 | 0.678788 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import os
import tempfile
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Int, Str
class SFTPRcloneRemote(BaseRcloneRemote):
    """SFTP cloud-sync provider (rclone ``sftp`` backend)."""

    name = "SFTP"
    title = "SFTP"

    rclone_type = "sftp"

    credentials_schema = [
        Str("host", title="Host", required=True),
        Int("port", title="Port"),
        Str("user", title="Username", required=True),
        Str("pass", title="Password"),
        Int("private_key", title="Private Key ID"),
    ]

    async def get_credentials_extra(self, credentials):
        """Materialize the keychain private key into a temp file for rclone.

        The file is removed again by :meth:`cleanup` after the task runs.
        """
        result = {}

        if "private_key" in credentials["attributes"]:
            with tempfile.NamedTemporaryFile(mode="w+", delete=False) as tmp_file:
                tmp_file.write((await self.middleware.call("keychaincredential.get_of_type",
                                                           credentials["attributes"]["private_key"],
                                                           "SSH_KEY_PAIR"))["attributes"]["private_key"])

            result["key_file"] = tmp_file.name

        return result

    async def cleanup(self, task, config):
        # Remove the temporary key file written by get_credentials_extra.
        if "private_key" in task["credentials"]["attributes"]:
            os.unlink(config["key_file"])
| 1,231 | Python | .py | 27 | 34.074074 | 105 | 0.585427 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Str
class HubicRcloneRemote(BaseRcloneRemote):
    """Hubic cloud-sync provider (rclone ``hubic`` backend)."""

    name = "HUBIC"
    title = "Hubic"

    buckets = True
    bucket_title = "Container"

    fast_list = True

    rclone_type = "hubic"

    credentials_schema = [
        Str("token", title="Access Token", required=True, max_length=None),
    ]
| 382 | Python | .py | 12 | 27 | 75 | 0.708791 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import re
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Str
from middlewared.validators import Match, URL
class AzureBlobRcloneRemote(BaseRcloneRemote):
    """Microsoft Azure Blob Storage provider (rclone ``azureblob`` backend)."""

    name = "AZUREBLOB"
    title = "Microsoft Azure Blob Storage"

    buckets = True
    bucket_title = "Container"

    fast_list = True

    rclone_type = "azureblob"

    credentials_schema = [
        Str("account", title="Account Name", required=True, validators=[
            Match(r"^[a-z0-9\-.]+$", re.IGNORECASE,
                  "Account Name field can only contain alphanumeric characters, - and .")
        ]),
        Str("key", title="Account Key", required=True),
        Str("endpoint", title="Endpoint", default="", validators=[URL(empty=True)]),
    ]

    async def get_task_extra(self, task):
        # Larger upload chunks reduce the number of API calls for big files.
        return {"chunk_size": "100Mi"}
| 848 | Python | .py | 21 | 33.952381 | 89 | 0.667888 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import textwrap
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Bool, Password, Str
class GoogleDriveRcloneRemote(BaseRcloneRemote):
    """Google Drive cloud-sync provider (rclone ``drive`` backend)."""

    name = "GOOGLE_DRIVE"
    title = "Google Drive"

    fast_list = True

    rclone_type = "drive"

    credentials_schema = [
        Str("client_id", title="OAuth Client ID", default=""),
        Password("client_secret", title="OAuth Client Secret", default=""),
        Password("token", title="Access Token", required=True, max_length=None),
        Str("team_drive", title="Team Drive ID (if connecting to Team Drive)"),
    ]
    credentials_oauth = True
    refresh_credentials = ["token"]

    task_schema = [
        Bool("acknowledge_abuse",
             title="Allow files which return cannotDownloadAbusiveFile to be downloaded.",
             description=textwrap.dedent("""\
                If downloading a file returns the error "This file has been identified as malware or spam and cannot be
                downloaded" with the error code "cannotDownloadAbusiveFile" then enable this flag to indicate you
                acknowledge the risks of downloading the file and TrueNAS will download it anyway.
             """), default=False),
    ]

    async def get_credentials_extra(self, credentials):
        # A Team Drive is addressed directly; otherwise anchor at "root".
        if credentials["attributes"].get("team_drive"):
            return dict()

        return dict(root_folder_id="root")
| 1,417 | Python | .py | 29 | 40.862069 | 119 | 0.678028 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import os
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Str
class GooglePhotosRcloneRemote(BaseRcloneRemote):
    """Google Photos cloud-sync provider (rclone ``googlephotos`` backend)."""

    name = "GOOGLE_PHOTOS"
    title = "Google Photos"

    rclone_type = "googlephotos"

    credentials_schema = [
        Str("client_id", title="OAuth Client ID", default=""),
        Str("client_secret", title="OAuth Client Secret", default=""),
        Str("token", title="Access Token", required=True, max_length=None),
    ]
    refresh_credentials = ["token"]

    async def validate_task_full(self, task, credentials, verrors):
        """Reject folder choices whose listing would never finish."""
        # `/media/by-day` contains a huge tree of empty directories for all days starting from 2000-01-01. Listing
        # them all will never complete due to the API rate limits.
        folder = task["attributes"]["folder"].strip("/")
        if not folder:
            verrors.add(
                "attributes.folder",
                "Pulling from the root directory is not allowed. Please, select a specific directory."
            )
            return

        # normpath collapses "media//by-day/", "media/./by-day", etc.
        folder = os.path.normpath(folder)
        for prohibited in ["media", "media/by-day"]:
            if folder == prohibited:
                verrors.add(
                    "attributes.folder",
                    f"Pulling from the {prohibited} directory is not allowed. Please, select a specific directory."
                )
                return
| 1,410 | Python | .py | 31 | 35.580645 | 115 | 0.624362 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Str
class MegaRcloneRemote(BaseRcloneRemote):
    """Mega cloud-sync provider (rclone ``mega`` backend)."""

    name = "MEGA"
    title = "Mega"

    rclone_type = "mega"

    credentials_schema = [
        Str("user", title="Username", required=True),
        Str("pass", title="Password", required=True),
    ]
| 337 | Python | .py | 10 | 28.7 | 53 | 0.69969 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import textwrap
import boto3
from botocore.client import Config
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Bool, Int, Password, Str
from middlewared.utils.lang import undefined
class S3RcloneRemote(BaseRcloneRemote):
    """Amazon S3 (and S3-compatible) cloud-sync provider (rclone ``s3`` backend)."""

    name = "S3"
    title = "Amazon S3"

    buckets = True

    fast_list = True

    rclone_type = "s3"

    credentials_schema = [
        Str("access_key_id", title="Access Key ID", required=True),
        Password("secret_access_key", title="Secret Access Key", required=True),
        Str("endpoint", title="Endpoint URL", default=""),
        Str("region", title="Region", default=""),
        Bool("skip_region", title="Endpoint does not support regions", default=False),
        Bool("signatures_v2", title="Use v2 signatures", default=False),
        Int("max_upload_parts", title="Maximum number of parts in a multipart upload", description=textwrap.dedent("""\
            This option defines the maximum number of multipart chunks to use when doing a multipart upload.
            This can be useful if a service does not support the AWS S3 specification of 10,000 chunks (e.g. Scaleway).
        """), default=10000),
    ]

    task_schema = [
        Str("region", title="Region", default=""),
        Str("encryption", title="Server-Side Encryption", enum=[None, "AES256"], default=None, null=True),
        Str("storage_class", title="The storage class to use", enum=["", "STANDARD", "REDUCED_REDUNDANCY",
                                                                     "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING",
                                                                     "GLACIER", "GLACIER_IR", "DEEP_ARCHIVE"]),
    ]

    def _get_client(self, credentials):
        """Build a boto3 S3 client from the stored credentials (blocking)."""
        config = None

        if credentials["attributes"].get("signatures_v2", False):
            config = Config(signature_version="s3")

        client = boto3.client(
            "s3",
            config=config,
            endpoint_url=credentials["attributes"].get("endpoint", "").strip() or None,
            region_name=credentials["attributes"].get("region", "").strip() or None,
            aws_access_key_id=credentials["attributes"]["access_key_id"],
            aws_secret_access_key=credentials["attributes"]["secret_access_key"],
        )

        return client

    async def validate_task_basic(self, task, credentials, verrors):
        """Check encryption value and auto-discover the bucket's region."""
        if task["attributes"]["encryption"] not in (None, "", "AES256"):
            verrors.add("encryption", 'Encryption should be null or "AES256"')

        if not credentials["attributes"].get("skip_region", False):
            if not credentials["attributes"].get("region", "").strip():
                response = await self.middleware.run_in_thread(
                    self._get_client(credentials).get_bucket_location, Bucket=task["attributes"]["bucket"]
                )
                # AWS returns a null LocationConstraint for us-east-1.
                task["attributes"]["region"] = response["LocationConstraint"] or "us-east-1"

    async def get_credentials_extra(self, credentials):
        result = {}

        # Scaleway endpoints only support up to 1000 multipart chunks.
        if (credentials["attributes"].get("endpoint") or "").rstrip("/").endswith(".scw.cloud"):
            if credentials["attributes"].get("max_upload_parts", 10000) == 10000:
                result["max_upload_parts"] = 1000

        return result

    async def get_task_extra(self, task):
        """Translate middleware attributes into rclone s3 backend config."""
        result = dict(
            encryption=undefined,
            server_side_encryption=task["attributes"].get("encryption") or "",
            skip_region=undefined,
            signatures_v2=undefined,
            provider="Other",
        )

        if not task["credentials"]["attributes"].get("skip_region", False):
            if task["credentials"]["attributes"].get("region", "").strip():
                if not (task["attributes"].get("region") or "").strip():
                    result["region"] = task["credentials"]["attributes"]["region"]
            else:
                # Some legacy tasks have region=None, it's easier to fix it here than in migration
                result["region"] = task["attributes"].get("region") or "us-east-1"
        else:
            if task["credentials"]["attributes"].get("signatures_v2", False):
                result["region"] = "other-v2-signature"
            else:
                result["region"] = ""

        return result

    def get_restic_config(self, task):
        """Return (url, env) so restic can talk to this S3 endpoint."""
        url = task["credentials"]["attributes"].get("endpoint", "").rstrip("/")
        if not url:
            url = "s3.amazonaws.com"

        env = {
            "AWS_ACCESS_KEY_ID": task["credentials"]["attributes"]["access_key_id"],
            "AWS_SECRET_ACCESS_KEY": task["credentials"]["attributes"]["secret_access_key"],
        }

        return url, env
| 4,754 | Python | .py | 89 | 41.88764 | 120 | 0.594356 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Password, Str
class YandexRcloneRemote(BaseRcloneRemote):
    """Yandex Disk cloud-sync provider (rclone ``yandex`` backend)."""

    name = "YANDEX"
    title = "Yandex"

    fast_list = True

    rclone_type = "yandex"

    credentials_schema = [
        Str("client_id", title="OAuth Client ID", default=""),
        Password("client_secret", title="OAuth Client Secret", default=""),
        Str("token", title="Access Token", required=True, max_length=None),
    ]
    credentials_oauth = True
    refresh_credentials = ["token"]
| 549 | Python | .py | 14 | 33.857143 | 75 | 0.692453 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Password, Str
class BoxRcloneRemote(BaseRcloneRemote):
    """Box cloud-sync provider (rclone ``box`` backend)."""

    name = "BOX"
    title = "Box"

    rclone_type = "box"

    credentials_schema = [
        Str("client_id", title="OAuth Client ID", default=""),
        Password("client_secret", title="OAuth Client Secret", default=""),
        Password("token", title="Access Token", required=True, max_length=None),
    ]
    credentials_oauth = True
    refresh_credentials = ["token"]
| 520 | Python | .py | 13 | 34.692308 | 80 | 0.691849 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Int, Str
class FTPRcloneRemote(BaseRcloneRemote):
    """FTP cloud-sync provider (rclone ``ftp`` backend)."""

    name = "FTP"
    title = "FTP"

    rclone_type = "ftp"

    credentials_schema = [
        Str("host", title="Host", required=True),
        Int("port", title="Port"),
        Str("user", title="Username", required=True),
        Str("pass", title="Password", required=True),
    ]
| 423 | Python | .py | 12 | 29.583333 | 53 | 0.660934 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import textwrap
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Int, Password, Str
from middlewared.validators import Range
class DropboxRcloneRemote(BaseRcloneRemote):
    """Dropbox cloud-sync provider (rclone ``dropbox`` backend)."""

    name = "DROPBOX"
    title = "Dropbox"

    rclone_type = "dropbox"

    credentials_schema = [
        Str("client_id", title="OAuth Client ID", default=""),
        Password("client_secret", title="OAuth Client Secret", default=""),
        Password("token", title="Access Token", required=True, max_length=None),
    ]
    credentials_oauth = True

    task_schema = [
        # Typo fix in the user-facing description: "chnuk sizes" -> "chunk sizes".
        Int("chunk_size", title="Upload chunk size (in megabytes)", description=textwrap.dedent("""\
            Upload chunk size. Must fit in memory. Note that these chunks are buffered in memory and there might be a
            maximum of «--transfers» chunks in progress at once. Dropbox Business accounts can have monthly data
            transfer limits per team per month. By using larger chunk sizes you will decrease the number of data
            transfer calls used and you'll be able to transfer more data to your Dropbox Business account.
        """), default=48, validators=[Range(min_=5, max_=149)]),
    ]

    async def get_task_extra(self, task):
        # rclone expects a size suffix, e.g. "48M".
        return {"chunk_size": str(task["attributes"].get("chunk_size", 48)) + "M"}
| 1,335 | Python | .py | 24 | 48.75 | 117 | 0.695084 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Password, Str, Int
class OpenStackSwiftRcloneRemote(BaseRcloneRemote):
    """OpenStack Swift cloud-sync provider (rclone ``swift`` backend).

    The credential attributes mirror rclone's swift backend options, which in
    turn follow the standard OpenStack OS_* environment variables.
    """

    name = "OPENSTACK_SWIFT"
    title = "OpenStack Swift"

    buckets = True
    bucket_title = "Container"

    fast_list = True

    rclone_type = "swift"

    credentials_schema = [
        Str("user", required=True,
            title="User name (OS_USERNAME)"),
        Password("key", required=True,
                 title="API key or password (OS_PASSWORD)"),
        Str("auth", required=True,
            title="Authentication URL for server (OS_AUTH_URL)"),
        Str("user_id", default="",
            title="User ID to log in - most swift systems use user and leave "
                  "this blank (v3 auth) (OS_USER_ID)"),
        Str("domain", default="",
            title="User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)"),
        Str("tenant", default="",
            title="Tenant name - optional for v1 auth, this or tenant_id "
                  "required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)"),
        Str("tenant_id", default="",
            title="Tenant ID - optional for v1 auth, this or tenant required "
                  "otherwise (OS_TENANT_ID)"),
        Str("tenant_domain", default="",
            title="Tenant domain - optional (v3 auth) "
                  "(OS_PROJECT_DOMAIN_NAME)"),
        Str("region", default="",
            title="Region name (OS_REGION_NAME)"),
        Str("storage_url", default="",
            title="Storage URL (OS_STORAGE_URL)"),
        Password("auth_token", default="",
                 title="Auth Token from alternate authentication (OS_AUTH_TOKEN)"),
        Str("application_credential_id", default="",
            title="Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)"),
        Str("application_credential_name", default="",
            title="Application Credential Name "
                  "(OS_APPLICATION_CREDENTIAL_NAME)"),
        Password("application_credential_secret", default="",
                 title="Application Credential Secret "
                       "(OS_APPLICATION_CREDENTIAL_SECRET)"),
        Int("auth_version", enum=[0, 1, 2, 3],
            title="AuthVersion - set it if your auth URL has no version "
                  "(ST_AUTH_VERSION)"),
        Str("endpoint_type", enum=["public", "internal", "admin"],
            title="Endpoint type to choose from the service catalogue "
                  "(OS_ENDPOINT_TYPE)"),
    ]
| 2,500 | Python | .py | 51 | 38.176471 | 78 | 0.591077 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import textwrap
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Int, Str
from middlewared.validators import Range
class B2RcloneRemote(BaseRcloneRemote):
    """Backblaze B2 cloud-sync provider (rclone ``b2`` backend)."""

    name = "B2"
    title = "Backblaze B2"

    buckets = True

    fast_list = True

    rclone_type = "b2"

    credentials_schema = [
        Str("account", title="Account ID or Application Key ID", description=textwrap.dedent("""\
            Put your Account ID here and use your Master Application Key as Application Key or create separate
            Application Key and use it with its own Application Key ID
        """), required=True),
        Str("key", title="Application Key", required=True),
    ]

    task_schema = [
        Int("chunk_size", title="Upload chunk size (in megabytes)", description=textwrap.dedent("""\
            Upload chunk size. Must fit in memory. Note that these chunks are buffered in memory and there might be a
            maximum of «--transfers» chunks in progress at once. Also, your largest file must be split in no more
            than 10 000 chunks.
        """), default=96, validators=[Range(min_=5)]),
    ]

    async def get_task_extra(self, task):
        chunk_size = task["attributes"].get("chunk_size", 96)
        extra = {"chunk_size": f"{chunk_size}M"}
        # Keep the single-part cutoff at least as large as the chunk size
        # (rclone's default upload_cutoff is 200M).
        if chunk_size > 200:
            extra["upload_cutoff"] = f"{chunk_size}M"
        return extra

    async def get_task_extra_args(self, task):
        chunk_size = task["attributes"].get("chunk_size", 96)
        # Avoid multi-threaded downloads splitting below the chunk size
        # (rclone's default multi-thread cutoff is 256M).
        if chunk_size > 128:
            return [f"--multi-thread-cutoff={chunk_size * 2 + 1}M"]

        return []
| 1,639 | Python | .py | 35 | 39.057143 | 117 | 0.651163 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import json
import textwrap
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Bool, Str
class GoogleCloudStorageRcloneRemote(BaseRcloneRemote):
    """Google Cloud Storage provider (rclone ``google cloud storage`` backend)."""

    name = "GOOGLE_CLOUD_STORAGE"
    title = "Google Cloud Storage"

    buckets = True

    fast_list = True

    rclone_type = "google cloud storage"

    credentials_schema = [
        Str("service_account_credentials", title="Service Account", required=True, max_length=None),
    ]

    task_schema = [
        Bool("bucket_policy_only", title="Bucket Policy Only", description=textwrap.dedent("""\
            Access checks should use bucket-level IAM policies.
            If you want to upload objects to a bucket with Bucket Policy Only set then you will need to set this.
        """), default=False),
    ]

    async def get_credentials_extra(self, credentials):
        # rclone wants the JSON on a single line; the project number is
        # extracted from the service-account JSON itself.
        return dict(
            service_account_credentials=(credentials["attributes"]["service_account_credentials"].
                                         replace("\r", "").
                                         replace("\n", "")),
            project_number=json.loads(credentials["attributes"]["service_account_credentials"])["project_id"],
        )
| 1,219 | Python | .py | 26 | 37.461538 | 113 | 0.648649 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Str
class WebDavRcloneRemote(BaseRcloneRemote):
    """WebDAV cloud-sync provider (rclone ``webdav`` backend)."""

    name = "WEBDAV"
    title = "WebDAV"

    rclone_type = "webdav"

    credentials_schema = [
        Str("url", title="URL", required=True),
        Str("vendor", title="Name of the WebDAV site/service/software",
            enum=["NEXTCLOUD", "OWNCLOUD", "SHAREPOINT", "OTHER"], required=True),
        Str("user", title="Username", required=True),
        Str("pass", title="Password", required=True),
    ]

    async def get_task_extra(self, task):
        # rclone expects the vendor name in lowercase.
        return dict(vendor=task["credentials"]["attributes"]["vendor"].lower())
| 671 | Python | .py | 15 | 38.333333 | 82 | 0.669739 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
import errno
import io
import xml.etree.ElementTree as ET
from aws_requests_auth.aws_auth import AWSRequestsAuth
import boto3
import botocore
import requests
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Password, Str
from middlewared.service_exception import CallError
from middlewared.utils.network import INTERNET_TIMEOUT
class StorjIxError(CallError):
    """Storj-specific error (e.g. invalid bucket name) surfaced to the UI."""
    pass
class StorjIxRcloneRemote(BaseRcloneRemote):
    """Storj iX cloud-sync provider, using Storj's S3-compatible gateway
    (rclone ``s3`` backend with a fixed endpoint)."""

    name = "STORJ_IX"
    title = "Storj iX"

    buckets = True
    can_create_bucket = True
    custom_list_buckets = True

    fast_list = True

    rclone_type = "s3"

    credentials_schema = [
        Str("access_key_id", title="Access Key ID", required=True),
        Password("secret_access_key", title="Secret Access Key", required=True),
    ]

    task_schema = []

    async def create_bucket(self, credentials, name):
        """Create bucket *name* via the Storj S3 gateway.

        Raises StorjIxError/CallError with a user-friendly message for
        invalid or duplicate bucket names.
        """
        def create_bucket_sync():
            s3_client = boto3.client(
                "s3",
                # user_agent tags buckets as created by TrueNAS ("ix-storj-1").
                config=botocore.config.Config(user_agent="ix-storj-1"),
                endpoint_url="https://gateway.storjshare.io",
                aws_access_key_id=credentials["attributes"]["access_key_id"],
                aws_secret_access_key=credentials["attributes"]["secret_access_key"],
            )
            # s3 bucket naming rules: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
            try:
                s3_client.create_bucket(Bucket=name)
            except s3_client.exceptions.BucketAlreadyExists as e:
                raise CallError(str(e), errno=errno.EEXIST)
            except botocore.exceptions.ParamValidationError as e:
                raise StorjIxError("The bucket name can only contain lowercase letters, numbers, and hyphens.",
                                   errno.EINVAL, str(e))
            except botocore.exceptions.ClientError as e:
                if "InvalidBucketName" in e.args[0]:
                    raise StorjIxError("The bucket name must be between 3-63 characters in length and cannot contain "
                                       "uppercase.", errno.EINVAL, str(e))
                raise

        return await self.middleware.run_in_thread(create_bucket_sync)

    async def list_buckets(self, credentials):
        """List buckets with Storj's ?attribution extension, marking buckets
        created through TrueNAS (attribution "ix-storj-1") as enabled."""
        def list_buckets_sync():
            auth = AWSRequestsAuth(aws_access_key=credentials["attributes"]["access_key_id"],
                                   aws_secret_access_key=credentials["attributes"]["secret_access_key"],
                                   aws_host="gateway.storjshare.io",
                                   aws_region="",
                                   aws_service="s3")

            r = requests.get("https://gateway.storjshare.io/?attribution", auth=auth, timeout=INTERNET_TIMEOUT)
            r.raise_for_status()

            # XML namespace of the S3 ListAllMyBuckets response.
            ns = "{http://s3.amazonaws.com/doc/2006-03-01/}"
            return [
                {
                    "name": bucket.find(f"{ns}Name").text,
                    "time": bucket.find(f"{ns}CreationDate").text,
                    "enabled": "ix-storj-1" in (bucket.find(f"{ns}Attribution").text or ""),
                }
                for bucket in ET.parse(io.StringIO(r.text)).iter(f"{ns}Bucket")
            ]

        return await self.middleware.run_in_thread(list_buckets_sync)

    async def get_credentials_extra(self, credentials):
        return {"endpoint": "https://gateway.storjshare.io"}

    async def get_task_extra(self, task):
        # Storj recommended these settings
        return {
            "chunk_size": "64M",
            "disable_http2": "true",
            "upload_cutoff": "64M",
            "provider": "Other",
        }

    def get_restic_config(self, task):
        """Return (url, env) so restic can talk to the Storj S3 gateway."""
        url = "gateway.storjshare.io"

        env = {
            "AWS_ACCESS_KEY_ID": task["credentials"]["attributes"]["access_key_id"],
            "AWS_SECRET_ACCESS_KEY": task["credentials"]["attributes"]["secret_access_key"],
        }

        return url, env
| 4,014 | Python | .py | 85 | 35.305882 | 118 | 0.599181 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Str
class HTTPRcloneRemote(BaseRcloneRemote):
    """Plain HTTP provider (rclone ``http`` backend); download-only."""

    name = "HTTP"
    title = "HTTP"

    readonly = True

    rclone_type = "http"

    credentials_schema = [
        Str("url", title="URL", required=True),
    ]
| 298 | Python | .py | 10 | 25.1 | 52 | 0.706714 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
from middlewared.rclone.base import BaseRcloneRemote
from middlewared.schema import Password, Str
class PcloudRcloneRemote(BaseRcloneRemote):
    """rclone remote for the pCloud storage provider (OAuth-authenticated)."""

    name = "PCLOUD"
    title = "pCloud"
    rclone_type = "pcloud"

    # OAuth-based credentials; the "token" attribute is what gets
    # re-obtained when credentials are refreshed.
    credentials_oauth = True
    credentials_oauth_name = "pcloud2"
    refresh_credentials = ["token"]

    credentials_schema = [
        Str("client_id", title="OAuth Client ID", default=""),
        Password("client_secret", title="OAuth Client Secret", default=""),
        Password("token", title="Access Token", required=True, max_length=None),
        Str("hostname", title="API hostname"),
    ]
| 618 | Python | .py | 15 | 35.666667 | 80 | 0.691152 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the TrueNAS documentation build.
# Only the commonly-tuned options appear here; the full list is at
# http://www.sphinx-doc.org/en/master/config

# -- Path setup ----------------------------------------------------------
# Make the middlewared sources importable so sphinx.ext.autodoc can load
# the modules it documents.
import os
import sys

sys.path.insert(0, os.path.abspath("../../src/middlewared"))

# -- Project information -------------------------------------------------
project = "TrueNAS"
copyright = "2021, iXsystems"
author = "iXsystems"

# Short X.Y version (unused) and the full release tag.
version = ""
release = "22.02"

# -- General configuration -----------------------------------------------
# Sphinx extension modules enabled for this build.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.viewcode",
    "sphinx-prompt",
    "sphinx_substitution_extensions",
]

# Paths (relative to this directory) containing templates.
templates_path = ["_templates"]

# Suffix(es) of source filenames.
source_suffix = ".rst"

# Document holding the root toctree.
master_doc = "index"

# Language for content autogenerated by Sphinx (also used for gettext
# catalogs when doing content translation).
language = "en"

# Patterns, relative to the source directory, to ignore when looking for
# source files; also affects html_static_path and html_extra_path.
exclude_patterns = []

# Pygments (syntax highlighting) style; None selects the theme default.
pygments_style = None

# Prepended to every source file: defines the |version| substitution.
rst_prolog = f".. |version| replace:: {release}\n"

# Text in single backticks renders as inline code by default.
default_role = "code"

# -- Options for HTML output ---------------------------------------------
html_theme = "sphinx_rtd_theme"

# Custom static files (copied after the theme's builtin static files, so
# e.g. a "default.css" here overrides the builtin one).
html_static_path = ["_static"]

# -- Options for HTMLHelp output -----------------------------------------
# Output file base name for the HTML help builder.
htmlhelp_basename = "TrueNASdoc"

# -- Options for LaTeX output --------------------------------------------
# All LaTeX knobs (papersize, pointsize, preamble, figure_align) are left
# at their defaults.
latex_elements = {}

# Grouping of the document tree into LaTeX files:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, "TrueNAS.tex", "TrueNAS Documentation",
     "iXsystems", "manual"),
]

# -- Options for manual page output --------------------------------------
# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "truenas", "TrueNAS Documentation",
     [author], 1)
]

# -- Options for Texinfo output ------------------------------------------
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, "TrueNAS", "TrueNAS Documentation",
     author, "TrueNAS", "One line description of project.",
     "Miscellaneous"),
]

# -- Options for Epub output ---------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project

# Files that should not be packed into the epub.
epub_exclude_files = ["search.html"]
| 5,415 | Python | .py | 137 | 37.445255 | 79 | 0.663671 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |