id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24,400 | test_element_types.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/enclosure/test_element_types.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
import pytest
from middlewared.plugins.enclosure_ import element_types
@pytest.mark.parametrize('data', [
    (0x800000, 'Identify on'),
    (0x400000, 'Fail on'),
    (0x000080, 'RQST mute'),
    (0x000040, 'Muted'),
    (0x000010, 'Remind'),
    (0x000008, 'INFO'),
    (0x000004, 'NON-CRIT'),
    (0x000002, 'CRIT'),
    (0x000001, 'UNRECOV'),
    (0x000000, None),
    (0xf000ff, 'Identify on, Fail on, RQST mute, Muted, Remind, INFO, NON-CRIT, CRIT, UNRECOV')
])
def test_alarm(data):
    """Each alarm status bit (alone and all together) must decode to its label; 0 yields None."""
    raw, expected = data
    assert element_types.alarm(raw) == expected
@pytest.mark.parametrize('data', [
    (0x000000, None),
    (0x800000, 'Identify on'),
    (0x400000, 'Fail on'),
    (0x000001, 'Disabled'),
    (0xc00001, 'Identify on, Fail on, Disabled'),
])
def test_comm(data):
    """comm() must decode individual status bits and combined bitmasks; 0 yields None."""
    raw, expected = data
    assert element_types.comm(raw) == expected
@pytest.mark.parametrize('data', [
    # zero-amp readings with various status bits
    (0x000000, '0.0A'),
    (0x800000, '0.0A, Identify on'),
    (0x400000, '0.0A, Fail on'),
    (0x070000, '0.0A, Crit over'),
    (0x080000, '0.0A, Warn over'),
    # readings around 2.5A
    (0x0000fa, '2.5A'),
    (0x08010a, '2.66A, Warn over'),
    (0x070110, '2.72A, Crit over'),
    # readings around 5.0A
    (0x0001f4, '5.0A'),
    (0x080210, '5.28A, Warn over'),
    (0x070220, '5.44A, Crit over'),
    # readings around 12.0A
    (0x0004b0, '12.0A'),
    (0x0804d0, '12.32A, Warn over'),
    (0x0704f0, '12.64A, Crit over'),
])
def test_current(data):
    """current() must render the amperage from the low bytes plus any warn/crit flags."""
    raw, expected = data
    assert element_types.current(raw) == expected
@pytest.mark.parametrize('data', [
    (0x800000, 'Identify on'),
    (0x000100, 'Warn on'),
    (0x000200, 'Fail on'),
    (0x000300, 'Fail on, Warn on'),
    (0x800100, 'Identify on, Warn on'),
    (0x800200, 'Identify on, Fail on'),
    (0x800300, 'Identify on, Fail on, Warn on'),
    (0x000400, 'Power cycle 1min, power off until manually restored'),
    (0x000500, 'Warn on, Power cycle 1min, power off until manually restored'),
    (0x000600, 'Fail on, Power cycle 1min, power off until manually restored'),
    (0x800400, 'Identify on, Power cycle 1min, power off until manually restored'),
    (0x0004f0, 'Power cycle 1min, power off for 60min'),
])
def test_enclosure(data):
    """enclosure() must decode identify/warn/fail bits and the power-cycle fields."""
    raw, expected = data
    assert element_types.enclosure(raw) == expected
@pytest.mark.parametrize('data', [
    # zero-volt readings with various status bits
    (0x100000, '0.0V'),
    (0x110000, '0.0V, Crit under'),
    (0x410000, '0.0V, Fail on, Crit under'),
    (0x810000, '0.0V, Identify on, Crit under'),
    (0xf10000, '0.0V, Identify on, Fail on, Crit under'),
    (0x220000, '0.0V, Crit over'),
    (0x520000, '0.0V, Fail on, Crit over'),
    (0x820000, '0.0V, Identify on, Crit over'),
    (0xf20000, '0.0V, Identify on, Fail on, Crit over'),
    # readings around 2.5V
    (0x0100f0, '2.4V, Crit under'),
    (0x0000fa, '2.5V'),
    (0x020110, '2.72V, Crit over'),
    # readings around 5.0V
    (0x0101e4, '4.84V, Crit under'),
    (0x0001f4, '5.0V'),
    (0x020210, '5.28V, Crit over'),
    # readings around 12.0V
    (0x0104a0, '11.84V, Crit under'),
    (0x0004b0, '12.0V'),
    (0x0204d0, '12.32V, Crit over'),
])
def test_volt(data):
    """volt() must render the voltage from the low bytes plus any status flags."""
    raw, expected = data
    assert element_types.volt(raw) == expected
@pytest.mark.parametrize('data', [
    (0x000000, '0 RPM'),
    (0x001000, '160 RPM'),
    (0x010000, '2560 RPM'),
    (0x6f0000, '17920 RPM'),
    # all value bits set: verify the field mask clamps correctly
    (0xFFFF00, '20470 RPM'),
])
def test_cooling(data):
    """cooling() must scale the raw fan-speed field into an RPM string."""
    raw, expected = data
    assert element_types.cooling(raw) == expected
@pytest.mark.parametrize('data', [
    (0x0000, None),
    # minimum representable temperature
    (0x0100, '-19C'),
    # an arbitrary mid-range temperature
    (0x8000, '108C'),
    # maximum representable temperature
    (0xFF00, '235C'),
    # extra low-order bits must not affect the result
    (0xFFFF, '235C')
])
def test_temp(data):
    """temp() must convert the raw byte to Celsius (offset encoding) and ignore stray bits."""
    raw, expected = data
    assert element_types.temp(raw) == expected
@pytest.mark.parametrize('data', [
    # Test each individual flag
    (0x800000, 'Identify on'),
    (0x400000, 'Do not remove'),
    (0x80, 'Hot swap'),
    (0x40, 'Fail on'),
    (0x20, 'RQST on'),
    (0x10, 'Off'),
    (0x800, 'DC overvoltage'),
    (0x400, 'DC undervoltage'),
    (0x200, 'DC overcurrent'),
    (0x8, 'Overtemp fail'),
    (0x4, 'Overtemp warn'),
    (0x2, 'AC fail'),
    (0x1, 'DC fail'),
    # Test with no flags set
    (0x000000, None),
    # Test some combinations
    (0x800400, 'Identify on, DC undervoltage'),
    (0x40C, 'DC undervoltage, Overtemp fail, Overtemp warn'),
])
def test_psu(data):
    """psu() must decode each power-supply status flag and flag combinations.

    Fix: removed a duplicated ``(0x10, 'Off')`` parametrize entry that ran the
    identical case twice.
    """
    value_raw, expected_result = data
    assert element_types.psu(value_raw) == expected_result
@pytest.mark.parametrize('data', [
    # each individual flag
    (0x200, 'Identify on'),
    (0x20, 'Fault on'),
    # no flags set
    (0x0, None),
    # both flags together
    (0x220, 'Identify on, Fault on')
])
def test_array_dev(data):
    """array_dev() must decode identify/fault bits; 0 yields None."""
    raw, expected = data
    assert element_types.array_dev(raw) == expected
@pytest.mark.parametrize('data', [
    # named connector codes (connector type lives in the third byte)
    (0x000000, 'No information'),
    (0x010000, 'SAS 4x receptacle (SFF-8470) [max 4 phys]'),
    (0x020000, 'Mini SAS 4x receptacle (SFF-8088) [max 4 phys]'),
    (0x030000, 'QSFP+ receptacle (SFF-8436) [max 4 phys]'),
    (0x040000, 'Mini SAS 4x active receptacle (SFF-8088) [max 4 phys]'),
    (0x050000, 'Mini SAS HD 4x receptacle (SFF-8644) [max 4 phys]'),
    (0x060000, 'Mini SAS HD 8x receptacle (SFF-8644) [max 8 phys]'),
    (0x070000, 'Mini SAS HD 16x receptacle (SFF-8644) [max 16 phys]'),
    # 0x8-0xe have no assigned external connector meaning
    *((code << 16, f'unknown external connector type: {code:#x}') for code in range(0x08, 0x0f)),
    (0x0f0000, 'Vendor specific external connector'),
    (0x100000, 'SAS 4i plug (SFF-8484) [max 4 phys]'),
    (0x110000, 'Mini SAS 4i receptacle (SFF-8087) [max 4 phys]'),
    (0x120000, 'Mini SAS HD 4i receptacle (SFF-8643) [max 4 phys]'),
    (0x130000, 'Mini SAS HD 8i receptacle (SFF-8643) [max 8 phys]'),
    (0x140000, 'Mini SAS HD 16i receptacle (SFF-8643) [max 16 phys]'),
    (0x150000, 'SlimSAS 4i (SFF-8654) [max 4 phys]'),
    (0x160000, 'SlimSAS 8i (SFF-8654) [max 8 phys]'),
    (0x170000, 'SAS MiniLink 4i (SFF-8612) [max 4 phys]'),
    (0x180000, 'SAS MiniLink 8i (SFF-8612) [max 8 phys]'),
    (0x190000, 'unknown internal wide connector type: 0x19'),
    (0x200000, 'SAS Drive backplane receptacle (SFF-8482) [max 2 phys]'),
    (0x210000, 'SATA host plug [max 1 phy]'),
    (0x220000, 'SAS Drive plug (SFF-8482) [max 2 phys]'),
    (0x230000, 'SATA device plug [max 1 phy]'),
    (0x240000, 'Micro SAS receptacle [max 2 phys]'),
    (0x250000, 'Micro SATA device plug [max 1 phy]'),
    (0x260000, 'Micro SAS plug (SFF-8486) [max 2 phys]'),
    (0x270000, 'Micro SAS/SATA plug (SFF-8486) [max 2 phys]'),
    (0x280000, '12 Gbit/s SAS Drive backplane receptacle (SFF-8680) [max 2 phys]'),
    (0x290000, '12 Gbit/s SAS Drive Plug (SFF-8680) [max 2 phys]'),
    (0x2a0000, 'Multifunction 12 Gbit/s 6x Unshielded receptacle connector receptacle (SFF-8639) [max 6 phys]'),
    (0x2b0000, 'Multifunction 12 Gbit/s 6x Unshielded receptacle connector plug (SFF-8639) [max 6 phys]'),
    (0x2c0000, 'SAS Multilink Drive backplane receptacle (SFF-8630) [max 4 phys]'),
    (0x2d0000, 'SAS Multilink Drive backplane plug (SFF-8630) [max 4 phys]'),
    (0x2e0000, 'unknown internal connector to end device type: 0x2e'),
    (0x2f0000, 'SAS virtual connector [max 1 phy]'),
    # 0x30-0x3e are reserved internal connector codes
    *((code << 16, f'reserved for internal connector type: {code:#x}') for code in range(0x30, 0x3f)),
    (0x3f0000, 'Vendor specific internal connector'),
    (0x400000, 'SAS High Density Drive backplane receptacle (SFF-8631) [max 8 phys]'),
    (0x410000, 'SAS High Density Drive backplane plug (SFF-8631) [max 8 phys]'),
    # 0x42-0x6f are reserved, 0x70-0x7f are vendor specific
    *((code << 16, f'reserved connector type: {code:#x}') for code in range(0x42, 0x70)),
    *((code << 16, f'vendor specific connector type: {code:#x}') for code in range(0x70, 0x80)),
    # out-of-bounds connector type falls back to 'No information'
    (0x800000, 'No information'),
    # connector type combined with the fail bit
    (0x220040, 'SAS Drive plug (SFF-8482) [max 2 phys], Fail on'),
    # out-of-bounds connector type combined with the fail bit
    (0x800040, 'No information, Fail on')
])
def test_sas_conn(data):
    """sas_conn() must map every connector-type code (plus status bits) to its description."""
    raw, expected = data
    assert element_types.sas_conn(raw) == expected
@pytest.mark.parametrize('data', [
    (0x0000, None),
    (0x800000, 'Identify on'),
    (0x400000, 'Fail on'),
    (0xC00000, 'Identify on, Fail on')
])
def test_sas_exp(data):
    """sas_exp() must decode identify/fail bits individually and combined; 0 yields None."""
    raw, expected = data
    assert element_types.sas_exp(raw) == expected
| 13,014 | Python | .py | 302 | 38.529801 | 112 | 0.66953 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,401 | san.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/san.py | import pytest
from middlewared.plugins.crypto_.generate_utils import normalize_san
@pytest.mark.parametrize("reference,expected_results", [
    # untyped entries: type is inferred (hostname -> DNS, address -> IP)
    (['truenas.domain', '192.168.0.10'], [['DNS', 'truenas.domain'], ['IP', '192.168.0.10']]),
    # mixed typed/untyped entries
    (['DNS:truenas.domain', '192.168.0.10'], [['DNS', 'truenas.domain'], ['IP', '192.168.0.10']]),
    # fully typed entries
    (['DNS:truenas.domain', 'IP:192.168.0.10'], [['DNS', 'truenas.domain'], ['IP', '192.168.0.10']]),
])
def test_normalize_san(reference, expected_results):
    """normalize_san() must turn each SAN entry into a [type, value] pair."""
    assert normalize_san(reference) == expected_results
| 551 | Python | .py | 9 | 58.111111 | 101 | 0.660482 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,402 | extensions.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/extensions.py | import pytest
from middlewared.plugins.crypto_.generate_ca import generate_certificate_authority
from middlewared.plugins.crypto_.load_utils import load_certificate
from middlewared.plugins.crypto_.utils import DEFAULT_LIFETIME_DAYS
def _ca_params(cert_extensions):
    """Build the standard CA-generation payload, varying only the extension config."""
    return {
        'key_type': 'RSA',
        'key_length': 4096,
        'san': ['domain1', '8.8.8.8'],
        'common': 'dev',
        'country': 'US',
        'state': 'TN',
        'city': 'Knoxville',
        'organization': 'iX',
        'organizational_unit': 'dev',
        'email': 'dev@ix.com',
        'digest_algorithm': 'SHA256',
        'lifetime': DEFAULT_LIFETIME_DAYS,
        'serial': 12931,
        'ca_certificate': None,
        'cert_extensions': cert_extensions,
    }


@pytest.mark.parametrize('generate_params,extension_info', [
    (
        _ca_params({
            'BasicConstraints': {
                'enabled': True,
                'ca': True,
                'extension_critical': True,
            },
        }),
        {'BasicConstraints': 'CA:TRUE'},
    ),
    (
        _ca_params({
            'KeyUsage': {
                'enabled': True,
                'key_cert_sign': True,
                'crl_sign': True,
                'extension_critical': True,
            }
        }),
        {'KeyUsage': 'Certificate Sign, CRL Sign'},
    ),
    (
        _ca_params({
            'KeyUsage': {
                'enabled': True,
                'key_cert_sign': True,
                'crl_sign': False,
                'extension_critical': True,
            }
        }),
        {'KeyUsage': 'Certificate Sign'},
    ),
    (
        _ca_params({
            'ExtendedKeyUsage': {
                'enabled': True,
                'usages': [
                    'ANY_EXTENDED_KEY_USAGE', 'CLIENT_AUTH', 'CODE_SIGNING', 'EMAIL_PROTECTION',
                    'OCSP_SIGNING', 'SERVER_AUTH', 'TIME_STAMPING'
                ],
            },
        }),
        {
            'ExtendedKeyUsage': 'Any Extended Key Usage, TLS Web Client Authentication, '
                                'Code Signing, E-mail Protection, OCSP Signing, TLS Web Server '
                                'Authentication, Time Stamping',
        },
    ),
    (
        _ca_params({
            'KeyUsage': {
                'enabled': True,
                'digital_signature': True,
                'content_commitment': True,
                'key_encipherment': True,
                'data_encipherment': True,
                'key_agreement': True,
            },
        }),
        {
            'KeyUsage': 'Digital Signature, Non Repudiation, Key Encipherment, Data Encipherment, Key Agreement',
        },
    ),
])
def test__generating_ca(generate_params, extension_info):
    """A generated CA must carry every requested extension with the expected rendered value."""
    ca_pem = generate_certificate_authority(generate_params)[0]
    extensions = load_certificate(ca_pem, True)['extensions']
    for name in extension_info:
        assert name in extensions, extensions
        assert extensions[name] == extension_info[name]
| 5,225 | Python | .py | 154 | 20.857143 | 113 | 0.441002 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,403 | csr.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/csr.py | import pytest
from cryptography.hazmat.primitives.asymmetric import rsa
from middlewared.plugins.crypto_.csr import generate_certificate_signing_request
from middlewared.plugins.crypto_.load_utils import load_certificate_request, load_private_key
@pytest.mark.parametrize('generate_params,key_type,key_size,csr_info', [
    (
        {
            'key_type': 'RSA',
            'key_length': 4096,
            'san': ['domain1', '8.8.8.8'],
            'common': 'dev',
            'country': 'US',
            'state': 'TN',
            'city': 'Knoxville',
            'organization': 'iX',
            'organizational_unit': 'dev',
            'email': 'dev@ix.com',
            'digest_algorithm': 'SHA256',
        },
        rsa.RSAPrivateKey, 4096,
        {
            'DN': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=dev@ix.com/subjectAltName='
                  'DNS:domain1, IP Address:8.8.8.8',
            'city': 'Knoxville',
            'common': 'dev',
            'country': 'US',
            'email': 'dev@ix.com',
            'extensions': {'SubjectAltName': 'DNS:domain1, IP Address:8.8.8.8'},
            'organization': 'iX',
            'organizational_unit': 'dev',
            'san': ['DNS:domain1', 'IP Address:8.8.8.8'],
            'state': 'TN',
            'subject_name_hash': None,
        }
    ),
    (
        {
            'key_type': 'RSA',
            'key_length': 2048,
            'san': ['domain2', '9.9.9.9'],
            'common': 'dev2',
            'country': 'US',
            'state': 'TN',
            'city': 'Newyork',
            'organization': 'iX-devs',
            'organizational_unit': 'dev-dept',
            'email': 'info@ix.com',
            'digest_algorithm': 'SHA256',
        },
        rsa.RSAPrivateKey, 2048,
        {
            'DN': '/CN=dev2/C=US/ST=TN/L=Newyork/O=iX-devs/OU=dev-dept/emailAddress=info@ix.com/'
                  'subjectAltName=DNS:domain2, IP Address:9.9.9.9',
            'city': 'Newyork',
            'common': 'dev2',
            'country': 'US',
            'email': 'info@ix.com',
            'extensions': {'SubjectAltName': 'DNS:domain2, IP Address:9.9.9.9'},
            'organization': 'iX-devs',
            'organizational_unit': 'dev-dept',
            'san': ['DNS:domain2', 'IP Address:9.9.9.9'],
            'state': 'TN',
            'subject_name_hash': None
        }
    ),
])
def test_generating_private_key(generate_params, key_type, key_size, csr_info):
    """CSR generation must yield the expected subject data and a matching private key.

    Fixes: the body hard-coded ``rsa.RSAPrivateKey`` instead of asserting
    against the parametrized ``key_type`` (which was otherwise unused), and
    compared ``isinstance`` against ``is True`` — ``isinstance`` already
    returns a bool.
    """
    csr, key = generate_certificate_signing_request(generate_params)
    csr_details = load_certificate_request(csr)
    key_obj = load_private_key(key)
    assert csr_details == csr_info, csr_details
    assert isinstance(key_obj, key_type)
    assert key_obj.key_size == key_size
| 2,789 | Python | .py | 73 | 27.780822 | 102 | 0.527286 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,404 | self_signed_cert.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/self_signed_cert.py | from cryptography.hazmat.primitives.asymmetric import rsa
from middlewared.plugins.crypto_.generate_self_signed import DEFAULT_LIFETIME_DAYS, generate_self_signed_certificate
from middlewared.plugins.crypto_.load_utils import load_certificate, load_private_key
# Expected metadata of the certificate produced by
# generate_self_signed_certificate() with its built-in defaults; each key is
# compared against load_certificate() output in the test below.
SELF_SIGNED_CERT = {
    'country': 'US',
    'state': 'Tennessee',
    'city': 'Maryville',
    'organization': 'iXsystems',
    'organizational_unit': None,
    'common': 'localhost',
    'san': ['DNS:localhost'],
    'email': 'info@ixsystems.com',
    'DN': '/C=US/O=iXsystems/CN=localhost/emailAddress=info@ixsystems.com/ST=Tennessee/'
          'L=Maryville/subjectAltName=DNS:localhost',
    'extensions': {'SubjectAltName': 'DNS:localhost', 'ExtendedKeyUsage': 'TLS Web Server Authentication'},
    'digest_algorithm': 'SHA256',
    'lifetime': DEFAULT_LIFETIME_DAYS,
    'chain': False,
}
def test__generating_self_signed_cert():
    """Default self-signed certificate must match SELF_SIGNED_CERT field-for-field."""
    cert, key = generate_self_signed_certificate()
    parsed = load_certificate(cert)
    key_obj = load_private_key(key)
    for field, expected in SELF_SIGNED_CERT.items():
        assert field in parsed, parsed
        assert expected == parsed[field], parsed
    assert isinstance(key_obj, rsa.RSAPrivateKey), f'Private key has different type {key!r}'
| 1,251 | Python | .py | 27 | 41.481481 | 116 | 0.710181 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,405 | private_key.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/private_key.py | import pytest
import textwrap
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from middlewared.plugins.crypto_.key_utils import load_private_key, generate_private_key
@pytest.mark.parametrize('generate_params,expected_type,key_size', [
    # empty params: documented defaults are RSA / 2048
    ({}, rsa.RSAPrivateKey, 2048),
    # EC keys default to a 384-bit curve
    ({'type': 'EC'}, ec.EllipticCurvePrivateKey, 384),
    (
        {
            'type': 'RSA',
            'key_length': 4096,
        },
        rsa.RSAPrivateKey,
        4096
    ),
])
def test_generating_private_key(generate_params, expected_type, key_size):
    """generate_private_key() must honor the requested type/length options.

    Fix: dropped the redundant ``is True`` comparison on ``isinstance`` —
    it already returns a bool (PEP 8 discourages comparing to True).
    """
    key = generate_private_key(generate_params)
    assert isinstance(key, expected_type)
    assert key.key_size == key_size
@pytest.mark.parametrize('key_str,expected_type,key_size', [
    # PEM-encoded EC private key (384-bit curve)
    (
        textwrap.dedent('''\
        -----BEGIN PRIVATE KEY-----
        MIG6AgEAMBQGByqGSM49AgEGCSskAwMCCAEBCwSBnjCBmwIBAQQwYTYZ6gXVzx6X
        epQm03qt1oBNJcdy+NN7EslikEJoNDVUWciJRwf39zj/6Z6Ak/vqoWQDYgAEBfty
        8bW+Q7uKykK+5PfGZbimKgcvgNg8JlwJoCWLarO3ApFsq97Ea9jTWfaiCBorSs/R
        fMBj/3QF+zpTv7Djcxmou+PuSs9B2JclOm2ycPbDFRvQ9bNfGjlABNMB42lV
        -----END PRIVATE KEY-----
        '''),
        ec.EllipticCurvePrivateKey, 384
    ),
    # PEM-encoded RSA private key (2048-bit)
    (
        textwrap.dedent('''\
        -----BEGIN PRIVATE KEY-----
        MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDVMPccUqq6jd8h
        h0ybrwRkvK+pvOJze00IK7F6A8RRyCwDL2Yc0GpWR5ecY+jBiZ1n+TfKfaybdKR0
        0hhFFuU74JTsUk298hI1GVBNvwbimgraQciWjg0wDjHAN7AFZL8Jb/Tn7/DZlmn+
        TgqdPaFIeD4XnLX6zwrc4VemKYDDcdr5JyDVCt3ZtqTEbbtxQ4WvZbtCxlzlkyJu
        xwdmGyCvjkQri55+FaejvnPCUzJSOK28jShBuZCIS3lR7HCcAS4cc05TTrWSZr+i
        brLISVEz1XASc0pKz8QGMuz5Hk5uNRLl4JGmWZrSV9lqtFYP9hatpLi5mnhWpgYi
        Q0IXvNUXAgMBAAECggEAdbgf+0e6dmC4gO8Q4jZ2GpoF9ZgTAulm08gsq89ArFf3
        1ZpqrCZ5UUMe+IBCmfu/KxZ2NB3JHd3+oXMRa7UEx1dvZD7eJrBwVVmw+f0tdBrT
        O0lv1ZKCvbJYzmbxj0jeI/vqI9heCggAZyf4vHK3iCi9QJSL9/4zZVwY5eus6j4G
        RCMXW8ZqiKX3GLtCjPmZilYQHNDbsfAbqy75AsG81fgaKkYkJS29rte9R34BajZs
        OFm+y6nIe6zsf0vhn/yPVN4Yhuu/WhkvqouR2NhSF7ulXckuR/ef55GPpbRcpSOj
        VUkwJL3wsHPozvmcks/TnZbqj0u7XBGjZ2VK8sF+gQKBgQDsJGMeeaua5pOITVHk
        reHaxy4tLs1+98++L9SffBbsQcCu4OdgMBizCXuUw9bHlMx19B/B56cJst239li3
        dHfC/mF4/8em5XOx97FyC0rF02qYCPXViTrTSovSEWHuM/ChmhaRlZdp5F4EBMp7
        ELdf4OBCHGz47UCLQF75/FPtJwKBgQDnHn9HuFepY+yV1sNcPKj1GfciaseKzTk1
        Iw5VVtqyS2p8vdXNUiJmaF0245S3phRBL6PDhdfd3SwMmNYvhTYsqBc6ZRHO4b9J
        SjmHct63286NuEn0piYaa3MZ8sV/xI0a5leAdkzyqPTCcn0HlvDL0HTV34umdmfj
        kqC4jsWukQKBgC48cavl5tPNkdV+TiqYYUCU/1WZdGMH4oU6mEch5NsdhLy5DJSo
        1i04DhpyvfsWB3KQ+ibdVLdxbjg24+gHxetII42th0oGY0DVXskVrO5PFu/t0TSe
        SgZU8kuPW71oLhV2NjULNTpmnIHs7jhqbX04arCHIE8dJSYe1HneDhDBAoGBALTk
        4txgxYQYaNFykd/8voVwuETg7KOQM0mK0aor2+qXKpbOAqy8r54V63eNsxX20H2g
        6v2bIbVOai7F5Ua2bguP2PZkqwaRHKYhiVuhpf6j9UxpRMFO1h3xodpacQiq74Jx
        bWVnspxvb3tOHtw04O21j+ziFizJGlE9r7wkS0dxAoGAeq/Ecb+nJp/Ce4h5US1O
        4rruiLLYMkcFGmhSMcQ+lVbGOn4eSpqrGWn888Db2oiu7mv+u0TK9ViXwHkfp4FP
        Hnm0S8e25py1Lj+bk1tH0ku1I8qcAtihYBtSwPGj+66Qyr8KOlxZP2Scvcqu+zBc
        cyhsrrlRc3Gky9L5gtdxdeo=
        -----END PRIVATE KEY-----
        '''),
        rsa.RSAPrivateKey, 2048
    ),
])
def test_loading_private_key(key_str, expected_type, key_size):
    """load_private_key() must parse PEM material into the right key type and size.

    Fix: dropped the redundant ``is True`` comparison on ``isinstance`` —
    it already returns a bool (PEP 8 discourages comparing to True).
    """
    key = load_private_key(key_str)
    assert isinstance(key, expected_type)
    assert key.key_size == key_size
| 3,547 | Python | .py | 70 | 41.471429 | 88 | 0.744742 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,406 | certs.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/certs.py | import textwrap
import pytest
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from middlewared.plugins.crypto_.generate_certs import generate_certificate
from middlewared.plugins.crypto_.generate_utils import sign_csr_with_ca
from middlewared.plugins.crypto_.load_utils import load_certificate, load_private_key
from middlewared.plugins.crypto_.utils import DEFAULT_LIFETIME_DAYS
@pytest.mark.parametrize('generate_params,key_type,key_size,cert_info', [
(
{
'key_type': 'RSA',
'key_length': 4096,
'san': ['domain2', '9.9.9.9'],
'common': 'dev',
'country': 'US',
'state': 'TN',
'city': 'Knoxville',
'organization': 'iX',
'organizational_unit': 'dev',
'email': 'iamchild@ix.com',
'digest_algorithm': 'SHA256',
'lifetime': DEFAULT_LIFETIME_DAYS,
'serial': 12934,
'ca_certificate': textwrap.dedent('''\
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
'''),
'cert_extensions': {
'BasicConstraints': {
'enabled': False,
},
'AuthorityKeyIdentifier': {
'enabled': False,
},
'ExtendedKeyUsage': {
'enabled': False,
},
'KeyUsage': {
'enabled': False,
}
},
}, rsa.RSAPrivateKey, 4096,
{
'DN': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=iamchild@ix.com/'
'subjectAltName=DNS:domain2, IP Address:9.9.9.9',
'chain': False,
'city': 'Knoxville',
'common': 'dev',
'country': 'US',
'digest_algorithm': 'SHA256',
'email': 'iamchild@ix.com',
'extensions': {
'SubjectAltName': 'DNS:domain2, IP Address:9.9.9.9',
},
'fingerprint': '5C:BF:5A:CF:76:12:48:1B:85:A0:AE:2C:5D:E0:51:85:B3:C2:40:79',
'from': 'Mon Jan 24 11:28:07 2022',
'issuer_dn': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=dev@ix.com',
'lifetime': DEFAULT_LIFETIME_DAYS,
'organization': 'iX',
'organizational_unit': 'dev',
'san': ['DNS:domain2', 'IP Address:9.9.9.9'],
'serial': 12934,
'state': 'TN',
'subject_name_hash': 3214950212,
'until': 'Sat Feb 25 11:28:07 2023'
}
),
(
{
'key_type': 'RSA',
'key_length': 2048,
'san': ['domain3', '10.10.10.10'],
'common': 'dev',
'country': 'US',
'state': 'TN',
'city': 'Knoxville',
'organization': 'iX',
'organizational_unit': 'dev',
'email': 'iamacert@ix.com',
'digest_algorithm': 'SHA256',
'lifetime': DEFAULT_LIFETIME_DAYS,
'serial': 12936,
'ca_certificate': textwrap.dedent('''
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
'''),
'cert_extensions': {
'BasicConstraints': {
'enabled': False,
},
'AuthorityKeyIdentifier': {
'enabled': False,
},
'ExtendedKeyUsage': {
'enabled': False,
},
'KeyUsage': {
'enabled': False,
}
},
}, rsa.RSAPrivateKey, 2048,
{
'DN': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=iamacert@ix.com/'
'subjectAltName=DNS:domain3, IP Address:10.10.10.10',
'chain': False,
'city': 'Knoxville',
'common': 'dev',
'country': 'US',
'digest_algorithm': 'SHA256',
'email': 'iamacert@ix.com',
'extensions': {
'SubjectAltName': 'DNS:domain3, IP Address:10.10.10.10',
},
'fingerprint': '5C:BF:5A:CF:76:12:48:1B:85:A0:AE:2C:5D:E0:51:85:B3:C2:40:79',
'from': 'Mon Jan 24 11:28:07 2022',
'issuer_dn': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=dev@ix.com',
'lifetime': DEFAULT_LIFETIME_DAYS,
'organization': 'iX',
'organizational_unit': 'dev',
'san': ['DNS:domain3', 'IP Address:10.10.10.10'],
'serial': 12936,
'state': 'TN',
'subject_name_hash': 3214950212,
'until': 'Sat Feb 25 11:28:07 2023'
}
)
])
def test__generating_cert(generate_params, key_type, key_size, cert_info):
    """Generate a certificate from ``generate_params`` and validate it.

    ``key_type``/``key_size`` describe the expected private key, while
    ``cert_info`` maps certificate fields to their expected parsed values.
    """
    cert_str, key = generate_certificate(generate_params)
    cert_details = load_certificate(cert_str, True)
    key_obj = load_private_key(key)
    # Validate against the parametrized key type instead of hardcoding RSA so
    # that the `key_type` parameter is actually honored for every case
    assert isinstance(key_obj, key_type)
    assert key_obj.key_size == key_size
    # there are certain keys which are special and we should not be validating those as they would differ
    special_props = ('fingerprint', 'from', 'until', 'subject_name_hash')
    for k in cert_info:
        assert k in cert_details, cert_details
        if k in special_props:
            continue
        if k == 'extensions':
            # SubjectKeyIdentifier is derived from the freshly generated key,
            # so only its presence (not its value) can be asserted
            assert 'SubjectKeyIdentifier' in cert_details[k], cert_details[k]
            cert_details[k].pop('SubjectKeyIdentifier')
        assert cert_info[k] == cert_details[k], cert_details
@pytest.mark.parametrize('data', [
{
'ca_certificate': textwrap.dedent('''
-----BEGIN CERTIFICATE-----
MIIFvjCCA6agAwIBAgIUYSm33fbU0nxOLQM+1iUeoA9IN98wDQYJKoZIhvcNAQEL
BQAwZDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFDASBgNVBAcM
C0xvcyBBbmdlbGVzMRcwFQYDVQQKDA5NeU9yZ2FuaXphdGlvbjERMA8GA1UEAwwI
TXlSb290Q0EwHhcNMjQwOTIzMDgxMDAwWhcNMjkwOTIyMDgxMDAwWjBsMQswCQYD
VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEUMBIGA1UEBwwLTG9zIEFuZ2Vs
ZXMxFzAVBgNVBAoMDk15T3JnYW5pemF0aW9uMRkwFwYDVQQDDBBNeUludGVybWVk
aWF0ZUNBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEArulYocfEJrxb
Pv6r1I6d+5DPt+SHgHJcdMImOHyrkyZaOumOLRns8UBsxoBQFaqKdnrn1MkT51xF
tarqcCFUpkdad8WeK9OqKSAuxziccZfqGgwXWkpQyNvKUo4dGu7svYTOvyBEiQkY
g3/Dd0W/DgoHG28pXh4qXMxl5LAhRSXFvLt1DHsntyUpULduanCGV6yvOacpJz6K
e4/kxUG0HSnq0K7ActCicSUwkQOtAzOExJWdufGinR5PpplpX0lGloGCXc3sMnIb
Sn1xg6Q2F9BFPuJ1DA0KuVVr3McK2v41zHn9HqBjpDXLOXggcu68HHFFw+USe+9Z
QxZKeS7+lyEl2q/DiBVTSib5Ebt6QXeGfT7Y3NCBf/+H0YmwItaGjprs8ORB0X3N
gEuzv2kdF9OfxNCqpsBQu6cdVQNSYw1GAkJaVkZ/mZsJAEad9c1alSc6PtN2KfUD
Lc5cKoG6Akojiq+LZAwbS3PJIKa8mVZWfOP0DOdRkVsE22pUHtU8zk/Z8k6uvv1D
l2IwSgZ/H4uCKkW2AzmfrdJnwlUjs7s9xDzXxOBBdJx5+RJADP9wiMc2935sXurv
nlWccAgy704QgNJvgcF7lKnpWFVgB6hour4xZz3vWM1GL+bI15TG+cS1h5WqqYFG
Y/Et9lpV/0iT6rEYU/T/upDSUjWQERsCAwEAAaNgMF4wDAYDVR0TBAUwAwEB/zAO
BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJ0WfCTCab4MJ1bKlpb1rQT/7Lz/MB8G
A1UdIwQYMBaAFI8bOrNOIEIA5MdAg+g89Rm4aVhjMA0GCSqGSIb3DQEBCwUAA4IC
AQA4ygRkmlnidblc8J08FbjYdpQ2FiG1tUun4rQAfmLIAC45rts8spgfFupf4D6j
OD2KAPFs+3AXbcRLMpeQAv22HgRJvVOjOrtUQroEmTmXuPWXE3iYSoMLnmLiL+kr
FgKLSawXjOtt9WN+OOwdSIqqhm5zNkdvLrgvCKGg+GZYqsGANG9MHGfTF1DNhK/2
n9E5HKnSPs5Z4ABKlRVr6FEhGQvfljBRx8Wf0ocgs+d5zOQUL9W55gV0G82rDSW1
8jchUlMTdEE2UyNcJgBKkBdKz+dQF2gMbIcyFQgEh6ZQOWcl+ZO4gWX7/V9wNeUC
8yOQbcCYN5YnlggL/4n+KWtT0MEoY7KZsX+Npnw3Cq85z/OIoxTadvYESt/azFRK
U4DQ3epAkb0leOJjGdDkjJM8VEcS2lNfuGsv9t0mTJHdNA00cWR7bRoo4IYaJMjL
mCG3h5rCPVoIXvXNHyW/GRfJzJTayMVCMLVneFMpQFprSWUAS/+m47BEFliKxZEd
nTyOp15PoUxzSSV2OoOMtm/ZRSAtSGXKQAv43YEoBYuAboyJinodlxpLGbCUijOi
6L3hkc2ZPh/nKOeXroQDO4sTAW2Ki8/SKOSKBH57dGbm/zSIu7OHhkBZeGotbSnf
kYnwsj6Q8znUgY36f4oPJ+7+t6EZvl8G0IeMDP9oYcDv9A==
-----END CERTIFICATE-----
'''),
'ca_privatekey': textwrap.dedent('''
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCu6Vihx8QmvFs+
/qvUjp37kM+35IeAclx0wiY4fKuTJlo66Y4tGezxQGzGgFAVqop2eufUyRPnXEW1
qupwIVSmR1p3xZ4r06opIC7HOJxxl+oaDBdaSlDI28pSjh0a7uy9hM6/IESJCRiD
f8N3Rb8OCgcbbyleHipczGXksCFFJcW8u3UMeye3JSlQt25qcIZXrK85pyknPop7
j+TFQbQdKerQrsBy0KJxJTCRA60DM4TElZ258aKdHk+mmWlfSUaWgYJdzewychtK
fXGDpDYX0EU+4nUMDQq5VWvcxwra/jXMef0eoGOkNcs5eCBy7rwccUXD5RJ771lD
Fkp5Lv6XISXar8OIFVNKJvkRu3pBd4Z9Ptjc0IF//4fRibAi1oaOmuzw5EHRfc2A
S7O/aR0X05/E0KqmwFC7px1VA1JjDUYCQlpWRn+ZmwkARp31zVqVJzo+03Yp9QMt
zlwqgboCSiOKr4tkDBtLc8kgpryZVlZ84/QM51GRWwTbalQe1TzOT9nyTq6+/UOX
YjBKBn8fi4IqRbYDOZ+t0mfCVSOzuz3EPNfE4EF0nHn5EkAM/3CIxzb3fmxe6u+e
VZxwCDLvThCA0m+BwXuUqelYVWAHqGi6vjFnPe9YzUYv5sjXlMb5xLWHlaqpgUZj
8S32WlX/SJPqsRhT9P+6kNJSNZARGwIDAQABAoICAAYFCuNafDI3Fk7XNfO1StOf
Gr8B8vXlObBdBDK6e68vSTiw1A9STpjI9lVokhkEywoj1bm5h+FVCCMl9DaStxaX
6xGnL/fjK36J2IJLvPqd11U5KE6XsysOgWqQ8Ih+Q5CMMw9Z3XH36auQ6JnAwUAK
8U6s5zgRgrS55iHWO/bkw2bo7rDUxjuj4EWiYn7wS3dV/pvV2HE80khJXf658ah1
SlsPQJlS+9w4AvFitoAfNEkNuyVsnwOYSPZ7XiiE3ZSNdX6j+SaNTcolAhSdQK1W
IiP1aEDXbBCP04wAH6wExrY4VpFIxNUgctOSAk/iToAOF/ATgKzaQnCwIjUEfIeJ
Tibj90Bnjy4foahzo8gbMzIFoTDLtefXkzX/pZsPM2yHSPegONnpGaeTEBit8YjI
FeAcVOoOFC5z4c4I3wvBuFCGeOtDQR/UFkx6pUY9sKmR7GjNSBo5rHIVeSgEdVL3
vDi/sTuuab1/botQdOTdxNVvwd3dABVI47uHGTcn2OdHhECj8ljiasT/z3r/oVi7
vjoGtVhzCajt9oUCaDh7Qzdm1F3GugmCsGq0KO8tdBbRyaFoUwJ0Ze7/e7TWnE2A
j57XI6Tjd1y4ztaexD+90AOBKwXD+OSfY+luv52ittr2k0EUQBR1OICP5Dn1p5v3
ahQyEtnaib60ChLi8DtNAoIBAQDyPHx3wEDp4+hlh148JQX5I9T5dlHAmfOtjpC2
OG1MrECX3CF8ya5ucolnhU+h08pjAOVq3o1cdCAZjV3BnQ8xJ/zAJ3jd12d+NSdt
wcKGErvx34fpXx9oB4eVZkk3V2x4L4G8GzVrikZ+nb0CsHQ6450+pI4iUl8fGB+h
qE5OqjO98o7vER/tnfgonFEKHsoic4959zu0j6FyC2t/d+oqEeNaV4qE8eHKGI6b
Hs48j88n25dN4+HU6BCW5fq7xdUygBrxIsYL6a3ol54oKv4lT76O955pLpF/OfAE
N8rj+xsdYkGnq+X0+3+WTwaGYl8KgUBVf2425DXT0XQIIdEdAoIBAQC42ZG8Deeu
PA8XjxabK7hYnL6JdgRZM+2JEnEi9ncxJ3V8gvHjBFn/6XG7EytGx5pSo4D/pOXO
EjujU0Cr4PQ0o7JzViMFhvzM2iOJYZN5WGPW3h9QIz3g1yE1lb8OpSldI9IGysUO
KPMeXYdHonCCe/yxBf1zc/SpyoXG4mEeM9EPJdovjLqcmM5Rh2VPMT8Y7it1nPA5
D9M3DWchWYMkW97WJ9sejXYGFpMW+rrxfwbuIcCOds7CkAy0XGp643hUEsOom6to
0LPUuJET2s2wwKVWCi27w9UQISdZyw+3uI32d5NkpEfMbL1hMbIKXgTNFAPV43mz
8CJNHgu/GE2XAoIBAFZ7rdx7MTHQApqs98H1XeqTFmhyC8H9tPgT3CqSfsNPBEiz
eOk6gCJCljf1anbWTH2IRmAfUMzfUM6OoBiN4GymYCCidw6M5xAyHf+bm72OVreG
HNn+8hGMDqYSPLWbasiF/YWRGUNpvL1bx618HiMgPHWu/mfWjMtnK3PlyP9g2NRK
EynbLVECgyTMmkpIr9YY9/KNiC1w0i0LUrfRsjKO7GLGule4m+oxVkifePY6SbVr
Otu+LlR6/eFB/oCnovRCtFu/FIIQTdyqtPaWUuIzVE9qrI3U6HFJ2B1JZhB7CDU3
bUVVo+YRHH4nfKbh3Bi+CJ/9vPWwCF+1ef2lRSkCggEBAJdJEbosUzJJWzy30hOb
O5viZRrRqQtssXqeylOIDdL/7WrDLL7Uv+fvojIupRufnxEFWj1gMuhuCbtJFsPV
L2iplRJKLA2JBfuOiMkWQAFvMv8W+d+3iBwtVbOqZBzTVcAx4eHHMHG/WALBH0ek
jZptaUlkkqNcAqC37LbybfOvCunv29tQvSYO9cTKIEMpFfAMdSskD4NUDVSgNoiX
3vnx6rWxFuexdSfUb/u9pyShBwX8P8EZQW0BQjSW8lqzMrb0SIgFJ8t4C8YMbEgo
qnF/qZF6cSWcSBOUXsVhqPJ/LEjMYqhav5xyXqheaM2NVzaUq/Lw4pk+7oPZFFoI
xO8CggEAev7GFTAk604n0qZ7c0Y3yCF0WTedFnirlxt3ngJUV0MxUgM7Y5phZsdY
UqWKJB7GkmHQyifrKMU8MFx1cFthdlnbw6qJpiu9Kqg5eIm5KoXfq+RfLpJtx+nE
/rAyIrwIyr8vCgimCCXp0Vf7gU8nQWqiNGEZ57Wp/VDXw1AyV4TwS/3fpW8ftj7n
qh6U0B1Ysp0OC9IY5f5ikr/VaT9DrkxYRqjAh0xDRk+Ug4MfkNOF/Ui3qZfMhETU
qTrdKJAHGp5eZX6+1nILUPuKL9qTcQEd8eHn4DDCEGZlHbZlAEbtx8vyYQRqNMA2
+/ITTvwhewd07bFIPLU7UqXVIfSZNg==
-----END PRIVATE KEY-----
'''),
'csr': textwrap.dedent('''
-----BEGIN CERTIFICATE REQUEST-----
MIIDMDCCAhgCAQAwgYExDjAMBgNVBAMMBU15Q1NSMQswCQYDVQQGEwJVUzETMBEG
A1UECAwKQ2FsaWZvcm5pYTEUMBIGA1UEBwwLTG9zIEFuZ2VsZXMxFzAVBgNVBAoM
Dk15T3JnYW5pemF0aW9uMR4wHAYJKoZIhvcNAQkBFg9hYmNAZXhhbXBsZS5jb20w
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCoyhG7UTKGI3Mh/YWvIPQT
E1h633JYCbEN7k/uJoCR0EwUtIZm4RT9MM1mT+uuGiFvgAvpYLtKkPDJ7/3nNvkS
VQRFeJnNHW+pj0XzcuoCgrU6lMLid+TfSQS3yDOuFYosozBzOFW63uGNjAPU4zbf
3hEKfeFPoJsy5q9LPoGctO/ooo1aDCwHSSPL17d8ip4Zn6VjaIXiN1nDcFIImu5U
FJY7yGaOVItJCtrLXb489WCDNK6c39GIEFYlJCuXZY9z/SDy1qESEXlOlWBymdCv
JuUJKHqxSIGKj0DHbbDWPLx9PbiGGuboVFuJifoqAVQpmCzFnKJdhlyNSv6sRz+J
AgMBAAGgaTBnBgkqhkiG9w0BCQ4xWjBYMBYGA1UdEQQPMA2CC2V4YW1wbGUuY29t
MAwGA1UdEwEB/wQCMAAwIAYDVR0lAQH/BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
MA4GA1UdDwEB/wQEAwIDqDANBgkqhkiG9w0BAQsFAAOCAQEAKDO0G6K+xQGmno1x
hoZAayj91r6PZhact54vXij9dFxUh5Z4V2AVIHlIEdfXEj494ZKIWSW46/qgkGc7
fDUYstUjNTmLE9OzMIwXEkLlQG1RQ1sweMlvrapQ4hdxz7vO9lJ0imYrJLS5Xi1Q
a814O4H22tvt6KeBp7I9hj2OSmTbdaNh0rNLL9eTq5PclIAshw+fw9OWqPznIj90
55I3x14uk4TMs8gXG7IJQPtfzGLRwVWl7jhEPnTp5yEyuUHZUOGZrLHXcZk5v9Jj
kdhmk7kTAAXsO378HZn2DZx1FlLvJjNheOtiSAV2tQpKIKCGHzDHARD7AjVXrD+1
L4JCDA==
-----END CERTIFICATE REQUEST-----
'''),
'csr_privatekey': textwrap.dedent('''
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCoyhG7UTKGI3Mh
/YWvIPQTE1h633JYCbEN7k/uJoCR0EwUtIZm4RT9MM1mT+uuGiFvgAvpYLtKkPDJ
7/3nNvkSVQRFeJnNHW+pj0XzcuoCgrU6lMLid+TfSQS3yDOuFYosozBzOFW63uGN
jAPU4zbf3hEKfeFPoJsy5q9LPoGctO/ooo1aDCwHSSPL17d8ip4Zn6VjaIXiN1nD
cFIImu5UFJY7yGaOVItJCtrLXb489WCDNK6c39GIEFYlJCuXZY9z/SDy1qESEXlO
lWBymdCvJuUJKHqxSIGKj0DHbbDWPLx9PbiGGuboVFuJifoqAVQpmCzFnKJdhlyN
Sv6sRz+JAgMBAAECggEARsp9NllfPdgXXR2l2GYTR/7YoKwfmmHyMrwNJP5b9Qvu
JM70AakMMwapVuxVFe+ar1d+Z3KtCqCQhLlVfYhWXURv5q0moFrkrrJK7ch38fad
CMVEmVQclzNaObRLTIt3KLKGywRJHHeHFOUw5DQpmynZbtON0GY1QVt0ELRWCwE/
qDc9G1RVqkwn94AIdI+RScSOT1F6Ebsh0ma9PzcZEyNnvI6RaJXPF/QJOVHJPd50
F5lSXRHwiTMFJTa7ihkl87jAYYLrjnOVPSsghSO55Fav+NqR1bO5v4A5iZr3aGGN
3EZXmKcATqwLAai4m4LqpBwWbl4dTiLU8LfeF/CLowKBgQDUOtR6RpVLUteNeZGA
BJt77G988qmhOhCbvzeU1h8dpN/DaReqxTlKSgh7XgdfaL9Hoi9A8WsoTAu6rAlo
F2admQU/9OG/x2DEXXP4gyfqj64qA8i6dUjrK1lWE6O/SE79LdtH8zadoVCL6Z78
ybQrc1jZMvR/7Zja1i7WYb9STwKBgQDLmbKfyBSC9PrP7yrnnyA0NZTINUoLhoXB
TcyLpiDRZ20WqgFlFB8Pv2ji+E+lOF0tfCewZgPOciYDIJawXKrbGnBh+9qAzHrB
cXNjREawGTK3g5Z2V4Y8SIz8N92pFOEe0ZLc2F25Ciy+MAfg1REK5gwwfR0lJlpr
gtMq/6ESpwKBgQCCRJYVc+vBr1jV4w/3V1yk5UzNkhmi+AQnxWh1eDTjOkeLJ5+6
V5LB0c2BBAdcfewjKR7+KvGOa5crftvLQ8nd5IY/aq2CzPvNrFs56C+BH65U5bu5
D7Kxfws39ZgmGlk5uIMHl/cnLFRHfR/0pE5t+UBJGajQOWQAt0VKm/cWqwKBgQCj
BkKc5hxmb7qU3LDCHgwvQegMF39ekyxuh9kMyMzmX6Zdy2qqgN4OQfm+I43Cgcs6
LHurJ0RM/eGqB2IhfVHhdt4d1wgysYhpdGosRfND9ilCAD9uKs71XjJlkmYOiQVp
I+4wn58MFzWUY+krAfBPhbyk5sl7gaZNB8gGWgGjaQKBgQCmq5QGa+WWxI7Oxq3b
eVHijSzg+C4HVXU4L8lrvLOvze7mzjL2nw1hRsW7tq/csWD2K/ySU3ABslxDudBW
wn+FoP5qs4E5F4bR+vt+y+3qd9WfVju5+yMVgtV6QduREyR8BbU77P67BrGP6fE+
ojKM5TWJfQdZ2fyIKpBYunYk+A==
-----END PRIVATE KEY-----
'''),
'serial': 554702452401875914103556532740307722432552646627,
'digest_algorithm': 'SHA256',
'cert_extensions': {
'BasicConstraints': {
'ca': False,
'enabled': False,
'path_length': None,
'extension_critical': False
},
'AuthorityKeyIdentifier': {
'authority_cert_issuer': False,
'enabled': False,
'extension_critical': False
},
'ExtendedKeyUsage': {
'usages': [],
'enabled': False,
'extension_critical': False
},
'KeyUsage': {
'enabled': False,
'digital_signature': False,
'content_commitment': False,
'key_encipherment': False,
'data_encipherment': False,
'key_agreement': False,
'key_cert_sign': False,
'crl_sign': False,
'encipher_only': False,
'decipher_only': False,
'extension_critical': False
}
}
}
], ids=['Test ca_sign_csr']
)
def test_ca_sign_csr(data):
    """Sign a CSR with the CA in ``data`` and verify the issued certificate's
    issuer equals the CA certificate's subject.
    """
    cert = sign_csr_with_ca(data)
    # Guard before use: the original asserted `is not None` only after calling
    # cert.encode(), which would already have raised AttributeError on None
    assert cert is not None
    signed_cert = x509.load_pem_x509_certificate(cert.encode('utf-8'), default_backend())
    ca_cert = x509.load_pem_x509_certificate(data['ca_certificate'].encode('utf-8'), default_backend())
    assert signed_cert.issuer == ca_cert.subject
| 22,289 | Python | .py | 393 | 43.628499 | 105 | 0.697295 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,407 | ca.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/crypto/ca.py | import pytest
import textwrap
from cryptography.hazmat.primitives.asymmetric import rsa
from middlewared.plugins.crypto_.generate_ca import generate_certificate_authority
from middlewared.plugins.crypto_.load_utils import load_certificate, load_private_key
from middlewared.plugins.crypto_.utils import DEFAULT_LIFETIME_DAYS
@pytest.mark.parametrize('generate_params,key_type,key_size,ca_info', [
(
{
'key_type': 'RSA',
'key_length': 4096,
'san': ['domain1', '8.8.8.8'],
'common': 'dev',
'country': 'US',
'state': 'TN',
'city': 'Knoxville',
'organization': 'iX',
'organizational_unit': 'dev',
'email': 'dev@ix.com',
'digest_algorithm': 'SHA256',
'lifetime': DEFAULT_LIFETIME_DAYS,
'serial': 12931,
'ca_certificate': None,
'cert_extensions': {
'BasicConstraints': {
'enabled': True,
'ca': True,
'extension_critical': True,
},
'AuthorityKeyIdentifier': {
'enabled': False,
},
'ExtendedKeyUsage': {
'enabled': True,
'usages': ['SERVER_AUTH'],
},
'KeyUsage': {
'enabled': True,
'key_cert_sign': True,
'crl_sign': True,
'extension_critical': True,
}
},
},
rsa.RSAPrivateKey, 4096,
{
'DN': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=dev@ix.com/subjectAlt'
'Name=DNS:domain1, IP Address:8.8.8.8',
'chain': False,
'city': 'Knoxville',
'common': 'dev',
'country': 'US',
'digest_algorithm': 'SHA256',
'email': 'dev@ix.com',
'extensions': {
'SubjectAltName': 'DNS:domain1, IP Address:8.8.8.8',
'BasicConstraints': 'CA:TRUE',
'ExtendedKeyUsage': 'TLS Web Server Authentication',
'KeyUsage': 'Certificate Sign, CRL Sign',
},
'fingerprint': '45:43:04:3D:73:3D:01:CD:98:E9:63:93:8C:61:DC:2F:68:ED:E3:77',
'from': 'Mon Jan 24 10:20:50 2022',
'issuer_dn': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=dev@ix.com',
'lifetime': DEFAULT_LIFETIME_DAYS,
'organization': 'iX',
'organizational_unit': 'dev',
'san': ['DNS:domain1', 'IP Address:8.8.8.8'],
'serial': 12931,
'state': 'TN',
'subject_name_hash': 877114495,
'until': 'Sat Feb 25 10:20:50 2023'
},
),
(
{
'key_type': 'RSA',
'key_length': 4096,
'san': ['domain2', '9.9.9.9'],
'common': 'dev',
'country': 'US',
'state': 'TN',
'city': 'Knoxville',
'organization': 'iX',
'organizational_unit': 'dev',
'email': 'iamchild@ix.com',
'digest_algorithm': 'SHA256',
'lifetime': DEFAULT_LIFETIME_DAYS,
'serial': 12934,
'ca_certificate': textwrap.dedent('''\
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
'''),
'cert_extensions': {
'BasicConstraints': {
'enabled': True,
'ca': True,
'extension_critical': True,
},
'AuthorityKeyIdentifier': {
'enabled': False,
},
'ExtendedKeyUsage': {
'enabled': True,
'usages': ['SERVER_AUTH'],
},
'KeyUsage': {
'enabled': True,
'key_cert_sign': True,
'crl_sign': True,
'extension_critical': True,
}
},
}, rsa.RSAPrivateKey, 4096,
{
'DN': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=iamchild@ix.com/'
'subjectAltName=DNS:domain2, IP Address:9.9.9.9',
'chain': False,
'city': 'Knoxville',
'common': 'dev',
'country': 'US',
'digest_algorithm': 'SHA256',
'email': 'iamchild@ix.com',
'extensions': {
'SubjectAltName': 'DNS:domain2, IP Address:9.9.9.9',
'BasicConstraints': 'CA:TRUE',
'ExtendedKeyUsage': 'TLS Web Server Authentication',
'KeyUsage': 'Certificate Sign, CRL Sign',
},
'fingerprint': '5C:BF:5A:CF:76:12:48:1B:85:A0:AE:2C:5D:E0:51:85:B3:C2:40:79',
'from': 'Mon Jan 24 11:28:07 2022',
'issuer_dn': '/CN=dev/C=US/ST=TN/L=Knoxville/O=iX/OU=dev/emailAddress=dev@ix.com',
'lifetime': DEFAULT_LIFETIME_DAYS,
'organization': 'iX',
'organizational_unit': 'dev',
'san': ['DNS:domain2', 'IP Address:9.9.9.9'],
'serial': 12934,
'state': 'TN',
'subject_name_hash': 3214950212,
'until': 'Sat Feb 25 11:28:07 2023'
}
)
])
def test__generating_ca(generate_params, key_type, key_size, ca_info):
    """Generate a certificate authority from ``generate_params`` and validate it.

    ``key_type``/``key_size`` describe the expected private key, while
    ``ca_info`` maps CA certificate fields to their expected parsed values.
    """
    ca_str, key = generate_certificate_authority(generate_params)
    ca_details = load_certificate(ca_str, True)
    key_obj = load_private_key(key)
    # Validate against the parametrized key type instead of hardcoding RSA so
    # that the `key_type` parameter is actually honored for every case
    assert isinstance(key_obj, key_type)
    assert key_obj.key_size == key_size
    # there are certain keys which are special and we should not be validating those as they would differ
    special_props = ('fingerprint', 'from', 'until', 'subject_name_hash')
    for k in ca_info:
        assert k in ca_details, ca_details
        if k in special_props:
            continue
        if k == 'extensions':
            # SubjectKeyIdentifier is derived from the freshly generated key,
            # so only its presence (not its value) can be asserted
            assert 'SubjectKeyIdentifier' in ca_details[k], ca_details[k]
            ca_details[k].pop('SubjectKeyIdentifier')
        assert ca_info[k] == ca_details[k], ca_details
| 8,701 | Python | .py | 188 | 32.5 | 105 | 0.569951 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,408 | test_custom_scale_version_checks.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/catalog/test_custom_scale_version_checks.py | import pytest
from middlewared.plugins.catalog.apps_util import custom_scale_version_checks
@pytest.mark.parametrize('min_version, max_version, sys_scale_version, expected', [
(
'21.0',
'23.1',
'22.01',
''
),
(
'22.15',
'21.05',
'22.01',
'Your TrueNAS system version (22.01) is less than the minimum version (22.15) required by this application.'
),
(
'22.01',
'23.01',
'22.01',
''
),
(
'22.01',
'23.02',
'24.05',
'Your TrueNAS system version (24.05) is greater than the maximum version (23.02) required by this application.'
),
(
'22.01',
'21.03',
'22.0',
'Unable to determine your TrueNAS system version'
)
])
def test_custom_scale_version(min_version, max_version, sys_scale_version, expected):
    """Verify the message produced by `custom_scale_version_checks` for each version combination."""
    assert custom_scale_version_checks(min_version, max_version, sys_scale_version) == expected
| 1,017 | Python | .py | 37 | 20.783784 | 119 | 0.578301 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,409 | test_get_app_details.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/catalog/test_get_app_details.py | import unittest
import pytest
from middlewared.plugins.catalog.apps_util import get_app_details
@pytest.mark.parametrize('app_data, versions, expected', [
(
{
'app_readme': '',
'categories': [
'media'
],
'description': '',
'healthy': True,
'healthy_error': None,
'home': 'https://actualbudget.org',
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget',
'latest_version': '1.1.11',
'latest_app_version': '24.10.1',
'latest_human_version': '24.10.1_1.1.11',
'last_update': '12-02-21 00:00:00',
'name': 'actual-budget',
'recommended': False,
'title': 'Actual Budget',
},
{
'1.0.1': {
'name': 'chia',
'categories': [],
'app_readme': None,
'location': '/mnt/mypool/ix-applications/catalogs/'
'github_com_truenas_charts_git_master/charts/chia',
'healthy': True,
'supported': True,
'healthy_error': None,
'required_features': [],
'version': '1.0.1',
'human_version': '1.15.12',
'home': None,
'readme': None,
'changelog': None,
'last_update': '1200-20-00 00:00:00',
'app_metadata': {
'name': 'chia',
'train': 'stable',
'version': '1.0.1',
'app_version': '1.0.1',
'title': 'chia',
'description': 'desc',
'home': 'None',
},
'schema': {
"groups": [],
"questions": []
}
}
},
{
'app_readme': '',
'categories': ['media'],
'description': '',
'healthy': True,
'healthy_error': None,
'home': 'https://actualbudget.org',
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget',
'latest_version': '1.1.11',
'latest_app_version': '24.10.1',
'latest_human_version': '24.10.1_1.1.11',
'last_update': '12-02-21 00:00:00',
'name': 'actual-budget',
'recommended': False,
'title': 'Actual Budget',
'versions': {
'1.0.1': {
'name': 'chia',
'categories': [],
'app_readme': None,
'location': '/path/to/app/1.0.1',
'healthy': True,
'supported': True,
'healthy_error': None,
'required_features': [],
'version': '1.0.1',
'human_version': '1.15.12',
'home': None,
'readme': None,
'changelog': None,
'last_update': '1200-20-00 00:00:00',
'app_metadata': {
'name': 'chia',
'train': 'stable',
'version': '1.0.1',
'app_version': '1.0.1',
'title': 'chia',
'description': 'desc',
'home': 'None',
},
'schema': {
'groups': [],
'questions': []
},
'values': {}
}
}
}
),
(
{
'app_readme': '',
'categories': [
'media'
],
'description': '',
'healthy': True,
'healthy_error': None,
'home': 'https://actualbudget.org',
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget',
'latest_version': '1.1.11',
'latest_app_version': '24.10.1',
'latest_human_version': '24.10.1_1.1.11',
'last_update': '12-02-21 00:00:00',
'name': 'actual-budget',
'recommended': False,
'title': 'Actual Budget',
},
{},
{
'app_readme': '',
'categories': ['media'],
'description': '',
'healthy': True,
'healthy_error': None,
'home': 'https://actualbudget.org',
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget',
'latest_version': '1.1.11',
'latest_app_version': '24.10.1',
'latest_human_version': '24.10.1_1.1.11',
'last_update': '12-02-21 00:00:00',
'name': 'actual-budget',
'recommended': False,
'title': 'Actual Budget',
'versions': {}
}
),
])
@unittest.mock.patch('middlewared.plugins.catalog.apps_util.normalize_questions')
@unittest.mock.patch('middlewared.plugins.catalog.apps_util.retrieve_cached_versions_data')
def test_get_app_details(mock_retrieve_cached_versions_data, mock_normalize_questions, app_data, versions, expected):
    """Check `get_app_details` merges cached version data into the app payload."""
    mock_retrieve_cached_versions_data.return_value = versions
    # Only dict-valued expectations are asserted (all current parametrizations)
    if not isinstance(expected, dict):
        return
    assert get_app_details('/path/to/app', app_data, {}) == expected
| 5,566 | Python | .py | 150 | 22.473333 | 117 | 0.423397 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,410 | test_retrieve_cached_versions_data.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/catalog/test_retrieve_cached_versions_data.py | import textwrap
import unittest
import pytest
from middlewared.plugins.catalog.apps_util import retrieve_cached_versions_data
from middlewared.service import CallError
@pytest.mark.parametrize('file, should_work', [
(
'''
version: 1.0.1
''',
False
),
(
'''
{
'versions': '1.0.1'
}
''',
False
),
(
None,
False
),
(
textwrap.dedent(
'''
{
"1.0.1": {
"name": "chia",
"categories": [],
"app_readme": null,
"location": "/mnt/mypool/ix-applications/catalogs/github_com_truenas_charts_git_master/charts/chia",
"healthy": true,
"supported": true,
"healthy_error": null,
"required_features": [],
"version": "1.0.1",
"human_version": "1.15.12",
"home": null,
"readme": null,
"changelog": null,
"last_update": "2024-10-09 00:00:00",
"app_metadata": {
"name": "chia",
"train": "stable",
"version": "1.0.1",
"app_version": "1.0.1",
"title": "chia",
"description": "desc",
"home": "",
"sources": [],
"maintainers": [],
"run_as_context": [],
"capabilities": [],
"host_mounts": []
},
"schema": {
"groups": [],
"questions": []
}
}
}
'''
),
True
),
])
@unittest.mock.patch('builtins.open', new_callable=unittest.mock.mock_open)
def test_retrieve_cached_versions_data(mock_file, file, should_work):
    """Feed `file` through a mocked open() and check parsing succeeds or raises CallError."""
    mock_file.return_value.read.return_value = file
    if not should_work:
        with pytest.raises(CallError):
            retrieve_cached_versions_data('/path/to/app', 'actual-budget')
        return
    parsed = retrieve_cached_versions_data('/path/to/app', 'actual-budget')
    assert isinstance(parsed, dict)
| 2,433 | Python | .py | 77 | 17.805195 | 120 | 0.410464 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,411 | test_min_max_scale_version_update.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/catalog/test_min_max_scale_version_update.py | import unittest
import pytest
from middlewared.plugins.catalog.apps_util import minimum_scale_version_check_update
@pytest.mark.parametrize('version_data, expected', [
(
{
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-09 20:30:25',
'human_version': '24.10.1_1.1.11',
'chart_metadata': {
'annotations': {
'min_scale_version': '21.01',
'max_scale_version': '24.04'
}
},
'version': '1.1.11',
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
]
}
},
{
'healthy': True,
'supported': False,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-09 20:30:25',
'human_version': '24.10.1_1.1.11',
'chart_metadata': {
'annotations': {
'min_scale_version': '21.01',
'max_scale_version': '24.04'
}
},
'version': '1.1.11',
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
]
}
}
),
(
{
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-09 20:30:25',
'human_version': '24.10.1_1.1.11',
'chart_metadata': {
'annotations': {
'min_scale_version': '21.01',
'max_scale_version': '27.04'
}
},
'version': '1.1.11',
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
]
}
},
{
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-09 20:30:25',
'human_version': '24.10.1_1.1.11',
'chart_metadata': {
'annotations': {
'min_scale_version': '21.01',
'max_scale_version': '27.04'
}
},
'version': '1.1.11',
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
]
}
}
),
(
{
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-09 20:30:25',
'human_version': '24.10.1_1.1.11',
'chart_metadata': {
'annotations': {
'min_scale_version': '26.04',
'max_scale_version': '24.04'
}
},
'version': '1.1.11',
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
]
}
},
{
'healthy': True,
'supported': False,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-09 20:30:25',
'human_version': '24.10.1_1.1.11',
'chart_metadata': {
'annotations': {
'min_scale_version': '26.04',
'max_scale_version': '24.04'
}
},
'version': '1.1.11',
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
]
}
}
),
])
@unittest.mock.patch('middlewared.plugins.catalog.apps_util.sw_info')
def test_min_max_scale_version_update(sw_info, version_data, expected):
    """With the system version pinned to 25.04.0, verify the recalculated `supported` flag."""
    sw_info.return_value = {'version': '25.04.0'}
    assert minimum_scale_version_check_update(version_data) == expected
| 5,261 | Python | .py | 154 | 19.655844 | 94 | 0.412894 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,412 | test_netdata_stats_func.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/reporting/test_netdata_stats_func.py | from unittest.mock import patch, mock_open
from middlewared.plugins.reporting.netdata.utils import NETDATA_UPDATE_EVERY
from middlewared.plugins.reporting.realtime_reporting import (
get_arc_stats, get_cpu_stats, get_disk_stats, get_interface_stats, get_memory_info,
)
from middlewared.plugins.reporting.realtime_reporting.utils import normalize_value, safely_retrieve_dimension
NETDATA_ALL_METRICS = {
'system.cpu': {
'name': 'system.cpu',
'family': 'cpu',
'context': 'system.cpu',
'units': 'percentage',
'last_updated': 1691150349,
'dimensions': {
'guest_nice': {
'name': 'guest_nice',
'value': 0.5375124
},
'guest': {
'name': 'guest',
'value': 0.5375124
},
'steal': {
'name': 'steal',
'value': 0.5275124
},
'softirq': {
'name': 'softirq',
'value': 0.5175124
},
'irq': {
'name': 'irq',
'value': 0.4975124
},
'user': {
'name': 'user',
'value': 0.4975124
},
'system': {
'name': 'system',
'value': 0.4975124
},
'nice': {
'name': 'nice',
'value': 49.75124
},
'iowait': {
'name': 'iowait',
'value': 4.75124
},
'idle': {
'name': 'idle',
'value': 99.0049751
}
}
},
'cpu.cpu0': {
'name': 'cpu.cpu0',
'family': 'utilization',
'context': 'cpu.cpu',
'units': 'percentage',
'last_updated': 1691150349,
'dimensions': {
'guest_nice': {
'name': 'guest_nice',
'value': 0.2575124
},
'guest': {
'name': 'guest',
'value': 0.2575124
},
'steal': {
'name': 'steal',
'value': 0.2575124
},
'softirq': {
'name': 'softirq',
'value': 0.2375124
},
'irq': {
'name': 'irq',
'value': 0.2075124
},
'user': {
'name': 'user',
'value': 0.3275124
},
'system': {
'name': 'system',
'value': 0.3275124
},
'nice': {
'name': 'nice',
'value': 26.75124
},
'iowait': {
'name': 'iowait',
'value': 2.75124
},
'idle': {
'name': 'idle',
'value': 49.0049751
}
}
},
'cpu.cpu1': {
'name': 'cpu.cpu1',
'family': 'utilization',
'context': 'cpu.cpu',
'units': 'percentage',
'last_updated': 1691150349,
'dimensions': {
'guest_nice': {
'name': 'guest_nice',
'value': 0.2575124
},
'guest': {
'name': 'guest',
'value': 0.2575124
},
'steal': {
'name': 'steal',
'value': 0.2575124
},
'softirq': {
'name': 'softirq',
'value': 0.2375124
},
'irq': {
'name': 'irq',
'value': 0.2075124
},
'user': {
'name': 'user',
'value': 0.3275124
},
'system': {
'name': 'system',
'value': 0.3275124
},
'nice': {
'name': 'nice',
'value': 26.75124
},
'iowait': {
'name': 'iowait',
'value': 2.75124
},
'idle': {
'name': 'idle',
'value': 49.0049751
}
}
},
'system.ram': {
'name': 'system.ram',
'family': 'ram',
'context': 'system.ram',
'units': 'MiB',
'last_updated': 1691150349,
'dimensions': {
'free': {
'name': 'free',
'value': 301.0585938
},
'used': {
'name': 'used',
'value': 1414.1318359
},
'cached': {
'name': 'cached',
'value': 250.1103516
},
'buffers': {
'name': 'buffers',
'value': 2.0820312
}
}
},
'mem.available': {
'name': 'mem.available',
'family': 'system',
'context': 'mem.available',
'units': 'MiB',
'last_updated': 1691150349,
'dimensions': {
'MemAvailable': {
'name': 'avail',
'value': 428.5869141
}
}
},
'mem.committed': {
'name': 'mem.committed',
'family': 'system',
'context': 'mem.committed',
'units': 'MiB',
'last_updated': 1691150349,
'dimensions': {
'Committed_AS': {
'name': 'Committed_AS',
'value': 1887.0546875
}
}
},
'mem.kernel': {
'name': 'mem.kernel',
'family': 'kernel',
'context': 'mem.kernel',
'units': 'MiB',
'last_updated': 1691150349,
'dimensions': {
'Slab': {
'name': 'Slab',
'value': 150.78125
},
'KernelStack': {
'name': 'KernelStack',
'value': 5.25
},
'PageTables': {
'name': 'PageTables',
'value': 6.53125
},
'VmallocUsed': {
'name': 'VmallocUsed',
'value': 99.6875
},
'Percpu': {
'name': 'Percpu',
'value': 0.8984375
}
}
},
'net.enp1s0': {
'name': 'net.enp1s0',
'family': 'enp1s0',
'context': 'net.net',
'units': 'kilobits/s',
'last_updated': 1691150349,
'dimensions': {
'received': {
'name': 'received',
'value': 4.0394645
},
'sent': {
'name': 'sent',
'value': -5.8688266
}
}
},
'net_speed.enp1s0': {
'name': 'net_speed.enp1s0',
'family': 'enp1s0',
'context': 'net.speed',
'units': 'kilobits/s',
'last_updated': 1691150349,
'dimensions': {
'speed': {
'name': 'speed',
'value': 0
}
}
},
'net_operstate.enp1s0': {
'name': 'net_operstate.enp1s0',
'family': 'enp1s0',
'context': 'net.operstate',
'units': 'state',
'last_updated': 1691150349,
'dimensions': {
'up': {
'name': 'up',
'value': 1
},
'down': {
'name': 'down',
'value': 0
},
'notpresent': {
'name': 'notpresent',
'value': 0
},
'lowerlayerdown': {
'name': 'lowerlayerdown',
'value': 0
},
'testing': {
'name': 'testing',
'value': 0
},
'dormant': {
'name': 'dormant',
'value': 0
},
'unknown': {
'name': 'unknown',
'value': 0
}
}
},
'truenas_disk_stats.io.{devicename}sda': {
'name': 'disk.sda',
'family': 'vda',
'context': 'disk.io',
'units': 'KiB/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 10
},
'writes': {
'name': 'writes',
'value': 20
}
}
},
'truenas_disk_stats.ops.{devicename}sda': {
'name': 'disk_ops.sda',
'family': 'vda',
'context': 'disk.ops',
'units': 'operations/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 2
},
'writes': {
'name': 'writes',
'value': 3
}
}
},
'truenas_disk_stats.busy.{devicename}sda': {
'name': 'disk_busy.sda',
'family': 'vda',
'context': 'disk.busy',
'units': 'milliseconds',
'last_updated': 1691150349,
'dimensions': {
'busy': {
'name': 'busy',
'value': 0
}
}
},
'truenas_disk_stats.io.{devicename}sdb': {
'name': 'disk.sdb',
'family': 'vdb',
'context': 'disk.io',
'units': 'KiB/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 3
},
'writes': {
'name': 'writes',
'value': 3
}
}
},
'truenas_disk_stats.ops.{devicename}sdb': {
'name': 'disk_ops.sdb',
'family': 'vdb',
'context': 'disk.ops',
'units': 'operations/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 1
},
'writes': {
'name': 'writes',
'value': 1
}
}
},
'truenas_disk_stats.busy.{devicename}sdb': {
'name': 'disk_busy.sdb',
'family': 'vdb',
'context': 'disk.busy',
'units': 'milliseconds',
'last_updated': 1691150349,
'dimensions': {
'busy': {
'name': 'busy',
'value': 0
}
}
},
'truenas_disk_stats.io.{devicename}sdc': {
'name': 'disk.sdc',
'family': 'vdc',
'context': 'disk.io',
'units': 'KiB/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 3
},
'writes': {
'name': 'writes',
'value': 4
}
}
},
'truenas_disk_stats.ops.{devicename}sdc': {
'name': 'disk_ops.sdc',
'family': 'vdc',
'context': 'disk.ops',
'units': 'operations/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 6
},
'writes': {
'name': 'writes',
'value': 6
}
}
},
'truenas_disk_stats.busy.{devicename}sdc': {
'name': 'disk_busy.sdc',
'family': 'vdc',
'context': 'disk.busy',
'units': 'milliseconds',
'last_updated': 1691150349,
'dimensions': {
'busy': {
'name': 'busy',
'value': 0
}
}
},
'truenas_disk_stats.io.{devicename}sdd': {
'name': 'disk.sdd',
'family': 'vdd',
'context': 'disk.io',
'units': 'KiB/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 2
},
'writes': {
'name': 'writes',
'value': 3
}
}
},
'truenas_disk_stats.ops.{devicename}sdd': {
'name': 'disk_ops.sdd',
'family': 'vdd',
'context': 'disk.ops',
'units': 'operations/s',
'last_updated': 1691150349,
'dimensions': {
'reads': {
'name': 'reads',
'value': 1
},
'writes': {
'name': 'writes',
'value': 1
}
}
},
'truenas_disk_stats.busy.{devicename}sdd': {
'name': 'disk_busy.sdd',
'family': 'vdd',
'context': 'disk.busy',
'units': 'milliseconds',
'last_updated': 1691150349,
'dimensions': {
'busy': {
'name': 'busy',
'value': 0
}
}
},
'zfs.arc_size': {
'name': 'zfs.arc_size',
'family': 'size',
'context': 'zfs.arc_size',
'units': 'MiB',
'last_updated': 1691150349,
'dimensions': {
'size': {
'name': 'arcsz',
'value': 210.9588394
},
'target': {
'name': 'target',
'value': 256.2307129
},
'min': {
'name': 'min (hard limit)',
'value': 61.4807129
},
'max': {
'name': 'max (high water)',
'value': 983.6914062
}
}
},
'zfs.hits': {
'name': 'zfs.hits',
'family': 'efficiency',
'context': 'zfs.hits',
'units': 'percentage',
'last_updated': 1691150349,
'dimensions': {
'hits': {
'name': 'hits',
'value': 3
},
'misses': {
'name': 'misses',
'value': 4
}
}
}
}
# Minimal /proc/meminfo fixture (values in kB). test_memory_stats below feeds
# this to get_memory_info() by patching builtins.open with mock_open.
MEM_INFO = '''Active: 67772 kB
Inactive: 1379892 kB
Mapped: 54768 kB
'''
def test_arc_stats():
    """get_arc_stats() must mirror the raw 'zfs.arc_size' / 'zfs.hits' chart values."""
    stats = get_arc_stats(NETDATA_ALL_METRICS)
    mib = 1024 * 1024
    expected_max = normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'zfs.arc_size', 'max', 0), multiplier=mib,
    )
    expected_size = normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'zfs.arc_size', 'size', 0), multiplier=mib,
    )
    hits = safely_retrieve_dimension(NETDATA_ALL_METRICS, 'zfs.hits', 'hits', 0)
    misses = safely_retrieve_dimension(NETDATA_ALL_METRICS, 'zfs.hits', 'misses', 0)
    assert stats['arc_max_size'] == expected_max
    assert stats['arc_size'] == expected_size
    # Hit ratio is hits over total lookups (hits + misses).
    assert stats['cache_hit_ratio'] == hits / (hits + misses)
def test_cpu_stats():
    """get_cpu_stats() must copy every raw CPU field and derive 'usage' from them."""
    cpu_stats = get_cpu_stats(NETDATA_ALL_METRICS, 2)
    charts = {'system.cpu': cpu_stats['average'], 'cpu.cpu0': cpu_stats['0'], 'cpu.cpu1': cpu_stats['1']}
    raw_fields = (
        'user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest', 'guest_nice',
    )
    for chart_name, metrics in charts.items():
        for field in raw_fields:
            assert metrics[field] == safely_retrieve_dimension(NETDATA_ALL_METRICS, chart_name, field, 0)
        # 'usage' is derived, so exclude it from the total before recomputing it:
        # busy time over all time, expressed as a percentage.
        total_sum = sum(metrics.values()) - metrics['usage']
        assert metrics['usage'] == ((total_sum - metrics['idle'] - metrics['iowait']) / total_sum) * 100
def test_disk_stats():
    """get_disk_stats() must aggregate per-disk io/ops/busy charts into file-system-wide totals."""
    disks = ['sda', 'sdb', 'sdc', 'sdd']
    # Maps kernel disk names to the netdata chart-name fragment used in the fixture keys.
    disk_mapping = {
        'sda': '{devicename}sda', 'sdb': '{devicename}sdb', 'sdc': '{devicename}sdc', 'sdd': '{devicename}sdd'
    }
    disk_stats = get_disk_stats(NETDATA_ALL_METRICS, disks, disk_mapping)
    read_ops = read_bytes = write_ops = write_bytes = busy = 0
    for disk in disks:
        mapped_key = disk_mapping.get(disk)
        # NOTE(review): the dimension names requested here ('{mapped_key}.read_ops',
        # '{mapped_key}.reads', ...) do not match the fixture's dimension keys
        # ('reads' / 'writes' / 'busy'), so safely_retrieve_dimension presumably
        # falls back to the default 0 — confirm this mirrors the plugin's lookup.
        read_ops += safely_retrieve_dimension(
            NETDATA_ALL_METRICS, f'truenas_disk_stats.ops.{mapped_key}', f'{mapped_key}.read_ops', 0
        )
        read_bytes += normalize_value(
            safely_retrieve_dimension(
                NETDATA_ALL_METRICS, f'truenas_disk_stats.io.{mapped_key}', f'{mapped_key}.reads', 0
            ), multiplier=1024,  # io chart is KiB/s -> bytes
        )
        # NOTE(review): write_ops is wrapped in normalize_value() while read_ops above
        # is not — verify this asymmetry matches the implementation under test.
        write_ops += normalize_value(safely_retrieve_dimension(
            NETDATA_ALL_METRICS, f'truenas_disk_stats.ops.{mapped_key}', f'{mapped_key}.write_ops', 0
        ))
        write_bytes += normalize_value(
            safely_retrieve_dimension(
                NETDATA_ALL_METRICS, f'truenas_disk_stats.io.{mapped_key}', f'{mapped_key}.writes', 0
            ), multiplier=1024,  # io chart is KiB/s -> bytes
        )
        busy += safely_retrieve_dimension(
            NETDATA_ALL_METRICS, f'truenas_disk_stats.busy.{mapped_key}', f'{mapped_key}.busy', 0
        )
    assert disk_stats['read_ops'] == read_ops
    assert disk_stats['read_bytes'] == read_bytes
    assert disk_stats['write_ops'] == write_ops
    assert disk_stats['write_bytes'] == write_bytes
    assert disk_stats['busy'] == busy
def test_network_stats():
    """get_interface_stats() must convert the kilobits/s charts into byte rates per interface."""
    for nic, metrics in get_interface_stats(NETDATA_ALL_METRICS, ['enp1s0']).items():
        def expected_rate(dimension):
            # kilobits/s -> bytes/s: multiply by 1000, divide by 8.
            raw = safely_retrieve_dimension(NETDATA_ALL_METRICS, f'net.{nic}', dimension, 0)
            return normalize_value(raw, multiplier=1000, divisor=8)

        assert metrics['received_bytes_rate'] == expected_rate('received')
        assert metrics['sent_bytes_rate'] == expected_rate('sent')
        raw_speed = safely_retrieve_dimension(NETDATA_ALL_METRICS, f'net_speed.{nic}', 'speed', 0)
        assert metrics['speed'] == normalize_value(raw_speed, divisor=1000)
        # The fixture's net_operstate chart has 'up' == 1.
        assert metrics['link_state'] == 'LINK_STATE_UP'
def test_memory_stats():
    """get_memory_info() must combine netdata RAM/kernel/ZFS charts with /proc/meminfo fields."""
    # get_memory_info() reads /proc/meminfo directly, so open() is replaced with
    # the MEM_INFO fixture defined above.
    with patch('builtins.open', mock_open(read_data=MEM_INFO)):
        memory_stats = get_memory_info(NETDATA_ALL_METRICS)
    assert memory_stats['classes']['page_tables'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'mem.kernel', 'PageTables', 0), multiplier=1024 * 1024
    )
    assert memory_stats['classes']['slab_cache'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'mem.kernel', 'Slab', 0), multiplier=1024 * 1024
    )
    assert memory_stats['classes']['cache'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'system.ram', 'cached', 0), multiplier=1024 * 1024
    )
    assert memory_stats['classes']['buffers'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'system.ram', 'buffers', 0), multiplier=1024 * 1024
    )
    assert memory_stats['classes']['unused'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'system.ram', 'free', 0), multiplier=1024 * 1024
    )
    assert memory_stats['classes']['arc'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'zfs.arc_size', 'size', 0), multiplier=1024 * 1024
    )
    assert memory_stats['classes']['apps'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'system.ram', 'used', 0), multiplier=1024 * 1024
    )
    # 1413009408 == MEM_INFO's Inactive (1379892 kB) in bytes; the extra * 1024
    # presumably matches a unit scaling inside get_memory_info — TODO confirm.
    assert memory_stats['extra']['inactive'] == 1413009408 * 1024
    assert memory_stats['extra']['committed'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'mem.committed', 'Committed_AS', 0), multiplier=1024 * 1024,
    )
    # 69398528 == MEM_INFO's Active (67772 kB) in bytes.
    assert memory_stats['extra']['active'] == 69398528 * 1024
    assert memory_stats['extra']['vmalloc_used'] == normalize_value(
        safely_retrieve_dimension(NETDATA_ALL_METRICS, 'mem.kernel', 'VmallocUsed', 0), multiplier=1024 * 1024
    )
    # 56082432 == MEM_INFO's Mapped (54768 kB) in bytes.
    assert memory_stats['extra']['mapped'] == 56082432 * 1024
| 20,842 | Python | .py | 648 | 20.776235 | 119 | 0.453126 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,413 | test_netdata_graphs.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/reporting/test_netdata_graphs.py | import pytest
from unittest.mock import AsyncMock, patch
from middlewared.plugins.reporting.netdata.graphs import (
CPUPlugin, CPUTempPlugin, DiskTempPlugin, DISKPlugin, InterfacePlugin, LoadPlugin, MemoryPlugin,
UptimePlugin, ARCActualRatePlugin, ARCRatePlugin, ARCSizePlugin
)
from middlewared.pytest.unit.middleware import Middleware
@pytest.mark.parametrize('obj, identifier, legend', [
    (CPUPlugin, 'cpu', ['time']),
    (CPUTempPlugin, 'cputemp', ['time']),
    (DISKPlugin, 'sda', ['time']),
    (InterfacePlugin, 'enp1s0', ['time', 'received', 'sent']),
    (LoadPlugin, 'load', ['time']),
    (MemoryPlugin, 'memory', ['time']),
    (UptimePlugin, 'uptime', ['time']),
    (ARCActualRatePlugin, 'arcactualrate', ['time']),
    (ARCRatePlugin, 'arcrate', ['time']),
    (ARCSizePlugin, 'arcsize', ['time']),
    (DiskTempPlugin, 'sda', ['time', 'temperature_value']),
])
@pytest.mark.asyncio
async def test_netdata_client_malformed_response_error(obj, identifier, legend):
    """Every graph plugin must degrade gracefully (empty data, expected legend)
    when the netdata client returns an error payload instead of chart data."""
    plugin_object = obj(Middleware())
    api_response = {'error': 'test error', 'data': [], 'identifier': identifier, 'uri': 'http://test_uri'}
    with patch(
        'middlewared.plugins.reporting.netdata.connector.ClientMixin.fetch', AsyncMock(return_value=api_response)
    ):
        # Disk-oriented plugins resolve identifiers through a disk mapping, so one
        # must be provided; the export call itself is identical for every plugin
        # (the original duplicated it in both branches of the if/else).
        if obj in (DISKPlugin, DiskTempPlugin):
            plugin_object.disk_mapping = {identifier: ''}
        data = await plugin_object.export_multiple_identifiers({'after': 0, 'before': 0}, [identifier])
        assert set(data[0]['legend']) == set(legend)
        assert data[0]['identifier'] == identifier
        assert data[0]['data'] == []
| 1,757 | Python | .py | 35 | 44.685714 | 113 | 0.674242 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,414 | test_netdata_approximation.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/reporting/test_netdata_approximation.py | import pytest
from middlewared.plugins.reporting.utils import get_metrics_approximation, calculate_disk_space_for_netdata
@pytest.mark.parametrize('disk_count,core_count,interface_count,pool_count,services_count,vms_count,expected_output', [
(4, 2, 1, 2, 10, 2, {1: 796, 60: 4}),
(1600, 32, 4, 4, 10, 1, {1: 9283, 60: 1600}),
(10, 16, 2, 2, 12, 3, {1: 1131, 60: 10}),
])
def test_netdata_metrics_count_approximation(
    disk_count, core_count, interface_count, pool_count, services_count, vms_count, expected_output
):
    """get_metrics_approximation() must bucket metric counts by interval (1s / 60s keys)."""
    # Note the positional swap: the function takes vms_count before services_count.
    actual = get_metrics_approximation(
        disk_count, core_count, interface_count, pool_count, vms_count, services_count
    )
    assert actual == expected_output
@pytest.mark.parametrize(
'disk_count,core_count,interface_count,pool_count,services_count,vms_count,days,'
'bytes_per_point,tier_interval,expected_output', [
(4, 2, 1, 2, 10, 2, 7, 1, 1, 459),
(4, 2, 1, 2, 10, 1, 7, 4, 60, 28),
(1600, 32, 4, 12, 2, 4, 4, 1, 1, 3115),
(1600, 32, 4, 10, 1, 4, 4, 4, 900, 13),
(10, 16, 2, 2, 12, 1, 3, 1, 1, 255),
(10, 16, 2, 2, 10, 3, 3, 4, 60, 18),
(1600, 32, 4, 4, 12, 3, 18, 1, 1, 13967),
(1600, 32, 4, 4, 12, 1, 18, 4, 900, 61),
],
)
def test_netdata_disk_space_approximation(
    disk_count, core_count, interface_count, pool_count, services_count,
    vms_count, days, bytes_per_point, tier_interval, expected_output
):
    """calculate_disk_space_for_netdata() must price the approximated metric set."""
    # Note the positional swap: get_metrics_approximation() takes vms before services.
    metric_intervals = get_metrics_approximation(
        disk_count, core_count, interface_count, pool_count, vms_count, services_count
    )
    required_space = calculate_disk_space_for_netdata(metric_intervals, days, bytes_per_point, tier_interval)
    assert required_space == expected_output
@pytest.mark.parametrize(
'disk_count,core_count,interface_count,pool_count,services_count,vms_count,days,bytes_per_point,tier_interval', [
(4, 2, 1, 2, 10, 2, 7, 1, 1),
(4, 2, 1, 2, 12, 2, 7, 4, 60),
(1600, 32, 4, 4, 10, 3, 4, 1, 1),
(1600, 32, 4, 4, 12, 3, 4, 4, 900),
(10, 16, 2, 2, 10, 4, 3, 1, 1),
(10, 16, 2, 2, 12, 4, 3, 4, 60),
(1600, 32, 4, 4, 10, 5, 18, 1, 1),
(1600, 32, 4, 4, 12, 5, 18, 4, 900),
],
)
def test_netdata_days_approximation(
    disk_count, core_count, interface_count, pool_count, services_count, vms_count, days, bytes_per_point, tier_interval
):
    """Round-trip check: the disk-space figure must invert back to the requested retention days."""
    metric_intervals = get_metrics_approximation(
        disk_count, core_count, interface_count, pool_count, vms_count, services_count
    )
    disk_size = calculate_disk_space_for_netdata(metric_intervals, days, bytes_per_point, tier_interval)
    # Per-second metric equivalent: a 60s-interval metric contributes 1/60th of a point per second.
    total_metrics = metric_intervals[1] + (metric_intervals[60] / 60)
    # disk_size appears to be in MiB (hence * 1024 * 1024 — TODO confirm unit); dividing by the
    # bytes of points written per retention window recovers the original day count.
    assert round((disk_size * 1024 * 1024) / (bytes_per_point * total_metrics * (86400 / tier_interval))) == days
| 2,724 | Python | .py | 54 | 44.944444 | 120 | 0.630116 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,415 | test_docker_address_pools_validtaion.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/docker/test_docker_address_pools_validtaion.py | import pytest
from middlewared.plugins.docker.validation_utils import validate_address_pools
from middlewared.service_exception import ValidationErrors
# Fixture standing in for an address already in use on the system
# (172.20.0.33/16); user-specified docker address pools must not overlap it.
IP_IN_USE = [
    {
        'type': 'INET',
        'address': '172.20.0.33',
        'netmask': 16,
        'broadcast': '172.20.0.63'
    }
]
@pytest.mark.parametrize('user_specified_networks,error_msg', (
(
[],
'At least one address pool must be specified'),
(
[{'base': '172.20.2.0/24', 'size': 27}],
'Base network 172.20.2.0/24 overlaps with an existing system network'),
(
[{'base': '172.21.2.0/16', 'size': 10}],
'Base network 172.21.2.0/16 cannot be smaller than the specified subnet size 10'),
(
[{'base': '172.21.2.0/16', 'size': 27}, {'base': '172.21.2.0/16', 'size': 27}],
'Base network 172.21.2.0/16 is a duplicate of another specified network'
),
(
[{'base': '172.21.0.0/16', 'size': 27}, {'base': '172.22.0.0/16', 'size': 27}],
''
),
))
@pytest.mark.asyncio
async def test_address_pools_validation(user_specified_networks, error_msg):
    """validate_address_pools() must raise the expected ValidationErrors, or
    return None when the pools are acceptable."""
    if not error_msg:
        assert validate_address_pools(IP_IN_USE, user_specified_networks) is None
        return
    with pytest.raises(ValidationErrors) as exc_info:
        validate_address_pools(IP_IN_USE, user_specified_networks)
    assert exc_info.value.errors[0].errmsg == error_msg
| 1,383 | Python | .py | 38 | 30.710526 | 90 | 0.625093 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,416 | test_convert_aliases_to_datastore.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/network/test_convert_aliases_to_datastore.py | import pytest
from unittest.mock import Mock
from middlewared.plugins.network import InterfaceService
OBJ = InterfaceService(Mock())
non_ha_with_1_v4ip = (
{
'aliases': [
{
'address': '1.1.1.1',
'type': 'INET',
'netmask': 24,
},
]
},
(
{
'address': '1.1.1.1',
'address_b': '',
'netmask': 24,
'vip': '',
'version': 4,
},
[]
)
)
non_ha_with_1_v6ip = (
{
'aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'type': 'INET6',
'netmask': 64,
},
]
},
(
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'address_b': '',
'netmask': 64,
'version': 6,
'vip': '',
},
[]
)
)
non_ha_with_2_v4ips = (
{
'aliases': [
{
'address': '1.1.1.1',
'type': 'INET',
'netmask': 24,
},
{
'address': '2.2.2.2',
'type': 'INET',
'netmask': 24,
},
]
},
(
{
'address': '1.1.1.1',
'address_b': '',
'netmask': 24,
'version': 4,
'vip': '',
},
[{
'address': '2.2.2.2',
'address_b': '',
'netmask': 24,
'version': 4,
'vip': '',
}]
)
)
non_ha_with_2_v6ips = (
{
'aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'type': 'INET6',
'netmask': 64,
},
{
'address': 'aaaa:bbbb:cccc:eeee::1',
'type': 'INET6',
'netmask': 64,
},
]
},
(
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'address_b': '',
'netmask': 64,
'version': 6,
'vip': '',
},
[{
'address': 'aaaa:bbbb:cccc:eeee::1',
'address_b': '',
'netmask': 64,
'version': 6,
'vip': '',
}]
)
)
non_ha_with_2_mixed_ips = (
{
'aliases': [
{
'address': '1.1.1.1',
'type': 'INET',
'netmask': 24,
},
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'type': 'INET6',
'netmask': 64,
},
]
},
(
{
'address': '1.1.1.1',
'address_b': '',
'netmask': 24,
'version': 4,
'vip': '',
},
[{
'address': 'aaaa:bbbb:cccc:dddd::1',
'address_b': '',
'netmask': 64,
'version': 6,
'vip': '',
}]
)
)
ha_with_1_v4ip = (
{
'aliases': [{
'address': '1.1.1.1',
'type': 'INET',
'netmask': 24,
}],
'failover_aliases': [{
'address': '1.1.1.2',
'type': 'INET',
}],
'failover_virtual_aliases': [{
'address': '1.1.1.3',
'type': 'INET',
}],
},
(
{
'address': '1.1.1.1',
'address_b': '1.1.1.2',
'netmask': 24,
'version': 4,
'vip': '1.1.1.3',
},
[]
)
)
ha_with_1_v6ip = (
{
'aliases': [{
'address': 'aaaa:bbbb:cccc:dddd::1',
'type': 'INET6',
'netmask': 64,
}],
'failover_aliases': [{
'address': 'aaaa:bbbb:cccc:dddd::2',
'type': 'INET6',
}],
'failover_virtual_aliases': [{
'address': 'aaaa:bbbb:cccc:dddd::3',
'type': 'INET6',
}],
},
(
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'address_b': 'aaaa:bbbb:cccc:dddd::2',
'netmask': 64,
'version': 6,
'vip': 'aaaa:bbbb:cccc:dddd::3',
},
[]
)
)
ha_with_2_v4ips = (
{
'aliases': [
{
'address': '1.1.1.1',
'type': 'INET',
'netmask': 24,
},
{
'address': '2.2.2.1',
'type': 'INET',
'netmask': 24,
},
],
'failover_aliases': [
{
'address': '1.1.1.2',
'type': 'INET',
},
{
'address': '2.2.2.2',
'type': 'INET',
},
],
'failover_virtual_aliases': [
{
'address': '1.1.1.3',
'type': 'INET',
},
{
'address': '2.2.2.3',
'type': 'INET'
},
],
},
(
{
'address': '1.1.1.1',
'address_b': '1.1.1.2',
'netmask': 24,
'version': 4,
'vip': '1.1.1.3',
},
[{
'address': '2.2.2.1',
'address_b': '2.2.2.2',
'netmask': 24,
'version': 4,
'vip': '2.2.2.3'
}]
)
)
ha_with_2_v6ips = (
{
'aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'type': 'INET6',
'netmask': 64,
},
{
'address': 'aaaa:bbbb:3333:eeee::1',
'type': 'INET6',
'netmask': 64,
},
],
'failover_aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::2',
'type': 'INET6',
},
{
'address': 'aaaa:bbbb:3333:eeee::2',
'type': 'INET6',
},
],
'failover_virtual_aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::3',
'type': 'INET6',
},
{
'address': 'aaaa:bbbb:3333:eeee::3',
'type': 'INET6',
},
],
},
(
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'address_b': 'aaaa:bbbb:cccc:dddd::2',
'netmask': 64,
'version': 6,
'vip': 'aaaa:bbbb:cccc:dddd::3',
},
[{
'address': 'aaaa:bbbb:3333:eeee::1',
'address_b': 'aaaa:bbbb:3333:eeee::2',
'netmask': 64,
'version': 6,
'vip': 'aaaa:bbbb:3333:eeee::3'
}]
)
)
ha_with_2_mixed_ips = (
{
'aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'type': 'INET6',
'netmask': 64,
},
{
'address': '1.1.1.1',
'type': 'INET',
'netmask': 24,
},
],
'failover_aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::2',
'type': 'INET6',
},
{
'address': '1.1.1.2',
'type': 'INET',
},
],
'failover_virtual_aliases': [
{
'address': 'aaaa:bbbb:cccc:dddd::3',
'type': 'INET6',
},
{
'address': '1.1.1.3',
'type': 'INET',
},
],
},
(
{
'address': 'aaaa:bbbb:cccc:dddd::1',
'address_b': 'aaaa:bbbb:cccc:dddd::2',
'netmask': 64,
'version': 6,
'vip': 'aaaa:bbbb:cccc:dddd::3',
},
[{
'address': '1.1.1.1',
'address_b': '1.1.1.2',
'netmask': 24,
'version': 4,
'vip': '1.1.1.3'
}]
)
)
@pytest.mark.parametrize('data, result', [
non_ha_with_1_v4ip,
non_ha_with_1_v6ip,
non_ha_with_2_v4ips,
non_ha_with_2_v6ips,
non_ha_with_2_mixed_ips,
ha_with_1_v4ip,
ha_with_1_v6ip,
ha_with_2_v4ips,
ha_with_2_v6ips,
ha_with_2_mixed_ips
])
def test_convert_aliases_to_datastore(data, result):
    """convert_aliases_to_datastore() must split the input aliases into the
    primary interface row and the list of extra alias rows."""
    expected_iface, expected_aliases = result
    iface_row, alias_rows = OBJ.convert_aliases_to_datastore(data)
    assert iface_row == expected_iface
    assert alias_rows == expected_aliases
| 8,522 | Python | .py | 368 | 12.255435 | 59 | 0.327845 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,417 | test_get_latest_version.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_get_latest_version.py | import pytest
from middlewared.plugins.apps.version_utils import get_latest_version_from_app_versions
from middlewared.service import CallError
@pytest.mark.parametrize('versions, should_work, expected', [
(
{
'1.1.9': {
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.9',
'last_update': '2024-10-02 18:57:15',
'required_features': [
'definitions/certificate',
'definitions/port',
'normalize/acl',
'normalize/ix_volume'
],
}
},
True,
'1.1.9'
),
(
{
'1.1.9': {
'healthy': None,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.9',
'last_update': '2024-10-02 18:57:15',
'required_features': [
'definitions/certificate',
'definitions/port',
'normalize/acl',
'normalize/ix_volume'
],
}
},
False,
None
),
(
{},
False,
None
),
(
{
'1.1.9': {
'healthy': None,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.9',
'last_update': '2024-10-02 18:57:15',
'required_features': [
'definitions/certificate',
'definitions/port',
'normalize/acl',
'normalize/ix_volume'
],
},
'2.0.1': {
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/2.0.1',
'last_update': '2024-10-02 18:57:15',
'required_features': [
'definitions/certificate',
'definitions/port',
'normalize/acl',
'normalize/ix_volume'
],
}
},
True,
'2.0.1'
),
])
def test_get_latest_version(versions, should_work, expected):
    """get_latest_version_from_app_versions() must pick the newest healthy
    version, and raise CallError when none qualifies."""
    if not should_work:
        with pytest.raises(CallError):
            get_latest_version_from_app_versions(versions)
        return
    assert get_latest_version_from_app_versions(versions) == expected
| 2,826 | Python | .py | 87 | 19.264368 | 97 | 0.452851 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,418 | test_normalize_CA.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_CA.py | import textwrap
import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Int
@pytest.mark.parametrize('cert, value, should_work', [
(
textwrap.dedent(
'''
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
''',
),
12,
True
),
(
textwrap.dedent(
'''
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
''',
),
None,
False
),
], ids=['valid_ca', 'invalid_ca'])
@pytest.mark.asyncio
async def test_normalize_CA(cert, value, should_work):
    """normalize_certificate_authorities() must resolve a known CA id to a value
    and normalize an unknown one to None."""
    middleware = Middleware()
    schema_service = AppSchemaService(middleware)
    # Short-circuit the CA lookup to return the fixture certificate directly.
    middleware['certificateauthority.get_instance'] = lambda *args: cert
    config = {'ix_certificate_authorities': {value: cert}}
    normalized = await schema_service.normalize_certificate_authorities(Int('CA'), value, config, '')
    # A resolvable CA yields a non-None result; an invalid id yields None.
    assert (normalized is not None) == should_work
| 5,783 | Python | .py | 98 | 48.377551 | 106 | 0.780144 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,419 | test_get_schema.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_get_schema.py | import pytest
from middlewared.plugins.apps.schema_utils import get_schema, SCHEMA_MAPPING
@pytest.mark.parametrize('data', [
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'additional_envs',
'label': 'Additional Environment Variables',
'description': 'Configure additional environment variables for Actual Budget.',
'schema': {
'type': 'list',
'default': [],
'items': [
{
'variable': 'env',
'label': 'Environment Variable',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'name',
'label': 'Name',
'schema': {
'type': 'string',
'required': True
}
},
]
}
}
]
}
}
]
}
},
])
def test_get_schema_success(data):
    """A well-formed question must parse into one of the concrete SCHEMA_MAPPING types."""
    parsed = get_schema(data, False)
    assert parsed is not None
    concrete_types = tuple(filter(lambda v: isinstance(v, type), SCHEMA_MAPPING.values()))
    assert isinstance(parsed[0], concrete_types)
@pytest.mark.parametrize('data', [
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
},
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'additional_envs',
'label': 'Additional Environment Variables',
'description': 'Configure additional environment variables for Actual Budget.',
'schema': {
'type': 'custom',
'default': [],
'items': []
}
}
]
}
}
])
def test_get_schema_KeyError(data):
    """A malformed question — e.g. a missing 'schema' key or an unrecognized
    schema 'type' (see the parametrized cases) — must raise KeyError."""
    with pytest.raises(KeyError):
        get_schema(data, False)
@pytest.mark.parametrize('data, existing', [
(
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
'schema': {
'type': 'dict',
'immutable': True,
'attrs': [
{
'variable': 'additional_envs',
'label': 'Additional Environment Variables',
'description': 'Configure additional environment variables for Actual Budget.',
'schema': {
'type': 'list',
'default': [],
'immutable': True,
'items': [
{
'variable': 'env',
'label': 'Environment Variable',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'name',
'label': 'Name',
'schema': {
'type': 'string',
'required': True
}
},
]
}
}
]
}
}
]
}
},
{
'actual_budget': {'env': {'name': 'EXAMPLE_ENV', 'value': 'example_value'}}
}
),
])
def test_get_schema_existing(data, existing):
    """Immutable questions combined with an existing config must still parse
    into one of the concrete SCHEMA_MAPPING types."""
    parsed = get_schema(data, False, existing)
    assert parsed is not None
    concrete_types = tuple(filter(lambda v: isinstance(v, type), SCHEMA_MAPPING.values()))
    assert isinstance(parsed[0], concrete_types)
@pytest.mark.parametrize('data', [
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'additional_envs',
'label': 'Additional Environment Variables',
'description': 'Configure additional environment variables for Actual Budget.',
'schema': {
'type': 'list',
'items': [
{
'variable': 'env',
'label': 'Environment Variable',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'name',
'label': 'Name',
'schema': {
'type': 'string',
'enum': [],
'required': True
}
},
{
'variable': 'network',
'label': '',
'group': 'Network Configuration',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'web_port',
'label': 'WebUI Port',
'description': 'The port for Actual Budget WebUI',
'schema': {
'type': 'int',
'default': 31012,
'required': True,
'$ref': [
'definitions/port'
],
'min': 1,
'max': 65535
}
}
]
}
}
]
}
}
]
}
}
]
}
},
])
def test_get_schema_port_min_max(data):
    """A port question carrying min/max/$ref metadata must still parse into a
    concrete SCHEMA_MAPPING type."""
    parsed = get_schema(data, False)
    assert parsed is not None
    concrete_types = tuple(filter(lambda v: isinstance(v, type), SCHEMA_MAPPING.values()))
    assert isinstance(parsed[0], concrete_types)
# Schema fixture exercising the 'valid_chars' string-question attribute.
@pytest.mark.parametrize('data', [
    {
        'variable': 'actual_budget',
        'label': '',
        'group': 'Actual Budget Configuration',
        'schema': {
            'type': 'dict',
            'attrs': [
                {
                    'variable': 'additional_envs',
                    'label': 'Additional Environment Variables',
                    'description': 'Configure additional environment variables for Actual Budget.',
                    'schema': {
                        'type': 'list',
                        'default': [],
                        'items': [
                            {
                                'variable': 'env',
                                'label': 'Environment Variable',
                                'schema': {
                                    'type': 'dict',
                                    'attrs': [
                                        {
                                            'variable': 'name',
                                            'label': 'Name',
                                            'schema': {
                                                'type': 'string',
                                                # NOTE(review): ('char1') is a parenthesized string,
                                                # not a 1-tuple — confirm whether ('char1',) was intended.
                                                'valid_chars': ('char1'),
                                                'required': True
                                            }
                                        }
                                    ]
                                }
                            }
                        ]
                    }
                }
            ]
        }
    },
])
def test_get_schema_valid_chars(data):
    """get_schema() accepts a string question carrying 'valid_chars' and returns schema objects."""
    result = get_schema(data, False)
    assert result is not None
    # get_schema() must yield instances of the attribute classes registered in SCHEMA_MAPPING.
    valid_types = tuple(v for v in SCHEMA_MAPPING.values() if isinstance(v, type))
    assert isinstance(result[0], valid_types)
# Schema fixture with a top-level dict question that declares 'subquestions'
# alongside its regular 'attrs'.
@pytest.mark.parametrize('data', [
    {
        'variable': 'actual_budget',
        'label': '',
        'group': 'Actual Budget Configuration',
        'schema': {
            'type': 'dict',
            'subquestions': [
                {
                    'variable': 'sub_question_1',
                    'schema': {
                        'type': 'dict',
                        'attrs': []
                    }
                },
                {
                    'variable': 'sub_question_2',
                    'schema': {
                        'type': 'dict',
                        'attrs': []
                    }
                }
            ],
            'attrs': [
                {
                    'variable': 'additional_envs',
                    'label': 'Additional Environment Variables',
                    'description': 'Configure additional environment variables for Actual Budget.',
                    'schema': {
                        'type': 'list',
                        'default': [],
                        'items': [
                            {
                                'variable': 'env',
                                'label': 'Environment Variable',
                                'schema': {
                                    'type': 'dict',
                                    'attrs': [
                                        {
                                            'variable': 'name',
                                            'label': 'Name',
                                            'schema': {
                                                'type': 'string',
                                                'required': True
                                            }
                                        }
                                    ]
                                }
                            }
                        ]
                    }
                }
            ]
        }
    },
])
def test_get_schema_subquestions(data):
    """get_schema() handles a dict question with 'subquestions' and returns schema objects."""
    result = get_schema(data, False)
    assert result is not None
    # get_schema() must yield instances of the attribute classes registered in SCHEMA_MAPPING.
    valid_types = tuple(v for v in SCHEMA_MAPPING.values() if isinstance(v, type))
    assert isinstance(result[0], valid_types)
| 12,436 | Python | .py | 306 | 16.356209 | 106 | 0.282578 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,420 | test_normalize_questions.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_questions.py | import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService, REF_MAPPING
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Dict, List
# Cases cover both REF_MAPPING keys seen here ('definitions/certificate',
# 'normalize/acl'), update True/False, and a None value.
@pytest.mark.parametrize('question_attr, ref, value, update', [
    (
        Dict(),
        'definitions/certificate',
        {'attr1': 'some_value'},
        False
    ),
    (
        Dict(),
        'normalize/acl',
        {'attr1': 'some_value'},
        False
    ),
    (
        Dict(),
        'normalize/acl',
        {'attr1': 'some_value'},
        True
    ),
    (
        Dict(),
        'definitions/certificate',
        None,
        False
    )
])
@pytest.mark.asyncio
async def test_normalize_question(question_attr, ref, value, update):
    """normalize_question() dispatches on the attr's ref and returns the normalizer's value."""
    middleware = Middleware()
    app_schema_obj = AppSchemaService(middleware)
    # Stub the ref-specific normalizer so it simply echoes back `value`.
    middleware[f'app.schema.normalize_{REF_MAPPING[ref]}'] = lambda *args: value
    question_attr.ref = [ref]
    result = await app_schema_obj.normalize_question(question_attr, value, update, '', '')
    assert result == value
# Same contract as test_normalize_question, but the question attribute is a
# List whose items each carry the ref as well.
@pytest.mark.parametrize('question_attr, ref, value, update', [
    (
        List(
            items=[
                Dict('question1', additional_attrs=True),
                Dict('question2', additional_attrs=True),
                Dict('question3', additional_attrs=True),
            ]
        ),
        'definitions/certificate',
        [
            {'question1': 'val1'},
            {'question2': 'val2'},
            {'question3': 'val3'}
        ],
        False
    ),
])
@pytest.mark.asyncio
async def test_normalize_question_List(question_attr, ref, value, update):
    """normalize_question() on a List attr recurses into items and preserves the value."""
    middleware = Middleware()
    app_schema_obj = AppSchemaService(middleware)
    # Stub the ref-specific normalizer so it simply echoes back `value`.
    middleware[f'app.schema.normalize_{REF_MAPPING[ref]}'] = lambda *args: value
    # The ref must be present on both the list itself and each item schema.
    for attr in question_attr.items:
        attr.ref = [ref]
    question_attr.ref = [ref]
    result = await app_schema_obj.normalize_question(question_attr, value, update, '', '')
    assert result == value
| 2,032 | Python | .py | 66 | 23.878788 | 90 | 0.611735 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,421 | test_list_apps.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_list_apps.py | import collections
import unittest
import pytest
from middlewared.plugins.apps.ix_apps.query import list_apps
# Catalog train -> app -> version info passed to list_apps() as the set of
# currently-available catalog apps.
AVAILABLE_MAPPING = {
    'community': {
        'whoogle': {
            'version': '1.0.20',
            'app_version': '0.9.0'
        },
        'rsyncd': {
            'version': '1.0.14',
            'app_version': '1.0.0'
        },
        'actual-budget': {
            'version': '1.1.13',
            'app_version': '24.10.1'
        },
    }
}
# Keyword arguments forwarded to list_apps() by common_impl(); the image
# update cache marks both referenced images as having no pending update.
KWARGS = {
    'host_ip': None,
    'retrieve_config': False,
    'image_update_cache': {
        'registry-1.docker.io/actualbudget/actual-server:24.10.1': False,
        'registry-1.docker.io/library/bash:latest': False
    }
}
# Collective metadata fixture returned by the mocked get_collective_metadata():
# one installed app ('actual-budget') with its catalog metadata and portals.
METADATA = {
    'actual-budget': {
        'custom_app': False,
        'human_version': '24.10.1_1.1.13',
        'metadata': {
            'app_version': '24.10.1',
            'capabilities': [],
            'categories': ['media'],
            'description': 'Actual Budget is a super fast and privacy-focused app for managing your finances.',
            'home': 'https://actualbudget.org',
            'host_mounts': [],
            'last_update': '2024-10-23 14:29:45',
            'lib_version': '1.1.4',
            'lib_version_hash': '6e32ff5969906d9c3a10fea2b17fdd3197afb052d3432344da03188d8a907113',
            'name': 'actual-budget',
            'title': 'Actual Budget',
            'train': 'community',
            'version': '1.1.13'
        },
        'migrated': False,
        'portals': {
            'Web UI': 'http://0.0.0.0:31012/'
        },
        'version': '1.1.13'
    }
}
def common_impl(
    mock_get_collective_metadata, mock_list_resources_by_project,
    mock_translate_resources_to_desired_workflow, scandir, workload, desired_state
):
    """Shared driver for the app-state tests.

    Wires the patched query helpers to return the given ``workload``, runs
    ``list_apps`` and asserts the single reported app ends up in
    ``desired_state``.
    """
    mock_get_collective_metadata.return_value = METADATA
    # default_factory=None makes this behave like a plain dict for lookups.
    mock_list_resources_by_project.return_value = collections.defaultdict(None, workload)
    mock_translate_resources_to_desired_workflow.return_value = workload['ix-actual-budget']
    # 'name' is a reserved Mock constructor argument (it names the mock object
    # itself for reprs) and does NOT create a `.name` attribute, so the
    # attribute must be assigned after construction.
    mock_entry1 = unittest.mock.Mock(is_file=lambda: True)
    mock_entry1.name = 'config1.json'
    # os.scandir is used as a context manager; expose one fake directory entry.
    scandir.return_value.__enter__.return_value = [mock_entry1]
    result = list_apps(AVAILABLE_MAPPING, **KWARGS)
    assert result is not None
    assert isinstance(result, list)
    assert isinstance(result[0], dict)
    assert result[0]['state'] == desired_state
# Any container in state 'crashed' (with no container still starting/created)
# must drive the overall app state to CRASHED; see ids for the per-case
# container state combinations.
@pytest.mark.parametrize('workload', [
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'starting',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'created',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 4,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'redis',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'web',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 4,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'redis',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'web',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 4,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'exited',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'redis',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'web',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 4,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'redis',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'crashed',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'web',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
], ids=[
    'starting-crashed', 'created-crashed', 'running-crashedx3',
    'running-crashedx2-running', 'exited-running-crashed-running',
    'crashedx3-running'
    ]
)
@unittest.mock.patch('os.scandir')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.translate_resources_to_desired_workflow')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.list_resources_by_project')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.get_collective_metadata')
def test_app_event_crashed(
    mock_get_collective_metadata, mock_list_resources_by_project,
    mock_translate_resources_to_desired_workflow, scandir, workload
):
    """Workloads containing crashed containers are reported with state CRASHED."""
    common_impl(
        mock_get_collective_metadata, mock_list_resources_by_project, mock_translate_resources_to_desired_workflow,
        scandir, workload, 'CRASHED',
    )
# Containers still in 'starting'/'created' (and none crashed) must yield an
# overall DEPLOYING state; see ids for per-case combinations.
@pytest.mark.parametrize('workload', [
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'starting',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'created',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'starting',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'exited',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'starting',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'created',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'created',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
], ids=['starting-created', 'running-starting', 'exited-starting', 'created-created'])
@unittest.mock.patch('os.scandir')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.translate_resources_to_desired_workflow')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.list_resources_by_project')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.get_collective_metadata')
def test_app_event_deploying(
    mock_get_collective_metadata, mock_list_resources_by_project,
    mock_translate_resources_to_desired_workflow, scandir, workload
):
    """Workloads with containers still starting/created are reported as DEPLOYING."""
    common_impl(
        mock_get_collective_metadata, mock_list_resources_by_project, mock_translate_resources_to_desired_workflow,
        scandir, workload, 'DEPLOYING',
    )
# All containers running -> overall app state RUNNING.
@pytest.mark.parametrize('workload', [
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'running',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
], ids=['running-running'])
@unittest.mock.patch('os.scandir')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.translate_resources_to_desired_workflow')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.list_resources_by_project')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.get_collective_metadata')
def test_app_event_running(
    mock_get_collective_metadata, mock_list_resources_by_project,
    mock_translate_resources_to_desired_workflow, scandir, workload
):
    """A workload whose containers are all running is reported as RUNNING."""
    common_impl(
        mock_get_collective_metadata, mock_list_resources_by_project, mock_translate_resources_to_desired_workflow,
        scandir, workload, 'RUNNING',
    )
# All containers exited or stopping -> overall app state STOPPED.
@pytest.mark.parametrize('workload', [
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'exited',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'exited',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
    {
        'ix-actual-budget': {
            'containers': 2,
            'container_details': [
                {
                    'service_name': 'actual_budget',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'stopping',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
                {
                    'service_name': 'db',
                    'image': 'actualbudget/actual-server:24.10.1',
                    'state': 'stopping',
                    'id': 'a30866299d667597baca8433aa51d83948075f4ae7e99d88569d6ec0bfcf89f0'
                },
            ],
            'images': [
                'actualbudget/actual-server:24.10.1',
                'bash'
            ]
        }
    },
], ids=['exited-exited', 'stopping-stopping'])
@unittest.mock.patch('os.scandir')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.translate_resources_to_desired_workflow')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.list_resources_by_project')
@unittest.mock.patch('middlewared.plugins.apps.ix_apps.query.get_collective_metadata')
def test_app_event_stopped(
    mock_get_collective_metadata, mock_list_resources_by_project,
    mock_translate_resources_to_desired_workflow, scandir, workload
):
    """A workload whose containers are all exited/stopping is reported as STOPPED."""
    common_impl(
        mock_get_collective_metadata, mock_list_resources_by_project, mock_translate_resources_to_desired_workflow,
        scandir, workload, 'STOPPED',
    )
| 18,351 | Python | .py | 476 | 25.109244 | 115 | 0.512262 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,422 | test_normalize_values.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_values.py | import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Dict
@pytest.mark.parametrize('dict_obj, values, update, context, expected', [
(
Dict(
'actual-budget',
Dict('run_as'),
Dict('network'),
Dict('resources')
),
{
'ix_certificates': {},
'ix_certificate_authorities': {},
'ix_volumes': {},
'ix_context': {}
},
False,
{'app': {'name': 'app', 'path': '/path/to/app'}, 'actions': []},
(
{
'ix_certificates': {},
'ix_certificate_authorities': {},
'ix_volumes': {},
'ix_context': {}
},
{
'app': {
'name': 'app',
'path': '/path/to/app'
},
'actions': []
}
)
),
(
Dict(
'actual-budget',
Dict('run_as'),
Dict('network'),
Dict('resources')
),
{
'ix_certificates': {},
'ix_certificate_authorities': {},
'ix_volumes': {},
'ix_context': {}
},
True,
{'app': {'name': 'app', 'path': '/path/to/app'}, 'actions': []},
(
{
'ix_certificates': {},
'ix_certificate_authorities': {},
'ix_volumes': {},
'ix_context': {}
},
{
'app': {
'name': 'app',
'path': '/path/to/app'
},
'actions': []
}
)
),
])
@pytest.mark.asyncio
async def test_normalize_values(dict_obj, values, update, context, expected):
middleware = Middleware()
app_schema_obj = AppSchemaService(middleware)
result = await app_schema_obj.normalize_values(
dict_obj,
values,
update,
context
)
assert result == expected
| 2,171 | Python | .py | 79 | 16.468354 | 77 | 0.424605 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,423 | test_normalize_acl.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_acl.py | import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Dict
@pytest.mark.parametrize('attr, value, context', [
(
Dict(),
{
'entries': [{'type': 'ALLOW', 'permissions': 'read'}],
'path': '/mnt/data'
},
{'actions': []},
),
(
Dict(),
{
'entries': [{'type': 'ALLOW', 'permissions': 'write'}],
'path': '/mnt/data'
},
{
'actions': [
{
'method': 'apply_acls',
'args': [
{
'path': {
'entries': [
{
'type': 'ALLOW',
'permissions': 'read'
}
]
}
}
]
}
]
},
),
(
Dict(),
{
'entries': [],
'path': ''
},
{'actions': []},
),
(
Dict(),
{
'entries': [{'type': 'ALLOW', 'permissions': 'rw'}],
'path': ''
},
{'actions': []},
),
])
@pytest.mark.asyncio
async def test_normalize_acl(attr, value, context):
middleware = Middleware()
app_schema_obj = AppSchemaService(middleware)
result = await app_schema_obj.normalize_acl(attr, value, '', context)
if all(value[k] for k in ('entries', 'path')):
assert len(context['actions']) > 0
else:
assert len(context['actions']) == 0
assert result == value
| 1,840 | Python | .py | 66 | 15.621212 | 74 | 0.388481 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,424 | test_construct_schema.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_construct_schema.py | import pytest
from middlewared.plugins.apps.schema_utils import construct_schema
from middlewared.schema import Dict, ValidationErrors
@pytest.mark.parametrize('data, new_values, update', [
(
{
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
},
],
'questions': [
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'additional_envs',
'label': 'Additional Environment Variables',
'description': 'Configure additional environment variables for Actual Budget.',
'schema': {
'type': 'list',
'default': [],
'items': [
{
'variable': 'env',
'label': 'Environment Variable',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'name',
'label': 'Name',
'schema': {
'type': 'string',
'required': True
}
},
]
}
}
]
}
}
]
}
},
]
}
},
{
'actual_budget': {'additional_envs': []}
},
False,
),
])
def test_construct_schema_update_False(data, new_values, update):
result = construct_schema(data, new_values, update)
assert isinstance(result['verrors'], ValidationErrors)
assert len(result['verrors'].errors) == 0
assert isinstance(result['dict_obj'], Dict)
assert result['new_values'] == new_values
assert result['schema_name'] == 'app_create'
# Same valid schema/values as the create case; update=True selects the update flow.
@pytest.mark.parametrize('data, new_values, update', [
    (
        {
            'schema': {
                'groups': [
                    {
                        'name': 'Actual Budget Configuration',
                        'description': 'Configure Actual Budget'
                    },
                ],
                'questions': [
                    {
                        'variable': 'actual_budget',
                        'label': '',
                        'group': 'Actual Budget Configuration',
                        'schema': {
                            'type': 'dict',
                            'attrs': [
                                {
                                    'variable': 'additional_envs',
                                    'label': 'Additional Environment Variables',
                                    'description': 'Configure additional environment variables for Actual Budget.',
                                    'schema': {
                                        'type': 'list',
                                        'default': [],
                                        'items': [
                                            {
                                                'variable': 'env',
                                                'label': 'Environment Variable',
                                                'schema': {
                                                    'type': 'dict',
                                                    'attrs': [
                                                        {
                                                            'variable': 'name',
                                                            'label': 'Name',
                                                            'schema': {
                                                                'type': 'string',
                                                                'required': True
                                                            }
                                                        },
                                                    ]
                                                }
                                            }
                                        ]
                                    }
                                }
                            ]
                        }
                    },
                ]
            }
        },
        {
            'actual_budget': {'additional_envs': []}
        },
        True
    )
])
def test_construct_schema_update_True(data, new_values, update):
    """construct_schema() with update=True validates cleanly under 'app_update'."""
    result = construct_schema(data, new_values, update)
    assert isinstance(result['verrors'], ValidationErrors)
    # Valid values must produce no validation errors.
    assert len(result['verrors'].errors) == 0
    assert isinstance(result['dict_obj'], Dict)
    assert result['new_values'] == new_values
    assert result['schema_name'] == 'app_update'
# Schema dict missing the 'questions' key entirely.
@pytest.mark.parametrize('data, update', [
    (
        {
            'schema': {
                'groups': [
                    {
                        'name': 'Actual Budget Configuration',
                        'description': 'Configure Actual Budget'
                    },
                ],
            }
        },
        True,
    ),
])
def test_construct_schema_KeyError(data, update):
    """construct_schema() raises KeyError when the schema lacks 'questions'."""
    with pytest.raises(KeyError):
        construct_schema(data, {}, update)
# Invalid values: 'additional_envs' is a string where the schema requires a list.
@pytest.mark.parametrize('data, new_values, update', [
    (
        {
            'schema': {
                'groups': [
                    {
                        'name': 'Actual Budget Configuration',
                        'description': 'Configure Actual Budget'
                    },
                ],
                'questions': [
                    {
                        'variable': 'actual_budget',
                        'label': '',
                        'group': 'Actual Budget Configuration',
                        'schema': {
                            'type': 'dict',
                            'attrs': [
                                {
                                    'variable': 'additional_envs',
                                    'label': 'Additional Environment Variables',
                                    'description': 'Configure additional environment variables for Actual Budget.',
                                    'schema': {
                                        'type': 'list',
                                        'default': [],
                                        'items': [
                                            {
                                                'variable': 'env',
                                                'label': 'Environment Variable',
                                                'schema': {
                                                    'type': 'dict',
                                                    'attrs': [
                                                        {
                                                            'variable': 'name',
                                                            'label': 'Name',
                                                            'schema': {
                                                                'type': 'string',
                                                                'required': True
                                                            }
                                                        },
                                                    ]
                                                }
                                            }
                                        ]
                                    }
                                }
                            ]
                        }
                    },
                ]
            }
        },
        {
            'actual_budget': {'additional_envs': 'abc'}
        },
        True,
    ),
])
def test_construct_schema_ValidationError(data, new_values, update):
    """construct_schema() collects validation errors for type-mismatched values."""
    result = construct_schema(data, new_values, update)
    assert isinstance(result['verrors'], ValidationErrors)
    # The string value for a list-typed question must be flagged.
    assert len(result['verrors'].errors) > 0
    assert isinstance(result['dict_obj'], Dict)
    assert result['new_values'] == new_values
    # Parentheses are required around the conditional expression: the original
    # `assert x == 'app_update' if update else 'app_create'` parsed as
    # `(x == 'app_update') if update else 'app_create'`, which for update=False
    # asserted the truthy string 'app_create' and always passed.
    assert result['schema_name'] == ('app_update' if update else 'app_create')
24,425 | test_normalize_and_validate.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_and_validate.py | import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Dict
@pytest.mark.parametrize('app_detail, values, expected', [
(
{
'healthy': True,
'supported': True,
'healthy_error': None,
'location': '/mnt/.ix-apps/truenas_catalog/trains/community/actual-budget/1.1.11',
'last_update': '2024-10-13 21:17:53',
'required_features': [],
'human_version': '24.10.1_1.1.11',
'version': '1.1.11',
'app_metadata': {
'app_version': '24.10.1',
'capabilities': [],
'categories': ['media'],
'description': 'Actual Budget is a super fast and privacy-focused app for managing your finances.',
'home': 'https://actualbudget.org',
'host_mounts': [],
'lib_version': '1.1.2',
'lib_version_hash': '3bf14311f7547731c94dbd4059f7aca95272210409631acbc5603a06223921e4',
'name': 'actual-budget',
'run_as_context': [],
'sources': [],
'title': 'Actual Budget',
'train': 'community',
'version': '1.1.11'
},
'schema': {
'groups': [
{
'name': 'Actual Budget Configuration',
'description': 'Configure Actual Budget'
}
],
'questions': [
{
'variable': 'actual_budget',
'label': '',
'group': 'Actual Budget Configuration',
'schema': {
'type': 'dict',
'attrs': [
{
'variable': 'additional_envs',
'label': 'Additional Environment Variables',
'description': 'Configure additional environment variables for Actual Budget.',
'schema': {
'type': 'list',
'default': [],
'items': []
}
}
]
}
}
],
'readme': '',
'changelog': None,
'values': {
'actual_budget': {
'additional_envs': []
},
'run_as': {
'user': 568,
'group': 568
},
'network': {
'web_port': 31012,
'host_network': False
},
'storage': {
'data': {
'type': 'ix_volume',
'ix_volume_config': {
'acl_enable': False,
'dataset_name': 'data'
}
},
'additional_storage': []
},
'resources': {
'limits': {
'cpus': 2,
'memory': 4096
}
}
}
}
},
{},
{
'ix_certificates': {},
'ix_certificate_authorities': {},
'ix_volumes': {},
'ix_context': {}
}
)
])
@pytest.mark.asyncio
async def test_normalize_and_validate(app_detail, values, expected):
middleware = Middleware()
app_schema_obj = AppSchemaService(middleware)
dict_obj = Dict(
'actual-budget',
Dict('run_as'),
Dict('network'),
Dict('resources')
)
middleware['app.schema.validate_values'] = lambda *args: dict_obj
new_values = await app_schema_obj.normalize_and_validate_values(
item_details=app_detail,
values=values,
update=False,
app_dir='/path/to/app'
)
assert new_values == expected
| 4,490 | Python | .py | 120 | 19.491667 | 115 | 0.380124 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,426 | test_normalize_gpu_options.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_gpu_options.py |
import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
@pytest.mark.parametrize('gpu_list, value, expected', [
(
[
{
'pci_slot': '0000:00:02.0',
'addr': {
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
],
'vendor': 'NVIDIA',
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': True,
'error': ''
}
],
{
'use_all_gpus': True,
'nvidia_gpu_selection': {
'0000:01:00.0': 'NVIDIA GPU 1',
'0000:02:00.0': 'NVIDIA GPU 2'
}
},
{
'use_all_gpus': False,
'nvidia_gpu_selection': {}
}
),
(
[
{
'pci_slot': '0000:00:02.0',
'addr': {
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Intel Integrated Graphics',
'devices': [
{'pci_id': '8086:1234', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
],
'vendor': 'Intel', # Non-NVIDIA vendor
'uses_system_critical_devices': True,
'critical_reason': 'No critical devices.',
'available_to_host': True,
'error': ''
}
],
{
'use_all_gpus': True,
'nvidia_gpu_selection': {}
},
{
'use_all_gpus': True,
'nvidia_gpu_selection': {}
}
),
(
[
{
'pci_slot': '0000:00:02.0',
'addr': {
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'NVIDIA GeForce RTX 3080',
'devices': [
{'pci_id': '10de:2206', 'pci_slot': '0000:01:00.0', 'vm_pci_slot': 'pci_0000_01_00_0'},
],
'vendor': 'NVIDIA', # This GPU is NVIDIA
'uses_system_critical_devices': True,
'critical_reason': 'No critical devices.',
'available_to_host': True,
'error': ''
},
{
'pci_slot': '0000:00:03.0',
'addr': {
'domain': '0000',
'bus': '00',
'slot': '03'
},
'description': 'AMD Radeon RX 6800',
'devices': [
{'pci_id': '1002:73bf', 'pci_slot': '0000:01:01.0', 'vm_pci_slot': 'pci_0000_01_01_0'},
],
'vendor': 'AMD', # Non-NVIDIA vendor
'uses_system_critical_devices': False,
'critical_reason': 'No critical devices.',
'available_to_host': True,
'error': ''
}
],
{
'use_all_gpus': True,
'nvidia_gpu_selection': {}
},
{
'use_all_gpus': True,
'nvidia_gpu_selection': {}
}
)
])
@pytest.mark.asyncio
async def test_normalize_gpu_option(gpu_list, value, expected):
middleware = Middleware()
app_schema_obj = AppSchemaService(middleware)
middleware['app.gpu_choices_internal'] = lambda *args: gpu_list
result = await app_schema_obj.normalize_gpu_configuration('', value, '', '')
assert result is not None
assert result == expected
| 4,181 | Python | .py | 121 | 20.92562 | 107 | 0.415844 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,427 | test_get_list_item_from_value.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_get_list_item_from_value.py | import pytest
from middlewared.plugins.apps.schema_utils import get_list_item_from_value
from middlewared.schema import List
@pytest.mark.parametrize('values, question_attr, should_work', [
(
['val1', 'val2', 'val3'],
List(
items=[
List({'question1': 'desc1'}),
List({'question2': 'desc2'}),
List({'question3': 'desc3'})
]
),
True
),
(
None,
List(
items=[
List({'question1': 'desc1'}),
List({'question2': 'desc2'}),
List({'question3': 'desc3'})
]
),
True
),
(
[{'val1': 'a'}, {'val2': 'b'}, {'val3': 'c'}],
List(
items=[
List({'question1': 'desc1'}),
List({'question2': 'desc2'}),
List({'question3': 'desc3'})
]
),
True
),
(
['val1', 'val1'],
List(
items=[
List({'question1': 'desc1'}, unique=True),
],
),
False
),
])
def test_get_list_item_from_value(values, question_attr, should_work):
if should_work:
result = get_list_item_from_value(values, question_attr)
assert result is not None
else:
assert get_list_item_from_value(values, question_attr) is None
| 1,408 | Python | .py | 53 | 17.056604 | 74 | 0.460799 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,428 | test_normalize_ix_volumes.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_ix_volumes.py | import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Dict
@pytest.mark.parametrize('attr, value, complete_config, context', [
(
Dict(),
{
'dataset_name': 'volume_1',
'properties': {'prop_key': 'prop_value'},
'acl_entries': {
'entries': [{'type': 'ALLOW', 'permissions': 'write'}],
'path': '/mnt/data'
}
},
{
'ix_volumes': {
'volume_1': ''
}
},
{'actions': [], 'app': {'name': 'test_app'}}
),
(
Dict(),
{
'dataset_name': 'volume_1',
'properties': {'prop_key': 'prop_value'},
'acl_entries': {
'entries': [],
'path': ''
}
},
{
'ix_volumes': {
'volume_1': ''
}
},
{'actions': [], 'app': {'name': 'test_app'}}
),
(
Dict(),
{
'dataset_name': 'volume_1',
'properties': {'prop_key': 'prop_value'},
'acl_entries': {
'entries': [],
'path': ''
}
},
{
'ix_volumes': {
'volume_1': ''
}
},
{
'actions': [
{
'method': 'update_volumes',
'args': [[
{
'name': 'volume_1'
}
]]
}
],
'app': {'name': 'test_app'}
}
),
(
Dict(),
{
'dataset_name': 'volume_1',
'properties': {'prop_key': 'prop_value'},
'acl_entries': {
'entries': [],
'path': ''
}
},
{
'ix_volumes': {
'volume_1': ''
}
},
{
'actions': [
{
'method': 'update_volumes',
'args': [[
{
'name': 'volume_2'
}
]]
}
],
'app': {'name': 'test_app'}
}
),
])
@pytest.mark.asyncio
async def test_normalize_ix_volumes(attr, value, complete_config, context):
middleware = Middleware()
app_schema_obj = AppSchemaService(middleware)
result = await app_schema_obj.normalize_ix_volume(attr, value, complete_config, context)
assert len(context['actions']) > 0
assert value['dataset_name'] in [v['name'] for v in context['actions'][0]['args'][-1]]
assert result == value
| 2,872 | Python | .py | 106 | 15.179245 | 92 | 0.375589 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,429 | test_normalize_certificate.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_normalize_certificate.py | import textwrap
import pytest
from middlewared.plugins.apps.schema_normalization import AppSchemaService
from middlewared.pytest.unit.middleware import Middleware
from middlewared.schema import Int
@pytest.mark.parametrize('cert, value, should_work', [
(
textwrap.dedent(
'''
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
'''
),
12,
True
),
(
textwrap.dedent(
'''
-----BEGIN CERTIFICATE-----
MIIFmzCCA4OgAwIBAgICMoMwDQYJKoZIhvcNAQELBQAwcjEMMAoGA1UEAwwDZGV2
MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVE4xEjAQBgNVBAcMCUtub3h2aWxsZTEL
MAkGA1UECgwCaVgxDDAKBgNVBAsMA2RldjEZMBcGCSqGSIb3DQEJARYKZGV2QGl4
LmNvbTAeFw0yMjAxMjQxOTI0MTRaFw0yMzAyMjUxOTI0MTRaMHIxDDAKBgNVBAMM
A2RldjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlROMRIwEAYDVQQHDAlLbm94dmls
bGUxCzAJBgNVBAoMAmlYMQwwCgYDVQQLDANkZXYxGTAXBgkqhkiG9w0BCQEWCmRl
dkBpeC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDuy5kKf7eT
LOuxm1pn51kFLgJHD6k05pROjMOXEZel7CsmrDKEehSSdwDB/WUim3idOsImLrc+
ApXsnKwVY93f7yn1rfF4lgKsa3sb6oqAcPEobgTUqSmJ/OQVilUqOtj/dmFaEWIS
21eKNzaByNdpyOcoRF/+uDylEsE1Gj0GjkBneVRxyTZFV7LdVyDk38hljesnd8FX
gnD0DCdI3jBvqSYvd+GvQ2nQ2624HAmEQwfllqKi9PRDngeZIeiTQSWN+rybJbDY
yonRS0FPxJydt/sDlzi43qzHnrTqUbL+2RjYIqcOqeivNtDZ2joh+xqfRdKzACWu
QWrhGCL5+9bnqA6PEPA7GQ2jp00gDkjB7+HlQLI8ZCZcST6mkbfs/EaW00WYIcw5
lb+5oJ8oJqWebnQB21iwvPjvAv353iA1ApTJxBdo13x7oXBwWsrpxWk6SdL2Z5zU
NXrC9ZyaoeQ5uZ/oBXbCxJfhSkISyI5D8yeYLjmMxn+AvRBQpkRmVvcy3ls2SHGX
4XEJ4Q0wj3a0rPqmDZUwpWErbmf+N6D7J+uK8n3pcGlvkFIUaP60UQGp4gwnZA2O
dZdhVQ4whQHyjTmL7kRKl+gR/vTp+iPvKMfTO1HBQp97iK8IPM7Q2Gpe6U4n/Ll2
TDaZ9DroM83Vnc6cX69Th555SA9+gP6HWQIDAQABozswOTAYBgNVHREEETAPggdk
b21haW4xhwQICAgIMB0GA1UdDgQWBBSz0br/9U9mwYZfuRO1JmKTEorq1DANBgkq
hkiG9w0BAQsFAAOCAgEAK7nBNA+qjgvkmcSLQC+yXPOwb3o55D+N0J2QLxJFT4NV
b0GKf0dkz92Ew1pkKYzsH6lLlKRE23cye6EZLIwkkhhF0sTwYeu8HNy7VmkSDsp0
aKbqxgBzIJx+ztQGNgZ1fQMRjHCRLf8TaSAxnVXaXXUeU6fUBq2gHbYq6BfZkGmU
6f8DzL7uKHzcMEmWfC5KxfSskFFPOyaz/VGViQ0yffwH1NB+txDlU58rmu9w0wLe
cOrOjVUNg8axQen2Uejjj3IRmDC18ZfY7EqI8O1PizCtIcPSm+NnZYg/FvVj0KmM
o2QwGMd5QTU2J5lz988Xlofm/r3GBH32+ETqIcJolBw9bBkwruBvHpcmyLSFcFWK
sdGgi2gK2rGb+oKwzpHSeCtQVwgQth55qRH1DQGaAdpA1uTriOdcR96i65/jcz96
aD2B958hF1B/7I4Md+LFYhxgwREBhyQkU6saf7GR0Q+p4F8/oIkjhdLsyzk4YHyI
PVtK00W8zQMKF6zhHjfaF2uDRO/ycMKCq9NIqQJCZNqwNAo0r4FOmilwud/tzFY8
GQ9FXeQSqWo7hUIXdbej+aJ7DusYeuE/CwQFNUnz1khvIFJ5B7YP+gYCyUW7V2Hr
Mv+cZ473U8hYQ1Ij7pXi7DxsOWqWCDhyK0Yp6MZsw0rNaAIPHnTTxYdMfmIYHT0=
-----END CERTIFICATE-----
'''
),
None,
False
),
], ids=['valid_cert', 'invalid_cert'])
@pytest.mark.asyncio
async def test_normalize_certificate(cert, value, should_work):
middleware = Middleware()
app_schema_obj = AppSchemaService(middleware)
middleware['certificate.get_instance'] = lambda *args: cert
complete_config = {'ix_certificates': {value: cert}}
result = await app_schema_obj.normalize_certificate(Int('Cert'), value, complete_config, '')
if should_work:
assert result is not None
else:
assert result is None
| 5,765 | Python | .py | 98 | 48.183673 | 96 | 0.780113 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,430 | test_get_normalized_gpus.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/apps/test_get_normalized_gpus.py | import pytest
from middlewared.plugins.apps.resources_utils import get_normalized_gpu_choices
@pytest.mark.parametrize('all_gpu_info, nvidia_gpus, should_work', [
(
[
{
'addr': {
'pci_slot': '0000:00:02.0',
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
{'pci_id': '8086:7010', 'pci_slot': '0000:00:01.1', 'vm_pci_slot': 'pci_0000_00_01_1'},
{'pci_id': '8086:7113', 'pci_slot': '0000:00:01.3', 'vm_pci_slot': 'pci_0000_00_01_3'},
],
'vendor': None,
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': True
}
],
{},
True
),
(
[
{
'addr': {
'pci_slot': '0000:00:02.0',
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
{'pci_id': '8086:7010', 'pci_slot': '0000:00:01.1', 'vm_pci_slot': 'pci_0000_00_01_1'},
{'pci_id': '8086:7113', 'pci_slot': '0000:00:01.3', 'vm_pci_slot': 'pci_0000_00_01_3'},
],
'vendor': 'NVIDIA',
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': True
}
],
{
'gpu_uuid': 112,
'model': 'A6000x2',
'description': "NVIDIA's A6000 GPU with 2 cores",
'pci_slot': 11111,
},
False
),
(
[
{
'addr': {
'pci_slot': '0000:00:02.0',
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
{'pci_id': '8086:7010', 'pci_slot': '0000:00:01.1', 'vm_pci_slot': 'pci_0000_00_01_1'},
{'pci_id': '8086:7113', 'pci_slot': '0000:00:01.3', 'vm_pci_slot': 'pci_0000_00_01_3'},
],
'vendor': 'NVIDIA',
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': True
}
],
{
'model': 'A6000x2',
'description': "NVIDIA's A6000 GPU with 2 cores",
'0000:00:02.0': {
'gpu_uuid': '112',
},
},
True
),
(
[
{
'addr': {
'pci_slot': '0000:00:02.0',
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
{'pci_id': '8086:7010', 'pci_slot': '0000:00:01.1', 'vm_pci_slot': 'pci_0000_00_01_1'},
{'pci_id': '8086:7113', 'pci_slot': '0000:00:01.3', 'vm_pci_slot': 'pci_0000_00_01_3'},
],
'vendor': 'NVIDIA',
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': True
}
],
{
'model': 'A6000x2',
'description': "NVIDIA's A6000 GPU with 2 cores",
'0000:00:02.0': {},
},
False
),
(
[
{
'addr': {
'pci_slot': '0000:00:02.0',
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
{'pci_id': '8086:7010', 'pci_slot': '0000:00:01.1', 'vm_pci_slot': 'pci_0000_00_01_1'},
{'pci_id': '8086:7113', 'pci_slot': '0000:00:01.3', 'vm_pci_slot': 'pci_0000_00_01_3'},
],
'vendor': 'NVIDIA',
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': True
}
],
{
'model': 'A6000x2',
'description': "NVIDIA's A6000 GPU with 2 cores",
'0000:00:02.0': {
'gpu_uuid': '1125?as'
},
},
False
),
(
[
{
'addr': {
'pci_slot': '0000:00:02.0',
'domain': '0000',
'bus': '00',
'slot': '02'
},
'description': 'Red Hat, Inc. QXL paravirtual graphic card',
'devices': [
{'pci_id': '8086:1237', 'pci_slot': '0000:00:00.0', 'vm_pci_slot': 'pci_0000_00_00_0'},
{'pci_id': '8086:7000', 'pci_slot': '0000:00:01.0', 'vm_pci_slot': 'pci_0000_00_01_0'},
{'pci_id': '8086:7010', 'pci_slot': '0000:00:01.1', 'vm_pci_slot': 'pci_0000_00_01_1'},
{'pci_id': '8086:7113', 'pci_slot': '0000:00:01.3', 'vm_pci_slot': 'pci_0000_00_01_3'},
],
'vendor': None,
'uses_system_critical_devices': True,
'critical_reason': 'Critical devices found: 0000:00:01.0',
'available_to_host': False
}
],
{},
False
),
])
def test_get_normalized_gpus(all_gpu_info, nvidia_gpus, should_work):
result = get_normalized_gpu_choices(all_gpu_info, nvidia_gpus)
if should_work:
assert result[0]['error'] is None
else:
assert result[0]['error'] is not None
| 7,376 | Python | .py | 181 | 26.033149 | 107 | 0.412542 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,431 | test_device_info.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/device/test_device_info.py | from unittest.mock import Mock
import pytest
from middlewared.plugins.device_.device_info import DeviceService
@pytest.mark.parametrize("host_type,disk_data,get_rotation_rate,result", [
(None, {"rota": True}, Mock(return_value=7200), ("HDD", 7200)),
(None, {"rota": True}, Mock(return_value=None), ("SSD", None)),
(None, {"rota": False}, Mock(side_effect=RuntimeError()), ("SSD", None)),
("QEMU", {"rota": True}, Mock(side_effect=RuntimeError()), ("HDD", None)),
])
def test_get_type_and_rotation_rate(host_type, disk_data, get_rotation_rate, result):
d = DeviceService(None)
d.HOST_TYPE = host_type
d._get_rotation_rate = get_rotation_rate
assert d._get_type_and_rotation_rate(disk_data, None) == result
| 741 | Python | .py | 14 | 49.357143 | 85 | 0.69018 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,432 | test_audit_utils.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/audit/test_audit_utils.py | import pytest
from middlewared.utils import filter_list
from middlewared.plugins.audit.utils import (
AUDITED_SERVICES,
parse_query_filters,
requires_python_filtering,
SQL_SAFE_FIELDS,
)
def test_service_filter_equal():
services = [s[0] for s in AUDITED_SERVICES]
to_check, filters = parse_query_filters(services, [['service', '=', 'SMB']], True)
assert len(to_check) == 1
assert to_check == {'SMB'}
def test_service_filter_in():
services = [s[0] for s in AUDITED_SERVICES]
to_check, filters = parse_query_filters(services, [['service', 'in', ['SMB']]], True)
assert len(to_check) == 1
assert to_check == {'SMB'}
def test_service_filter_not_equal():
""" Test that direct match properly excludes service """
services = [s[0] for s in AUDITED_SERVICES]
to_check, filters = parse_query_filters(services, [['service', '!=', 'SMB']], True)
assert len(to_check) == len(services) - 1
assert 'SMB' not in to_check
def test_service_filter_not_in():
""" Test that not-in filter properly excludes service """
services = [s[0] for s in AUDITED_SERVICES]
to_check, filters = parse_query_filters(services, [['service', 'nin', ['SMB']]], True)
assert len(to_check) == len(services) - 1
assert 'SMB' not in to_check
def test_no_services():
""" Test that all services being excluded results in empty `to_check` """
services = [s[0] for s in AUDITED_SERVICES]
to_check, filters = parse_query_filters(services, [['service', 'nin', services]], True)
assert len(to_check) == 0
def test_query_filters_supported():
""" Test that large filters containing only supported keys will get passed to SQL """
services = [s[0] for s in AUDITED_SERVICES]
filters = [[key, "=", services] for key in SQL_SAFE_FIELDS]
to_check, filters_out = parse_query_filters(services, filters, False)
assert len(to_check) == len(services)
assert len(filters_out) == len(filters)
def test_query_filters_disjunction():
""" Test that filters involing disjunction won't be passed to SQL """
services = [s[0] for s in AUDITED_SERVICES]
bad_filter = ['OR', ['username', '=', 'Bob'], ['username', '=', 'mary']]
good_filter = ['event', '=', 'CONNECT']
to_check, filters_out = parse_query_filters(services, [bad_filter, good_filter], False)
# verify OR is excluded
assert len(filters_out) == 1
assert filters_out == [good_filter]
def test_query_filters_json():
""" Test that excluded fields won't be passed to SQL """
services = [s[0] for s in AUDITED_SERVICES]
bad_filter = ['event_data', '=', {'result': 'canary'}]
good_filter = ['event', '=', 'CONNECT']
to_check, filters_out = parse_query_filters(services, [bad_filter, good_filter], False)
assert len(filters_out) == 1
assert filters_out == [good_filter]
def test_requires_python_filtering_filter_mismatch():
""" test that mismatch between filtersets results in rejection """
services = [s[0] for s in AUDITED_SERVICES]
result = requires_python_filtering(services, [['event_data.result', '=', 'canary']], [], {})
assert result is True
def test_requires_python_filtering_select_subkey():
""" test that selecting for subkey in JSON object results in rejection """
services = [s[0] for s in AUDITED_SERVICES]
result = requires_python_filtering(services, [], [], {'select': ['event_data.result']})
assert result is True
@pytest.mark.parametrize('services,options,expected', [
([s[0] for s in AUDITED_SERVICES], {'offset': 1}, True),
([s[0] for s in AUDITED_SERVICES], {'limit': 1}, True),
(['SMB'], {'offset': 1}, False),
(['SMB'], {'limit': 1}, False),
])
def test_requires_python_filtering_options(services, options, expected):
""" test that selecting for subkey in JSON object results in rejection """
result = requires_python_filtering(services, [], [], options)
assert result is expected
| 3,961 | Python | .py | 79 | 45.658228 | 96 | 0.669001 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,433 | test_recordsize_choices.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/pool/test_recordsize_choices.py | import io
from unittest.mock import patch
from middlewared.plugins.pool_.dataset_recordsize import PoolDatasetService
def test_recordsize_choices():
with patch("middlewared.plugins.pool_.dataset_recordsize.open") as mock:
mock.return_value = io.StringIO("32768\n")
assert PoolDatasetService(None).recordsize_choices() == ["512", "512B", "1K", "2K", "4K", "8K", "16K", "32K"]
| 398 | Python | .py | 7 | 52.571429 | 117 | 0.724227 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,434 | test_unlock.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/pool/test_unlock.py | from unittest.mock import Mock
import pytest
from middlewared.plugins.pool_.dataset_encryption_lock import PoolDatasetService
@pytest.mark.parametrize("request_datasets,keys_supplied,queried_datasets,result", [
(
[{"name": "tank/test", "recursive": True}, {"name": "tank/test/child", "recursive": True},
{"name": "tank/test/child/grandchild", "recursive": False}],
{"tank/test": "test-key", "tank/test/child": "child-key", "tank/test/child/grandchild": "grandchild-key"},
["tank/test", "tank/test/another-child", "tank/test/child", "tank/test/child/grandchild",
"tank/test/child/grandchild/grandgrandchild"],
{
"tank/test": "test-key",
"tank/test/another-child": "test-key",
"tank/test/child": "child-key",
"tank/test/child/grandchild": "grandchild-key",
"tank/test/child/grandchild/grandgrandchild": "child-key",
}
)
])
def test_assign_supplied_recursive_keys(request_datasets, keys_supplied, queried_datasets, result):
PoolDatasetService(Mock())._assign_supplied_recursive_keys(request_datasets, keys_supplied, queried_datasets)
assert keys_supplied == result
| 1,200 | Python | .py | 22 | 47.272727 | 114 | 0.672913 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,435 | test_schedule.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/smart/test_schedule.py | from middlewared.plugins.smart_.schedule import smartd_schedule_piece
def test__smartd_schedule_piece__every_day_of_week():
assert smartd_schedule_piece("1,2,3,4,5,6,7", 1, 7) == "."
def test__smartd_schedule_piece__every_day_of_week_wildcard():
assert smartd_schedule_piece("*", 1, 7) == "."
def test__smartd_schedule_piece__specific_day_of_week():
assert smartd_schedule_piece("1,2,3", 1, 7) == "(1|2|3)"
def test__smartd_schedule_piece__every_month():
assert smartd_schedule_piece("1,2,3,4,5,6,7,8,9,10,11,12", 1, 12) == ".."
def test__smartd_schedule_piece__each_month_wildcard():
assert smartd_schedule_piece("*", 1, 12) == ".."
def test__smartd_schedule_piece__each_month():
assert smartd_schedule_piece("*/1", 1, 12) == ".."
def test__smartd_schedule_piece__every_fifth_month():
assert smartd_schedule_piece("*/5", 1, 12) == "(05|10)"
def test__smartd_schedule_piece__every_specific_month():
assert smartd_schedule_piece("1,5,11", 1, 12) == "(01|05|11)"
def test__smartd_schedule_piece__at_midnight():
assert smartd_schedule_piece("0", 1, 23) == "(00)"
def test__smartd_schedule_piece__range_with_divisor():
assert smartd_schedule_piece("3-30/10", 1, 31) == "(10|20|30)"
def test__smartd_schedule_piece__range_without_divisor():
assert smartd_schedule_piece("10-15", 1, 31) == "(10|11|12|13|14|15)"
def test__smartd_schedule_piece__malformed_range_without_divisor():
assert smartd_schedule_piece("10-1", 1, 31) == "(10)"
| 1,500 | Python | .py | 25 | 56.12 | 77 | 0.674707 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,436 | test_crud.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/smart/test_crud.py | import pytest
from middlewared.plugins.smart import smart_test_schedules_intersect_at
DEFAULTS = {"hour": "*", "month": "*", "dom": "*", "dow": "*"}
@pytest.mark.parametrize("a,b,result", [
({"hour": "1"}, {"hour": "2"}, None),
({"hour": "*"}, {"hour": "2"}, "02:00"),
({"hour": "*/3"}, {"hour": "1,7,15,16,21"}, "15:00"),
({"dom": "1,3,5", "hour": "2"}, {"dom": "2,4,6", "hour": "2"}, None),
({"dom": "1,3,6", "hour": "2"}, {"dom": "2,4,6", "hour": "2"}, "Day 6th of every month, 02:00"),
({"dom": "1,3,6", "month": "*/2"}, {"dom": "2,4,6", "month": "1"}, None),
({"dom": "1,3,6", "month": "*/2"}, {"dom": "2,4,6", "month": "3,8"}, "Aug, 6th, 00:00"),
({"hour": "2"}, {"dow": "4", "hour": "2"}, "Thu, 02:00"),
({"dow": "0"}, {"dow": "0"}, "Sun, 00:00"),
({"dow": "0"}, {"dow": "7"}, "Sun, 00:00"),
])
def test__smart_test_schedules_intersect_at(a, b, result):
assert smart_test_schedules_intersect_at({**DEFAULTS, **a}, {**DEFAULTS, **b}) == result
| 1,000 | Python | .py | 17 | 54.882353 | 100 | 0.471853 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,437 | test_vm_devices_xml.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/vm/test_vm_devices_xml.py | import pytest
from unittest.mock import patch
from xml.etree import ElementTree as etree
from middlewared.plugins.vm.devices import CDROM, DISK, NIC, RAW, DISPLAY
from middlewared.plugins.vm.supervisor.domain_xml import devices_xml
from middlewared.pytest.unit.middleware import Middleware
GUEST_CHANEL = '<channel type="unix"><target type="virtio" name="org.qemu.guest_agent.0" /></channel>'
@pytest.mark.parametrize('vm_data,expected_xml', [
({'ensure_display_device': False, 'trusted_platform_module': False, 'devices': [], 'min_memory': None},
f'<devices>{GUEST_CHANEL}<serial type="pty" /></devices>'),
({'ensure_display_device': True, 'trusted_platform_module': False, 'devices': [], 'min_memory': None},
f'<devices><video />{GUEST_CHANEL}<serial type="pty" /></devices>'),
])
def test_basic_devices_xml(vm_data, expected_xml):
assert etree.tostring(devices_xml(vm_data, {'devices': []})).decode().strip() == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {'path': '/mnt/tank/disk.iso'},
'dtype': 'CDROM',
}]}, '<devices><disk type="file" device="cdrom"><driver name="qemu" type="raw" />'
'<source file="/mnt/tank/disk.iso" /><target dev="sda" bus="sata" /><boot order="1" />'
f'</disk>{GUEST_CHANEL}<serial type="pty" /></devices>'
),
])
def test_cdrom_xml(vm_data, expected_xml):
m = Middleware()
with patch('middlewared.plugins.vm.devices.cdrom.CDROM.is_available') as mock:
mock.return_value = True
assert etree.tostring(devices_xml(
vm_data, {'devices': [CDROM(device, m) for device in vm_data['devices']]})
).decode().strip() == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'dtype': 'DISPLAY',
'attributes': {
'bind': '0.0.0.0',
'password': '',
'web': True,
'type': 'SPICE',
'resolution': '1024x768',
'port': 5912,
'web_port': 5913,
'wait': False,
},
}]}, '<devices><graphics type="spice" port="5912"><listen type="address" address="0.0.0.0" /></graphics>'
'<controller type="usb" model="nec-xhci" /><input type="tablet" bus="usb" /><video>'
'<model type="qxl"><resolution x="1024" y="768" /></model></video><channel type="spicevmc">'
f'<target type="virtio" name="com.redhat.spice.0" /></channel>{GUEST_CHANEL}<serial type="pty" /></devices>'
),
])
def test_display_xml(vm_data, expected_xml):
m = Middleware()
with patch('middlewared.plugins.vm.devices.display.DISPLAY.is_available') as mock:
mock.return_value = True
assert etree.tostring(devices_xml(
vm_data, {'devices': [DISPLAY(device, m) for device in vm_data['devices']]})
).decode().strip() == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {
'type': 'VIRTIO',
'mac': '00:a0:99:7e:bb:8a',
'nic_attach': 'br0',
'trust_guest_rx_filters': False
},
'dtype': 'NIC',
}]}, '<devices><interface type="bridge"><source bridge="br0" /><model type="virtio" />'
'<mac address="00:a0:99:7e:bb:8a" /></interface>'
f'{GUEST_CHANEL}<serial type="pty" /></devices>'
),
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {
'type': 'VIRTIO',
'mac': '00:a0:99:7e:bb:8a',
'nic_attach': 'ens3',
'trust_guest_rx_filters': False
},
'dtype': 'NIC',
}]}, '<devices><interface type="direct" trustGuestRxFilters="no"><source dev="ens3" mode="bridge" />'
'<model type="virtio" /><mac address="00:a0:99:7e:bb:8a" /></interface>'
f'{GUEST_CHANEL}<serial type="pty" /></devices>'
),
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {
'type': 'VIRTIO',
'mac': '00:a0:99:7e:bb:8a',
'nic_attach': 'ens3',
'trust_guest_rx_filters': True
},
'dtype': 'NIC',
}]}, '<devices><interface type="direct" trustGuestRxFilters="yes"><source dev="ens3" mode="bridge" />'
'<model type="virtio" /><mac address="00:a0:99:7e:bb:8a" /></interface>'
f'{GUEST_CHANEL}<serial type="pty" /></devices>'
),
])
@patch('middlewared.plugins.vm.devices.nic.NIC.is_available', lambda *args: True)
def test_nic_xml(vm_data, expected_xml):
def setup_nic_attach(self):
self.nic_attach = vm_data['devices'][0]['attributes']['nic_attach']
m = Middleware()
with patch('middlewared.plugins.vm.devices.nic.NIC.setup_nic_attach', setup_nic_attach):
assert etree.tostring(devices_xml(
vm_data, {'devices': [NIC(device, m) for device in vm_data['devices']]})
).decode().strip() == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {
'path': '/dev/zvol/pool/boot_1',
'type': 'AHCI',
'logical_sectorsize': None,
'physical_sectorsize': None,
'iotype': 'THREADS',
},
'dtype': 'DISK',
}]}, '<devices><disk type="block" device="disk"><driver name="qemu" type="raw" cache="none" io="threads" discard="unmap" />'
'<source dev="/dev/zvol/pool/boot_1" /><target bus="sata" dev="sda" /><boot order="1" />'
f'</disk>{GUEST_CHANEL}<serial type="pty" /></devices>'
),
])
def test_disk_xml(vm_data, expected_xml):
m = Middleware()
with patch('middlewared.plugins.vm.devices.storage_devices.DISK.is_available') as mock:
mock.return_value = True
assert etree.tostring(devices_xml(
vm_data, {'devices': [DISK(device, m) for device in vm_data['devices']]})
).decode().strip() == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {
'path': '/mnt/tank/somefile',
'type': 'AHCI',
'logical_sectorsize': None,
'physical_sectorsize': None,
'iotype': 'THREADS',
},
'dtype': 'RAW',
}]}, '<devices><disk type="file" device="disk"><driver name="qemu" type="raw" cache="none" io="threads" discard="unmap" />'
'<source file="/mnt/tank/somefile" /><target bus="sata" dev="sda" /><boot order="1" />'
f'</disk>{GUEST_CHANEL}<serial type="pty" /></devices>'
),
({'ensure_display_device': False, 'trusted_platform_module': False, 'min_memory': None, 'devices': [{
'attributes': {
'path': '/mnt/tank/somefile',
'type': 'AHCI',
'logical_sectorsize': 512,
'physical_sectorsize': 512,
'iotype': 'THREADS',
},
'dtype': 'RAW',
}]}, '<devices><disk type="file" device="disk"><driver name="qemu" type="raw" cache="none" io="threads" discard="unmap" />'
'<source file="/mnt/tank/somefile" /><target bus="sata" dev="sda" /><boot order="1" />'
'<blockio logical_block_size="512" physical_block_size="512" /></disk>'
f'{GUEST_CHANEL}<serial type="pty" /></devices>'
),
])
def test_raw_xml(vm_data, expected_xml):
m = Middleware()
with patch('middlewared.plugins.vm.devices.storage_devices.RAW.is_available') as mock:
mock.return_value = True
assert etree.tostring(devices_xml(
vm_data, {'devices': [RAW(device, m) for device in vm_data['devices']]})
).decode().strip() == expected_xml
| 8,137 | Python | .py | 162 | 42.283951 | 128 | 0.594747 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,438 | test_attachments.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/vm/test_attachments.py | import pytest
from middlewared.plugins.vm.attachments import determine_recursive_search
@pytest.mark.parametrize('recursive,device,child_datasets,result', [
(True, {'attributes': {'path': '/mnt/tank/somefile'}, 'dtype': 'CDROM'}, ['tank/child'], True),
(False, {'attributes': {'path': '/dev/zvol/tank/somezvol'}, 'dtype': 'DISK'}, ['tank/child'], False),
(False, {'attributes': {'path': '/mnt/tank/child/file'}, 'dtype': 'RAW'}, ['tank/child'], False),
(False, {'attributes': {'path': '/mnt/tank/file'}, 'dtype': 'RAW'}, ['tank/child'], True),
])
@pytest.mark.asyncio
async def test_determining_recursive_search(recursive, device, child_datasets, result):
assert await determine_recursive_search(recursive, device, child_datasets) is result
| 764 | Python | .py | 11 | 66.363636 | 105 | 0.689333 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,439 | test_vm_gpu_pci_choices.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/vm/test_vm_gpu_pci_choices.py | from unittest.mock import Mock, patch
from middlewared.pytest.unit.helpers import load_compound_service
from middlewared.pytest.unit.middleware import Middleware
VMDeviceService = load_compound_service('vm.device')
AVAILABLE_GPUs = [
{
'addr': {
'pci_slot': '0000:16:0e.0',
'domain': '0000',
'bus': '16',
'slot': '0e'
},
'description': 'Red Hat, Inc. Virtio 1.0 GPU',
'devices': [
{
'pci_id': '8086:29C0',
'pci_slot': '0000:16:0e.0',
'vm_pci_slot': 'pci_0000_16_0e_0'
},
{
'pci_id': '1AF4:1050',
'pci_slot': '0000:16:0e.2',
'vm_pci_slot': 'pci_0000_16_0e_2'
},
],
'vendor': None,
'uses_system_critical_devices': False,
'available_to_host': True
},
{
'addr': {
'pci_slot': '0000:17:0e.0',
'domain': '0000',
'bus': '17',
'slot': '0e'
},
'description': 'Red Hat, Inc. Virtio 1.0 GPU',
'devices': [
{
'pci_id': '8086:29C0',
'pci_slot': '0000:17:0e.0',
'vm_pci_slot': 'pci_0000_17_0e_0'
},
{
'pci_id': '1AF4:1050',
'pci_slot': '0000:17:0e.2',
'vm_pci_slot': 'pci_0000_17_0e_2'
},
],
'vendor': None,
'uses_system_critical_devices': False,
'available_to_host': False
},
{
'addr': {
'pci_slot': '0000:18:0e.0',
'domain': '0000',
'bus': '18',
'slot': '0e'
},
'description': 'Red Hat, Inc. Virtio 1.0 GPU',
'devices': [
{
'pci_id': '8086:29C0',
'pci_slot': '0000:18:0e.0',
'vm_pci_slot': 'pci_0000_18_0e_0'
},
{
'pci_id': '1AF4:1050',
'pci_slot': '0000:18:0e.2',
'vm_pci_slot': 'pci_0000_18_0e_2'
},
],
'vendor': None,
'uses_system_critical_devices': True,
'available_to_host': True
}
]
IOMMU_GROUPS = {
'0000:16:0e.2': {
'number': 45,
'addresses': [
{
'domain': '0x0000',
'bus': '0x16',
'slot': '0x0e',
'function': '0x2'
},
{
'domain': '0x0000',
'bus': '0x16',
'slot': '0x0e',
'function': '0x0'
}
]
},
'0000:16:0e.0': {
'number': 45,
'addresses': [
{
'domain': '0x0000',
'bus': '0x16',
'slot': '0x0e',
'function': '0x2'
},
{
'domain': '0x0000',
'bus': '0x16',
'slot': '0x0e',
'function': '0x0'
}
]
},
'0000:17:0e.2': {
'number': 46,
'addresses': [
{
'domain': '0x0000',
'bus': '0x17',
'slot': '0x0e',
'function': '0x2'
},
{
'domain': '0x0000',
'bus': '0x17',
'slot': '0x0e',
'function': '0x0'
}
]
},
'0000:17:0e.0': {
'number': 46,
'addresses': [
{
'domain': '0x0000',
'bus': '0x17',
'slot': '0x0e',
'function': '0x2'
},
{
'domain': '0x0000',
'bus': '0x17',
'slot': '0x0e',
'function': '0x0'
}
]
},
'0000:18:0e.2': {
'number': 47,
'addresses': [
{
'domain': '0x0000',
'bus': '0x18',
'slot': '0x0e',
'function': '0x2'
},
{
'domain': '0x0000',
'bus': '0x18',
'slot': '0x0e',
'function': '0x0'
}
]
},
'0000:18:0e.0': {
'number': 47,
'addresses': [
{
'domain': '0x0000',
'bus': '0x18',
'slot': '0x0e',
'function': '0x2'
},
{
'domain': '0x0000',
'bus': '0x18',
'slot': '0x0e',
'function': '0x0'
}
]
},
'0000:b2:0f.0': {
'number': 83,
'addresses': [
{
'domain': '0x0000',
'bus': '0xb2',
'slot': '0x0f',
'function': '0x0'
}
]
},
'0000:00:04.0': {
'number': 17,
'addresses': [
{
'domain': '0x0000',
'bus': '0x00',
'slot': '0x04',
'function': '0x0'
}
]
},
}
def test_get_pci_ids_for_gpu_isolation():
    """Isolating the GPU at 0000:16:0e.0 must yield every PCI id in its IOMMU group (45)."""
    iommu_patch = patch('middlewared.plugins.vm.pci.get_iommu_groups_info', Mock(return_value=IOMMU_GROUPS))
    gpus_patch = patch('middlewared.plugins.vm.pci.get_gpus', Mock(return_value=AVAILABLE_GPUs))
    with iommu_patch, gpus_patch:
        service = VMDeviceService(Middleware())
        pci_ids = service.get_pci_ids_for_gpu_isolation('0000:16:0e.0')
    assert set(pci_ids) == {'pci_0000_16_0e_0', 'pci_0000_16_0e_2'}
| 5,637 | Python | .py | 210 | 14.709524 | 104 | 0.354916 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,440 | test_pci_device_iommu_groups.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/vm/test_pci_device_iommu_groups.py | from pathlib import PosixPath
from unittest.mock import Mock, patch
from middlewared.utils.iommu import get_iommu_groups_info
# Fake contents of /sys/kernel/iommu_groups/<group>/devices/* fed to
# `get_iommu_groups_info` in `test_iommu_groups` below. The last two entries are
# malformed on purpose (non-numeric group '45a'; 'test_file' is not a PCI address);
# the expected result (IOMMU_GROUPS) contains neither, so they must be skipped.
DEVICES_PATH = [
    PosixPath('/sys/kernel/iommu_groups/55/devices/0000:64:0a.1'),
    PosixPath('/sys/kernel/iommu_groups/83/devices/0000:b2:0f.0'),
    PosixPath('/sys/kernel/iommu_groups/17/devices/0000:00:04.0'),
    PosixPath('/sys/kernel/iommu_groups/45/devices/0000:16:0e.2'),
    PosixPath('/sys/kernel/iommu_groups/45/devices/0000:16:0e.0'),
    PosixPath('/sys/kernel/iommu_groups/45a/devices/0000:16:0e.7'),
    PosixPath('/sys/kernel/iommu_groups/45/devices/test_file')
]
# Expected parse of DEVICES_PATH above: PCI address -> group number and all addresses
# in that group, with each address split into domain/bus/slot/function hex fields.
# The two 0000:16:0e.* functions share group 45 and therefore list both addresses.
IOMMU_GROUPS = {
    '0000:64:0a.1': {
        'number': 55,
        'addresses': [
            {
                'domain': '0x0000',
                'bus': '0x64',
                'slot': '0x0a',
                'function': '0x1'
            }
        ]
    },
    '0000:b2:0f.0': {
        'number': 83,
        'addresses': [
            {
                'domain': '0x0000',
                'bus': '0xb2',
                'slot': '0x0f',
                'function': '0x0'
            }
        ]
    },
    '0000:00:04.0': {
        'number': 17,
        'addresses': [
            {
                'domain': '0x0000',
                'bus': '0x00',
                'slot': '0x04',
                'function': '0x0'
            }
        ]
    },
    '0000:16:0e.2': {
        'number': 45,
        'addresses': [
            {
                'domain': '0x0000',
                'bus': '0x16',
                'slot': '0x0e',
                'function': '0x2'
            },
            {
                'domain': '0x0000',
                'bus': '0x16',
                'slot': '0x0e',
                'function': '0x0'
            }
        ]
    },
    '0000:16:0e.0': {
        'number': 45,
        'addresses': [
            {
                'domain': '0x0000',
                'bus': '0x16',
                'slot': '0x0e',
                'function': '0x2'
            },
            {
                'domain': '0x0000',
                'bus': '0x16',
                'slot': '0x0e',
                'function': '0x0'
            }
        ]
    },
}
def test_iommu_groups():
    """get_iommu_groups_info must build IOMMU_GROUPS from the fake sysfs listing."""
    is_dir_patch = patch('middlewared.utils.iommu.pathlib.PosixPath.is_dir', Mock(return_value=True))
    glob_patch = patch('middlewared.utils.iommu.pathlib.Path.glob', Mock(return_value=DEVICES_PATH))
    with is_dir_patch, glob_patch:
        result = get_iommu_groups_info()
    assert result == IOMMU_GROUPS
| 2,459 | Python | .py | 85 | 18.129412 | 97 | 0.441959 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,441 | test_passthrough_choices.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/vm/test_passthrough_choices.py | from unittest.mock import Mock
import pytest
from middlewared.plugins.vm.pci import VMDeviceService
from middlewared.pytest.unit.middleware import Middleware
@pytest.mark.parametrize('pcidevs,results', [
    # Host bridge with no bound driver -> reported as available with empty drivers list.
    (
        Mock(
            sys_name='0000:00:00.0',
            attributes={'class': b'0x060000'},
            properties={
                'ID_PCI_SUBCLASS_FROM_DATABASE': 'Host bridge',
                'DRIVER': None,
                'ID_MODEL_FROM_DATABASE': 'Sky Lake-E DMI3 Registers',
                'ID_VENDOR_FROM_DATABASE': 'Intel Corporation',
            }
        ),
        {
            'capability': {
                'class': '0x060000',
                'domain': '0',
                'bus': '0',
                'slot': '0',
                'function': '0',
                'product': 'Sky Lake-E DMI3 Registers',
                'vendor': 'Intel Corporation'
            },
            'controller_type': 'Host bridge',
            'critical': False,
            'iommu_group': {
                'number': 27,
                'addresses': [
                    {
                        'domain': '0x0000',
                        'bus': '0x00',
                        'slot': '0x00',
                        'function': '0x0'
                    }
                ]
            },
            'available': True,
            'drivers': [],
            'error': None,
            'device_path': '/sys/bus/pci/devices/0000:00:00.0',
            'reset_mechanism_defined': False,
            'description': "0000:00:00.0 'Host bridge': Sky Lake-E DMI3 Registers by 'Intel Corporation'"
        }
    ),
    # System peripheral already bound to 'ioatdma' -> not available, driver listed.
    (
        Mock(
            sys_name='0000:00:04.0',
            attributes={'class': b'0x088000'},
            properties={
                'ID_PCI_SUBCLASS_FROM_DATABASE': 'System peripheral',
                'DRIVER': 'ioatdma',
                'ID_MODEL_FROM_DATABASE': 'Sky Lake-E CBDMA Registers',
                'ID_VENDOR_FROM_DATABASE': 'Intel Corporation',
            }
        ),
        {
            'capability': {
                'class': '0x088000',
                'domain': '0',
                'bus': '0',
                'slot': '4',
                'function': '0',
                'product': 'Sky Lake-E CBDMA Registers',
                'vendor': 'Intel Corporation'
            },
            'controller_type': 'System peripheral',
            'critical': False,
            'iommu_group': {
                'number': 28,
                'addresses': [
                    {
                        'domain': '0x0000',
                        'bus': '0x00',
                        'slot': '0x04',
                        'function': '0x0'
                    }
                ]
            },
            'available': False,
            'drivers': ['ioatdma'],
            'error': None,
            'device_path': '/sys/bus/pci/devices/0000:00:04.0',
            'reset_mechanism_defined': False,
            'description': "0000:00:04.0 'System peripheral': Sky Lake-E CBDMA Registers by 'Intel Corporation'"
        }
    ),
])
def test__get_pci_device_details(pcidevs, results):
    """get_pci_device_details must derive capability/availability info from a udev
    device mock plus the supplied IOMMU group map."""
    # IOMMU info for both mocked addresses; matched into 'iommu_group' above.
    iommu_info = {
        '0000:00:00.0': {
            'number': 27,
            'addresses': [
                {
                    'domain': '0x0000',
                    'bus': '0x00',
                    'slot': '0x00',
                    'function': '0x0'
                }
            ],
        },
        '0000:00:04.0': {
            'number': 28,
            'addresses': [
                {
                    'domain': '0x0000',
                    'bus': '0x00',
                    'slot': '0x04',
                    'function': '0x0'
                }
            ],
        }
    }
    assert VMDeviceService(Middleware()).get_pci_device_details(pcidevs, iommu_info) == results
| 3,849 | Python | .py | 116 | 19.008621 | 112 | 0.408152 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,442 | test_vm_libvirt_xml.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/vm/test_vm_libvirt_xml.py | import pytest
from xml.etree import ElementTree as etree
from middlewared.plugins.vm.supervisor.domain_xml import clock_xml, commandline_xml, cpu_xml, features_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
    ({'hyperv_enlightenments': False, 'time': 'LOCAL'}, '<clock offset="localtime" />'),
    ({'hyperv_enlightenments': True, 'time': 'LOCAL'},
     '<clock offset="localtime"><timer name="hypervclock" present="yes" /></clock>'),
    ({'hyperv_enlightenments': True, 'time': 'UTC'},
     '<clock offset="utc"><timer name="hypervclock" present="yes" /></clock>'),
])
def test_clock_xml(vm_data, expected_xml):
    """Each VM time/enlightenment combination must render the expected <clock> element."""
    element = clock_xml(vm_data)
    serialized = etree.tostring(element).decode().strip()
    assert serialized == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
    ({'command_line_args': ''}, '<commandline xmlns="http://libvirt.org/schemas/domain/qemu/1.0" />'),
    ({'command_line_args': '-set group.id.arg=value'},
     '<commandline xmlns="http://libvirt.org/schemas/domain/qemu/1.0"><arg value="-set" />'
     '<arg value="group.id.arg=value" /></commandline>'),
])
def test_command_line_xml(vm_data, expected_xml):
    """command_line_args must be split into individual qemu <arg> elements."""
    element = commandline_xml(vm_data)
    serialized = etree.tostring(element).decode().strip()
    assert serialized == expected_xml
@pytest.mark.parametrize('vm_data,context,expected_xml', [
    # Plain CUSTOM mode: topology element only, vcpu count = vcpus * cores * threads.
    ({
        'cpu_mode': 'CUSTOM',
        'vcpus': 1,
        'cores': 2,
        'threads': 3,
        'cpu_model': None,
        'cpuset': None,
        'pin_vcpus': False,
        'nodeset': None,
        'enable_cpu_topology_extension': False
    }, {'cpu_model_choices': {}}, [
        '<cpu mode="custom"><topology sockets="1" cores="2" threads="3" /></cpu>',
        '<vcpu>6</vcpu>',
    ]),
    # HOST-PASSTHROUGH adds a passthrough cache element.
    ({
        'cpu_mode': 'HOST-PASSTHROUGH',
        'vcpus': 1,
        'cores': 2,
        'threads': 3,
        'cpu_model': None,
        'cpuset': None,
        'pin_vcpus': False,
        'nodeset': None,
        'enable_cpu_topology_extension': False
    }, {'cpu_model_choices': {}}, [
        '<cpu mode="host-passthrough"><topology sockets="1" cores="2" threads="3" /><cache mode="passthrough" /></cpu>',
        '<vcpu>6</vcpu>',
    ]),
    # Topology extension enabled: a required 'topoext' feature is appended.
    ({
        'cpu_mode': 'HOST-PASSTHROUGH',
        'vcpus': 1,
        'cores': 2,
        'threads': 3,
        'cpu_model': None,
        'cpuset': None,
        'pin_vcpus': False,
        'nodeset': None,
        'enable_cpu_topology_extension': True
    }, {'cpu_model_choices': {}}, [
        '<cpu mode="host-passthrough"><topology sockets="1" cores="2" threads="3" />'
        '<cache mode="passthrough" /><feature policy="require" name="topoext" /></cpu>',
        '<vcpu>6</vcpu>',
    ]),
    # A cpu_model present in cpu_model_choices yields a <model fallback="forbid"> element.
    ({
        'cpu_mode': 'CUSTOM',
        'vcpus': 1,
        'cores': 2,
        'threads': 3,
        'cpu_model': 'pentium',
        'cpuset': None,
        'pin_vcpus': False,
        'nodeset': None,
        'enable_cpu_topology_extension': False
    }, {'cpu_model_choices': {'pentium': 'pentium', 'pentium2': 'pentium2'}}, [
        '<cpu mode="custom"><topology sockets="1" cores="2" threads="3" />'
        '<model fallback="forbid">pentium</model></cpu>',
        '<vcpu>6</vcpu>',
    ]),
    # cpuset + pin_vcpus: cpuset attribute on <vcpu> plus one <vcpupin> per vCPU,
    # mapped in order onto the expanded cpuset (1,2,4,5,6).
    ({
        'cpu_mode': 'CUSTOM',
        'vcpus': 1,
        'cores': 2,
        'threads': 3,
        'cpu_model': None,
        'cpuset': '1-2,4-6',
        'pin_vcpus': True,
        'nodeset': None,
        'enable_cpu_topology_extension': False,
    }, {'cpu_model_choices': {}}, [
        '<cpu mode="custom"><topology sockets="1" cores="2" threads="3" /></cpu>',
        '<vcpu cpuset="1-2,4-6">6</vcpu>',
        '<cputune><vcpupin vcpu="0" cpuset="1" /><vcpupin vcpu="1" cpuset="2" />'
        '<vcpupin vcpu="2" cpuset="4" /><vcpupin vcpu="3" cpuset="5" /><vcpupin vcpu="4" cpuset="6" /></cputune>',
    ]),
    # nodeset produces a <numatune> memory binding.
    ({
        'cpu_mode': 'CUSTOM',
        'vcpus': 1,
        'cores': 2,
        'threads': 3,
        'cpu_model': None,
        'cpuset': None,
        'pin_vcpus': False,
        'nodeset': '1-2,4-6',
        'enable_cpu_topology_extension': False
    }, {'cpu_model_choices': {}}, [
        '<cpu mode="custom"><topology sockets="1" cores="2" threads="3" /></cpu>',
        '<vcpu>6</vcpu>',
        '<numatune><memory nodeset="1-2,4-6" /></numatune>',
    ]),
])
def test_cpu_xml(vm_data, context, expected_xml):
    """cpu_xml must yield exactly the expected sequence of CPU-related XML elements."""
    assert [etree.tostring(o).decode().strip() for o in cpu_xml(vm_data, context)] == expected_xml
@pytest.mark.parametrize('vm_data,expected_xml', [
    ({'hide_from_msr': False, 'hyperv_enlightenments': False},
     '<features><acpi /><apic /><msrs unknown="ignore" /></features>'),
    ({'hide_from_msr': True, 'hyperv_enlightenments': False},
     '<features><acpi /><apic /><msrs unknown="ignore" /><kvm><hidden state="on" /></kvm></features>'),
    ({'hide_from_msr': True, 'hyperv_enlightenments': True},
     '<features><acpi /><apic /><msrs unknown="ignore" /><kvm><hidden state="on" /></kvm>'
     '<hyperv><relaxed state="on" /><vapic state="on" /><spinlocks state="on" retries="8191" /><reset state="on" />'
     '<frequencies state="on" /><vpindex state="on" /><synic state="on" /><ipi state="on" /><tlbflush state="on" />'
     '<stimer state="on" /></hyperv></features>'),
])
def test_features_xml(vm_data, expected_xml):
    """hide_from_msr/hyperv_enlightenments flags must toggle the kvm/hyperv feature blocks."""
    element = features_xml(vm_data)
    serialized = etree.tostring(element).decode().strip()
    assert serialized == expected_xml
| 5,307 | Python | .py | 126 | 35.071429 | 120 | 0.567008 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,443 | test_smart_attributes.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/disk/test_smart_attributes.py | import textwrap
from unittest.mock import AsyncMock
import pytest
from middlewared.plugins.disk_.smart_attributes import DiskService
from middlewared.pytest.unit.middleware import Middleware
@pytest.mark.asyncio
async def test__disk_service__sata_dom_lifetime_left():
    """sata_dom_lifetime_left must compute the remaining-lifetime fraction from the
    mocked `disk.smartctl` attribute dump; expected value here is ~0.8926."""
    m = Middleware()
    # Canned smartctl output for a SATA DOM; the computation is driven by the
    # attribute table below (exact column spacing preserved on purpose).
    m["disk.smartctl"] = AsyncMock(return_value=textwrap.dedent("""\
        smartctl 6.6 2017-11-05 r4594 [FreeBSD 11.2-STABLE amd64] (local build)
        Copyright (C) 2002-17, Bruce Allen, Christian Franke, www.smartmontools.org
        === START OF READ SMART DATA SECTION ===
        SMART Attributes Data Structure revision number: 0
        Vendor Specific SMART Attributes with Thresholds:
        ID# ATTRIBUTE_NAME          FLAG     VALUE WORST THRESH TYPE      UPDATED  WHEN_FAILED RAW_VALUE
          9 Power_On_Hours          0x0012   100   100   000    Old_age   Always       -       8693
         12 Power_Cycle_Count       0x0012   100   100   000    Old_age   Always       -       240
        163 Unknown_Attribute       0x0000   100   100   001    Old_age   Offline      -       1065
        164 Unknown_Attribute       0x0000   100   100   001    Old_age   Offline      -       322
        166 Unknown_Attribute       0x0000   100   100   010    Old_age   Offline      -       0
        167 Unknown_Attribute       0x0022   100   100   000    Old_age   Always       -       0
        168 Unknown_Attribute       0x0012   100   100   000    Old_age   Always       -       0
        175 Program_Fail_Count_Chip 0x0013   100   100   010    Pre-fail  Always       -       0
        192 Power-Off_Retract_Count 0x0012   100   100   000    Old_age   Always       -       208
        194 Temperature_Celsius     0x0022   060   060   030    Old_age   Always       -       40 (Min/Max 30/60)
        241 Total_LBAs_Written      0x0032   100   100   000    Old_age   Always       -       14088053817
    """))
    # Float comparison with explicit tolerance rather than equality.
    assert abs(await DiskService(m).sata_dom_lifetime_left("ada1") - 0.8926) < 1e-4
| 2,001 | Python | .py | 28 | 64.642857 | 113 | 0.604071 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,444 | test_label_to_dev.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/disk/test_label_to_dev.py | import stat
import pytest
from middlewared.plugins.disk_.disk_info import DiskService
@pytest.mark.parametrize("label,dev,block_devices,symlinks", [
    # Normal label (by-partuuid)
    (
        "disk/by-partuuid/4a3469b8-4c2f-11ee-9e9d-ac1f6b0a9d32",
        "sda1",
        ["/dev/sda1"],
        [("/dev/disk/by-partuuid/4a3469b8-4c2f-11ee-9e9d-ac1f6b0a9d32", "/dev/sda1")],
    ),
    # Label is a whole device
    (
        "sda",
        "sda",
        ["/dev/sda"],
        [],
    ),
    # Label is a partition
    (
        "sda1",
        "sda1",
        ["/dev/sda1"],
        [],
    ),
    # Label does not exist
    # NOTE(review): the comment above has no matching parametrize entry — confirm
    # whether a missing-label case was intended to be added here.
])
def test_label_to_dev(fs, label, dev, block_devices, symlinks):
    """label_to_dev must resolve `label` (a /dev/disk symlink path or bare node name) to `dev`.

    `fs` is the fake-filesystem fixture (provides create_file/create_symlink — presumably
    pyfakefs); the block device nodes and label symlinks are materialized into it first.
    """
    for block_device in block_devices:
        fs.create_file(block_device, stat.S_IFBLK)
    for source, target in symlinks:
        fs.create_symlink(source, target)
    assert DiskService(None).label_to_dev(label) == dev
| 923 | Python | .py | 33 | 21.848485 | 86 | 0.60452 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,445 | test_dev_to_ident.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/disk/test_dev_to_ident.py | import pytest
from unittest.mock import Mock
from middlewared.plugins.disk_.sync import DiskService
# Service under test, built with a mock middleware.
OBJ = DiskService(Mock())
# Each fixture below is a (disk_name, sys_disks, expected identifier) triple for
# `dev_to_ident`; the expected identifier encodes which key form was chosen.
# Identifier derived from a partition uuid.
BY_UUID = (
    "pmem0",
    {
        "pmem0": {
            "name": "pmem0",
            "serial": None,
            "serial_lunid": None,
            "parts": [{
                "partition_type": "516e7cba-6ecf-11d6-8ff8-00022d09712b",
                "partition_uuid": "b9253137-a0a4-11ec-b194-3cecef615fde",
            }],
        }
    },
    "{uuid}b9253137-a0a4-11ec-b194-3cecef615fde",
)
# Identifier derived from serial_lunid.
BY_SERIAL_LUNID = (
    "nvme0n1",
    {
        "nvme0n1": {
            "name": "nvme0n1",
            "serial": None,
            "serial_lunid": "1234_XXXX",
            "parts": []
        }
    },
    "{serial_lunid}1234_XXXX",
)
# No serial information at all -> falls back to the device name.
BY_DEVICENAME = (
    "sda",
    {
        "sda": {
            "serial": None,
            "serial_lunid": None,
            "parts": []
        }
    },
    "{devicename}sda",
)
# Serial present but no lunid -> plain serial identifier.
BY_SERIAL = (
    "sdaiy",
    {
        "sdaiy": {
            "serial": "AAAAAAAA",
            "serial_lunid": None,
            "parts": []
        }
    },
    "{serial}AAAAAAAA",
)
@pytest.mark.parametrize('disk_name, sys_disks, result', [BY_UUID, BY_SERIAL_LUNID, BY_DEVICENAME, BY_SERIAL])
def test_dev_to_ident(disk_name, sys_disks, result):
    """dev_to_ident must produce the expected identifier for the uuid, serial_lunid,
    devicename and serial fixture shapes."""
    ident = OBJ.dev_to_ident(disk_name, sys_disks)
    assert ident == result
| 1,355 | Python | .py | 56 | 16.982143 | 110 | 0.512741 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,446 | test_availability.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/disk/test_availability.py | from unittest.mock import AsyncMock
import pytest
from middlewared.plugins.disk_.availability import DiskService
from middlewared.pytest.unit.middleware import Middleware
@pytest.mark.parametrize("disks,allow_duplicate_serials,errors", [
    # Requesting the same disk twice is tolerated.
    (["sda", "sda"], False, []),
    (["sdi"], False, ["The following disks were not found in system: sdi."]),
    # sdb is listed by disk.get_reserved below.
    (["sdb"], False, ["The following disks are already in use: sdb."]),
    # sdc shares serial '2' with sdb but has a distinct lunid, so no duplicate error.
    (["sdc"], False, []),
    (["sdd"], False, ["Disks have duplicate serial numbers: ' BAD USB DRIVE ' (sdd, sde)."]),
    (["sdf", "sdg"], False, ["Disks have duplicate serial numbers: ' EVEN WORSE USB DRIVE ' (sdf, sdg)."]),
    # allow_duplicate_serials=True suppresses the duplicate-serial error for sdd/sde.
    (["sdd"], True, []),
])
@pytest.mark.asyncio
async def test__disk_service__check_disks_availability(disks, allow_duplicate_serials, errors):
    """check_disks_availability must collect exactly the expected validation messages."""
    m = Middleware()
    # Fixed disk inventory: sdb/sdc share a serial with differing lunids,
    # sdd/sde and sdf/sdg are genuine duplicate-serial pairs.
    m["disk.query"] = AsyncMock(return_value=[
        {"devname": "sda", "serial": "1", "lunid": None},
        {"devname": "sdb", "serial": "2", "lunid": "0"},
        {"devname": "sdc", "serial": "2", "lunid": "1"},
        {"devname": "sdd", "serial": " BAD USB DRIVE ", "lunid": None},
        {"devname": "sde", "serial": " BAD USB DRIVE ", "lunid": None},
        {"devname": "sdf", "serial": " EVEN WORSE USB DRIVE ", "lunid": None},
        {"devname": "sdg", "serial": " EVEN WORSE USB DRIVE ", "lunid": None},
    ])
    # Disks considered already in use.
    m["disk.get_reserved"] = AsyncMock(return_value=["sdb", "sde"])
    verrors = await DiskService(m).check_disks_availability(disks, allow_duplicate_serials)
    assert [e.errmsg for e in verrors.errors] == errors
| 1,564 | Python | .py | 28 | 50.821429 | 107 | 0.623775 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,447 | test_ident_to_dev.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/disk/test_ident_to_dev.py | import pytest
from unittest.mock import Mock
from middlewared.plugins.disk_.sync import DiskService
# Service under test, built with a mock middleware.
OBJ = DiskService(Mock())
# Each fixture below is an (ident, sys_disks, expected device name) triple for
# `ident_to_dev` — the inverse direction of the dev_to_ident fixtures.
# '{uuid}...' identifier resolved through a partition uuid back to its disk.
BY_UUID = (
    "{uuid}b9253137-a0a4-11ec-b194-3cecef615fde",
    {
        "pmem0": {
            "name": "pmem0",
            "serial": None,
            "serial_lunid": None,
            "parts": [{
                "disk": "pmem0",
                "partition_type": "516e7cba-6ecf-11d6-8ff8-00022d09712b",
                "partition_uuid": "b9253137-a0a4-11ec-b194-3cecef615fde",
            }],
        }
    },
    "pmem0",
)
# '{serial_lunid}...' identifier matched on the serial_lunid field.
BY_SERIAL_LUNID = (
    "{serial_lunid}1234_XXXX",
    {
        "nvme0n1": {
            "name": "nvme0n1",
            "serial": None,
            "serial_lunid": "1234_XXXX",
            "parts": []
        }
    },
    "nvme0n1",
)
# '{devicename}...' identifier matched directly on the device name.
BY_DEVICENAME = (
    "{devicename}sda",
    {
        "sda": {
            "name": "sda",
            "serial": None,
            "serial_lunid": None,
            "parts": []
        }
    },
    "sda",
)
# '{serial}...' identifier matched on the bare serial.
BY_SERIAL = (
    "{serial}AAAAAAAA",
    {
        "sdaiy": {
            "serial": "AAAAAAAA",
            "serial_lunid": None,
            "parts": []
        }
    },
    "sdaiy",
)
@pytest.mark.parametrize('ident, sys_disks, result', [BY_UUID, BY_SERIAL_LUNID, BY_DEVICENAME, BY_SERIAL])
def test_ident_to_dev(ident, sys_disks, result):
    """ident_to_dev must resolve each identifier form back to its device name."""
    devname = OBJ.ident_to_dev(ident, sys_disks)
    assert devname == result
| 1,403 | Python | .py | 58 | 16.706897 | 106 | 0.500373 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,448 | test_utils.py | truenas_middleware/src/middlewared/middlewared/pytest/unit/plugins/update/test_utils.py | import pytest
from middlewared.plugins.update_.utils import can_update
@pytest.mark.parametrize("old_version,new_version,result", [
    ("FreeNAS-11", "FreeNAS-11.1", True),
    ("FreeNAS-11.1", "FreeNAS-11.1-U1", True),
    ("FreeNAS-11.3-U2", "FreeNAS-11.3-U2.1", True),
    ("FreeNAS-11.3-U2", "FreeNAS-12.0", True),
    ("FreeNAS-11.3-U2", "TrueNAS-12.0", True),
    ("FreeNAS-11.3-U2", "TrueNAS-12.0-MASTER-202004190426", True),
    ("FreeNAS-11.3-U2", "TrueNAS-12.0-MASTER-20200419-0426", True),
    ("FreeNAS-11.3", "TrueNAS-12.0-MASTER-20200419-0426", True),
    ("22.02-MASTER-20220207-112927", "22.02.1-MASTER-20220208-034252", True),
    ("22.02-ALPHA", "22.02-RC", True),
    ("22.02-ALPHA", "22.02-RC.2", True),
    ("22.02-RC", "22.02-RC.2", True),
    ("22.02-RC.2", "22.02", True),
    ("22.02-RC.2", "22.02.0", True),
    ("22.02", "22.02.1", True),
    ("22.02.0", "22.02.1", True),
    # Anything can be updated to a MASTER release
    ("TrueNAS-SCALE-22.02-RC.1", "TrueNAS-SCALE-22.02-MASTER-20211029-134913", True),
    # Older MASTER to newer MASTER
    ("TrueNAS-SCALE-22.02-MASTER-20211029-134913", "TrueNAS-SCALE-22.02-MASTER-20211029-205533", True),
    # Older INTERNAL to newer INTERNAL
    ("TrueNAS-SCALE-22.02-INTERNAL-225", "TrueNAS-SCALE-22.02-INTERNAL-226", True),
    # Anything can be updated to a CUSTOM build
    ("TrueNAS-SCALE-22.02-RC.1", "TrueNAS-SCALE-22.02-CUSTOM", True),
    ("TrueNAS-SCALE-22.02-MASTER-20211029-134913", "TrueNAS-SCALE-22.02-CUSTOM", True),
    ("22.02.0", "22.02.CUSTOM", True),
    ("22.12.2-INTERNAL.9", "22.12.2-INTERNAL.11", True),
])
def test__can_update(old_version, new_version, result):
    """can_update must accept each old -> new upgrade and reject the reverse direction."""
    assert can_update(old_version, new_version) is result
    # Every pair is also checked in reverse: the opposite direction must give the
    # opposite answer.
    assert can_update(new_version, old_version) is not result
def test_can_update_internal_to_anything():
    """Upgrading away from an INTERNAL build to a regular release is allowed."""
    allowed = can_update("TrueNAS-SCALE-22.02-INTERNAL-225", "TrueNAS-SCALE-22.02-RC.1")
    assert allowed
def test_can_update_anything_to_internal():
    """Upgrading from a regular release to an INTERNAL build is allowed."""
    allowed = can_update("TrueNAS-SCALE-22.02-RC.1", "TrueNAS-SCALE-22.02-INTERNAL-225")
    assert allowed
| 2,047 | Python | .py | 38 | 49.421053 | 103 | 0.663836 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,449 | service_mixin.py | truenas_middleware/src/middlewared/middlewared/service/service_mixin.py | from middlewared.service_exception import CallError
class ServiceChangeMixin:
    """Mixin providing a helper to apply a service action after a configuration change."""

    async def _service_change(self, service, verb, options=None):
        """Regenerate rc configuration and, if `service` is currently running, invoke
        `service.<verb>` on it.

        Raises a CallError with ESERVICESTARTFAILURE if the service action reports
        failure.
        """
        state = await self.middleware.call(
            'service.query',
            [('service', '=', service)],
            {'get': True}
        )
        svc_state = state['state'].lower()

        # For now its hard to keep track of which services change rc.conf.
        # To be safe run this every time any service is updated.
        # This adds up ~180ms so its seems a reasonable workaround for the time being.
        await self.middleware.call('etc.generate', 'rc')

        if svc_state != 'running':
            return

        started = await self.middleware.call(f'service.{verb}', service, options or {})
        if not started:
            raise CallError(
                f'The {service} service failed to start',
                CallError.ESERVICESTARTFAILURE,
                [service],
            )
| 958 | Python | .py | 20 | 36 | 91 | 0.589056 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,450 | compound_service.py | truenas_middleware/src/middlewared/middlewared/service/compound_service.py | import itertools
from .base import service_config
from .service import Service
class CompoundService(Service):
    """A service assembled from several part services that together implement one
    service namespace.

    All parts must agree on every explicitly-specified config option, and the public
    (non-underscore) callables of each part are exposed directly on the compound
    instance.
    """

    def __init__(self, middleware, parts):
        """
        :param middleware: middleware instance forwarded to `Service.__init__`.
        :param parts: non-empty list of service instances to merge; the first part's
            class is used as the basis for the merged config.
        :raises RuntimeError: if two parts specify conflicting config values, or a
            part's method name clashes with an already-exposed attribute.
        """
        super().__init__(middleware)

        # Reject any config option that two parts specify with different values.
        for part1, part2 in itertools.combinations(parts, 2):
            for key in set(part1._config_specified) & set(part2._config_specified):
                if part1._config_specified[key] != part2._config_specified[key]:
                    raise RuntimeError(f'{part1} has {key}={part1._config_specified[key]!r}, but '
                                       f'{part2} has {key}={part2._config_specified[key]!r}')

        # Merge the (now known-consistent) specified options in a separate pass over
        # all parts. Doing this outside the pairwise loop also covers the single-part
        # case, where `itertools.combinations(parts, 2)` yields nothing and the
        # config would otherwise silently fall back to all defaults.
        config_specified = {}
        for part in parts:
            config_specified.update(part._config_specified)

        self._config = service_config(parts[0].__class__, config_specified)
        self.parts = parts

        # Expose each part's public callables on the compound service, rejecting
        # duplicate names.
        methods_parts = {}
        for part in self.parts:
            for name in dir(part):
                if name.startswith('_'):
                    continue

                meth = getattr(part, name)
                if not callable(meth):
                    continue

                if hasattr(self, name):
                    # Use `.get` rather than indexing: the clash may be with an
                    # attribute inherited from `Service` itself, in which case `name`
                    # is not in `methods_parts` and indexing would raise KeyError
                    # instead of this diagnostic.
                    raise RuntimeError(
                        f'Duplicate method name {name} for service parts {methods_parts.get(name)} and {part}',
                    )

                setattr(self, name, meth)
                methods_parts[name] = part

        # Inherit the docstring of the first documented part.
        for part in self.parts:
            if part.__doc__:
                self.__doc__ = part.__doc__
                break

    def __repr__(self):
        return f'<CompoundService: {", ".join([repr(part) for part in self.parts])}>'
| 1,699 | Python | .py | 36 | 33.444444 | 107 | 0.545124 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,451 | service_part.py | truenas_middleware/src/middlewared/middlewared/service/service_part.py | import inspect
from middlewared.utils.type import copy_function_metadata
from .base import ServiceBase
class ServicePartBaseMeta(ServiceBase):
    """Metaclass enforcing that a concrete service part re-declares, with identical
    signatures, every method of its ServicePartBase-derived interface class."""
    def __new__(cls, name, bases, attrs):
        klass = super().__new__(cls, name, bases, attrs)
        # The root marker class itself needs no checking.
        if name == 'ServicePartBase':
            return klass
        # A class deriving directly (and only) from ServicePartBase *declares* an
        # interface; only classes one level further down (implementations) are checked.
        if len(bases) == 1 and bases[0].__name__ == 'ServicePartBase':
            return klass
        # Find the interface class: the base whose own bases include ServicePartBase.
        for base in bases:
            if any(b.__name__ == 'ServicePartBase' for b in base.__bases__):
                break
        else:
            raise RuntimeError(f'Could not find ServicePartBase among bases of these classes: {bases!r}')
        # NOTE: this loop's `name` shadows the class-name parameter above (the
        # parameter is no longer needed past this point).
        for name, original_method in inspect.getmembers(base, predicate=inspect.isfunction):
            # The implementation must define every method the interface declares.
            new_method = attrs.get(name)
            if new_method is None:
                raise RuntimeError(f'{klass!r} does not define method {name!r} that is defined in it\'s base {base!r}')
            # If the interface method carries a `wraps` attribute, compare against the
            # wrapped function's signature instead of the wrapper's.
            if hasattr(original_method, 'wraps'):
                original_argspec = inspect.getfullargspec(original_method.wraps)
            else:
                original_argspec = inspect.getfullargspec(original_method)
            if original_argspec != inspect.getfullargspec(new_method):
                raise RuntimeError(f'Signature for method {name!r} does not match between {klass!r} and it\'s base '
                                   f'{base!r}')
            # Propagate the interface method's metadata onto the implementation, then
            # apply the interface's `wrap` hook if it defines one.
            copy_function_metadata(original_method, new_method)
            if hasattr(original_method, 'wrap'):
                new_method = original_method.wrap(new_method)
            setattr(klass, name, new_method)
        return klass
class ServicePartBase(metaclass=ServicePartBaseMeta):
    """Marker base for service-part interface classes; see ServicePartBaseMeta for
    the contract it enforces on implementations."""
    pass
| 1,718 | Python | .py | 33 | 40.242424 | 119 | 0.621783 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,452 | core_service.py | truenas_middleware/src/middlewared/middlewared/service/core_service.py | import asyncio
import errno
import inspect
import ipaddress
import os
import re
import socket
import threading
import time
import traceback
import uuid
from collections import defaultdict
from remote_pdb import RemotePdb
from subprocess import run
import middlewared.main
from middlewared.api import api_method
from middlewared.api.base.jsonschema import get_json_schema
from middlewared.api.current import (
CoreSetOptionsArgs, CoreSetOptionsResult, CoreSubscribeArgs, CoreSubscribeResult, CoreUnsubscribeArgs,
CoreUnsubscribeResult,
)
from middlewared.common.environ import environ_update
from middlewared.job import Job
from middlewared.pipe import Pipes
from middlewared.schema import accepts, Any, Bool, Datetime, Dict, Int, List, Str
from middlewared.service_exception import CallError, ValidationErrors
from middlewared.utils import BOOTREADY, filter_list, MIDDLEWARE_RUN_DIR
from middlewared.utils.debug import get_frame_details, get_threads_stacks
from middlewared.utils.privilege import credential_has_full_admin, credential_is_limited_to_own_jobs
from middlewared.validators import IpAddress, Range
from .compound_service import CompoundService
from .config_service import ConfigService
from .crud_service import CRUDService
from .decorators import filterable, filterable_returns, job, no_auth_required, no_authz_required, pass_app, private
from .service import Service
# Sentinel file whose absence indicates middleware is still starting up during boot
# (checked by `CoreService.is_starting_during_boot`); presumably created elsewhere
# once startup completes — not visible in this module.
MIDDLEWARE_STARTED_SENTINEL_PATH = os.path.join(MIDDLEWARE_RUN_DIR, 'middlewared-started')
def is_service_class(service, klass):
    """Return True if `service` is an instance of `klass`, or is a CompoundService
    any of whose parts is an instance of `klass`."""
    if isinstance(service, klass):
        return True
    if isinstance(service, CompoundService):
        return any(isinstance(part, klass) for part in service.parts)
    return False
class CoreService(Service):
    """Core middleware introspection and job-management service: service/method
    listings, job querying, waiting, aborting and log download."""
    class Config:
        # Hide the whole `core` namespace from the CLI.
        cli_private = True
@accepts(Str('id'), Int('cols'), Int('rows'))
async def resize_shell(self, id_, cols, rows):
"""
Resize terminal session (/websocket/shell) to cols x rows
"""
shell = middlewared.main.ShellApplication.shells.get(id_)
if shell is None:
raise CallError('Shell does not exist', errno.ENOENT)
shell.resize(cols, rows)
    @private
    def get_tasks(self):
        """Yield, for every task on the middleware event loop, its formatted stack
        string and per-frame details (as produced by `get_frame_details`)."""
        for task in asyncio.all_tasks(loop=self.middleware.loop):
            formatted = None
            frame = None
            frames = []
            for frame in task.get_stack():
                cur_frame = get_frame_details(frame, self.logger)
                if cur_frame:
                    frames.append(cur_frame)
            # `frame` is intentionally left bound to the last frame from the loop
            # above (or None when the task reported no stack at all).
            if frame:
                formatted = traceback.format_stack(frame)
            yield {
                'stack': formatted,
                'frames': frames,
            }
def _job_by_app_and_id(self, app, job_id):
if app is None:
try:
return self.middleware.jobs[job_id]
except KeyError:
raise CallError('Job does not exist', errno.ENOENT)
else:
return self.__job_by_credential_and_id(app.authenticated_credentials, job_id)
def __job_by_credential_and_id(self, credential, job_id):
if not credential_is_limited_to_own_jobs(credential):
return self.middleware.jobs[job_id]
if not credential.is_user_session or credential_has_full_admin(credential):
return self.middleware.jobs[job_id]
job = self.middleware.jobs[job_id]
if job.credentials.user['username'] == credential.user['username']:
return job
raise CallError(f'{job_id}: job is not owned by current session.', errno.EPERM)
    @no_authz_required
    @filterable
    @filterable_returns(Dict(
        'job',
        Int('id'),
        Str('method'),
        List('arguments'),
        Bool('transient'),
        Str('description', null=True),
        Bool('abortable'),
        Str('logs_path', null=True),
        Str('logs_excerpt', null=True),
        Dict(
            'progress',
            Int('percent', null=True),
            Str('description', null=True),
            Any('extra', null=True),
        ),
        Any('result', null=True),
        Any('result_encoding_error', null=True),
        Str('error', null=True),
        Str('exception', null=True),
        Dict(
            'exc_info',
            Str('repr', null=True),
            Str('type', null=True),
            Int('errno', null=True),
            Any('extra', null=True),
            null=True
        ),
        Str('state'),
        Datetime('time_started', null=True),
        Datetime('time_finished', null=True),
        Dict(
            'credentials',
            Str('type'),
            Dict('data', additional_attrs=True),
            null=True,
        ),
        register=True,
    ))
    @pass_app(rest=True)
    def get_jobs(self, app, filters, options):
        """
        Get information about long-running jobs.
        If authenticated session does not have the FULL_ADMIN role, only
        jobs owned by the current authenticated session will be returned.
        `result` key will have sensitive values redacted by default for external
        clients.
        Redaction behavior may be explicitly specified via the `extra`
        query-option `raw_result`. If `raw_result` is True then unredacted result
        is returned.
        """
        # Get raw result by default for internal calls to core.get_jobs otherwise
        # redact result by default
        raw_result_default = False if app else True
        if app and credential_is_limited_to_own_jobs(app.authenticated_credentials):
            # Restricted sessions only ever see their own jobs.
            username = app.authenticated_credentials.user['username']
            jobs = list(self.middleware.jobs.for_username(username).values())
        else:
            jobs = list(self.middleware.jobs.all().values())
        # Callers may force redaction on/off via extra.raw_result.
        raw_result = options['extra'].get('raw_result', raw_result_default)
        jobs = filter_list([
            i.__encode__(raw_result) for i in jobs
        ], filters, options)
        return jobs
@no_authz_required
@accepts(Int('id'), Str('filename'), Bool('buffered', default=False))
@pass_app(rest=True)
async def job_download_logs(self, app, id_, filename, buffered):
"""
Download logs of the job `id`.
Please see `core.download` method documentation for explanation on `filename` and `buffered` arguments,
and return value.
"""
job = self._job_by_app_and_id(app, id_)
if job.logs_path is None:
raise CallError('This job has no logs')
return (await self._download(app, 'filesystem.get', [job.logs_path], filename, buffered))[1]
@no_authz_required
@accepts(Int('id'))
@job()
async def job_wait(self, job, id_):
target_job = self.__job_by_credential_and_id(job.credentials, id_)
return await job.wrap(target_job)
@private
@accepts(Int('id'), Dict(
'job-update',
Dict('progress', additional_attrs=True),
))
def job_update(self, id_, data):
job = self.middleware.jobs[id_]
progress = data.get('progress')
if progress:
job.set_progress(
progress['percent'],
description=progress.get('description'),
extra=progress.get('extra'),
)
@private
def is_starting_during_boot(self):
# Returns True if middleware is being currently started during boot
return not os.path.exists(MIDDLEWARE_STARTED_SENTINEL_PATH)
    @private
    def notify_postinit(self):
        """Run post-boot initialization: data migrations, the boot-ready sentinel and
        event, and periodic-task setup (ordered side effects — do not reorder)."""
        self.middleware.call_sync('migration.run')
        # Sentinel file to tell we have gone far enough in the boot process.
        # See #17508
        open(BOOTREADY, 'w').close()
        # Send event to middlewared saying we are late enough in the process to call it ready
        self.middleware.call_sync('core.event_send', 'system.ready', 'ADDED')
        # Let's setup periodic tasks now
        self.middleware._setup_periodic_tasks()
@no_authz_required
@accepts(Int('id'))
@pass_app(rest=True)
def job_abort(self, app, id_):
job = self._job_by_app_and_id(app, id_)
return job.abort()
def _should_list_service(self, name, service, target):
if service._config.private is True:
if not (target == 'REST' and name == 'resttest'):
return False
if target == 'CLI' and service._config.cli_private:
return False
return True
    @no_auth_required
    @accepts(Str('target', enum=['WS', 'CLI', 'REST'], default='WS'))
    @private
    @pass_app()
    def get_services(self, app, target):
        """Returns a list of all registered services."""
        services = {}
        for k, v in list(self.middleware.get_services().items()):
            if not self._should_list_service(k, v, target):
                continue
            # Classify the service so clients know which call conventions it follows.
            if is_service_class(v, CRUDService):
                _typ = 'crud'
            elif is_service_class(v, ConfigService):
                _typ = 'config'
            else:
                _typ = 'service'
            # Public view of the service config; the comprehension's k/v shadow the
            # outer loop variables only within the comprehension itself.
            config = {k: v for k, v in list(v._config.__dict__.items())
                      if not (k in ['entry', 'process_pool', 'thread_pool'] or k.startswith('_'))}
            if config['cli_description'] is None:
                if v.__doc__:
                    # Fall back to the first line of the service's docstring.
                    config['cli_description'] = inspect.getdoc(v).split("\n")[0].strip()
            services[k] = {
                'config': config,
                'type': _typ,
            }
        return services
@no_auth_required
@accepts(Str('service', default=None, null=True), Str('target', enum=['WS', 'CLI', 'REST'], default='WS'))
@private
@pass_app()
def get_methods(self, app, service, target):
    """
    Return methods metadata of every available service.

    `service` parameter is optional and filters the result for a single service.

    Returns a dict mapping 'service.method' -> metadata (description, schemas,
    job/pipe capabilities, roles, etc.), filtered to what the caller's
    credentials are allowed to see.
    """
    data = {}
    for name, svc in list(self.middleware.get_services().items()):
        if service is not None and name != service:
            continue

        if not self._should_list_service(name, svc, target):
            continue

        for attr in dir(svc):
            if attr.startswith('_'):
                continue

            method = None
            # For CRUD.do_{update,delete} they need to be accounted
            # as "item_method", since they are just wrapped.
            item_method = None
            if is_service_class(svc, CRUDService):
                """
                For CRUD the create/update/delete are special.
                The real implementation happens in do_create/do_update/do_delete
                so thats where we actually extract pertinent information.
                """
                if attr in ('create', 'update', 'delete'):
                    method = getattr(svc, 'do_{}'.format(attr), None)
                    if method is None:
                        continue
                    if attr in ('update', 'delete'):
                        item_method = True
                elif attr in ('do_create', 'do_update', 'do_delete'):
                    # Hidden: exposed via their public create/update/delete names.
                    continue
            elif is_service_class(svc, ConfigService):
                """
                For Config the update is special.
                The real implementation happens in do_update
                so thats where we actually extract pertinent information.
                """
                if attr == 'update':
                    original_name = 'do_{}'.format(attr)
                    if hasattr(svc, original_name):
                        method = getattr(svc, original_name, None)
                    else:
                        method = getattr(svc, attr)
                    if method is None:
                        continue
                elif attr in ('do_update',):
                    continue

            if method is None:
                method = getattr(svc, attr, None)

            if method is None or not callable(method):
                continue

            # Skip private methods
            if hasattr(method, '_private') and method._private is True:
                continue
            if target == 'CLI' and hasattr(method, '_cli_private'):
                continue

            # terminate is a private method used to clean up a service on shutdown
            if attr == 'terminate':
                continue

            method_name = f'{name}.{attr}'
            no_auth_required = hasattr(method, '_no_auth_required')
            no_authz_required = hasattr(method, '_no_authz_required')

            # Skip methods that are not allowed for the currently authenticated credentials
            if app is not None:
                if not no_auth_required:
                    if not app.authenticated_credentials:
                        continue

                    if not no_authz_required and not app.authenticated_credentials.authorize('CALL', method_name):
                        continue

            examples = defaultdict(list)
            doc = inspect.getdoc(method)
            if doc:
                """
                Allow method docstring to have sections in the format of:

                  .. section_name::

                Currently the following sections are available:

                  .. examples:: - goes into `__all__` list in examples
                  .. examples(cli):: - goes into `cli` list in examples
                  .. examples(rest):: - goes into `rest` list in examples
                  .. examples(websocket):: - goes into `websocket` list in examples
                """
                # Split docstring at each '.. section::' header; even indices
                # are text, odd indices are the section names.
                sections = re.split(r'^.. (.+?)::$', doc, flags=re.M)
                doc = sections[0]
                for i in range((len(sections) - 1) // 2):
                    idx = (i + 1) * 2 - 1
                    reg = re.search(r'examples(?:\((.+)\))?', sections[idx])
                    if reg is None:
                        continue

                    exname = reg.groups()[0]
                    if exname is None:
                        exname = '__all__'

                    examples[exname].append(sections[idx + 1])

            method_schemas = {'accepts': None, 'returns': None}
            for schema_type in method_schemas:
                if hasattr(method, 'new_style_accepts'):
                    # New-style methods carry both schemas at once; the loop's
                    # second iteration hits this branch and is a no-op re-set.
                    method_schemas['accepts'] = get_json_schema(method.new_style_accepts)
                    method_schemas['returns'] = get_json_schema(method.new_style_returns)
                    continue

                args_descriptions_doc = doc or ''
                if attr == 'update':
                    # Update shares most argument descriptions with do_create.
                    if do_create := getattr(svc, 'do_create', None):
                        args_descriptions_doc += "\n" + inspect.getdoc(do_create)

                method_schemas[schema_type] = self.get_json_schema(
                    getattr(method, schema_type, None), args_descriptions_doc
                )

            # NOTE: the walrus below always binds filterable_schema (to None
            # when the attribute is absent), so the dict build further down is safe.
            if filterable_schema := getattr(method, '_filterable_schema', None):
                # filterable_schema is OROperator here, and we just want it's specific schema
                filterable_schema = self.get_json_schema([filterable_schema.schemas[1]], None)[0]
            elif attr == 'query':
                if isinstance(svc, CompoundService):
                    # Take descriptions from the first part providing do_create.
                    for part in svc.parts:
                        if hasattr(part, 'do_create'):
                            d = inspect.getdoc(part.do_create)
                            break
                    else:
                        d = None

                    for part in svc.parts:
                        if hasattr(part, 'ENTRY') and part.ENTRY is not None:
                            filterable_schema = self.get_json_schema(
                                [self.middleware._schemas[part.ENTRY.name]],
                                d,
                            )[0]
                            break
                elif hasattr(svc, 'ENTRY') and svc.ENTRY is not None:
                    d = None
                    if hasattr(svc, 'do_create'):
                        d = inspect.getdoc(svc.do_create)

                    filterable_schema = self.get_json_schema(
                        [self.middleware._schemas[svc.ENTRY.name]],
                        d,
                    )[0]

            if method_schemas['accepts'] is None:
                raise RuntimeError(f'Method {method_name} is public but has no @accepts()')

            data[method_name] = {
                'description': doc,
                # First sentence of the first paragraph, flattened to one line.
                'cli_description': (doc or '').split('\n\n')[0].split('.')[0].replace('\n', ' '),
                'examples': examples,
                'item_method': True if item_method else hasattr(method, '_item_method'),
                'no_auth_required': no_auth_required,
                'filterable': hasattr(method, '_filterable') or (
                    getattr(method, 'new_style_accepts', None) is not None and
                    method.new_style_accepts.__name__ == "QueryArgs"
                ),
                'filterable_schema': filterable_schema,
                'pass_application': hasattr(method, '_pass_app'),
                'extra_methods': method._rest_api_metadata['extra_methods'] if hasattr(
                    method, '_rest_api_metadata') else None,
                # Methods that require a persistent app cannot be served over REST.
                'require_websocket': hasattr(method, '_pass_app') and not method._pass_app['rest'],
                'job': hasattr(method, '_job'),
                'downloadable': hasattr(method, '_job') and 'output' in method._job['pipes'],
                'uploadable': hasattr(method, '_job') and 'input' in method._job['pipes'],
                'check_pipes': hasattr(method, '_job') and method._job['pipes'] and method._job['check_pipes'],
                'roles': self.middleware.role_manager.roles_for_method(method_name),
                **method_schemas,
            }

    return data
@private
def get_json_schema(self, schema, args_descriptions_doc):
    """Convert a list of schema objects to JSON-schema dicts, filling in any
    missing `description` fields from argument descriptions parsed out of
    `args_descriptions_doc` (a method docstring). Returns the falsy `schema`
    unchanged when there is nothing to convert.
    """
    if not schema:
        return schema

    args_descriptions_doc = args_descriptions_doc or ''
    # Hidden schema items are excluded from the public metadata.
    schema = [i.to_json_schema() for i in schema if not getattr(i, 'hidden', False)]

    # Collect top-level and one level of nested object property names so the
    # docstring parser knows which backtick-quoted words are argument names.
    names = set()
    for i in schema:
        names.add(i['_name_'])

        if i.get('type') == 'object':
            for j in i['properties'].values():
                names.add(j['_name_'])

    args_descriptions = self._cli_args_descriptions(args_descriptions_doc, names)
    for i in schema:
        # Only fill descriptions that are missing; explicit ones win.
        if not i.get('description') and i['_name_'] in args_descriptions:
            i['description'] = args_descriptions[i['_name_']]

        if i.get('type') == 'object':
            for j in i['properties'].values():
                if not j.get('description') and j['_name_'] in args_descriptions:
                    j['description'] = args_descriptions[j['_name_']]

    return schema
@accepts()
def get_events(self):
    """
    Returns metadata for every possible event emitted from websocket server.
    """
    events = {}
    for name, attrs in self.middleware.get_events():
        # Private events are not exposed in the public metadata.
        if attrs['private']:
            continue

        events[name] = {
            'description': attrs['description'],
            'wildcard_subscription': attrs['wildcard_subscription'],
            'accepts': self.get_json_schema(list(filter(bool, attrs['accepts'])), attrs['description']),
            # New-style returns take precedence over legacy schema objects.
            'returns': (
                get_json_schema(attrs['new_style_returns']) if attrs['new_style_returns']
                else self.get_json_schema(list(filter(bool, attrs['returns'])), attrs['description'])
            ),
        }

    return events
@private
async def call_hook(self, name, args, kwargs=None):
    """Invoke registered hook `name`, expanding `args`/`kwargs` into the call."""
    if not kwargs:
        kwargs = {}
    await self.middleware.call_hook(name, *args, **kwargs)
@private
async def event_send(self, name, event_type, kwargs=None):
    """Emit event `name` of `event_type` with the given keyword payload."""
    if not kwargs:
        kwargs = {}
    self.middleware.send_event(name, event_type, **kwargs)
@no_authz_required
@accepts()
def ping(self):
    """
    Utility method which just returns "pong".
    Can be used to keep connection/authtoken alive instead of using
    "ping" protocol message.
    """
    return 'pong'
def _ping_host(self, version, host, timeout, count=None, interface=None, interval=None):
if version == 4:
command = ['ping', '-4', '-w', f'{timeout}']
elif version == 6:
command = ['ping6', '-w', f'{timeout}']
if count:
command.extend(['-c', str(count)])
if interface:
command.extend(['-I', interface])
if interval:
command.extend(['-i', interval])
command.append(host)
return run(command).returncode == 0
@accepts(
    Dict(
        'options',
        Str('type', enum=['ICMP', 'ICMPV4', 'ICMPV6'], default='ICMP'),
        Str('hostname', required=True),
        Int('timeout', validators=[Range(min_=1, max_=60)], default=4),
        Int('count', default=None),
        Str('interface', default=None),
        Str('interval', default=None),
    ),
)
def ping_remote(self, options):
    """
    Method that will send an ICMP echo request to "hostname"
    and will wait up to "timeout" for a reply.

    Returns True when the host replied, False otherwise. Raises validation
    errors when the hostname cannot be resolved or the resolved address does
    not match the requested protocol family.
    """
    ip = None
    ip_found = True
    verrors = ValidationErrors()
    try:
        # Accept a literal IP address as-is.
        ip = IpAddress()
        ip(options['hostname'])
        ip = options['hostname']
    except ValueError:
        ip_found = False
    if not ip_found:
        # Not a literal address: resolve for the requested address family.
        try:
            if options['type'] == 'ICMP':
                ip = socket.getaddrinfo(options['hostname'], None)[0][4][0]
            elif options['type'] == 'ICMPV4':
                ip = socket.getaddrinfo(options['hostname'], None, socket.AF_INET)[0][4][0]
            elif options['type'] == 'ICMPV6':
                ip = socket.getaddrinfo(options['hostname'], None, socket.AF_INET6)[0][4][0]
        except socket.gaierror:
            verrors.add(
                'options.hostname',
                f'{options["hostname"]} cannot be resolved to an IP address.'
            )

        verrors.check()

    addr = ipaddress.ip_address(ip)
    # Cross-check resolved family against the requested protocol type.
    if not addr.version == 4 and (options['type'] == 'ICMP' or options['type'] == 'ICMPV4'):
        verrors.add(
            'options.type',
            f'Requested ICMPv4 protocol, but the address provided "{addr}" is not a valid IPv4 address.'
        )

    if not addr.version == 6 and options['type'] == 'ICMPV6':
        verrors.add(
            'options.type',
            f'Requested ICMPv6 protocol, but the address provided "{addr}" is not a valid IPv6 address.'
        )
    verrors.check()

    ping_host = False
    if addr.version in [4, 6]:
        ping_host = self._ping_host(addr.version, ip, options['timeout'], options.get('count'), options.get('interface'), options.get('interval'))

    return ping_host
@accepts(
    Dict(
        'options',
        Str('ip', default=None),
        Str('interface', default=None),
    ),
)
def arp(self, options):
    """Return a mapping of IP address -> hardware address from the ARP table.

    When `options['ip']` is given, the result is limited to that address;
    `options['interface']` limits the lookup to a single interface.
    """
    cmd = ['arp', '-n']
    if (iface := options.get('interface')):
        cmd.extend(['-i', iface])
    output = run(cmd, capture_output=True)

    wanted_ip = options.get('ip')
    table = {}
    for line in output.stdout.decode().strip().splitlines():
        fields = line.split()
        try:
            entry_ip = str(ipaddress.ip_address(fields[0]))
        except ValueError:
            # Header lines and non-address rows are skipped.
            continue
        if fields[1] != 'ether':
            continue
        if not wanted_ip or entry_ip == wanted_ip:
            table[entry_ip] = fields[2]
    return table
@no_authz_required
@accepts(
    Str('method'),
    List('args'),
    Str('filename'),
    Bool('buffered', default=False),
)
@pass_app(rest=True)
async def download(self, app, method, args, filename, buffered):
    """
    Core helper to call a job marked for download.

    Non-`buffered` downloads will allow job to write to pipe as soon as download URL is requested, job will stay
    blocked meanwhile. `buffered` downloads must wait for job to complete before requesting download URL, job's
    pipe output will be buffered to ramfs.

    Returns the job id and the URL for download.
    """
    if app is not None:
        # This endpoint itself is @no_authz_required, so authorization for the
        # wrapped method has to be enforced explicitly here.
        if not app.authenticated_credentials.authorize('CALL', method):
            raise CallError('Not authorized', errno.EACCES)

    return await self._download(app, method, args, filename, buffered)
async def _download(self, app, method, args, filename, buffered):
    """Start `method` as a job with an output pipe and return (job_id, URL).

    The URL embeds a short-lived auth token tied to the job output.
    """
    serviceobj, methodobj = self.middleware.get_method(method)

    job = await self.middleware.call_with_audit(
        method, serviceobj, methodobj, args, app=app,
        pipes=Pipes(output=self.middleware.pipe(buffered))
    )
    # Token is valid for 300 seconds and scoped to this job's download.
    token = await self.middleware.call('auth.generate_token', 300, {'filename': filename, 'job': job.id}, app=app)
    self.middleware.fileapp.register_job(job.id, buffered)
    return job.id, f'/_download/{job.id}?auth_token={token}'
@private
@no_authz_required
@accepts(Dict('core-job', Int('sleep')))
@job()
def job_test(self, job, data):
    """
    Private no-op method to test a job, simply returning `true`.

    When `sleep` is supplied, progress is reported once a second until the
    requested number of seconds has elapsed.
    """
    duration = data.get('sleep')
    if duration is not None:

        def tick():
            elapsed = 0
            while elapsed < duration:
                job.set_progress((elapsed / duration) * 100)
                time.sleep(1)
                elapsed += 1
            job.set_progress(100)

        worker = threading.Thread(target=tick, daemon=True)
        worker.start()
        worker.join()

    return True
@accepts(Dict(
    'options',
    Str('bind_address', default='0.0.0.0'),
    Int('bind_port', default=3000),
    Bool('threaded', default=False),
))
async def debug(self, data):
    """
    Setup middlewared for remote debugging.

    engine currently used:
      - REMOTE_PDB: Remote vanilla PDB (over TCP sockets)

    options:
      - bind_address: local ip address to bind the remote debug session to
      - bind_port: local port to listen on
      - threaded: run debugger in a new thread instead of the main event loop
    """
    if data['threaded']:
        # Run the debugger off the event loop so it does not block middleware.
        self.middleware.create_task(
            self.middleware.run_in_thread(
                RemotePdb, data['bind_address'], data['bind_port']
            )
        )
    else:
        # Blocks the event loop until the debug session attaches/finishes.
        RemotePdb(data['bind_address'], data['bind_port']).set_trace()
@private
async def profile(self, method, params=None):
    """Call `method` with the given params with profiling enabled and return its result."""
    return await self.middleware.call(method, *(params or []), profile=True)
@private
def threads_stacks(self):
    """Return current thread stack traces, as produced by `get_threads_stacks` (debug aid)."""
    return get_threads_stacks()
@private
def get_pid(self):
    """Return the PID of this middleware process."""
    return os.getpid()
@private
def get_oom_score_adj(self, pid):
    """Return the oom_score_adj value for `pid`, or None when it cannot be read."""
    try:
        with open(f'/proc/{pid}/oom_score_adj', 'r') as f:
            contents = f.read()
        return int(contents.strip())
    except ValueError:
        self.logger.error("Value inside of /proc/%r/oom_score_adj is NOT a number.", pid)
    except Exception:
        # Covers missing /proc entries (dead pid) and permission problems.
        self.logger.error("Unexpected error looking up process %r.", pid, exc_info=True)
    return None
@no_authz_required
@accepts(Str("method"), List("params", items=[List("params")]), Str("description", null=True, default=None))
@job(lock=lambda args: f"bulk:{args[0]}")
@pass_app()
async def bulk(self, app, job, method, params, description):
    """
    Will sequentially call `method` with arguments from the `params` list. For example, running

        call("core.bulk", "zfs.snapshot.delete", [["tank@snap-1", true], ["tank@snap-2", false]])

    will call

        call("zfs.snapshot.delete", "tank@snap-1", true)
        call("zfs.snapshot.delete", "tank@snap-2", false)

    If the first call fails and the seconds succeeds (returning `true`), the result of the overall call will be:

        [
            {"result": null, "error": "Error deleting snapshot"},
            {"result": true, "error": null}
        ]

    Important note: the execution status of `core.bulk` will always be a `SUCCESS` (unless an unlikely internal
    error occurs). Caller must check for individual call results to ensure the absence of any call errors.

    `description` contains format string for job progress (e.g. "Deleting snapshot {0[dataset]}@{0[name]}")
    """
    serviceobj, methodobj = self.middleware.get_method(method)

    if params:
        # Honor any test mock registered for this method/argument pair.
        if mock := self.middleware._mock_method(method, params[0]):
            methodobj = mock

    if app is not None:
        # bulk itself is @no_authz_required, so authorize the wrapped method.
        if not app.authenticated_credentials.authorize("CALL", method):
            await self.middleware.log_audit_message_for_method(
                method, methodobj, params[0] if params else [], app, True, False, False,
            )
            raise CallError("Not authorized", errno.EPERM)

    statuses = []
    if not params:
        return statuses

    for i, p in enumerate(params):
        progress_description = f"{i} / {len(params)}"
        if description is not None:
            progress_description += ": " + description.format(*p)

        job.set_progress(100 * i / len(params), progress_description)

        try:
            # Convention for the auditing backend is to only generate audit
            # entries for external callers to methods. app is only None
            # on internal calls to core.bulk.
            if app:
                msg = await self.middleware.call_with_audit(method, serviceobj, methodobj, p, app=app)
            else:
                msg = await self.middleware.call(method, *p)
            status = {"result": msg, "error": None}

            # If the call spawned a job, wait for it and report its outcome.
            if isinstance(msg, Job):
                b_job = msg
                status["job_id"] = b_job.id
                status["result"] = await msg.wait()

                if b_job.error:
                    status["error"] = b_job.error

            statuses.append(status)
        except Exception as e:
            # Per-item failures are recorded, never propagated.
            statuses.append({"result": None, "error": str(e)})

    return statuses
# In-memory mirror of environment variables applied via `core.environ_update`,
# served back by `core.environ`. NOTE(review): class-level mutable attribute —
# assumes a single instance of this service exists; confirm before reusing.
_environ = {}
@private
async def environ(self):
    """Return the environment variable overrides applied via `environ_update`."""
    return self._environ
@private
async def environ_update(self, update):
    """Apply `update` via the module-level `environ_update` helper and keep the
    local `_environ` mirror in sync; a value of None removes the key."""
    environ_update(update)

    for key, value in update.items():
        if value is None:
            self._environ.pop(key, None)
        else:
            self._environ[key] = value

    self.middleware.send_event('core.environ', 'CHANGED', fields=update)
# Matches backtick-quoted argument names (e.g. `hostname`) inside docstrings.
RE_ARG = re.compile(r'`[a-z0-9_]+`', flags=re.IGNORECASE)
# Heuristic marker for the start of a new argument paragraph: a backtick,
# a capital letter, or a '*' bullet at the beginning of the line.
RE_NEW_ARG_START = re.compile(r'`|[A-Z]|\*')
def _cli_args_descriptions(self, doc, names):
    """Extract per-argument description paragraphs from a method docstring.

    Scans `doc` line by line for backtick-quoted words that appear in `names`
    and accumulates the surrounding paragraph text for each. Returns a dict
    mapping argument name -> joined description text.
    """
    descriptions = defaultdict(list)

    current_names = set()
    current_doc = []
    for line in (doc or '').split('\n'):
        if (
            (matched_line_names := {name.strip('`') for name in self.RE_ARG.findall(line)}) and
            (line_names := matched_line_names & names)
        ):
            # Either this line continues the current argument paragraph, or it
            # starts a new one (per RE_NEW_ARG_START); flush on a new start.
            if line_names & current_names or not self.RE_NEW_ARG_START.match(line):
                current_names |= line_names
            else:
                for name in current_names:
                    descriptions[name] += current_doc

                current_names = line_names
                current_doc = []

            current_doc.append(line)
        elif line:
            current_doc.append(line)
        else:
            # Blank line ends the paragraph; attribute it to collected names.
            for name in current_names:
                descriptions[name] += current_doc

            current_names = set()
            current_doc = []

    # NOTE(review): a trailing paragraph with no blank line after it is never
    # flushed — appears intentional/tolerated since inspect.getdoc() trims
    # docstrings; confirm before relying on last-paragraph descriptions.
    return {
        k: '\n'.join(v)
        for k, v in descriptions.items()
    }
@no_auth_required
@api_method(CoreSetOptionsArgs, CoreSetOptionsResult, rate_limit=False)
@pass_app()
async def set_options(self, app, options):
    """Set per-connection options; only `py_exceptions` is handled here."""
    if "py_exceptions" in options:
        app.py_exceptions = options["py_exceptions"]
@no_auth_required
@api_method(CoreSubscribeArgs, CoreSubscribeResult)
@pass_app()
async def subscribe(self, app, event):
    """Subscribe this connection to `event` and return the subscription id."""
    if not self.middleware.can_subscribe(app, event):
        raise CallError('Not authorized', errno.EACCES)

    subscription_id = str(uuid.uuid4())
    await app.subscribe(subscription_id, event)
    return subscription_id
@no_auth_required
@api_method(CoreUnsubscribeArgs, CoreUnsubscribeResult)
@pass_app()
async def unsubscribe(self, app, ident):
    """Remove the event subscription previously created by `subscribe`."""
    await app.unsubscribe(ident)
| 34,009 | Python | .py | 765 | 31.665359 | 150 | 0.549272 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,453 | crud_service.py | truenas_middleware/src/middlewared/middlewared/service/crud_service.py | import asyncio
import copy
import errno
from pydantic import create_model, Field
from typing_extensions import Annotated
from middlewared.api import api_method
from middlewared.api.base.model import BaseModel
from middlewared.api.current import QueryArgs, QueryOptions
from middlewared.service_exception import CallError, InstanceNotFound
from middlewared.schema import accepts, Any, Bool, convert_schema, Dict, Int, List, OROperator, Patch, Ref, returns
from middlewared.utils import filter_list
from middlewared.utils.type import copy_function_metadata
from .base import ServiceBase
from .decorators import filterable, pass_app, private
from .service import Service
from .service_mixin import ServiceChangeMixin
PAGINATION_OPTS = ('count', 'get', 'limit', 'offset', 'select')
def get_datastore_primary_key_schema(klass):
return convert_schema({
'type': klass._config.datastore_primary_key_type,
'name': klass._config.datastore_primary_key,
})
def get_instance_args(entry):
return create_model(
entry.__name__.removesuffix("Entry") + "GetInstanceArgs",
__base__=(BaseModel,),
id=Annotated[entry.model_fields["id"].annotation, Field()],
options=Annotated[QueryOptions, Field(default={})],
)
def get_instance_result(entry):
return create_model(
entry.__name__.removesuffix("Entry") + "GetInstanceResult",
__base__=(BaseModel,),
__module__=entry.__module__,
result=Annotated[entry, Field()],
)
def query_result(item):
return create_model(
item.__name__.removesuffix("Entry") + "QueryResult",
__base__=(BaseModel,),
__module__=item.__module__,
result=Annotated[list[item] | item | int, Field()],
)
class CRUDServiceMetabase(ServiceBase):
def __new__(cls, name, bases, attrs):
klass = super().__new__(cls, name, bases, attrs)
if any(
name == c_name and len(bases) == len(c_bases) and all(b.__name__ == c_b for b, c_b in zip(bases, c_bases))
for c_name, c_bases in (
('CRUDService', ('ServiceChangeMixin', 'Service')),
('SharingTaskService', ('CRUDService',)),
('SharingService', ('SharingTaskService',)),
('TaskPathService', ('SharingTaskService',)),
)
):
return klass
if klass._config.entry is not None:
# FIXME: This is to prevent `Method cloudsync.credentials.ENTRY is public but has no @accepts()`, remove
# eventually.
klass.ENTRY = None
# FIXME: Remove `wraps` handling when we get rid of `@filterable` in `CRUDService.query` definition
klass.query = api_method(QueryArgs, query_result(klass._config.entry))(
klass.query.wraps if hasattr(klass.query, "wraps") else klass.query
)
# FIXME: Remove `wraps` handling when we get rid of `@accepts` in `CRUDService.get_instance` definition
klass.get_instance = api_method(get_instance_args(klass._config.entry),
get_instance_result(klass._config.entry))(klass.get_instance.wraps)
return klass
namespace = klass._config.namespace.replace('.', '_')
entry_key = f'{namespace}_entry'
if klass.ENTRY == NotImplementedError:
klass.ENTRY = Dict(entry_key, additional_attrs=True)
else:
# We would like to ensure that not all fields are required as select can filter out fields
if isinstance(klass.ENTRY, (Dict, Patch)):
entry_key = klass.ENTRY.name
elif isinstance(klass.ENTRY, Ref):
entry_key = f'{klass.ENTRY.name}_ref_entry'
else:
raise ValueError('Result entry should be Dict/Patch/Ref instance')
result_entry = copy.deepcopy(klass.ENTRY)
query_result_entry = copy.deepcopy(klass.ENTRY)
if isinstance(result_entry, Ref):
query_result_entry = Patch(result_entry.name, entry_key)
if isinstance(result_entry, Patch):
query_result_entry.patches.append(('attr', {'update': True}))
else:
query_result_entry.update = True
result_entry.register = True
query_result_entry.register = False
query_method = klass.query.wraps if hasattr(klass.query, 'returns') else klass.query
klass.query = returns(OROperator(
List('query_result', items=[copy.deepcopy(query_result_entry)]),
query_result_entry,
Int('count'),
result_entry,
name='query_result',
))(query_method)
klass.get_instance = returns(Ref(entry_key))(klass.get_instance)
for m_name in filter(lambda m: hasattr(klass, m), ('do_create', 'do_update')):
for d_name, decorator in filter(
lambda d: not hasattr(getattr(klass, m_name), d[0]), (('returns', returns), ('accepts', accepts))
):
new_name = f'{namespace}_{m_name.split("_")[-1]}'
if d_name == 'returns':
new_name += '_returns'
patch_entry = Patch(entry_key, new_name, register=True)
schema = []
if d_name == 'accepts':
patch_entry.patches.append(('rm', {
'name': klass._config.datastore_primary_key,
'safe_delete': True,
}))
if m_name == 'do_update':
patch_entry.patches.append(('attr', {'update': True}))
schema.append(get_datastore_primary_key_schema(klass))
schema.append(patch_entry)
setattr(klass, m_name, decorator(*schema)(getattr(klass, m_name)))
if hasattr(klass, 'do_delete'):
if not hasattr(klass.do_delete, 'accepts'):
klass.do_delete = accepts(get_datastore_primary_key_schema(klass))(klass.do_delete)
if not hasattr(klass.do_delete, 'returns'):
klass.do_delete = returns(Bool(
'deleted', description='Will return `true` if `id` is deleted successfully'
))(klass.do_delete)
return klass
class CRUDService(ServiceChangeMixin, Service, metaclass=CRUDServiceMetabase):
"""
CRUD service abstract class
Meant for services in that a set of entries can be queried, new entry
create, updated and/or deleted.
CRUD stands for Create Retrieve Update Delete.
"""
ENTRY = NotImplementedError
def __init__(self, middleware):
super().__init__(middleware)
if self._config.event_register:
if self._config.role_prefix:
roles = [f'{self._config.role_prefix}_READ']
else:
roles = ['READONLY_ADMIN']
if self._config.entry is not None:
kwargs = dict(new_style_returns=self._config.entry)
else:
kwargs = dict(returns=Ref(self.ENTRY.name))
self.middleware.event_register(
f'{self._config.namespace}.query',
f'Sent on {self._config.namespace} changes.',
private=self._config.private,
roles=roles,
**kwargs,
)
@private
async def get_options(self, options):
options = options or {}
options['extend'] = self._config.datastore_extend
options['extend_context'] = self._config.datastore_extend_context
options['prefix'] = self._config.datastore_prefix
return options
@filterable
async def query(self, filters, options):
if not self._config.datastore:
raise NotImplementedError(
f'{self._config.namespace}.query must be implemented or a '
'`datastore` Config attribute provided.'
)
if not filters:
filters = []
options = await self.get_options(options)
# In case we are extending which may transform the result in numerous ways
# we can only filter the final result. Exception is when forced to use sql
# for filters for performance reasons.
if not options['force_sql_filters'] and options['extend']:
datastore_options = options.copy()
for option in PAGINATION_OPTS:
datastore_options.pop(option, None)
result = await self.middleware.call(
'datastore.query', self._config.datastore, [], datastore_options
)
return await self.middleware.run_in_thread(
filter_list, result, filters, options
)
else:
return await self.middleware.call(
'datastore.query', self._config.datastore, filters, options,
)
@pass_app(rest=True)
async def create(self, app, audit_callback, data):
return await self.middleware._call(
f'{self._config.namespace}.create', self, await self._get_crud_wrapper_func(
self.do_create, 'create', 'ADDED',
), [data], app=app, audit_callback=audit_callback,
)
create.audit_callback = True
@pass_app(rest=True)
async def update(self, app, audit_callback, id_, data):
return await self.middleware._call(
f'{self._config.namespace}.update', self, await self._get_crud_wrapper_func(
self.do_update, 'update', 'CHANGED', id_,
), [id_, data], app=app, audit_callback=audit_callback,
)
update.audit_callback = True
@pass_app(rest=True)
async def delete(self, app, audit_callback, id_, *args):
return await self.middleware._call(
f'{self._config.namespace}.delete', self, await self._get_crud_wrapper_func(
self.do_delete, 'delete', 'REMOVED', id_,
), [id_] + list(args), app=app, audit_callback=audit_callback,
)
delete.audit_callback = True
async def _get_crud_wrapper_func(self, func, action, event_type, oid=None):
if asyncio.iscoroutinefunction(func):
async def nf(*args, **kwargs):
rv = await func(*args, **kwargs)
await self.middleware.call_hook(f'{self._config.namespace}.post_{action}', rv)
if self._config.event_send and (action == 'delete' or isinstance(rv, dict) and 'id' in rv):
self.middleware.send_event(f'{self._config.namespace}.query', event_type, id=oid or rv['id'])
return rv
else:
def nf(*args, **kwargs):
rv = func(*args, **kwargs)
self.middleware.call_hook_sync(f'{self._config.namespace}.post_{action}', rv)
if self._config.event_send and (action == 'delete' or isinstance(rv, dict) and 'id' in rv):
self.middleware.send_event(f'{self._config.namespace}.query', event_type, id=oid or rv['id'])
return rv
copy_function_metadata(func, nf)
return nf
@accepts(
Any('id'),
Patch(
'query-options', 'query-options-get_instance',
('edit', {
'name': 'force_sql_filters',
'method': lambda x: setattr(x, 'default', True),
}),
register=True,
),
)
async def get_instance(self, id_, options):
"""
Returns instance matching `id`. If `id` is not found, Validation error is raised.
Please see `query` method documentation for `options`.
"""
instance = await self.middleware.call(
f'{self._config.namespace}.query',
[[self._config.datastore_primary_key, '=', id_]],
options
)
if not instance:
raise InstanceNotFound(f'{self._config.verbose_name} {id_} does not exist')
return instance[0]
@private
@accepts(Any('id'), Ref('query-options-get_instance'))
def get_instance__sync(self, id_, options):
"""
Synchronous implementation of `get_instance`.
"""
instance = self.middleware.call_sync(
f'{self._config.namespace}.query',
[[self._config.datastore_primary_key, '=', id_]],
options,
)
if not instance:
raise InstanceNotFound(f'{self._config.verbose_name} {id_} does not exist')
return instance[0]
async def _ensure_unique(self, verrors, schema_name, field_name, value, id_=None):
f = [(field_name, '=', value)]
if id_ is not None:
f.append(('id', '!=', id_))
instance = await self.middleware.call(f'{self._config.namespace}.query', f)
if instance:
verrors.add(
'.'.join(filter(None, [schema_name, field_name])),
f'Object with this {field_name} already exists'
)
@private
async def check_dependencies(self, id_, ignored=None):
"""
Raises EBUSY CallError if some datastores/services (except for `ignored`) reference object specified by id.
"""
dependencies = await self.get_dependencies(id_, ignored)
if dependencies:
dep_err = 'This object is being used by following service(s):\n'
for index, dependency in enumerate(dependencies.values()):
key = 'service' if dependency['service'] else 'datastore'
dep_err += f'{index + 1}) {dependency[key]!r} {key.capitalize()}\n'
raise CallError(dep_err, errno.EBUSY, {'dependencies': list(dependencies.values())})
@private
async def get_dependencies(self, id_, ignored=None):
ignored = ignored or set()
services = {
service['config'].get('datastore'): (name, service)
for name, service in (await self.middleware.call('core.get_services')).items()
if service['config'].get('datastore')
}
dependencies = {}
for datastore, fk in await self.middleware.call('datastore.get_backrefs', self._config.datastore):
if datastore in ignored:
continue
if datastore in services:
service = {
'name': services[datastore][0],
'type': services[datastore][1]['type'],
}
if service['name'] in ignored:
continue
else:
service = None
objects = await self.middleware.call('datastore.query', datastore, [(fk, '=', id_)])
if objects:
data = {
'objects': objects,
}
if service is not None:
query_col = fk
prefix = services[datastore][1]['config'].get('datastore_prefix')
if prefix:
if query_col.startswith(prefix):
query_col = query_col[len(prefix):]
if service['type'] == 'config':
data = {
'key': query_col,
}
if service['type'] == 'crud':
data = {
'objects': await self.middleware.call(
f'{service["name"]}.query', [('id', 'in', [object_['id'] for object_ in objects])],
),
}
dependencies[datastore] = dict({
'datastore': datastore,
'service': service['name'] if service else None,
}, **data)
return dependencies
| 15,747 | Python | .py | 334 | 35.080838 | 118 | 0.574109 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,454 | system_service.py | truenas_middleware/src/middlewared/middlewared/service/system_service.py | from middlewared.schema import accepts
from .config_service import ConfigService
from .decorators import private
class SystemServiceService(ConfigService):
    """
    System service class

    Meant for services that manage system services configuration.
    """

    @accepts()
    async def config(self):
        # Lazily create the singleton config row when it does not exist yet.
        return await self._get_or_insert(
            self._config.datastore, {
                'extend': self._config.datastore_extend,
                'extend_context': self._config.datastore_extend_context,
                'prefix': self._config.datastore_prefix
            }
        )

    @private
    async def _update_service(self, old, new, verb=None, options=None):
        """Persist `new` over `old` in the datastore and trigger the configured
        service action (`verb` overrides the default service_verb)."""
        await self.middleware.call(
            'datastore.update', self._config.datastore, old['id'], new, {'prefix': self._config.datastore_prefix}
        )

        fut = self._service_change(self._config.service, verb or self._config.service_verb, options)
        if self._config.service_verb_sync:
            await fut
        else:
            # Fire-and-forget when the verb does not need to complete inline.
            self.middleware.create_task(fut)
24,455 | __init__.py | truenas_middleware/src/middlewared/middlewared/service/__init__.py | from middlewared.schema import accepts, returns # noqa
from middlewared.service_exception import ( # noqa
CallException, CallError, InstanceNotFound, ValidationError, ValidationErrors
)
from middlewared.utils import filter_list # noqa
from .compound_service import CompoundService # noqa
from .config_service import ConfigService # noqa
from .core_service import CoreService, MIDDLEWARE_RUN_DIR, MIDDLEWARE_STARTED_SENTINEL_PATH # noqa
from .crud_service import CRUDService # noqa
from .decorators import ( # noqa
cli_private, filterable, filterable_returns, item_method, job, lock, no_auth_required,
no_authz_required, pass_app, periodic, private, rest_api_metadata, skip_arg, threaded,
)
from .service import Service # noqa
from .service_mixin import ServiceChangeMixin # noqa
from .service_part import ServicePartBase # noqa
from .sharing_service import SharingService, SharingTaskService, TaskPathService # noqa
from .system_service import SystemServiceService # noqa
# Service base classes that are abstract: they are subclassed by concrete
# services and are not meant to be registered/instantiated themselves.
ABSTRACT_SERVICES = (  # noqa
    CompoundService, ConfigService, CRUDService, SharingService, SharingTaskService,
    SystemServiceService, TaskPathService
)
| 1,145 | Python | .py | 22 | 50 | 98 | 0.816964 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,456 | config_service.py | truenas_middleware/src/middlewared/middlewared/service/config_service.py | import asyncio
import copy
from middlewared.schema import accepts, Dict, Patch, returns
from .base import ServiceBase
from .decorators import private
from .service import Service
from .service_mixin import ServiceChangeMixin
# Module-wide lock serializing the insert-if-missing path of
# ConfigService._get_or_insert so concurrent callers insert at most one row.
get_or_insert_lock = asyncio.Lock()
class ConfigServiceMetabase(ServiceBase):
    """
    Metaclass for ConfigService subclasses.

    Wires schema plumbing onto each concrete config service: attaches a
    `returns` schema to `config()` based on the class' ENTRY, and, when the
    class defines `do_update`, synthesizes `accepts`/`returns` schemas for it
    from the same ENTRY unless they were declared explicitly.
    """

    def __new__(cls, name, bases, attrs):
        klass = super().__new__(cls, name, bases, attrs)
        # The abstract bases themselves (ConfigService, SystemServiceService)
        # must not get schemas generated — only concrete subclasses do.
        if any(
            name == c_name and len(bases) == len(c_bases) and all(
                b.__name__ == c_b for b, c_b in zip(bases, c_bases)
            )
            for c_name, c_bases in (
                ('ConfigService', ('ServiceChangeMixin', 'Service')),
                ('SystemServiceService', ('ConfigService',)),
            )
        ):
            return klass

        namespace = klass._config.namespace.replace('.', '_')
        config_entry_key = f'{namespace}_entry'

        # Default ENTRY: a permissive Dict when the subclass declared none.
        if klass.ENTRY == NotImplementedError:
            klass.ENTRY = Dict(config_entry_key, additional_attrs=True)

        config_entry_key = klass.ENTRY.name
        # Deep-copy so registering the schema does not mutate the class ENTRY.
        config_entry = copy.deepcopy(klass.ENTRY)
        config_entry.register = True
        klass.config = returns(config_entry)(klass.config)

        if hasattr(klass, 'do_update'):
            # Only generate decorators do_update does not already carry.
            for m_name, decorator in filter(
                lambda m: not hasattr(klass.do_update, m[0]),
                (('returns', returns), ('accepts', accepts))
            ):
                new_name = f'{namespace}_update'
                if m_name == 'returns':
                    new_name += '_returns'
                patch_entry = Patch(config_entry_key, new_name, register=True)
                schema = [patch_entry]
                if m_name == 'accepts':
                    # The accepts schema drops the primary key and marks all
                    # fields optional (update semantics).
                    patch_entry.patches.append(('rm', {
                        'name': klass._config.datastore_primary_key,
                        'safe_delete': True,
                    }))
                    patch_entry.patches.append(('attr', {'update': True}))
                klass.do_update = decorator(*schema)(klass.do_update)

        return klass
class ConfigService(ServiceChangeMixin, Service, metaclass=ConfigServiceMetabase):
    """
    Config service abstract class

    Meant for services that provide a single set of attributes which can be
    updated or not.
    """

    # Schema describing the single config row; subclasses override, otherwise
    # the metaclass substitutes a permissive Dict.
    ENTRY = NotImplementedError

    @accepts()
    async def config(self):
        """Return this service's single configuration row, creating it if absent."""
        options = {}
        options['extend'] = self._config.datastore_extend
        options['extend_context'] = self._config.datastore_extend_context
        options['prefix'] = self._config.datastore_prefix
        return await self._get_or_insert(self._config.datastore, options)

    async def update(self, data):
        # Route through middleware._call so do_update runs with full middleware
        # semantics, then fire the namespace's post_update hook with the result.
        rv = await self.middleware._call(
            f'{self._config.namespace}.update', self, self.do_update, [data]
        )
        await self.middleware.call_hook(f'{self._config.namespace}.post_update', rv)
        return rv

    @private
    async def _get_or_insert(self, datastore, options):
        # Singleton-row semantics: datastore.config raises IndexError when the
        # table is empty, in which case an empty row is inserted. Double-checked
        # under a module lock so concurrent callers insert only once.
        try:
            return await self.middleware.call('datastore.config', datastore, options)
        except IndexError:
            async with get_or_insert_lock:
                try:
                    return await self.middleware.call('datastore.config', datastore, options)
                except IndexError:
                    await self.middleware.call('datastore.insert', datastore, {})
                    return await self.middleware.call('datastore.config', datastore, options)
| 3,517 | Python | .py | 78 | 34.089744 | 93 | 0.596081 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
def service_config(klass, config):
    """
    Build the ``Config`` type that ``ServiceBase`` attaches to a service class.

    `klass` supplies the defaults: its name minus a trailing ``Service`` becomes
    the namespace (lowercased) and the verbose name. `config` is a mapping of
    overrides taken from the service's inner ``Config`` class; keys beginning
    with an underscore are ignored.
    """
    base_name = klass.__name__
    if base_name.endswith('Service'):
        base_name = base_name[:-len('Service')]

    attributes = {
        'datastore': None,
        'datastore_prefix': '',
        'datastore_extend': None,
        'datastore_extend_context': None,
        'datastore_primary_key': 'id',
        'datastore_primary_key_type': 'integer',
        'entry': None,
        'event_register': True,
        'event_send': True,
        'service': None,
        'service_verb': 'reload',
        'service_verb_sync': True,
        'namespace': base_name.lower(),
        'namespace_alias': None,
        'private': False,
        'thread_pool': None,
        'process_pool': None,
        'cli_namespace': None,
        'cli_private': False,
        'cli_description': None,
        'role_prefix': None,
        'role_separate_delete': False,
        'verbose_name': klass.__name__.replace('Service', ''),
    }
    attributes.update((k, v) for k, v in config.items() if not k.startswith('_'))
    return type('Config', (), attributes)
class ServiceBase(type):
    """
    Metaclass of all services

    This metaclass instantiates a `_config` attribute in the service instance
    from options provided in a Config class, e.g.

    class MyService(Service):

        class Meta:
            namespace = 'foo'
            private = False

    Currently the following options are allowed:
      - datastore: name of the datastore mainly used in the service
      - datastore_extend: datastore `extend` option used in common `query` method
      - datastore_prefix: datastore `prefix` option used in helper methods
      - service: system service `name` option used by `SystemServiceService`
      - service_verb: verb to be used on update (default to `reload`)
      - namespace: namespace identifier of the service
      - namespace_alias: another namespace identifier of the service, mostly used to rename and
                         slowly deprecate old name.
      - private: whether or not the service is deemed private
      - verbose_name: human-friendly singular name for the service
      - thread_pool: thread pool to use for threaded methods
      - process_pool: process pool to run service methods
      - cli_namespace: replace namespace identifier for CLI
      - cli_private: if the service is not private, this flags whether or not the service is visible in the CLI
    """

    def __new__(cls, name, bases, attrs):
        super_new = super(ServiceBase, cls).__new__
        # The root `Service` class itself gets no _config processing.
        if name == 'Service' and bases == ():
            return super_new(cls, name, bases, attrs)

        # Pop the inner Config class so it does not remain a class attribute.
        config = attrs.pop('Config', None)
        klass = super_new(cls, name, bases, attrs)

        # Record only what the service explicitly declared (underscore keys
        # are dropped); defaults are filled in by service_config().
        if config:
            klass._config_specified = {k: v for k, v in config.__dict__.items() if not k.startswith('_')}
        else:
            klass._config_specified = {}

        klass._config = service_config(klass, klass._config_specified)
        return klass
| 3,062 | Python | .py | 73 | 33.726027 | 111 | 0.624454 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,458 | sharing_service.py | truenas_middleware/src/middlewared/middlewared/service/sharing_service.py | from middlewared.async_validators import check_path_resides_within_volume
from middlewared.plugins.zfs_.validation_utils import check_zvol_in_boot_pool_using_path
from middlewared.utils.path import FSLocation, path_location, strip_location_prefix
from .crud_service import CRUDService
from .decorators import pass_app, private
class SharingTaskService(CRUDService):
    """
    Base CRUD service for shares and tasks whose rows reference a filesystem
    path that may live on a locked (encrypted) dataset. Adds a computed
    `locked` field to query results and one-shot alerts for locked rows.
    """

    # Row field holding the filesystem path.
    path_field = 'path'
    # Path location types accepted by this service (see FSLocation).
    allowed_path_types = [FSLocation.LOCAL]
    enabled_field = 'enabled'
    # Computed per-row flag: True when the path is on a locked dataset.
    locked_field = 'locked'
    # Subclasses must supply the alert class and a human-readable type label.
    locked_alert_class = NotImplemented
    share_task_type = NotImplemented

    @private
    async def get_path_field(self, data):
        # Subclasses may override when the path is derived rather than stored.
        return data[self.path_field]

    @private
    async def sharing_task_extend_context(self, rows, extra):
        """Build the shared context used by sharing_task_extend for each row."""
        if extra.get('use_cached_locked_datasets', True):
            locked_ds_endpoint = 'pool.dataset.locked_datasets_cached'
        else:
            locked_ds_endpoint = 'zfs.dataset.locked_datasets'

        # If the caller selects specific fields and `locked` is not among them,
        # skip the (potentially expensive) locked-dataset retrieval entirely.
        if extra.get('select'):
            select_fields = []
            for entry in extra['select']:
                if isinstance(entry, list) and entry:
                    select_fields.append(entry[0])
                elif isinstance(entry, str):
                    # Just being extra sure so that we don't crash
                    select_fields.append(entry)

            if self.locked_field not in select_fields:
                extra['retrieve_locked_info'] = False

        return {
            'locked_datasets': await self.middleware.call(
                locked_ds_endpoint
            ) if extra.get('retrieve_locked_info', True) else [],
            'service_extend': (
                await self.middleware.call(self._config.datastore_extend_context, rows, extra)
                if self._config.datastore_extend_context else None
            ),
            'retrieve_locked_info': extra.get('retrieve_locked_info', True),
        }

    @private
    async def validate_cluster_path(self, verrors, name, volname, path):
        # Cluster volumes are unsupported here; always a validation error.
        verrors.add(name, 'cluster volume is not mounted.')

    @private
    async def validate_external_path(self, verrors, name, path):
        # Services with external paths must implement their own
        # validation here because we can't predict what is required.
        raise NotImplementedError

    @private
    async def validate_zvol_path(self, verrors, name, path):
        if check_zvol_in_boot_pool_using_path(path):
            verrors.add(name, 'Disk residing in boot pool cannot be consumed and is not supported')

    @private
    async def validate_local_path(self, verrors, name, path):
        await check_path_resides_within_volume(verrors, self.middleware, name, path)

    @private
    async def validate_path_field(self, data, schema, verrors):
        """Validate the row's path field, dispatching on its location type."""
        name = f'{schema}.{self.path_field}'
        path = await self.get_path_field(data)
        await self.validate_zvol_path(verrors, name, path)
        loc = path_location(path)

        if loc not in self.allowed_path_types:
            verrors.add(name, f'{loc.name}: path type is not allowed.')

        elif loc is FSLocation.CLUSTER:
            verrors.add(name, f'{path}: path within cluster volume must be specified.')

        elif loc is FSLocation.EXTERNAL:
            await self.validate_external_path(verrors, name, strip_location_prefix(path))

        elif loc is FSLocation.LOCAL:
            await self.validate_local_path(verrors, name, path)

        else:
            # New FSLocation member without corresponding validation logic.
            self.logger.error('%s: unknown location type', loc.name)
            raise NotImplementedError

        return verrors

    @private
    async def sharing_task_determine_locked(self, data, locked_datasets):
        """Return True when the row's (local) path resides in a locked dataset."""
        path = await self.get_path_field(data)
        # Only local paths can be within locked ZFS datasets.
        if path_location(path) is not FSLocation.LOCAL:
            return False

        return await self.middleware.call(
            'pool.dataset.path_in_locked_datasets', path, locked_datasets
        )

    @private
    async def sharing_task_extend(self, data, context):
        args = [data] + ([context['service_extend']] if self._config.datastore_extend_context else [])

        if self._config.datastore_extend:
            data = await self.middleware.call(self._config.datastore_extend, *args)

        if context['retrieve_locked_info']:
            data[self.locked_field] = await self.middleware.call(
                f'{self._config.namespace}.sharing_task_determine_locked', data, context['locked_datasets']
            )

        return data

    @private
    async def get_options(self, options):
        # Force queries through the sharing-task extend pipeline defined above.
        return {
            **(await super().get_options(options)),
            'extend': f'{self._config.namespace}.sharing_task_extend',
            'extend_context': f'{self._config.namespace}.sharing_task_extend_context',
        }

    @private
    async def human_identifier(self, share_task):
        # Subclasses return the string shown to users in locked alerts.
        raise NotImplementedError

    @private
    async def generate_locked_alert(self, share_task_id):
        share_task = await self.get_instance(share_task_id)
        await self.middleware.call(
            'alert.oneshot_create', self.locked_alert_class,
            {**share_task, 'identifier': await self.human_identifier(share_task), 'type': self.share_task_type}
        )

    @private
    async def remove_locked_alert(self, share_task_id):
        await self.middleware.call(
            'alert.oneshot_delete', self.locked_alert_class, f'"{self.share_task_type}_{share_task_id}"'
        )

    @pass_app(rest=True)
    async def update(self, app, audit_callback, id_, data):
        rv = await super().update(app, audit_callback, id_, data)

        # Clear any stale locked alert once the row is disabled or unlocked.
        if not rv[self.enabled_field] or not rv[self.locked_field]:
            await self.remove_locked_alert(rv['id'])

        return rv

    update.audit_callback = True

    @pass_app(rest=True)
    async def delete(self, app, audit_callback, id_, *args):
        rv = await super().delete(app, audit_callback, id_, *args)
        await self.remove_locked_alert(id_)
        return rv

    delete.audit_callback = True
class SharingService(SharingTaskService):
    # One-shot alert class raised when a share's dataset is locked.
    locked_alert_class = 'ShareLocked'

    @private
    async def human_identifier(self, share_task):
        """Shares are identified to users by their `name` field."""
        return share_task['name']
class TaskPathService(SharingTaskService):
    # One-shot alert class raised when a task's dataset is locked.
    locked_alert_class = 'TaskLocked'

    @private
    async def human_identifier(self, share_task):
        """Tasks are identified to users by their filesystem path."""
        return await self.get_path_field(share_task)
| 6,395 | Python | .py | 137 | 37.620438 | 111 | 0.654551 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,459 | service.py | truenas_middleware/src/middlewared/middlewared/service/service.py | from middlewared.logger import Logger
from .base import ServiceBase
class Service(object, metaclass=ServiceBase):
    """
    Root class for all middleware services.

    Intended for services that do not follow any specialized pattern; the
    ServiceBase metaclass attaches the `_config` attribute.
    """

    def __init__(self, middleware):
        self.middleware = middleware
        self.logger = Logger(type(self).__name__).getLogger()
| 368 | Python | .py | 10 | 31.8 | 63 | 0.714689 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,460 | decorators.py | truenas_middleware/src/middlewared/middlewared/service/decorators.py | import asyncio
import threading
from collections import defaultdict, namedtuple
from functools import wraps
from middlewared.schema import accepts, Int, List, OROperator, Ref, returns
# Per-name asyncio locks used by @lock for coroutine functions; created lazily.
LOCKS = defaultdict(asyncio.Lock)
# (interval, run_on_start) descriptor attached to methods by @periodic.
PeriodicTaskDescriptor = namedtuple('PeriodicTaskDescriptor', ['interval', 'run_on_start'])
# Per-name threading locks used by @lock for plain (sync) functions.
THREADING_LOCKS = defaultdict(threading.Lock)
def cli_private(fn):
    """Hide the decorated method from the interactive CLI."""
    setattr(fn, '_cli_private', True)
    return fn
def filterable(fn=None, /, *, roles=None):
    """
    Mark the decorated method as a filterable query endpoint accepting the
    standard `query-filters` / `query-options` arguments.

    Usable both bare (`@filterable`) and with arguments (`@filterable(roles=...)`).
    """
    def apply(target):
        target._filterable = True
        if hasattr(target, 'wraps'):
            target.wraps._filterable = True
        return accepts(Ref('query-filters'), Ref('query-options'), roles=roles)(target)

    # Called with parentheses -> return the decorator; bare -> decorate now.
    return apply if fn is None else apply(fn)
def filterable_returns(schema):
    """
    Declare the return schema of a filterable method: an integer count, a
    single item (`get`), or a list of items, depending on query options.
    """
    def apply(fn):
        combined = OROperator(
            Int('count'),
            schema,
            List('query_result', items=[schema.copy()]),
            name='filterable_result',
        )
        fn._filterable_schema = combined
        if hasattr(fn, 'wraps'):
            fn.wraps._filterable_schema = combined
        return returns(combined)(fn)

    return apply
def item_method(fn):
    """
    Flag the decorated method as an item method: one that operates on a single
    item of the collection, addressed by a unique identifier.
    """
    setattr(fn, '_item_method', True)
    return fn
def job(lock=None, lock_queue_size=5, logs=False, process=False, pipes=None,
        check_pipes=True, transient=False, description=None, abortable=False):
    """
    Flag a method as a long-running job. Must be the first decorator applied
    (i.e. specified last). The wrapped method receives a
    :class:`middlewared.job.Job` object as its first argument.

    :param lock: lock name (string, or a callable mapping the raw call args to
        a name) preventing jobs that share the name from running concurrently;
        the lock namespace is global across methods. `None` disables locking.
    :param lock_queue_size: how many jobs with this lock may wait in `WAITING`
        state; beyond that the new job is discarded and the id of the last
        queued job is returned. `0` raises `EBUSY` instead of queueing;
        `None` means an unbounded queue. Default `5`.
    :param logs: when `True`, `job.logs_fd` is an unbuffered binary file whose
        contents end up in `/var/log/jobs/{id}.log`.
    :param process: when `True`, the job body runs in a separate process
        instead of the main middleware process.
    :param pipes: list of pipes the job exposes: `"input"`, `"output"` or both.
        Read via `job.pipes.input.r`, write via `job.pipes.output.w` (binary).
    :param check_pipes: when `True` (default), verify the caller opened every
        declared pipe; when `False`, the job must call `job.check_pipe(...)`
        itself before using one.
    :param transient: when `True`, suppress `core.get_jobs` ADDED/CHANGED
        events and remove the job from `core.get_jobs` on completion (useful
        for periodic internal jobs).
    :param description: callable mapping the raw call args to a human-readable
        description shown in the task manager UI.
    :param abortable: when `True`, the job may be aborted from the UI;
        `asyncio.CancelledError` is raised inside the (async-only) job method.
    """
    settings = {
        'lock': lock,
        'lock_queue_size': lock_queue_size,
        'logs': logs,
        'process': process,
        'pipes': pipes or [],
        'check_pipes': check_pipes,
        'transient': transient,
        'description': description,
        'abortable': abortable,
    }

    def mark(fn):
        # Fresh dict per decorated function so later mutation cannot leak.
        fn._job = dict(settings)
        return fn

    return mark
def lock(lock_str):
    """
    Serialize calls to the decorated callable under the global lock named
    `lock_str`. Coroutine functions use an asyncio lock; plain functions use a
    threading lock. The same name always maps to the same lock object.
    """
    def apply(fn):
        if asyncio.iscoroutinefunction(fn):
            guard = LOCKS[lock_str]

            @wraps(fn)
            async def locked(*args, **kwargs):
                async with guard:
                    return await fn(*args, **kwargs)
        else:
            guard = THREADING_LOCKS[lock_str]

            @wraps(fn)
            def locked(*args, **kwargs):
                with guard:
                    return fn(*args, **kwargs)

        return locked

    return apply
def no_auth_required(fn):
    """Allow calling the decorated method without any authentication."""
    setattr(fn, '_no_auth_required', True)
    return fn
def no_authz_required(fn):
    """Allow calling the decorated method without authorization checks."""
    setattr(fn, '_no_authz_required', True)
    return fn
def pass_app(*, require=False, rest=False):
    """
    Have the application instance passed to the decorated method as a
    parameter. `require` makes the app mandatory; `rest` also passes it for
    REST API calls.
    """
    def mark(fn):
        fn._pass_app = {'require': require, 'rest': rest}
        return fn

    return mark
def periodic(interval, run_on_start=True):
    """
    Schedule the decorated method to run every `interval` seconds; when
    `run_on_start` is true it also runs once at middleware startup.
    """
    def mark(fn):
        fn._periodic = PeriodicTaskDescriptor(interval, run_on_start)
        return fn

    return mark
def private(fn):
    """Exclude the decorated method from the public API."""
    setattr(fn, '_private', True)
    return fn
def rest_api_metadata(extra_methods=None):
    """
    Expose the endpoint under additional explicit HTTP methods.

    `extra_methods` lists methods the endpoint should also answer to beyond
    its default one. This keeps backwards compatibility for endpoints that
    used to be GET (no payload) but now take a payload and default to POST.
    """
    def mark(fn):
        fn._rest_api_metadata = {'extra_methods': extra_methods}
        return fn

    return mark
def skip_arg(count=0):
    """Skip the first `count` arguments when validating against @accepts."""
    def mark(fn):
        fn._skip_arg = count
        return fn

    return mark
def threaded(pool):
    """Run the decorated method on the given thread pool."""
    def mark(fn):
        fn._thread_pool = pool
        return fn

    return mark
| 8,998 | Python | .py | 182 | 41.659341 | 120 | 0.670094 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,461 | bootenv.py | truenas_middleware/src/middlewared/middlewared/plugins/bootenv.py | from middlewared.plugins.zfs_.validation_utils import validate_dataset_name
from middlewared.schema import accepts, Bool, Datetime, Dict, Int, returns, Str
from middlewared.service import (
CallError, CRUDService, ValidationErrors, filterable, item_method, job, private
)
from middlewared.utils import filter_list, Popen, run
from datetime import datetime
import errno
import os
import subprocess
class BootEnvService(CRUDService):
    """
    CRUD service for ZFS boot environments, backed by the `zectl` CLI rather
    than a datastore table.
    """

    class Config:
        datastore_primary_key_type = 'string'
        cli_namespace = 'system.bootenv'

    BE_TOOL = 'zectl'
    ENTRY = Dict(
        'bootenv_entry',
        Str('id'),
        Str('realname'),
        Str('name'),
        Str('active'),
        Bool('activated'),
        Bool('can_activate'),
        Str('mountpoint'),
        Str('space'),
        Datetime('created'),
        Bool('keep'),
        Int('rawspace'),
        additional_attrs=True
    )

    # Human-readable size ladder used by query(): (suffix, divisor) pairs in
    # ascending order; a value belongs to a unit while it is < divisor * 1024.
    SIZE_UNITS = (
        ('B', 1),
        ('K', 1024),
        ('M', 1024 ** 2),
        ('G', 1024 ** 3),
        ('T', 1024 ** 4),
        ('P', 1024 ** 5),
        ('E', 1024 ** 6),
        ('Z', 1024 ** 7),
    )

    @staticmethod
    def _format_size(rawspace):
        """
        Render `rawspace` bytes as a rounded human-readable string, e.g. '2.5K'.

        Fixes the previous hand-written if/elif ladder, whose exbibyte branch
        was unreachable (`1152921504606846976 <= x < 1152921504606846976`) and
        whose fallback labeled values 'Z' while dividing by the 'E' divisor.
        """
        for suffix, divisor in BootEnvService.SIZE_UNITS:
            if rawspace < divisor * 1024:
                break
        # Loop falls through with the last ('Z') pair for anything larger.
        return f'{round(rawspace / divisor, 2)}{suffix}'

    @filterable
    def query(self, filters, options):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        results = []

        cp = subprocess.run([self.BE_TOOL, 'list', '-H'], capture_output=True, text=True)
        # Origins of every dataset on the system; used below to detect which
        # snapshots of a BE have been cloned (those won't free space on delete).
        datasets_origins = [
            d['properties']['origin']['parsed']
            for d in self.middleware.call_sync('zfs.dataset.query', [], {'extra': {'properties': ['origin']}})
        ]
        boot_pool = self.middleware.call_sync('boot.pool_name')

        for line in cp.stdout.strip().split('\n'):
            fields = line.split('\t')
            name = fields[0]
            if len(fields) > 5 and fields[5] != '-':
                # Sixth column, when present, overrides the display name.
                name = fields[5]
            be = {
                'id': name,
                'realname': fields[0],
                'name': name,
                'active': fields[1],
                # 'N' in the active column marks the currently booted BE.
                'activated': 'n' in fields[1].lower(),
                'can_activate': False,
                'mountpoint': fields[2],
                'space': None,
                'created': datetime.strptime(fields[3], '%Y-%m-%d %H:%M'),
                'keep': False,
                'rawspace': None
            }

            ds = self.middleware.call_sync('zfs.dataset.query', [
                ('id', '=', rf'{boot_pool}/ROOT/{fields[0]}'),
            ], {'extra': {'snapshots': True}})
            if ds:
                ds = ds[0]
                snapshot = None
                origin = ds['properties']['origin']['parsed']
                if '@' in origin:
                    snapshot = self.middleware.call_sync('zfs.snapshot.query', [('id', '=', origin)])
                    if snapshot:
                        snapshot = snapshot[0]
                if f'{self.BE_TOOL}:keep' in ds['properties']:
                    if ds['properties'][f'{self.BE_TOOL}:keep']['value'] == 'True':
                        be['keep'] = True
                    elif ds['properties'][f'{self.BE_TOOL}:keep']['value'] == 'False':
                        be['keep'] = False

                # Estimate the space freed by deleting this BE: the dataset's
                # own usage plus children, plus every snapshot that has not
                # been cloned elsewhere, plus (for leaf BEs only) the origin
                # snapshot which would be destroyed with it. BEs with promoted
                # descendants cannot be predicted exactly — deleting them
                # promotes descendants and the final origin depends on the
                # last promotion — so this is a best-effort figure based on
                # current consumption.
                be['rawspace'] = ds['properties']['usedbydataset']['parsed'] + ds[
                    'properties']['usedbychildren']['parsed']

                children = False
                for snap in ds['snapshots']:
                    if snap['name'] not in datasets_origins:
                        be['rawspace'] += self.middleware.call_sync(
                            'zfs.snapshot.get_instance', snap['name'], {'extra': {'properties': ['used']}}
                        )['properties']['used']['parsed']
                    else:
                        children = True

                if snapshot and not children:
                    # Leaf BE: its origin snapshot goes away along with it.
                    be['rawspace'] += snapshot['properties']['used']['parsed']

                be['space'] = self._format_size(be['rawspace'])
                be['can_activate'] = (
                    'truenas:kernel_version' in ds['properties'] or
                    'truenas:12' in ds['properties']
                )

            results.append(be)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    @returns(Bool('successfully_activated'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        be = self.middleware.call_sync('bootenv.query', [['id', '=', oid]], {'get': True})
        if not be['can_activate']:
            raise CallError('This BE cannot be activated')

        try:
            subprocess.run([self.BE_TOOL, 'activate', oid], capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as cpe:
            # NOTE(review): error text is taken from stdout here but from
            # stderr in do_update — confirm which stream zectl reports on.
            raise CallError(f'Failed to activate BE: {cpe.stdout.strip()}')
        else:
            return True

    @item_method
    @accepts(
        Str('id'),
        Dict(
            'attributes',
            Bool('keep', default=False),
        )
    )
    @returns(Bool('successfully_set_attribute'))
    async def set_attribute(self, oid, attrs):
        """
        Sets attributes boot environment `id`.

        Currently only `keep` attribute is allowed.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        boot_env = await self.get_instance(oid)
        dsname = f'{boot_pool}/ROOT/{boot_env["realname"]}'
        ds = await self.middleware.call('zfs.dataset.query', [('id', '=', dsname)])
        if not ds:
            raise CallError(f'BE {oid!r} does not exist.', errno.ENOENT)
        # `keep` is persisted as a zectl user property on the BE dataset.
        await self.middleware.call('zfs.dataset.update', dsname, {
            'properties': {f'{self.BE_TOOL}:keep': {'value': str(attrs['keep'])}},
        })
        return True

    @accepts(Dict(
        'bootenv_create',
        Str('name', required=True),
        Str('source', required=True),
    ))
    @returns(Str('bootenv_name'))
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        If a new boot environment is desired which is a clone of another boot environment, `source` can be passed.
        Then, a new boot environment of `name` is created using boot environment `source` by cloning it.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        # -r: recursive; -e: clone from the existing BE dataset at `source`.
        args = [self.BE_TOOL, 'create']
        args += [
            '-r', '-e', os.path.join(
                await self.middleware.call('boot.pool_name'), 'ROOT', data['source']
            )
        ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']

    @accepts(Str('id'), Dict(
        'bootenv_update',
        Str('name', required=True),
    ))
    @returns(Str('bootenv_name'))
    async def do_update(self, oid, data):
        """
        Update `id` boot environment name with a new provided valid `name`.
        """
        await self.get_instance(oid)

        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_update', data['name'])
        verrors.check()

        try:
            await run(self.BE_TOOL, 'rename', oid, data['name'], encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to update boot environment: {cpe.stderr}')
        return data['name']

    async def _clean_be_name(self, verrors, schema, name):
        """Validate `name` is unused and a legal dataset name, populating `verrors`."""
        # The f-string renders `${1}` as the literal awk field `$1`, so this
        # lists the first column (BE names) of `zectl list -H`.
        beadm_names = (await run(
            ["sh", "-c", f"{self.BE_TOOL} list -H | awk '{{print ${1}}}'"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )).stdout.decode().split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists', errno.EEXIST)

        if not validate_dataset_name(name):
            verrors.add(
                f'{schema}.name',
                f'Invalid BE name {name!r}. "test1, ix-test, ix_test" are valid examples.'
            )

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    async def do_delete(self, job, oid):
        """
        Delete `id` boot environment. This removes the clone from the system.
        """
        be = await self.get_instance(oid)
        try:
            await run(self.BE_TOOL, 'destroy', '-F', be['id'], encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to delete boot environment: {cpe.stdout}')
        return True

    @private
    async def promote_current_be_datasets(self):
        """Promote cloned datasets under the active BE so their origins become deletable."""
        boot_pool = await self.middleware.call("boot.pool_name")
        be = (await self.middleware.call("bootenv.query", [["activated", "=", True]], {"get": True}))["name"]
        for dataset in await self.middleware.call("zfs.dataset.query", [["id", "^", f"{boot_pool}/ROOT/{be}/"]]):
            if origin := dataset["properties"]["origin"]["value"]:
                self.middleware.logger.info(f"Promoting dataset {dataset['name']} as it is a clone of {origin}")
                try:
                    await self.middleware.call("pool.dataset.promote", dataset["name"])
                except Exception as e:
                    self.middleware.logger.error(f"Error promoting dataset: {e}")
async def setup(middleware):
if not await middleware.call("system.ready"):
# Installer clones `/var/log` dataset of the previous install to avoid copying logs. When booting, we must
# promote the clone to be an independent dataset so that the origin dataset becomes deletable.
# Only perform this operation on boot time to save a few seconds on middleware restart.
try:
await middleware.call("bootenv.promote_current_be_datasets")
except Exception:
middleware.logger.error("Unhandled exception promoting active BE datasets", exc_info=True)
| 12,681 | Python | .py | 265 | 35.664151 | 117 | 0.555277 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,462 | fcport.py | truenas_middleware/src/middlewared/middlewared/plugins/fcport.py | import middlewared.sqlalchemy as sa
class FCPortModel(sa.Model):
    # Maps a physical Fibre Channel port to a target row.
    __tablename__ = 'services_fibrechanneltotarget'

    id = sa.Column(sa.Integer(), primary_key=True)
    # FC port identifier string (schema caps it at 10 characters).
    fc_port = sa.Column(sa.String(10))
    # Nullable FK into services_iscsitarget; indexed for lookups by target.
    fc_target_id = sa.Column(sa.ForeignKey('services_iscsitarget.id'), nullable=True, index=True)
| 308 | Python | .py | 6 | 47.166667 | 97 | 0.735786 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,463 | filesystem.py | truenas_middleware/src/middlewared/middlewared/plugins/filesystem.py | import binascii
import errno
import functools
import os
import pathlib
import shutil
import stat as statlib
import time
import pyinotify
from itertools import product
from middlewared.event import EventSource
from middlewared.plugins.pwenc import PWENC_FILE_SECRET, PWENC_FILE_SECRET_MODE
from middlewared.plugins.docker.state_utils import IX_APPS_DIR_NAME
from middlewared.plugins.filesystem_ import chflags
from middlewared.schema import accepts, Bool, Dict, Float, Int, List, Ref, returns, Path, Str, UnixPerm
from middlewared.service import private, CallError, filterable_returns, filterable, Service, job
from middlewared.utils import filter_list
from middlewared.utils.filesystem import attrs, stat_x
from middlewared.utils.filesystem.acl import acl_is_present
from middlewared.utils.filesystem.constants import FileType
from middlewared.utils.filesystem.directory import DirectoryIterator, DirectoryRequestMask
from middlewared.utils.filesystem.utils import timespec_convert_float
from middlewared.utils.mount import getmntinfo
from middlewared.utils.nss import pwd, grp
from middlewared.utils.path import FSLocation, path_location, is_child_realpath
class FilesystemService(Service):
    class Config:
        # Hide the whole filesystem namespace from the interactive CLI.
        cli_private = True
@accepts(Str('path'), roles=['FILESYSTEM_ATTRS_READ'])
@returns(Bool())
def is_immutable(self, path):
"""
Retrieves boolean which is set when immutable flag is set on `path`.
"""
stx_attrs = self.stat(path)['attributes']
return 'IMMUTABLE' in stx_attrs
    @accepts(Bool('set_flag'), Str('path'), roles=['FILESYSTEM_ATTRS_WRITE'])
    @returns()
    def set_immutable(self, set_flag, path):
        """
        Set/Unset immutable flag at `path`.

        `set_flag` when set will set immutable flag and when unset will unset immutable flag at `path`.
        """
        # Delegation point: the chflags helper performs the actual flag change.
        chflags.set_immutable(path, set_flag)
    @accepts(Dict(
        'set_zfs_file_attributes',
        Path('path', required=True),
        Dict(
            'zfs_file_attributes',
            Bool('readonly'),
            Bool('hidden'),
            Bool('system'),
            Bool('archive'),
            Bool('immutable'),
            Bool('nounlink'),
            Bool('appendonly'),
            Bool('offline'),
            Bool('sparse'),
            register=True
        ),
    ), roles=['FILESYSTEM_ATTRS_WRITE'], audit='Filesystem set ZFS attributes', audit_extended=lambda data: data['path'])
    @returns()
    def set_zfs_attributes(self, data):
        """
        Set special ZFS-related file flags on the specified path

        `readonly` - this maps to READONLY MS-DOS attribute. When set, file may not be
        written to (toggling does not impact existing file opens).

        `hidden` - this maps to HIDDEN MS-DOS attribute. When set, the SMB HIDDEN flag
        is set and file is "hidden" from the perspective of SMB clients.

        `system` - this maps to SYSTEM MS-DOS attribute. Is presented to SMB clients, but
        has no impact on local filesystem.

        `archive` - this maps to ARCHIVE MS-DOS attribute. Value is reset to True whenever
        file is modified.

        `immutable` - file may not be altered or deleted. Also appears as IMMUTABLE in
        attributes in `filesystem.stat` output and as STATX_ATTR_IMMUTABLE in statx() response.

        `nounlink` - file may be altered but not deleted.

        `appendonly` - file may only be opened with O_APPEND flag. Also appears as APPEND in
        attributes in `filesystem.stat` output and as STATX_ATTR_APPEND in statx() response.

        `offline` - this maps to OFFLINE MS-DOS attribute. Is presented to SMB clients, but
        has no impact on local filesystem.

        `sparse` - maps to SPARSE MS-DOS attribute. Is presented to SMB clients, but has
        no impact on local filesystem.
        """
        # Apply the requested attribute dict on the path via the attrs utility module.
        return attrs.set_zfs_file_attributes_dict(data['path'], data['zfs_file_attributes'])
@accepts(Str('path'), roles=['FILESYSTEM_ATTRS_READ'])
@returns(Ref('zfs_file_attributes'))
def get_zfs_attributes(self, path):
"""
Get the current ZFS attributes for the file at the given path
"""
fd = os.open(path, os.O_RDONLY)
try:
attr_mask = attrs.fget_zfs_file_attributes(fd)
finally:
os.close(fd)
return attrs.zfs_attributes_to_dict(attr_mask)
@private
def is_child(self, child, parent):
for to_check in product(
child if isinstance(child, list) else [child],
parent if isinstance(parent, list) else [parent]
):
if is_child_realpath(to_check[0], to_check[1]):
return True
return False
@private
def is_dataset_path(self, path):
return path.startswith('/mnt/') and os.stat(path).st_dev != os.stat('/mnt').st_dev
@private
@filterable
def mount_info(self, filters, options):
mntinfo = getmntinfo()
return filter_list(list(mntinfo.values()), filters, options)
@accepts(Dict(
'filesystem_mkdir',
Str('path'),
Dict(
'options',
UnixPerm('mode', default='755'),
Bool('raise_chmod_error', default=True)
),
), deprecated=[(
lambda args: len(args) == 1 and isinstance(args[0], str),
lambda mkdir_path: [{
'path': mkdir_path
}]
)], roles=['FILESYSTEM_DATA_WRITE'])
@returns(Ref('path_entry'))
def mkdir(self, data):
"""
Create a directory at the specified path.
The following options are supported:
`mode` - specify the permissions to set on the new directory (0o755 is default).
`raise_chmod_error` - choose whether to raise an exception if the attempt to set
mode fails. In this case, the newly created directory will be removed to prevent
use with unintended permissions.
NOTE: if chmod error is skipped, the resulting `mode` key in mkdir response will
indicate the current permissions on the directory and not the permissions specified
in the mkdir payload
"""
path = data['path']
options = data['options']
mode = int(options['mode'], 8)
p = pathlib.Path(path)
if not p.is_absolute():
raise CallError(f'{path}: not an absolute path.', errno.EINVAL)
if p.exists():
raise CallError(f'{path}: path already exists.', errno.EEXIST)
realpath = os.path.realpath(path)
if not realpath.startswith(('/mnt/', '/root/.ssh', '/home/admin/.ssh', '/home/truenas_admin/.ssh')):
raise CallError(f'{path}: path not permitted', errno.EPERM)
os.mkdir(path, mode=mode)
stat = p.stat()
if statlib.S_IMODE(stat.st_mode) != mode:
# This may happen if requested mode is greater than umask
# or if underlying dataset has restricted aclmode and ACL is present
try:
os.chmod(path, mode)
except Exception:
if options['raise_chmod_error']:
os.rmdir(path)
raise
self.logger.debug(
'%s: failed to set mode %s on path after mkdir call',
path, options['mode'], exc_info=True
)
return {
'name': p.parts[-1],
'path': path,
'realpath': realpath,
'type': 'DIRECTORY',
'size': stat.st_size,
'mode': stat.st_mode,
'acl': False if self.acl_is_trivial(path) else True,
'uid': stat.st_uid,
'gid': stat.st_gid,
'is_mountpoint': False,
'is_ctldir': False,
'xattrs': [],
'zfs_attrs': ['ARCHIVE']
}
@private
def listdir_request_mask(self, select):
""" create request mask for directory listing """
if not select:
# request_mask=None means ALL in the directory iterator
return None
request_mask = 0
for i in select:
# select may be list [key, new_name] to allow
# equivalent of SELECT AS.
selected = i[0] if isinstance(i, list) else i
match selected:
case 'realpath':
request_mask |= DirectoryRequestMask.REALPATH
case 'acl':
request_mask |= DirectoryRequestMask.ACL
case 'zfs_attrs':
request_mask |= DirectoryRequestMask.ZFS_ATTRS
case 'is_ctldir':
request_mask |= DirectoryRequestMask.CTLDIR
case 'xattrs':
request_mask |= DirectoryRequestMask.XATTRS
return request_mask
    @accepts(
        Str('path', required=True),
        Ref('query-filters'),
        Ref('query-options'),
        roles=['FILESYSTEM_ATTRS_READ']
    )
    @filterable_returns(Dict(
        'path_entry',
        Str('name', required=True),
        Path('path', required=True),
        Path('realpath', required=True),
        Str('type', required=True, enum=['DIRECTORY', 'FILE', 'SYMLINK', 'OTHER']),
        Int('size', required=True, null=True),
        Int('allocation_size', required=True, null=True),
        Int('mode', required=True, null=True),
        Int('mount_id', required=True, null=True),
        Bool('acl', required=True, null=True),
        Int('uid', required=True, null=True),
        Int('gid', required=True, null=True),
        Bool('is_mountpoint', required=True),
        Bool('is_ctldir', required=True),
        List(
            'attributes',
            required=True,
            items=[Str('statx_attribute', enum=[attr.name for attr in stat_x.StatxAttr])]
        ),
        List('xattrs', required=True, null=True),
        List('zfs_attrs', required=True, null=True),
        register=True
    ))
    def listdir(self, path, filters, options):
        """
        Get the contents of a directory.

        The select option may be used to optimize listdir performance. Metadata-related
        fields that are not selected will not be retrieved from the filesystem.

        For example {"select": ["path", "type"]} will avoid querying an xattr list and
        ZFS attributes for files in a directory.

        NOTE: an empty list for select (default) is treated as requesting all information.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          allocation_size(int): on-disk size of entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
          is_mountpoint(bool): path is a mountpoint
          is_ctldir(bool): path is within special .zfs directory
          attributes(list): list of statx file attributes that apply to the
          file. See statx(2) manpage for more details.
          xattrs(list): list of extended attribute names.
          zfs_attrs(list): list of ZFS file attributes on file
        """
        path = pathlib.Path(path)
        if not path.exists():
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)
        if not path.is_dir():
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)
        if options.get('count') is True:
            # We're just getting count, drop any unnecessary info
            request_mask = 0
        else:
            request_mask = self.listdir_request_mask(options.get('select', None))
        # None request_mask means "everything"
        if request_mask is None or (request_mask & DirectoryRequestMask.ZFS_ATTRS):
            # Make sure this is actually ZFS before issuing FS ioctls
            try:
                self.get_zfs_attributes(str(path))
            except Exception:
                raise CallError(f'{path}: ZFS attributes are not supported.')
        # If the query filters contain a single equality check on 'type',
        # push that filter down into the directory iterator so non-matching
        # entries are skipped cheaply.
        file_type = None
        for filter_ in filters:
            if filter_[0] not in ['type']:
                continue
            if filter_[1] != '=':
                continue
            if filter_[2] == 'DIRECTORY':
                file_type = FileType.DIRECTORY
            elif filter_[2] == 'FILE':
                file_type = FileType.FILE
            else:
                continue
        if path.absolute() == pathlib.Path('/mnt'):
            # sometimes (on failures) the top-level directory
            # where the zpool is mounted does not get removed
            # after the zpool is exported. WebUI calls this
            # specifying `/mnt` as the path. This is used when
            # configuring shares in the "Path" drop-down. To
            # prevent shares from being configured to point to
            # a path that doesn't exist on a zpool, we'll
            # filter these here.
            filters.extend([['is_mountpoint', '=', True], ['name', '!=', IX_APPS_DIR_NAME]])
        with DirectoryIterator(path, file_type=file_type, request_mask=request_mask) as d_iter:
            return filter_list(d_iter, filters, options)
@accepts(Str('path'), roles=['FILESYSTEM_ATTRS_READ'])
@returns(Dict(
'path_stats',
Str('realpath', required=True),
Int('size', required=True),
Int('allocation_size', required=True),
Int('mode', required=True),
Int('uid', required=True),
Int('gid', required=True),
Float('atime', required=True),
Float('mtime', required=True),
Float('ctime', required=True),
Float('btime', required=True),
Int('dev', required=True),
Int('mount_id', required=True),
Int('inode', required=True),
Int('nlink', required=True),
Bool('is_mountpoint', required=True),
Bool('is_ctldir', required=True),
List(
'attributes',
required=True,
items=[Str('statx_attribute', enum=[attr.name for attr in stat_x.StatxAttr])]
),
Str('user', null=True, required=True),
Str('group', null=True, required=True),
Bool('acl', required=True),
))
def stat(self, _path):
"""
Return filesystem information for a given path.
`realpath(str)`: absolute real path of the entry (if SYMLINK)
`type(str)`: DIRECTORY | FILE | SYMLINK | OTHER
`size(int)`: size of the entry
`allocation_size(int)`: on-disk size of entry
`mode(int)`: file mode/permission
`uid(int)`: user id of file owner
`gid(int)`: group id of file owner
`atime(float)`: timestamp for when file was last accessed.
NOTE: this timestamp may be changed from userspace.
`mtime(float)`: timestamp for when file data was last modified
NOTE: this timestamp may be changed from userspace.
`ctime(float)`: timestamp for when file was last changed.
`btime(float)`: timestamp for when file was initially created.
NOTE: depending on platform this may be changed from userspace.
`dev(int)`: device id of the device containing the file. In the
context of the TrueNAS API, this is sufficient to uniquely identify
a given dataset.
`mount_id(int)`: the mount id for the filesystem underlying the given path.
Bind mounts will have same device id, but different mount IDs. This value
is sufficient to uniquely identify the particular mount which can be used
to identify children of the given mountpoint.
`inode(int)`: inode number of the file. This number uniquely identifies
the file on the given device, but once a file is deleted its inode number
may be reused.
`nlink(int)`: number of hard lnks to the file.
`acl(bool)`: extended ACL is present on file
`is_mountpoint(bool)`: path is a mountpoint
`is_ctldir(bool)`: path is within special .zfs directory
`attributes(list)`: list of statx file attributes that apply to the
file. See statx(2) manpage for more details.
"""
if path_location(_path) is FSLocation.EXTERNAL:
raise CallError(f'{_path} is external to TrueNAS', errno.EXDEV)
path = pathlib.Path(_path)
if not path.is_absolute():
raise CallError(f'{_path}: path must be absolute', errno.EINVAL)
st = stat_x.statx_entry_impl(path, None)
if st is None:
raise CallError(f'Path {_path} not found', errno.ENOENT)
realpath = path.resolve().as_posix() if st['etype'] == 'SYMLINK' else path.absolute().as_posix()
stat = {
'realpath': realpath,
'type': st['etype'],
'size': st['st'].stx_size,
'allocation_size': st['st'].stx_blocks * 512,
'mode': st['st'].stx_mode,
'uid': st['st'].stx_uid,
'gid': st['st'].stx_gid,
'atime': timespec_convert_float(st['st'].stx_atime),
'mtime': timespec_convert_float(st['st'].stx_mtime),
'ctime': timespec_convert_float(st['st'].stx_ctime),
'btime': timespec_convert_float(st['st'].stx_btime),
'mount_id': st['st'].stx_mnt_id,
'dev': os.makedev(st['st'].stx_dev_major, st['st'].stx_dev_minor),
'inode': st['st'].stx_ino,
'nlink': st['st'].stx_nlink,
'is_mountpoint': 'MOUNT_ROOT' in st['attributes'],
'is_ctldir': st['is_ctldir'],
'attributes': st['attributes']
}
try:
stat['user'] = pwd.getpwuid(stat['uid']).pw_name
except KeyError:
stat['user'] = None
try:
stat['group'] = grp.getgrgid(stat['gid']).gr_name
except KeyError:
stat['group'] = None
stat['acl'] = False if self.acl_is_trivial(_path) else True
return stat
@private
@accepts(
Str('path'),
Str('content', max_length=2048000),
Dict(
'options',
Bool('append', default=False),
Int('mode'),
Int('uid'),
Int('gid'),
),
)
def file_receive(self, path, content, options):
"""
Simplified file receiving method for small files.
`content` must be a base 64 encoded file content.
"""
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'ab' if options.get('append') else 'wb+') as f:
f.write(binascii.a2b_base64(content))
if path == PWENC_FILE_SECRET:
# don't allow someone to clobber mode/ownership
os.fchmod(f.fileno(), PWENC_FILE_SECRET_MODE)
os.fchown(f.fileno(), 0, 0)
else:
if mode := options.get('mode'):
os.fchmod(f.fileno(), mode)
# -1 means don't change uid/gid if the one provided is
# the same that is on disk already
os.fchown(f.fileno(), options.get('uid', -1), options.get('gid', -1))
if path == PWENC_FILE_SECRET:
self.middleware.call_sync('pwenc.reset_secret_cache')
return True
@accepts(Str('path'))
@returns()
@job(pipes=["output"])
def get(self, job, path):
"""
Job to get contents of `path`.
"""
if not os.path.isfile(path):
raise CallError(f'{path} is not a file')
with open(path, 'rb') as f:
shutil.copyfileobj(f, job.pipes.output.w)
@accepts(
Str('path'),
Dict(
'options',
Bool('append', default=False),
Int('mode'),
),
)
@returns(Bool('successful_put'))
@job(pipes=["input"])
def put(self, job, path, options):
"""
Job to put contents to `path`.
"""
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if options.get('append'):
openmode = 'ab'
else:
openmode = 'wb+'
try:
with open(path, openmode) as f:
shutil.copyfileobj(job.pipes.input.r, f)
except PermissionError:
raise CallError(f'Unable to put contents at {path!r} as the path exists on a locked dataset', errno.EINVAL)
mode = options.get('mode')
if mode:
os.chmod(path, mode)
return True
    @accepts(Str('path'), roles=['FILESYSTEM_ATTRS_READ'])
    @returns(Dict(
        'path_statfs',
        List('flags', required=True),
        List('fsid', required=True),
        Str('fstype', required=True),
        Str('source', required=True),
        Str('dest', required=True),
        Int('blocksize', required=True),
        Int('total_blocks', required=True),
        Int('free_blocks', required=True),
        Int('avail_blocks', required=True),
        Str('total_blocks_str', required=True),
        Str('free_blocks_str', required=True),
        Str('avail_blocks_str', required=True),
        Int('files', required=True),
        Int('free_files', required=True),
        Int('name_max', required=True),
        Int('total_bytes', required=True),
        Int('free_bytes', required=True),
        Int('avail_bytes', required=True),
        Str('total_bytes_str', required=True),
        Str('free_bytes_str', required=True),
        Str('avail_bytes_str', required=True),
    ))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        if not path.startswith('/mnt/'):
            raise CallError('Path must start with "/mnt/"')
        elif path == '/mnt/':
            raise CallError('Path must include more than "/mnt/"')

        try:
            # O_PATH is sufficient for fstatvfs and the statx mount-id query
            # and avoids requiring read permission on the path itself.
            fd = os.open(path, os.O_PATH)
            try:
                st = os.fstatvfs(fd)
                mntid = stat_x.statx('', dir_fd=fd, flags=stat_x.ATFlags.EMPTY_PATH.value).stx_mnt_id
            finally:
                os.close(fd)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)

        # Merge mount options and superblock options into a single flag list
        # without duplicates.
        mntinfo = getmntinfo(mnt_id=mntid)[mntid]
        flags = mntinfo['mount_opts']
        for flag in mntinfo['super_opts']:
            if flag in flags:
                continue
            flags.append(flag)

        result = {
            'flags': flags,
            'fstype': mntinfo['fs_type'].lower(),
            'source': mntinfo['mount_source'],
            'dest': mntinfo['mountpoint'],
            'blocksize': st.f_frsize,
            'total_blocks': st.f_blocks,
            'free_blocks': st.f_bfree,
            'avail_blocks': st.f_bavail,
            'files': st.f_files,
            'free_files': st.f_ffree,
            'name_max': st.f_namemax,
            'fsid': [str(st.f_fsid)],
            'total_bytes': st.f_blocks * st.f_frsize,
            'free_bytes': st.f_bfree * st.f_frsize,
            'avail_bytes': st.f_bavail * st.f_frsize,
        }
        # String variants are provided for clients that cannot represent
        # 64-bit integers precisely.
        for k in ['total_blocks', 'free_blocks', 'avail_blocks', 'total_bytes', 'free_bytes', 'avail_bytes']:
            result[f'{k}_str'] = str(result[k])
        return result
@accepts(Str('path'), roles=['FILESYSTEM_ATTRS_READ'])
@returns(Bool('paths_acl_is_trivial'))
def acl_is_trivial(self, path):
"""
Returns True if the ACL can be fully expressed as a file mode without losing
any access rules.
"""
if not os.path.exists(path):
raise CallError(f'Path not found [{path}].', errno.ENOENT)
return not acl_is_present(os.listxattr(path))
class FileFollowTailEventSource(EventSource):

    """
    Retrieve last `no_of_lines` specified as an integer argument for a specific `path` and then
    any new lines as they are added. Specified argument has the format `path:no_of_lines` ( `/var/log/messages:3` ).

    `no_of_lines` is optional and if it is not specified it defaults to `3`.

    However, `path` is required for this.
    """

    def parse_arg(self):
        # Split the "path:no_of_lines" argument. rsplit is used so that a ':'
        # appearing earlier in the path does not break parsing.
        if ':' in self.arg:
            path, lines = self.arg.rsplit(':', 1)
            lines = int(lines)
        else:
            path = self.arg
            lines = 3
        return path, lines

    def run_sync(self):
        path, lines = self.parse_arg()
        if not os.path.exists(path):
            # FIXME: Error?
            return

        # Read backwards from the end of the file in bufsize increments until
        # at least `lines` lines have been collected (or start of file hit).
        bufsize = 8192
        fsize = os.stat(path).st_size
        if fsize < bufsize:
            bufsize = fsize
        i = 0
        with open(path, encoding='utf-8', errors='ignore') as f:
            data = []
            while True:
                i += 1
                if bufsize * i > fsize:
                    break
                f.seek(fsize - bufsize * i)
                data.extend(f.readlines())
                if len(data) >= lines or f.tell() == 0:
                    break

            # Emit the initial tail, then stream appended data as it arrives.
            self.send_event('ADDED', fields={'data': ''.join(data[-lines:])})
            f.seek(fsize)
            for data in self._follow_path(path, f):
                self.send_event('ADDED', fields={'data': data})

    def _follow_path(self, path, f):
        # Generator yielding batches of newly appended file content, driven by
        # inotify IN_MODIFY events on `path`.
        queue = []
        watch_manager = pyinotify.WatchManager()
        notifier = pyinotify.Notifier(watch_manager)
        watch_manager.add_watch(path, pyinotify.IN_MODIFY, functools.partial(self._follow_callback, queue, f))

        data = f.read()
        if data:
            yield data

        last_sent_at = time.monotonic()
        interval = 0.5  # For performance reasons do not send websocket events more than twice a second
        while not self._cancel_sync.is_set():
            notifier.process_events()

            # Flush accumulated data at most once per interval.
            if time.monotonic() - last_sent_at >= interval:
                data = "".join(queue)
                if data:
                    yield data
                queue[:] = []
                last_sent_at = time.monotonic()

            if notifier.check_events(timeout=int(interval * 1000)):
                notifier.read_events()

        notifier.stop()

    def _follow_callback(self, queue, f, event):
        # inotify callback: stash any newly readable data for the next flush.
        data = f.read()
        if data:
            queue.append(data)
def setup(middleware):
    # Expose tail-follow functionality as a subscribable event source.
    middleware.register_event_source('filesystem.file_tail_follow', FileFollowTailEventSource)
| 26,640 | Python | .py | 622 | 32.572347 | 121 | 0.586988 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,464 | ipmi.py | truenas_middleware/src/middlewared/middlewared/plugins/ipmi.py | import os
from middlewared.schema import accepts, returns, Bool
from middlewared.service import Service
class IPMIService(Service):

    class Config:
        cli_namespace = 'system.ipmi'

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Bool('ipmi_loaded'))
    def is_loaded(self):
        """Returns a boolean value indicating if /dev/ipmi0 is loaded."""
        # Presence of the device node is used as the load indicator.
        return os.path.exists('/dev/ipmi0')
async def setup(middleware):
    # systemd generates a unit file that doesn't honor presets so when it's started on a system without a
    # BMC device, it always reports as a failure which is expected since no IPMI device exists. Instead
    # we check to see if dmidecode reports an ipmi device via type "38" of the SMBIOS spec. It's not
    # fool-proof but it's the best we got atm.
    if not await middleware.call('system.ready'):
        return
    dmi_info = await middleware.call('system.dmidecode_info')
    if dmi_info['has-ipmi']:
        await middleware.call('service.start', 'openipmi')
| 984 | Python | .py | 18 | 48.611111 | 110 | 0.711157 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,465 | ldap.py | truenas_middleware/src/middlewared/middlewared/plugins/ldap.py | import enum
import errno
import ipaddress
import ldap as pyldap
import os
import socket
import struct
from urllib.parse import urlparse
from middlewared.schema import accepts, returns, Bool, Dict, Int, List, Str, Ref, LDAP_DN
from middlewared.service import job, private, ConfigService, Service, ValidationErrors
from middlewared.service_exception import CallError
import middlewared.sqlalchemy as sa
from middlewared.plugins.ldap_.ldap_client import LdapClient
from middlewared.plugins.ldap_ import constants
from middlewared.utils.directoryservices.constants import DomainJoinResponse, DSStatus, DSType, SSL
from middlewared.utils.directoryservices.ipa_constants import IpaConfigName
from middlewared.utils.directoryservices.krb5_constants import krb5ccache
from middlewared.utils.directoryservices.krb5_error import KRB5Error
from middlewared.validators import Range
class SAMAccountType(enum.Enum):
    # sAMAccountType attribute values as stored in the directory.
    # NOTE: several names intentionally share a value (e.g. SAM_USER_OBJECT
    # and SAM_NORMAL_USER_ACCOUNT are both 0x30000000), making the later
    # names aliases of the earlier ones.
    SAM_DOMAIN_OBJECT = 0x0
    SAM_GROUP_OBJECT = 0x10000000
    SAM_NON_SECURITY_GROUP_OBJECT = 0x10000001
    SAM_ALIAS_OBJECT = 0x20000000
    SAM_NON_SECURITY_ALIAS_OBJECT = 0x20000001
    SAM_USER_OBJECT = 0x30000000
    SAM_NORMAL_USER_ACCOUNT = 0x30000000
    SAM_MACHINE_ACCOUNT = 0x30000001
    SAM_TRUST_ACCOUNT = 0x30000002
    SAM_APP_BASIC_GROUP = 0x40000000
    SAM_APP_QUERY_GROUP = 0x40000001
class LDAPClient(Service):
class Config:
private = True
    @accepts(Dict(
        'ldap-configuration',
        List('uri_list', required=True),
        Str('bind_type', enum=['ANONYMOUS', 'PLAIN', 'GSSAPI', 'EXTERNAL'], required=True),
        LDAP_DN('basedn', required=True),
        Dict(
            'credentials',
            LDAP_DN('binddn', default=''),
            Str('bindpw', default='', private=True),
        ),
        Dict(
            'security',
            Str('ssl', enum=["OFF", "ON", "START_TLS"]),
            Str('sasl', enum=['SIGN', 'SEAL'], default='SEAL'),
            Str('client_certificate', null=True, default=''),
            Bool('validate_certificates', default=True),
        ),
        Dict(
            'options',
            Int('timeout', default=30, validators=[Range(min_=1, max_=45)]),
            Int('dns_timeout', default=5, validators=[Range(min_=1, max_=45)]),
        ),
        register=True,
    ))
    def validate_credentials(self, data):
        """
        Verify that credentials are working by closing any existing LDAP bind
        and performing a fresh bind.
        """
        try:
            LdapClient.open(data, True)
        except Exception as e:
            # normalize any LDAP library exception into a CallError
            self._convert_exception(e)
def _name_to_errno(self, ldaperr):
err = errno.EFAULT
if ldaperr == "INVALID_CREDENTIALS":
err = errno.EPERM
elif ldaperr == "NO_SUCH_OBJECT":
err = errno.ENOENT
elif ldaperr == "INVALID_DN_SYNTAX":
err = errno.EINVAL
return err
def _local_error_to_errno(self, info):
err = errno.EFAULT
err_summary = None
if 'Server not found in Kerberos database' in info:
err = errno.ENOENT
err_summary = "KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN"
return (err, err_summary)
def _convert_exception(self, ex):
if issubclass(type(ex), pyldap.LDAPError) and ex.args:
desc = ex.args[0].get('desc')
info = ex.args[0].get('info')
err_str = f"{desc}: {info}" if info else desc
err = self._name_to_errno(type(ex).__name__)
raise CallError(err_str, err, type(ex).__name__)
if issubclass(ex, pyldap.LOCAL_ERROR):
info = ex.args[0].get('info')
err, err_summary = self._local_error_to_errno(info)
raise CallError(info, err, err_summary)
else:
raise CallError(str(ex))
def parse_results(self, results):
res = []
for r in results:
parsed_data = {}
if len(r) > 1 and isinstance(r[1], dict):
for k, v in r[1].items():
try:
v = list(i.decode() for i in v)
except Exception:
v = list(str(i) for i in v)
parsed_data.update({k: v})
res.append({
'dn': r[0],
'data': parsed_data
})
else:
self.logger.debug("Unable to parse results: %s", r)
return res
    @accepts(Dict(
        'get-root-dse',
        Ref('ldap-configuration'),
    ))
    def get_root_dse(self, data):
        """
        root DSE query is defined in RFC4512 as a search operation
        with an empty baseObject, scope of baseObject, and a filter of
        "(objectClass=*)"
        In theory this should be accessible with an anonymous bind. In practice,
        it's better to use proper auth because configurations can vary wildly.
        """
        # empty base DN + SCOPE_BASE per RFC4512 root DSE definition
        results = LdapClient.search(
            data['ldap-configuration'], '', pyldap.SCOPE_BASE, '(objectclass=*)'
        )
        return self.parse_results(results)
    @accepts(Dict(
        'get-dn',
        LDAP_DN('dn', default='', null=True),
        Str('scope', default='SUBTREE', enum=['BASE', 'SUBTREE']),
        Ref('ldap-configuration'),
    ))
    def get_dn(self, data):
        """
        Search the specified DN (falling back to the configured base DN when
        `dn` is empty) with the requested scope and return parsed results.
        """
        results = LdapClient.search(
            data['ldap-configuration'],
            data['dn'] or data['ldap-configuration']['basedn'],
            pyldap.SCOPE_SUBTREE if data['scope'] == 'SUBTREE' else pyldap.SCOPE_BASE,
            '(objectclass=*)'
        )
        return self.parse_results(results)
    @accepts()
    def close_handle(self):
        # Delegate connection/handle cleanup to LdapClient.close().
        LdapClient.close()
class LDAPModel(sa.Model):
    # Database table backing the LDAP directory service configuration.
    __tablename__ = 'directoryservice_ldap'

    # Core connection / bind settings
    id = sa.Column(sa.Integer(), primary_key=True)
    ldap_hostname = sa.Column(sa.String(120))
    ldap_basedn = sa.Column(sa.String(120))
    ldap_binddn = sa.Column(sa.String(256))
    ldap_bindpw = sa.Column(sa.EncryptedText())
    ldap_anonbind = sa.Column(sa.Boolean())
    ldap_ssl = sa.Column(sa.String(120))
    ldap_timeout = sa.Column(sa.Integer())
    ldap_dns_timeout = sa.Column(sa.Integer())
    ldap_has_samba_schema = sa.Column(sa.Boolean())
    ldap_auxiliary_parameters = sa.Column(sa.Text())
    ldap_schema = sa.Column(sa.String(120))
    ldap_enable = sa.Column(sa.Boolean())
    ldap_certificate_id = sa.Column(sa.ForeignKey('system_certificate.id'), index=True, nullable=True)
    ldap_kerberos_realm_id = sa.Column(sa.ForeignKey('directoryservice_kerberosrealm.id'), index=True, nullable=True)
    ldap_kerberos_principal = sa.Column(sa.String(255))
    ldap_validate_certificates = sa.Column(sa.Boolean(), default=True)
    ldap_disable_freenas_cache = sa.Column(sa.Boolean())
    # Optional search-base overrides
    ldap_base_user = sa.Column(sa.String(256), nullable=True)
    ldap_base_group = sa.Column(sa.String(256), nullable=True)
    ldap_base_netgroup = sa.Column(sa.String(256), nullable=True)
    # Optional passwd attribute-map overrides
    ldap_user_object_class = sa.Column(sa.String(256), nullable=True)
    ldap_user_name = sa.Column(sa.String(256), nullable=True)
    ldap_user_uid = sa.Column(sa.String(256), nullable=True)
    ldap_user_gid = sa.Column(sa.String(256), nullable=True)
    ldap_user_gecos = sa.Column(sa.String(256), nullable=True)
    ldap_user_home_directory = sa.Column(sa.String(256), nullable=True)
    ldap_user_shell = sa.Column(sa.String(256), nullable=True)
    # Optional shadow attribute-map overrides
    ldap_shadow_object_class = sa.Column(sa.String(256), nullable=True)
    ldap_shadow_last_change = sa.Column(sa.String(256), nullable=True)
    ldap_shadow_min = sa.Column(sa.String(256), nullable=True)
    ldap_shadow_max = sa.Column(sa.String(256), nullable=True)
    ldap_shadow_warning = sa.Column(sa.String(256), nullable=True)
    ldap_shadow_inactive = sa.Column(sa.String(256), nullable=True)
    ldap_shadow_expire = sa.Column(sa.String(256), nullable=True)
    # Optional group attribute-map overrides
    ldap_group_object_class = sa.Column(sa.String(256), nullable=True)
    ldap_group_gid = sa.Column(sa.String(256), nullable=True)
    ldap_group_member = sa.Column(sa.String(256), nullable=True)
    # Optional netgroup attribute-map overrides
    ldap_netgroup_object_class = sa.Column(sa.String(256), nullable=True)
    ldap_netgroup_member = sa.Column(sa.String(256), nullable=True)
    ldap_netgroup_triple = sa.Column(sa.String(256), nullable=True)
    ldap_server_type = sa.Column(sa.String(256), nullable=True)
class LDAPService(ConfigService):
class Config:
service = "ldap"
datastore = 'directoryservice.ldap'
datastore_extend = "ldap.ldap_extend"
datastore_prefix = "ldap_"
cli_namespace = "directory_service.ldap"
role_prefix = "DIRECTORY_SERVICE"
ENTRY = Dict(
'ldap_update',
List('hostname', default=None),
LDAP_DN('basedn'),
LDAP_DN('binddn'),
Str('bindpw', private=True),
Bool('anonbind', default=False),
Ref('ldap_ssl_choice', 'ssl'),
Int('certificate', null=True),
Bool('validate_certificates', default=True),
Bool('disable_freenas_cache'),
Int('timeout', default=30),
Int('dns_timeout', default=5),
Int('kerberos_realm', null=True),
Str('kerberos_principal'),
Str('auxiliary_parameters', max_length=None),
Ref('nss_info_ldap', 'schema'),
Bool('enable'),
constants.LDAP_SEARCH_BASES_SCHEMA,
constants.LDAP_ATTRIBUTE_MAP_SCHEMA,
register=True,
)
@private
async def ldap_conf_to_client_config(self, data=None):
if data is None:
data = await self.config()
if not data['enable']:
raise CallError("LDAP directory service is not enabled.")
client_config = {
"uri_list": data["uri_list"],
"basedn": data.get("basedn", ""),
"credentials": {
"binddn": "",
"bindpw": "",
},
"security": {
"ssl": data["ssl"],
"sasl": "SEAL",
"client_certificate": data["cert_name"],
"validate_certificates": data["validate_certificates"],
},
"options": {
"timeout": data["timeout"],
"dns_timeout": data["dns_timeout"],
}
}
if data['anonbind']:
client_config['bind_type'] = 'ANONYMOUS'
elif data['cert_name']:
client_config['bind_type'] = 'EXTERNAL'
elif data['kerberos_realm']:
client_config['bind_type'] = 'GSSAPI'
else:
client_config['bind_type'] = 'PLAIN'
client_config['credentials'] = {
'binddn': data['binddn'],
'bindpw': data['bindpw']
}
return client_config
    @private
    async def ldap_extend(self, data):
        # Normalize raw DB row into API form: comma-separated hostname string
        # becomes a list, enum-like strings are uppercased, and foreign keys
        # are flattened to their ids.
        data['hostname'] = data['hostname'].split(',') if data['hostname'] else []
        for key in ["ssl", "schema"]:
            data[key] = data[key].upper()
        if data["certificate"] is not None:
            data["cert_name"] = data['certificate']['cert_name']
            data["certificate"] = data['certificate']['id']
        else:
            data["cert_name"] = None
        if data["kerberos_realm"] is not None:
            data["kerberos_realm"] = data["kerberos_realm"]["id"]
        data['uri_list'] = await self.hostnames_to_uris(data)
        # The following portion of ldap_extend shifts ldap search base and map
        # parameter overrides into their own separate dictionaries
        # "search_bases" and "attribute_maps" respectively
        data[constants.LDAP_SEARCH_BASES_SCHEMA_NAME] = {}
        data[constants.LDAP_ATTRIBUTE_MAP_SCHEMA_NAME] = {
            nss_type: {} for nss_type in constants.LDAP_ATTRIBUTE_MAPS.keys()
        }
        for key in constants.LDAP_SEARCH_BASE_KEYS:
            data[constants.LDAP_SEARCH_BASES_SCHEMA_NAME][key] = data.pop(key, None)
        for nss_type, keys in constants.LDAP_ATTRIBUTE_MAPS.items():
            for key in keys:
                data[constants.LDAP_ATTRIBUTE_MAP_SCHEMA_NAME][nss_type][key] = data.pop(key, None)
        return data
    @private
    async def ldap_compress(self, data):
        # Inverse of ldap_extend: convert API form back into the flat shape
        # stored in the database.
        data['hostname'] = ','.join(data['hostname'])
        for key in ["ssl", "schema"]:
            data[key] = data[key].lower()
        # derived fields that have no DB column
        data.pop('uri_list')
        data.pop('cert_name')
        search_bases = data.pop(constants.LDAP_SEARCH_BASES_SCHEMA_NAME, {})
        attribute_maps = data.pop(constants.LDAP_ATTRIBUTE_MAP_SCHEMA_NAME, {})
        # Flatten the search_bases and attribute_maps prior to DB insertion
        for key in constants.LDAP_SEARCH_BASE_KEYS:
            data[key] = search_bases.get(key)
        for nss_type, keys in constants.LDAP_ATTRIBUTE_MAPS.items():
            for key in keys:
                data[key] = attribute_maps[nss_type].get(key)
        return data
    @accepts(roles=['DIRECTORY_SERVICE_READ'])
    @returns(List('schema_choices', items=[Ref('nss_info_ldap')]))
    async def schema_choices(self):
        """
        Returns list of available LDAP schema choices.
        """
        # delegated to the shared directoryservices helper
        return await self.middleware.call('directoryservices.nss_info_choices', 'LDAP')
    @accepts(roles=['DIRECTORY_SERVICE_READ'])
    @returns(List('ssl_choices', items=[Ref('ldap_ssl_choice', 'ssl')]))
    async def ssl_choices(self):
        """
        Returns list of SSL choices.
        """
        # delegated to the shared directoryservices helper
        return await self.middleware.call('directoryservices.ssl_choices', 'LDAP')
    @private
    async def hostnames_to_uris(self, data):
        # Convert configured hostnames (optionally "host:port") into full
        # ldap:// or ldaps:// URIs, defaulting the port from the SSL mode
        # (636 for ldaps, 389 for ldap) when none was specified.
        ret = []
        for h in data['hostname']:
            proto = 'ldaps' if SSL(data['ssl']) == SSL.USESSL else 'ldap'
            parsed = urlparse(f"{proto}://{h}")
            try:
                port = parsed.port
                host = parsed.netloc if not parsed.port else parsed.netloc.rsplit(':', 1)[0]
            except ValueError:
                """
                ParseResult.port will raise a ValueError if the port is not an int
                Ignore for now. ValidationError will be raised in common_validate()
                """
                host, port = h.rsplit(':', 1)

            if port is None:
                port = 636 if SSL(data['ssl']) == SSL.USESSL else 389

            uri = f"{proto}://{host}:{port}"
            ret.append(uri)

        return ret
@private
async def common_validate(self, new, old, verrors):
if not new["enable"]:
return
ad_enabled = (await self.middleware.call("activedirectory.config"))['enable']
if ad_enabled:
verrors.add(
"ldap_update.enable",
"LDAP service may not be enabled while Active Directory service is enabled."
)
if new["certificate"]:
verrors.extend(await self.middleware.call(
"certificate.cert_services_validation",
new["certificate"], "ldap_update.certificate", False
))
if not new["bindpw"] and not new["kerberos_principal"] and not new["anonbind"]:
verrors.add(
"ldap_update.binddn",
"Bind credentials or kerberos keytab are required for an authenticated bind."
)
if new["bindpw"] and new["kerberos_principal"]:
new["bindpw"] = ""
if not new["basedn"]:
verrors.add(
"ldap_update.basedn",
"The basedn parameter is required."
)
if not new["hostname"]:
verrors.add(
"ldap_update.hostname",
"The LDAP hostname parameter is required."
)
for idx, uri in enumerate(new["uri_list"]):
parsed = urlparse(uri)
try:
port = parsed.port
except ValueError:
verrors.add(f"ldap_update.hostname.{idx}",
f"Invalid port number: [{port}].")
@private
async def convert_ldap_err_to_verr(self, data, e, verrors):
if e.extra == "INVALID_CREDENTIALS":
verrors.add('ldap_update.binddn',
'Remote LDAP server returned response that '
'credentials are invalid.')
elif e.extra == "STRONG_AUTH_NOT_SUPPORTED" and data['certificate']:
verrors.add('ldap_update.certificate',
'Certificate-based authentication is not '
f'supported by remote LDAP server: {e.errmsg}.')
elif e.extra == "NO_SUCH_OBJECT":
verrors.add('ldap_update.basedn',
'Remote LDAP server returned "NO_SUCH_OBJECT". This may '
'indicate that the base DN is syntactically correct, but does '
'not exist on the server.')
elif e.extra == "INVALID_DN_SYNTAX":
verrors.add('ldap_update.basedn',
'Remote LDAP server returned that the base DN is '
'syntactically invalid.')
elif e.extra == "KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN":
verrors.add('ldap_update.kerberos_principal',
'SASL GSSAPI failed with error (Client not found in kerberos '
'database). This may indicate a misconfiguration in DNS server '
'triggering a failure to validate the kerberos principal via '
'reverse lookup zone. Exact error returned by kerberos library is '
f'as follows: {e.errmsg}')
elif e.extra:
verrors.add('ldap_update', f'[{e.extra}]: {e.errmsg}')
else:
verrors.add('ldap_update', e.errmsg)
@private
async def object_sid_to_string(self, objectsid):
version = struct.unpack('B', objectsid[0:1])[0]
if version != 1:
raise CallError(f"{version}: Invalid SID version")
sid_length = struct.unpack('B', objectsid[1:2])[0]
authority = struct.unpack(b'>Q', b'\x00\x00' + objectsid[2:8])[0]
objectsid = objectsid[8:]
if len(objectsid) != 4 * sid_length:
raise CallError("Invalid SID length")
output_sid = f'S-{version}-{authority}'
for v in struct.iter_unpack('<L', objectsid):
output_sid += f'-{v[0]}'
return output_sid
@private
async def ldap_validate(self, old, data, verrors):
for idx, h in enumerate(data['uri_list']):
host, port = urlparse(h).netloc.rsplit(':', 1)
try:
await self.middleware.call('ldap.port_is_listening', host, int(port), data['dns_timeout'])
except Exception as e:
verrors.add(
f'ldap_update.hostname.{idx}',
f'Failed to open socket to remote LDAP server: {e}'
)
return
try:
await self.validate_credentials(data)
except CallError as e:
await self.convert_ldap_err_to_verr(data, e, verrors)
return
if not set(old['hostname']) & set(data['hostname']):
# No overlap between old and new hostnames and so force server_type autodetection
data['server_type'] = None
if not data['server_type'] and data['enable']:
data['server_type'] = await self.autodetect_ldap_settings(data)
if data['server_type'] == constants.SERVER_TYPE_ACTIVE_DIRECTORY:
verrors.add(
'ldap_update.hostname',
'Active Directory plugin must be used to join Active Directory domains.'
)
@private
async def autodetect_ldap_settings(self, data):
"""
The root dse on remote LDAP server contains basic LDAP configuration information.
By the time this method is called we have already been able to complete an LDAP
bind with the provided credentials.
This method provides basic LDAP server implementation specific configuration
parameters that can later be fine-tuned by the admin if they are undesired.
"""
rootdse = (await self.middleware.call('ldap.get_root_DSE', data))[0]['data']
if 'vendorName' in rootdse:
"""
FreeIPA domain. For now assume in this case that vendorName will
be 389 project.
"""
if rootdse['vendorName'][0] != '389 Project':
self.logger.debug(
'%s: unrecognized vendor name, setting LDAP server type to GENERIC',
rootdse['vendorName'][0]
)
return constants.SERVER_TYPE_GENERIC
default_naming_context = rootdse['defaultnamingcontext'][0]
data.update({'schema': 'RFC2307BIS'})
bases = data[constants.LDAP_SEARCH_BASES_SCHEMA_NAME]
bases[constants.SEARCH_BASE_USER] = f'cn=users,cn=accounts,{default_naming_context}'
bases[constants.SEARCH_BASE_GROUP] = f'cn=groups,cn=accounts,{default_naming_context}'
bases[constants.SEARCH_BASE_NETGROUP] = f'cn=ng,cn=compat,{default_naming_context}'
return constants.SERVER_TYPE_FREEIPA
elif 'domainControllerFunctionality' in rootdse:
"""
ActiveDirectory domain.
"""
return constants.SERVER_TYPE_ACTIVE_DIRECTORY
elif 'objectClass' in rootdse:
"""
OpenLDAP
"""
if 'OpenLDAProotDSE' not in rootdse['objectClass']:
self.logger.debug(
'%s: unexpected objectClass values in LDAP root DSE',
rootdse['objectClass']
)
return constants.SERVER_TYPE_GENERIC
return constants.SERVER_TYPE_OPENLDAP
return constants.SERVER_TYPE_GENERIC
@accepts(Ref('ldap_update'), audit='LDAP configuration update')
@job(lock="ldap_start_stop")
async def do_update(self, job, data):
"""
`hostname` list of ip addresses or hostnames of LDAP servers with
which to communicate in order of preference. Failover only occurs
if the current LDAP server is unresponsive.
`basedn` specifies the default base DN to use when performing ldap
operations. The base must be specified as a Distinguished Name in LDAP
format.
`binddn` specifies the default bind DN to use when performing ldap
operations. The bind DN must be specified as a Distinguished Name in
LDAP format.
`anonbind` use anonymous authentication.
`ssl` establish SSL/TLS-protected connections to the LDAP server(s).
GSSAPI signing is disabled on SSL/TLS-protected connections if
kerberos authentication is used.
`certificate` LDAPs client certificate to be used for certificate-
based authentication.
`validate_certificates` specifies whether to perform checks on server
certificates in a TLS session. If enabled, TLS_REQCERT demand is set.
The server certificate is requested. If no certificate is provided or
if a bad certificate is provided, the session is immediately terminated.
If disabled, TLS_REQCERT allow is set. The server certificate is
requested, but all errors are ignored.
`kerberos_realm` in which the server is located. This parameter is
only required for SASL GSSAPI authentication to the remote LDAP server.
`kerberos_principal` kerberos principal to use for SASL GSSAPI
authentication to the remote server. If `kerberos_realm` is specified
without a keytab, then the `binddn` and `bindpw` are used to
perform to obtain the ticket necessary for GSSAPI authentication.
`timeout` specifies a timeout (in seconds) after which calls to
synchronous LDAP APIs will abort if no response is received.
`dns_timeout` specifies the timeout (in seconds) after which the
poll(2)/select(2) following a connect(2) returns in case of no activity
for openldap. For nslcd this specifies the time limit (in seconds) to
use when connecting to the directory server. This directly impacts the
length of time that the LDAP service tries before failing over to
a secondary LDAP URI.
The following are advanced settings are configuration parameters for
handling LDAP servers that do not fully comply with RFC-2307. In most
situations all of the following parameters should be set to null,
which indicates to backend to use default for the specified NSS info
schema.
`search_bases` - these parameters allow specifying a non-standard
search base for users (`base_user`), groups (`base_group`), and
netgroups (`base_netgroup`). Must be a valid LDAP DN. No remote
validation is performed that the search base exists or contains
expected objects.
`attribute_maps` - allow specifying alternate non-RFC-compliant
attribute names for `passwd`, `shadow`, `group`, and `netgroup` object
classes as specified in RFC 2307. Setting key to `null` has special
meaning that RFC defaults for the configure `nss_info_schema` will
be used.
`server_type` is a readonly key indicating the server_type detected
internally by TrueNAS. Value will be set to one of the following:
`ACTIVE_DIRECTORY`, `FREEIPA`, `GENERIC`, and `OPENLDAP`. Generic
is default if TrueNAS is unable to determine LDAP server type via
information in the LDAP root DSE.
"""
verrors = ValidationErrors()
must_reload = False
old = await self.config()
new = old.copy()
new_search_bases = data.pop(constants.LDAP_SEARCH_BASES_SCHEMA_NAME, {})
new_attributes = data.pop(constants.LDAP_ATTRIBUTE_MAP_SCHEMA_NAME, {})
if data['hostname'] is None:
del data['hostname']
new.update(data)
new[constants.LDAP_SEARCH_BASES_SCHEMA_NAME] | new_search_bases
for nss_type in constants.LDAP_ATTRIBUTE_MAPS.keys():
new[constants.LDAP_ATTRIBUTE_MAP_SCHEMA_NAME][nss_type] | new_attributes.get(nss_type, {})
new['uri_list'] = await self.hostnames_to_uris(new)
await self.common_validate(new, old, verrors)
verrors.check()
if data.get('certificate') and data['certificate'] != old['certificate']:
new_cert = await self.middleware.call('certificate.query',
[('id', '=', data['certificate'])],
{'get': True})
new['cert_name'] = new_cert['name']
if old != new:
must_reload = True
if new['enable']:
await self.ldap_validate(old, new, verrors)
verrors.check()
await self.ldap_compress(new)
await self.middleware.call('datastore.update', self._config.datastore, new['id'], new, {'prefix': 'ldap_'})
ds_type = DSType.IPA if new['server_type'] == constants.SERVER_TYPE_FREEIPA else DSType.LDAP
if must_reload:
try:
if new['enable']:
await self.__start(job, ds_type)
else:
await self.__stop(job, ds_type)
except Exception:
# Failed during configuration change. Make sure we fail safe.
await self.middleware.call(
'datastore.update', self._config.datastore, new['id'],
{'enable': False}, {'prefix': 'ldap_'}
)
await self.middleware.call(
'directoryservices.health.set_state',
ds_type.value, DSStatus.DISABLED.name
)
for etc_file in ds_type.etc_files:
await self.middleware.call('etc.generate', etc_file)
raise
return await self.config()
@private
def port_is_listening(self, host, port, timeout=1):
ret = False
try:
ipaddress.IPv6Address(host)
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
except ipaddress.AddressValueError:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout:
s.settimeout(timeout)
try:
s.connect((host, port))
ret = True
except Exception as e:
raise CallError(e)
finally:
s.close()
return ret
@private
async def kinit(self, ldap_config):
if await self.middleware.call(
'kerberos.check_ticket',
{'ccache': krb5ccache.SYSTEM.name},
False
):
return
payload = {
'dstype': DSType.LDAP.value,
'conf': {
'binddn': ldap_config.get('binddn', ''),
'bindpw': ldap_config.get('bindpw', ''),
'kerberos_realm': ldap_config.get('kerberos_realm', ''),
'kerberos_principal': ldap_config.get('kerberos_principal', ''),
}
}
cred = await self.middleware.call('kerberos.get_cred', payload)
await self.middleware.call('kerberos.do_kinit', {'krb5_cred': cred})
@private
async def validate_credentials(self, ldap_config=None):
"""
This method validates that user-supplied credentials can be used to
successfully perform a bind to the specified LDAP server. If bind is
using GSSAPI, then we must first kinit.
"""
client_conf = await self.ldap_conf_to_client_config(ldap_config)
if client_conf['bind_type'] == 'GSSAPI':
await self.kinit(ldap_config)
await self.middleware.call('ldapclient.validate_credentials', client_conf)
@private
async def get_root_DSE(self, ldap_config=None):
"""
root DSE is defined in RFC4512, and must include the following:
`namingContexts` naming contexts held in the LDAP sever
`subschemaSubentry` subschema entries known by the LDAP server
`altServer` alternative servers in case this one is unavailable
`supportedExtension` list of supported extended operations
`supportedControl` list of supported controls
`supportedSASLMechnaisms` recognized Simple Authentication and Security layers
(SASL) [RFC4422] mechanisms.
`supportedLDAPVersion` LDAP versions implemented by the LDAP server
In practice, this full data is not returned from many LDAP servers
"""
client_conf = await self.ldap_conf_to_client_config(ldap_config)
return await self.middleware.call('ldapclient.get_root_dse', {"ldap-configuration": client_conf})
@private
async def get_dn(self, dn=None, scope=None, ldap_config=None):
"""
Outputs contents of specified DN in JSON. By default will target the basedn.
"""
client_conf = await self.ldap_conf_to_client_config(ldap_config)
payload = {
"dn": dn,
"ldap-configuration": client_conf,
}
if scope:
payload['scope'] = scope
return await self.middleware.call('ldapclient.get_dn', payload)
@private
def create_sssd_dirs(self):
os.makedirs('/var/run/sssd-cache/mc', mode=0o755, exist_ok=True)
os.makedirs('/var/run/sssd-cache/db', mode=0o755, exist_ok=True)
@private
async def ipa_config(self, conf=None):
"""
Private method to convert our LDAP datstore config to IPA config. This is
temporary solution until we can refactor AD + LDAP + IPA into a single
"directoryservices" plugin
"""
if conf is None:
conf = await self.config()
if conf['server_type'] != constants.SERVER_TYPE_FREEIPA:
raise CallError('not an IPA domain')
nc = await self.middleware.call('network.configuration.config')
if conf['kerberos_realm']:
realm = (await self.middleware.call(
'kerberos.realm.query', [['id', '=', conf['kerberos_realm']]], {'get': True}
))['realm']
elif conf['basedn']:
# No realm in ldap config and so we need to guess at it
realm = '.'.join(
[x.strip().strip('dc=') for x in conf['basedn'].split(',')]
).upper()
else:
raise CallError('Unable to determine kerberos realm')
if nc['domain'] != 'local':
domain = nc['domain']
else:
domain = realm.lower()
await self.middleware.call('network.configuration.update', {'domain': domain})
if 'hostname_virtual' in nc:
hostname = nc['hostname_virtual']
else:
hostname = nc['hostname']
if hostname == 'truenas':
raise CallError('Hostname should be changed from default value prior to joining IPA domain')
if (await self.middleware.call('smb.config'))['netbiosname'] == 'truenas':
# first try setting our netbiosname to match hostname
# Unfortunately hostnames are more permissive than netbios names and so
# there is some chance this will fail
try:
await self.middleware.call('smb.update', {'netbiosname': hostname})
except Exception:
self.logger.warning('%: failed to update netbiosname', hostname, exc_info=True)
raise CallError('SMB netbios name should be changed from default value prior to joining IPA domain')
username = conf['binddn'].split(',')[0].split('=')[1]
return {
'realm': realm,
'domain': domain,
'basedn': conf['basedn'],
'host': f'{nc["hostname"].lower()}.{realm.lower()}',
'target_server': conf['hostname'][0],
'username': username
}
@private
async def has_ipa_host_keytab(self):
return bool(await self.middleware.call(
'kerberos.keytab.query',
[['name', '=', IpaConfigName.IPA_HOST_KEYTAB.value]],
{'count': True}
))
@private
async def ipa_kinit(self, ipa_conf, bindpw):
princ = f'{ipa_conf["username"]}@{ipa_conf["realm"]}'
await self.middleware.call('kerberos.do_kinit', {
'krb5_cred': {
'username': princ,
'password': bindpw
},
'kinit-options': {
'kdc_override': {
'domain': ipa_conf['realm'],
'kdc': ipa_conf['target_server'],
'libdefaults_aux': [
'udp_preference_limit=0',
]
}
}
})
@private
async def __start(self, job, ds_type):
"""
This is the private start method for the LDAP / IPA directory service
If it successfully completes then cache will be built and SSSD configured and running. On failure
the directory service will be disabled.
"""
job.set_progress(0, 'Preparing to configure LDAP directory service.')
await self.middleware.call('directoryservices.health.set_state', ds_type.value, DSStatus.JOINING.name)
ldap = await self.config()
await self.middleware.call('ldap.create_sssd_dirs')
dom_join_resp = DomainJoinResponse.ALREADY_JOINED.value
# If user has an IPA host keytab then we assume that we're properly joined to IPA
if ds_type is DSType.IPA and not await self.has_ipa_host_keytab():
ipa_config = await self.ipa_config(ldap)
try:
await self.ipa_kinit(ipa_config, ldap['bindpw'])
dom_join_resp = await job.wrap(await self.middleware.call(
'directoryservices.connection.join_domain', 'IPA', ipa_config['domain']
))
await self.middleware.call('alert.oneshot_delete', 'IPALegacyConfiguration')
except KRB5Error as err:
# Kerberos error means we most likely have are an IPA client that is using legacy LDAP client
# compatibilty in FreeIPA (which is what we used in 24.04) and does not have server properly
# configured to join IPA domain
await self.middleware.call(
'alert.oneshot_create',
'IPALegacyConfiguration',
{'errmsg': str(err)}
)
# switch over to LDAP for our status updates and reporting
ds_type = DSType.LDAP
await self.middleware.call('directoryservices.health.set_state', ds_type.value, DSStatus.JOINING.name)
except CallError as err:
# We may have a kerberos error encapsulated in CallError due to translation from job results
# In this case we also want to fall back to using legacy LDAP client compatibility.
# We will expand this whitelist as we determine there are more somewhat-recoverable KRB5 errors.
if not err.err_msg.startswith('[KRB5_REALM_UNKNOWN]'):
raise err
await self.middleware.call(
'alert.oneshot_create',
'IPALegacyConfiguration',
{'errmsg': str(err)}
)
# switch over to LDAP for our status updates and reporting
ds_type = DSType.LDAP
await self.middleware.call('directoryservices.health.set_state', ds_type.value, DSStatus.JOINING.name)
# We activate the IPA service while performing a domain join and so we should avoid
# going thorugh the activation routine a second time
match dom_join_resp:
case DomainJoinResponse.PERFORMED_JOIN.value:
# Change state to HEALTHY before performing final health check
# We must be HEALTHY priory to adding privileges otherwise attempt will fail
await self.middleware.call('directoryservices.health.set_state', ds_type.value, DSStatus.HEALTHY.name)
await self.middleware.call('directoryservices.health.check')
await self.middleware.call(
'directoryservices.connection.grant_privileges',
DSType.IPA.value, ipa_config['domain']
)
case DomainJoinResponse.ALREADY_JOINED.value:
cache_job_id = await self.middleware.call('directoryservices.connection.activate')
await job.wrap(await self.middleware.call('core.job_wait', cache_job_id))
# Change state to HEALTHY before performing final health check
await self.middleware.call('directoryservices.health.set_state', ds_type.value, DSStatus.HEALTHY.name)
# Force health check so that user gets immediate feedback if something
# went sideways while enabling
await self.middleware.call('directoryservices.health.check')
case _:
raise CallError(f'{dom_join_resp}: unexpected domain join response')
job.set_progress(100, 'LDAP directory service started.')
@private
async def __stop(self, job, ds_type):
job.set_progress(0, 'Preparing to stop LDAP directory service.')
await self.middleware.call('directoryservices.health.set_state', ds_type.value, DSStatus.DISABLED.name)
await self.middleware.call('service.stop', 'sssd')
for etc_file in ds_type.etc_files:
await self.middleware.call('etc.generate', etc_file)
await self.middleware.call('directoryservices.cache.abort_refresh')
await self.middleware.call('alert.oneshot_delete', 'IPALegacyConfiguration')
if await self.middleware.call(
'kerberos.check_ticket',
{'ccache': krb5ccache.SYSTEM.name},
False
):
await self.middleware.call('kerberos.kdestroy')
job.set_progress(100, 'LDAP directory service stopped.')
| 40,310 | Python | .py | 841 | 36.675386 | 118 | 0.604008 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,466 | failover.py | truenas_middleware/src/middlewared/middlewared/plugins/failover.py | import asyncio
import base64
import errno
import json
import itertools
import logging
import os
import shutil
import stat
import time
from functools import partial
from middlewared.auth import TrueNasNodeSessionManagerCredentials
from middlewared.schema import accepts, Bool, Dict, Int, List, NOT_PROVIDED, Str, returns, Patch
from middlewared.service import (
job, no_auth_required, no_authz_required, pass_app, private, CallError, ConfigService,
ValidationError, ValidationErrors
)
import middlewared.sqlalchemy as sa
from middlewared.plugins.auth import AuthService
from middlewared.plugins.config import FREENAS_DATABASE
from middlewared.plugins.failover_.zpool_cachefile import ZPOOL_CACHE_FILE, ZPOOL_CACHE_FILE_OVERWRITE
from middlewared.plugins.failover_.configure import HA_LICENSE_CACHE_KEY
from middlewared.plugins.failover_.remote import NETWORK_ERRORS
from middlewared.plugins.system.reboot import RebootReason
from middlewared.plugins.update import SYSTEM_UPGRADE_REBOOT_REASON
from middlewared.plugins.update_.install import STARTING_INSTALLER
from middlewared.plugins.update_.utils import DOWNLOAD_UPDATE_FILE
from middlewared.plugins.update_.utils_linux import mount_update
from middlewared.utils.contextlib import asyncnullcontext
ENCRYPTION_CACHE_LOCK = asyncio.Lock()
logger = logging.getLogger('failover')
class FailoverModel(sa.Model):
    """Single-row database table holding the HA failover configuration."""
    __tablename__ = 'system_failover'

    # Primary key of the single configuration row.
    id = sa.Column(sa.Integer(), primary_key=True)
    # When True, automatic failover is administratively disabled.
    disabled = sa.Column(sa.Boolean(), default=False)
    # Chassis slot letter ('A' or 'B') of the node designated MASTER.
    master_node = sa.Column(sa.String(1))
    # Seconds to wait after a critical-interface network event before a
    # failover occurs (0 uses the builtin default; see do_update docstring).
    timeout = sa.Column(sa.Integer(), default=0)
class FailoverService(ConfigService):
    """Service exposing HA failover configuration and status operations."""

    # Cached (hardware, node) tuple from 'failover.enclosure.detect';
    # class-level so every instance shares the one detection result.
    HA_MODE = None
    # Last status returned by status(); used to emit a CHANGED event only
    # when the status actually transitions.
    LAST_STATUS = None
    # Last set of disabled reasons (maintained elsewhere in this service).
    LAST_DISABLEDREASONS = None

    class Config:
        datastore = 'system.failover'
        datastore_extend = 'failover.failover_extend'
        cli_private = True
        role_prefix = 'FAILOVER'

    # Public schema of the config entry; note `master` is computed by
    # failover_extend from the stored `master_node` column.
    ENTRY = Dict(
        'failover_entry',
        Int('id', required=True),
        Bool('disabled', required=True),
        Int('timeout', required=True),
        Bool('master', required=True),
    )
@private
async def failover_extend(self, data):
data['master'] = await self.middleware.call('failover.node') == data.pop('master_node')
return data
@accepts(Patch(
'failover_entry', 'failover_update',
('edit', {'name': 'master', 'method': lambda x: setattr(x, 'null', True)}),
('rm', {'name': 'id'}),
('attr', {'update': True}),
), audit='Failover config update')
async def do_update(self, data):
"""
Update failover state.
`disabled` When true indicates that HA will be disabled.
`master` Marks the particular node in the chassis as the master node.
The standby node will have the opposite value.
`timeout` is the time to WAIT until a failover occurs when a network
event occurs on an interface that is marked critical for failover AND
HA is enabled and working appropriately.
The default time to wait is 2 seconds.
**NOTE**
This setting does NOT effect the `disabled` or `master` parameters.
"""
master = data.pop('master', NOT_PROVIDED)
old = await self.middleware.call('datastore.config', 'system.failover')
new = old.copy()
new.update(data)
if master is not NOT_PROVIDED:
# The node making the call is the one we want to make MASTER by default
new['master_node'] = await self.middleware.call('failover.node')
else:
new['master_node'] = await self._master_node(master)
verrors = ValidationErrors()
if new['disabled'] is False:
if not await self.middleware.call('interface.query', [('failover_critical', '=', True)]):
verrors.add(
'failover_update.disabled',
'You need at least one critical interface to enable failover.',
)
verrors.check()
await self.middleware.call('datastore.update', 'system.failover', new['id'], new)
if new['disabled']:
if new['master_node'] == await self.middleware.call('failover.node'):
await self.middleware.call('failover.force_master')
else:
await self.middleware.call('failover.call_remote', 'failover.force_master')
return await self.config()
async def _master_node(self, master):
node = await self.middleware.call('failover.node')
if node == 'A':
if master:
return 'A'
else:
return 'B'
elif node == 'B':
if master:
return 'B'
else:
return 'A'
else:
raise CallError('Unable to change node state in MANUAL mode')
@no_authz_required
@accepts()
@returns(Bool())
def licensed(self):
"""Checks whether this instance is licensed as a HA unit"""
try:
is_ha = self.middleware.call_sync('cache.get', HA_LICENSE_CACHE_KEY)
except KeyError:
is_ha = False
if (info := self.middleware.call_sync('system.license')) is not None and info['system_serial_ha']:
is_ha = True
self.middleware.call_sync('cache.put', HA_LICENSE_CACHE_KEY, is_ha)
return is_ha
@private
async def ha_mode(self):
# update the class attribute so that all instances
# of this class see the correct value
if FailoverService.HA_MODE is None:
FailoverService.HA_MODE = await self.middleware.call(
'failover.enclosure.detect'
)
return FailoverService.HA_MODE
@accepts(roles=['FAILOVER_READ'])
@returns(Str())
async def hardware(self):
"""
Returns the hardware type for an HA system.
ECHOSTREAM (z-series)
ECHOWARP (m-series)
LAJOLLA2 (f-series)
SUBLIGHT (h-series)
PUMA (x-series)
BHYVE (HA VMs for CI)
IXKVM (HA VMs (on KVM) for CI)
MANUAL (everything else)
"""
return (await self.middleware.call('failover.ha_mode'))[0]
@accepts(roles=['FAILOVER_READ'])
@returns(Str())
async def node(self):
"""
Returns the slot position in the chassis that
the controller is located.
A - First node
B - Seconde Node
MANUAL - slot position in chassis could not be determined
"""
return (await self.middleware.call('failover.ha_mode'))[1]
    @private
    @accepts()
    @returns(List(Str('interface')))
    async def internal_interfaces(self):
        """
        This is a p2p ethernet connection on HA systems.
        """
        # Delegates to the platform-specific detection logic; returns the
        # list of interface names used for the inter-controller link.
        return await self.middleware.call('failover.internal_interface.detect')
@no_auth_required
@accepts()
@returns(Str())
@pass_app(rest=True)
async def status(self, app):
"""
Get the current HA status.
Returns:
MASTER
BACKUP
ELECTING
IMPORTING
ERROR
SINGLE
"""
status = await self._status(app)
if status != self.LAST_STATUS:
self.LAST_STATUS = status
self.middleware.send_event('failover.status', 'CHANGED', fields={'status': status})
return status
async def _status(self, app):
try:
status = await self.middleware.call('cache.get', 'failover_status')
except KeyError:
status = await self.middleware.call('failover.status.get_local', app)
if status:
await self.middleware.call('cache.put', 'failover_status', status, 300)
if status:
return status
try:
# timeout of 5 seconds is necessary here since this could be called
# when the other node has been forcefully rebooted so the websocket
# connection is "up" but the default TCP window hasn't elapsed so
# the connection remains alive. Without the timeout, this could take
# 20+ seconds to return which is unacceptable during a failover event.
remote_imported = await self.middleware.call(
'failover.call_remote', 'zfs.pool.query_imported_fast', [], {'timeout': 5}
)
if len(remote_imported) <= 1:
# getting here means we dont have a pool and neither does remote node
return 'ERROR'
else:
# Other node has the pool (excluding boot pool)
return 'BACKUP'
except Exception as e:
# Anything other than ClientException is unexpected and should be logged
if not isinstance(e, CallError):
self.logger.warning('Failed checking failover status', exc_info=True)
return 'UNKNOWN'
    @private
    async def status_refresh(self):
        """Invalidate the cached failover status and re-prime it."""
        # Drop the stale cached value first so the follow-up calls recompute.
        await self.middleware.call('cache.pop', 'failover_status')
        # Kick a new status so it may be ready on next user call
        await self.middleware.call('failover.status')
        await self.middleware.call('failover.disabled.reasons')
@accepts(roles=['FAILOVER_READ'])
@returns(Bool())
def in_progress(self):
"""
Returns True if there is an ongoing failover event.
"""
event = self.middleware.call_sync(
'core.get_jobs', [
('method', 'in', [
'failover.events.vrrp_master',
'failover.events.vrrp_backup'
]),
('state', 'in', ('RUNNING', 'WAITING')),
]
)
return bool(event)
    @no_auth_required
    @accepts()
    @returns(List('ips', items=[Str('ip')]))
    @pass_app(rest=True)
    async def get_ips(self, app):
        """Get a list of IPs for which the webUI can be accessed."""
        # NOTE(review): despite the name, this returns whatever
        # system.general.get_ui_urls yields -- presumably UI URLs/addresses;
        # verify against that method if exact format matters.
        return await self.middleware.call('system.general.get_ui_urls')
@accepts(audit='Failover become passive')
@returns()
def become_passive(self):
"""
This method is only called manually by the end-user so we fully expect that they
know what they're doing. Furthermore, this method will only run if failover has NOT
been administratively disabled. The reason why we only allow this in that scenario
is because the failover logic (on the other node) will ignore any failover "event"
that comes in if failover has been administratively disabled. This immediately causes
the HA system to go into a "faulted" state because the other node will get the VIPs
but it will not import the zpool and it will not start fenced. Only way out of that
situation is to manually fix things (import zpool, migrate VIPs, start fenced, etc).
NOTE: The only "safe" way to "become passive" is to use the STCNITH method (similar to STONITH).
(i.e. Shoot The Current Node In The Head)
This ensures that the current node gets out of the way _completely_ so there is no chance
of the zpool being imported at the same time on both nodes (which can ultimately end in data corruption).
"""
if self.middleware.call_sync('failover.config')['disabled'] is True:
raise ValidationError('failover.become_passive', 'Failover must be enabled.')
else:
try:
# have to enable the "magic" sysrq triggers
with open('/proc/sys/kernel/sysrq', 'w') as f:
f.write('1')
# now violently reboot
with open('/proc/sysrq-trigger', 'w') as f:
f.write('b')
except Exception:
# yeah...this isn't good
self.logger.error('Unexpected failure in failover.become_passive', exc_info=True)
finally:
# this shouldn't be reached but better safe than sorry
os.system('shutdown -r now')
@accepts(roles=['FAILOVER_WRITE'], audit='Failover force master')
@returns(Bool())
async def force_master(self):
"""
Force this controller to become MASTER, if it's not already.
"""
if not await self.middleware.call('system.is_enterprise'):
return False
if await self.middleware.call('failover.status') == 'MASTER':
return False
crit_ints = [i for i in await self.middleware.call('interface.query') if i.get('failover_critical', False)]
if crit_ints:
await self.middleware.call('failover.events.event', crit_ints[0]['name'], 'forcetakeover')
return True
else:
# if there are no interfaces marked critical for failover and this method was
# still called, then we can at least start fenced to reserve the disks
rc = await self.middleware.call('failover.fenced.start', True)
return not rc if rc != 6 else bool(rc) # 6 means already running
@accepts(Dict(
'options',
Bool('reboot', default=False),
), roles=['FAILOVER_WRITE'])
@returns()
def sync_to_peer(self, options):
"""
Sync database and files to the other controller.
`reboot` as true will reboot the other controller after syncing.
"""
standby = ' standby controller.'
self.logger.debug('Pulling system dataset UUID from' + standby)
self.middleware.call_sync('systemdataset.ensure_standby_uuid')
self.logger.debug('Syncing database to' + standby)
self.middleware.call_sync('failover.datastore.send')
self.logger.debug('Syncing cached encryption keys to' + standby)
self.middleware.call_sync('failover.sync_keys_to_remote_node')
self.logger.debug('Syncing zpool cachefile, license, pwenc and authorized_keys files to' + standby)
self.send_small_file('/data/license')
self.send_small_file('/data/pwenc_secret')
self.send_small_file('/home/admin/.ssh/authorized_keys')
self.send_small_file('/home/truenas_admin/.ssh/authorized_keys')
self.send_small_file('/root/.ssh/authorized_keys')
self.send_small_file(ZPOOL_CACHE_FILE, ZPOOL_CACHE_FILE_OVERWRITE)
self.middleware.call_sync('failover.call_remote', 'failover.zpool.cachefile.setup', ['SYNC'])
self.middleware.call_sync(
'failover.call_remote', 'core.call_hook', ['config.on_upload', [FREENAS_DATABASE]],
)
# need to make sure the license information is updated on the standby node since
# it's cached in memory
_prev = self.middleware.call_sync('system.product_type')
self.middleware.call_sync(
'failover.call_remote', 'core.call_hook', ['system.post_license_update', [_prev]]
)
if options['reboot']:
self.middleware.call_sync('failover.call_remote', 'system.reboot', ['Failover sync to peer', {'delay': 2}])
@accepts(roles=['FAILOVER_WRITE'])
@returns()
def sync_from_peer(self):
    """
    Sync database and files from the other controller.

    Implemented by asking the remote controller to run its own
    `failover.sync_to_peer`, which pushes everything back to this node.
    """
    self.middleware.call_sync('failover.call_remote', 'failover.sync_to_peer')
@private
def send_small_file(self, path, dest=None):
    """Copy a local regular file to the standby controller.

    The file is streamed in 10MiB chunks, base64 encoded over
    `filesystem.file_receive`, preserving mode/uid/gid. Missing source
    files are silently ignored; `dest` defaults to `path`.
    """
    chunk_size = 1024 * 1024 * 10
    if dest is None:
        dest = path
    try:
        with open(path, 'rb') as f:
            st = os.fstat(f.fileno())
            if not stat.S_ISREG(st.st_mode):
                raise CallError(f'{path!r} must be a regular file')
            opts = {'mode': st.st_mode, 'uid': st.st_uid, 'gid': st.st_gid}
            chunk_number = 0
            while chunk := f.read(chunk_size):
                # the first chunk creates/truncates the destination file,
                # every later chunk appends to it
                opts['append'] = chunk_number > 0
                self.middleware.call_sync(
                    'failover.call_remote',
                    'filesystem.file_receive',
                    [dest, base64.b64encode(chunk).decode(), opts]
                )
                chunk_number += 1
    except FileNotFoundError:
        return
@private
async def get_disks_local(self):
    """Return serial numbers of local disks, excluding boot-pool disks.

    Returns None (implicitly) when either device query fails; callers
    treat None as "information unavailable".
    """
    try:
        boot_disks = await self.middleware.call('boot.get_disks')
        all_disks = await self.middleware.call('device.get_disks', False, True)
        serials = []
        for disk, serial in all_disks.items():
            if disk not in boot_disks:
                serials.append(serial)
        return serials
    except Exception:
        self.logger.error('Unhandled exception in get_disks_local', exc_info=True)
@private
async def mismatch_nics(self):
    """Determine if NICs match between both controllers.

    Returns a dict with `missing_local` (NICs present on the remote node
    but not here) and `missing_remote` (NICs present here but not on the
    remote node). Both lists stay empty when either side cannot be queried.
    """
    result = {'missing_local': list(), 'missing_remote': list()}
    try:
        local_nics = await self.middleware.call('interface.query', [], {'extra': {'retrieve_names_only': True}})
        local_nics = set(i['name'] for i in local_nics)
    except Exception:
        self.logger.error('Unhandled exception querying ifaces on local controller', exc_info=True)
        return result

    try:
        # tight timeouts keep an unresponsive peer from stalling this call
        remote_nics = await self.middleware.call(
            'failover.call_remote', 'interface.query', [[], {'extra': {'retrieve_names_only': True}}],
            {'raise_connect_error': False, 'timeout': 2, 'connect_timeout': 2}
        )
    except Exception:
        self.logger.error('Unhandled exception querying ifaces on remote controller', exc_info=True)
    else:
        # presumably None means the peer was unreachable (raise_connect_error
        # is False) — hence the explicit None check
        if remote_nics is not None:
            remote_nics = set(i['name'] for i in remote_nics)
            result['missing_local'] = sorted(remote_nics - local_nics)
            result['missing_remote'] = sorted(local_nics - remote_nics)
    return result
@private
async def mismatch_disks(self):
    """On HA systems, the block device names can be different between the controllers.
    Because of this fact, we need to check the serials of each disk which should be the
    same between the controllers.
    """
    result = {'missing_local': [], 'missing_remote': []}
    local_serials = await self.get_disks_local()
    if local_serials is None:
        # the local query failed and already logged; nothing to compare
        return result
    try:
        remote_serials = await self.middleware.call(
            'failover.call_remote', 'failover.get_disks_local', [],
            {'raise_connect_error': False, 'timeout': 2, 'connect_timeout': 2}
        )
    except Exception:
        self.logger.error('Unhandled exception in get_disks_local on remote controller', exc_info=True)
        return result
    if remote_serials is not None:
        result['missing_local'] = sorted(set(remote_serials) - set(local_serials))
        result['missing_remote'] = sorted(set(local_serials) - set(remote_serials))
    return result
@accepts(Dict(
    'options',
    List(
        'pools', items=[
            Dict(
                'pool_keys',
                Str('name', required=True),
                Str('passphrase', required=True)
            )
        ],
    ),
    List(
        'datasets', items=[
            Dict(
                'dataset_keys',
                Str('name', required=True),
                Str('passphrase', required=True),
            )
        ],
    ),
))
@returns(Bool())
async def unlock(self, options):
    """
    Unlock datasets in HA, syncing passphrase between controllers and forcing this controller
    to be MASTER importing the pools.
    """
    if options['pools'] or options['datasets']:
        # cache the provided passphrases (and replicate them to the peer)
        # before taking over, so the encryption-key cache is populated for
        # the subsequent unlock
        await self.middleware.call(
            'failover.update_encryption_keys', {
                'pools': options['pools'],
                'datasets': options['datasets'],
            },
        )

    return await self.middleware.call('failover.force_master')
@private
@accepts(
    Str('pool_name'),
)
@returns(Dict(
    List('unlocked', items=[Str('dataset')], required=True),
    Dict(
        'failed',
        required=True,
        additional_attrs=True,
        example={'vol1/enc': {'error': 'Invalid Key', 'skipped': []}},
    ),
))
@job(lock=lambda args: f'failover_dataset_unlock_{args[0]}')
async def unlock_zfs_datasets(self, job, pool_name):
    # Unlock all (if any) zfs datasets for `pool_name`
    # that we have keys for in the cache or the database.
    zfs_keys = [
        {'name': name, 'passphrase': passphrase}
        for name, passphrase in (await self.encryption_keys())['zfs'].items()
        if name == pool_name or name.startswith(f'{pool_name}/')
    ]
    unlock_job = await self.middleware.call(
        'pool.dataset.unlock', pool_name, {
            'recursive': True,
            'datasets': zfs_keys,
            # Do not waste time handling attachments, failover process will restart services and regenerate configs
            # for us
            'toggle_attachments': False,
        }
    )
    # forward the inner unlock job's progress and result as our own
    return await job.wrap(unlock_job)
@private
@accepts()
async def encryption_keys(self):
    """Return the cached encryption keys ({'geli': {...}, 'zfs': {...}}),
    creating an empty cache entry on first use."""
    # TODO: remove GELI key since it's
    # not supported in SCALE
    return await self.middleware.call(
        'cache.get_or_put', 'failover_encryption_keys', 0, lambda: {'geli': {}, 'zfs': {}}
    )
@private
@accepts(
    Dict(
        'update_encryption_keys',
        Bool('sync_keys', default=True),
        List(
            'pools', items=[
                Dict(
                    'pool_geli_keys',
                    Str('name', required=True),
                    Str('passphrase', required=True),
                )
            ],
        ),
        List(
            'datasets', items=[
                Dict(
                    'dataset_keys',
                    Str('name', required=True),
                    Str('passphrase', required=True),
                )
            ],
        ),
    )
)
async def update_encryption_keys(self, options):
    """Add/replace cached encryption passphrases and persist them.

    Optionally (default) replicates the updated cache to the standby node.
    """
    # TODO: remove `pools` key and `geli` logic
    # since GELI is not supported in SCALE
    if not (options['pools'] or options['datasets']):
        raise CallError('Please specify pools/datasets to update')

    async with ENCRYPTION_CACHE_LOCK:
        keys = await self.encryption_keys()
        keys['geli'].update({entry['name']: entry['passphrase'] for entry in options['pools']})
        keys['zfs'].update({entry['name']: entry['passphrase'] for entry in options['datasets']})
        await self.middleware.call('cache.put', 'failover_encryption_keys', keys)
        if options['sync_keys']:
            # we already hold the cache lock, so tell the sync not to re-acquire it
            await self.sync_keys_to_remote_node(lock=False)
@private
@accepts(
    Dict(
        'remove_encryption_keys',
        Bool('sync_keys', default=True),
        List('pools', items=[Str('pool')]),
        List('datasets', items=[Str('dataset')]),
    )
)
async def remove_encryption_keys(self, options):
    """Remove cached encryption keys for the given pools/datasets.

    Removing a dataset also drops cached keys for all of its children
    (any cached name under `<dataset>/`). Optionally (default) replicates
    the updated cache to the standby node.

    Raises:
        CallError: if neither pools nor datasets were specified.
    """
    # TODO: remove `pools` key and `geli` logic
    # since GELI is not supported in SCALE
    if not options['pools'] and not options['datasets']:
        raise CallError('Please specify pools/datasets to remove')

    async with ENCRYPTION_CACHE_LOCK:
        keys = await self.encryption_keys()
        for pool in options['pools']:
            keys['geli'].pop(pool, None)
        if options['datasets']:
            # filter the cache in a single pass instead of rebuilding the
            # whole dict once per removed dataset (previously
            # O(datasets * cached keys) dict reconstructions)
            removed = set(options['datasets'])
            child_prefixes = tuple(f'{name}/' for name in removed)
            keys['zfs'] = {
                k: v for k, v in keys['zfs'].items()
                if k not in removed and not k.startswith(child_prefixes)
            }
        await self.middleware.call('cache.put', 'failover_encryption_keys', keys)
        if options['sync_keys']:
            # we already hold the cache lock, so tell the sync not to re-acquire it
            await self.sync_keys_to_remote_node(lock=False)
@private
async def is_single_master_node(self):
    """Return True when this node is authoritative (MASTER or non-HA SINGLE)."""
    status = await self.middleware.call('failover.status')
    return status in ('MASTER', 'SINGLE')
@accepts(
    Str('action', enum=['ENABLE', 'DISABLE']),
    Dict(
        'options',
        Bool('active'),
    ),
    roles=['FAILOVER_WRITE']
)
@returns()
async def control(self, action, options):
    """Enable or disable failover, recording which node should be MASTER."""
    if not options:
        # The node making the call is the one we want to make MASTER by default
        node = await self._master_node((await self.middleware.call('failover.node')))
    else:
        node = await self._master_node(options.get('active'))

    failover = await self.middleware.call('datastore.config', 'system.failover')

    if action == 'ENABLE':
        if failover['disabled'] is False:
            # Already enabled
            return False

        update = {
            'disabled': False,
            'master_node': node,
        }
    elif action == 'DISABLE':
        if failover['disabled'] is True:
            # Already disabled
            return False

        update = {
            'disabled': True,
            'master_node': node,
        }

    # `update` is always bound here because `action` is schema-restricted to
    # ENABLE/DISABLE.
    # NOTE(review): returns False on a no-op but None after an actual update;
    # callers should not rely on the return value — confirm before changing.
    await self.middleware.call('datastore.update', 'system.failover', failover['id'], update)
@private
def upgrade_version(self):
    # NOTE(review): constant version marker for the HA upgrade flow;
    # presumably used to detect mismatched upgrade logic between the two
    # controllers — confirm against the callers before changing.
    return 1
@accepts(Dict(
    'failover_upgrade',
    Str('train', empty=False),
    Bool('resume', default=False),
    Bool('resume_manual', default=False),
), roles=['FAILOVER_WRITE'], audit='Failover upgrade')
@returns(Bool())
@job(lock='failover_upgrade', pipes=['input'], check_pipes=False)
def upgrade(self, job, options):
    """
    Upgrades both controllers.

    Files will be downloaded to the Active Controller and then transferred to the Standby
    Controller.

    Upgrade process will start concurrently on both nodes.

    Once both upgrades are applied, the Standby Controller will reboot. This job will wait for
    that job to complete before finalizing.

    `resume` should be set to `true` if a previous call to this method returned a `CallError` with `errno=EAGAIN`
    meaning that an upgrade can be performed with a warning and that warning is accepted. In that case, you also
    have to set `resume_manual` to `true` if a previous call to this method was performed using update file upload.
    """
    if self.middleware.call_sync('failover.status') != 'MASTER':
        raise CallError('Upgrade can only run on Active Controller.')

    # a connected input pipe means the caller uploaded a manual update file
    if not options['resume']:
        try:
            job.check_pipe('input')
        except ValueError:
            updatefile = False
        else:
            updatefile = True
    else:
        updatefile = options['resume_manual']

    train = options.get('train')
    if train:
        self.middleware.call_sync('update.set_train', train)

    local_path = self.middleware.call_sync('update.get_update_location')
    updatefile_name = 'updatefile.sqsh'
    updatefile_localpath = os.path.join(local_path, updatefile_name)
    if not options['resume'] and updatefile:
        # means manual update file was provided so write it
        # to local storage
        job.set_progress(None, 'Uploading update file')
        os.makedirs(local_path, exist_ok=True)
        with open(updatefile_localpath, 'wb') as f:
            shutil.copyfileobj(job.pipes.input.r, f, 1048576)

    try:
        if not self.middleware.call_sync('failover.call_remote', 'system.ready'):
            raise CallError('Standby Controller is not ready.')

        if not options['resume'] and not updatefile:
            # means no update file was provided so go out to
            # the interwebz and download it
            def download_callback(j):
                job.set_progress(
                    None, j['progress']['description'] or 'Downloading upgrade files'
                )

            djob = self.middleware.call_sync('update.download', job_on_progress_cb=download_callback)
            djob.wait_sync(raise_error=True)
            if not djob.result:
                raise CallError('No updates available.')

        if updatefile:
            effective_updatefile_name = updatefile_name
        else:
            effective_updatefile_name = DOWNLOAD_UPDATE_FILE

        # `truenas-installer` automatically determines new BE dataset name based on the version and existing BE
        # names. As BE names can be different on different controllers, automatic process can't be trusted to
        # choose the same bootenv name on both controllers so we explicitly specify BE name for HA upgrades.
        with mount_update(os.path.join(local_path, effective_updatefile_name)) as mounted:
            with open(os.path.join(mounted, 'manifest.json')) as f:
                manifest = json.load(f)

        bootenv_name = manifest['version']
        # consider BE names on BOTH controllers when picking a unique name
        existing_bootenvs = set([
            be['name'] for be in self.middleware.call_sync('bootenv.query')
        ] + [
            be['name'] for be in self.middleware.call_sync('failover.call_remote', 'bootenv.query')
        ])
        if bootenv_name in existing_bootenvs:
            # append the first numeric suffix that makes the name unique
            for i in itertools.count(1):
                probe_bootenv_name = f"{bootenv_name}-{i}"
                if probe_bootenv_name not in existing_bootenvs:
                    bootenv_name = probe_bootenv_name
                    break

        dataset_name = f'{self.middleware.call_sync("boot.pool_name")}/ROOT/{bootenv_name}'

        remote_path = self.middleware.call_sync('failover.call_remote', 'update.get_update_location')

        if not options['resume']:
            # Replicate the uploaded or downloaded update to the standby
            job.set_progress(None, 'Sending files to Standby Controller')
            token = self.middleware.call_sync('failover.call_remote', 'auth.generate_token')
            self.middleware.call_sync(
                'failover.send_file',
                token,
                os.path.join(local_path, effective_updatefile_name),
                os.path.join(remote_path, effective_updatefile_name),
                {'mode': 0o600}
            )

        local_version = self.middleware.call_sync('system.version')
        remote_version = self.middleware.call_sync('failover.call_remote', 'system.version')

        local_started_installer = False
        local_progress = remote_progress = 0
        local_descr = remote_descr = 'Starting upgrade'

        def callback(j, controller):
            # aggregate both controllers' update-job progress into this job
            nonlocal local_started_installer, local_progress, remote_progress, local_descr, remote_descr
            if controller == 'LOCAL' and j['progress']['description'] == STARTING_INSTALLER:
                local_started_installer = True
            if j['state'] not in ['RUNNING', 'SUCCESS']:
                return
            if controller == 'LOCAL':
                local_progress = j["progress"]["percent"]
                local_descr = f'{int(j["progress"]["percent"])}%: {j["progress"]["description"]}'
            else:
                remote_progress = j["progress"]["percent"]
                remote_descr = f'{int(j["progress"]["percent"])}%: {j["progress"]["description"]}'
            job.set_progress(
                min(local_progress, remote_progress),
                f'Active Controller: {local_descr}\n' + f'Standby Controller: {remote_descr}'
            )

        update_options = {
            'dataset_name': dataset_name,
            'resume': options['resume'],
        }

        if updatefile:
            update_method = 'update.manual'
            update_remote_args = [os.path.join(remote_path, updatefile_name), update_options]
            update_local_args = [updatefile_localpath, update_options]
        else:
            update_method = 'update.update'
            update_remote_args = [update_options]
            update_local_args = [update_options]

        # upgrade the local (active) controller
        ljob = self.middleware.call_sync(
            update_method, *update_local_args,
            job_on_progress_cb=partial(callback, controller='LOCAL')
        )
        # Wait for local installer to pass pre-checks and start the install process itself so that we do not start
        # remote upgrade if a pre-check fails.
        while not local_started_installer:
            try:
                ljob.wait_sync(raise_error=True, timeout=1)
            except TimeoutError:
                # not finished yet; keep polling until the installer starts
                pass

        if local_version == remote_version:
            # start the upgrade on the remote (standby) controller
            rjob = self.middleware.call_sync(
                'failover.call_remote', update_method, update_remote_args, {
                    'job_return': True,
                    'callback': partial(callback, controller='REMOTE')
                }
            )
        else:
            rjob = None

        ljob.wait_sync(raise_error=True)

        remote_boot_id = self.middleware.call_sync('failover.call_remote', 'system.boot_id')

        # check the remote (standby) controller upgrade job
        if rjob:
            rjob.result()

        self.middleware.call_sync(
            'failover.call_remote', 'system.reboot', [SYSTEM_UPGRADE_REBOOT_REASON, {'delay': 5}], {'job': True},
        )
    except Exception:
        # NOTE(review): this handler is currently a no-op re-raise; it looks
        # like a leftover from removed cleanup logic — confirm before removing.
        raise

    # SCALE is using systemd and at the time of writing this, the
    # DefaultTimeoutStopSec setting hasn't been changed and so
    # defaults to 90 seconds. This means when the system is sent the
    # shutdown signal, all the associated user-space programs are
    # asked to be shutdown. If any of those take longer than 90
    # seconds to respond to SIGTERM then the program is sent SIGKILL.
    # Finally, if after 90 seconds the standby controller is still
    # responding to remote requests then play it safe and assume the
    # reboot failed (this should be rare but my future self will
    # appreciate the fact I wrote this out because of the inevitable
    # complexities of gluster/k8s/vms etc etc for which I predict
    # will exhibit this behavior :P )
    job.set_progress(None, 'Waiting on the Standby Controller to reboot.')
    try:
        retry_time = time.monotonic()
        shutdown_timeout = 90  # seconds
        while time.monotonic() - retry_time < shutdown_timeout:
            self.middleware.call_sync(
                'failover.call_remote', 'core.ping', [], {'timeout': 5}
            )
            time.sleep(5)
    except CallError:
        # ping failed — the standby actually went down, which is the success path
        pass
    else:
        raise CallError(
            f'Timed out waiting {shutdown_timeout} seconds for the standby controller to reboot',
            errno.ETIMEDOUT
        )

    if not self.upgrade_waitstandby():
        raise CallError(
            'Timed out waiting for the standby controller to upgrade.',
            errno.ETIMEDOUT
        )

    # we captured the `remote_boot_id` up above earlier in the upgrade process.
    # This variable represents a 1-time unique boot id. It's supposed to be different
    # every time the system boots up. If this check is True, then it's safe to say
    # that the remote system never rebooted, therefore, never completing the upgrade
    # process....which isn't good.
    if remote_boot_id == self.middleware.call_sync('failover.call_remote', 'system.boot_id'):
        raise CallError('Standby Controller failed to reboot.')

    self.middleware.call_sync('system.reboot.add_reason', RebootReason.UPGRADE.name, RebootReason.UPGRADE.value)
    return True
@private
def upgrade_waitstandby(self, seconds=1200):
    """
    Wait (up to 20 minutes by default) for the Standby Controller to come
    back after its upgrade reboot.

    The timeout value comes from support's observations of how long an
    M-series can take. Returns True once the standby reports ready, not
    failing over, and in BACKUP state; False on timeout.
    """
    retry_time = time.monotonic()
    system_ready = False
    failover_in_progress = True
    while time.monotonic() - retry_time < seconds:
        try:
            # the sticky flags make each stage a one-way gate so earlier
            # stages are not re-queried once they have passed
            if system_ready is False and not self.middleware.call_sync('failover.call_remote', 'system.ready'):
                time.sleep(5)
                continue
            else:
                system_ready = True

            if failover_in_progress is True and self.middleware.call_sync(
                'failover.call_remote', 'failover.in_progress'
            ):
                time.sleep(5)
                continue
            else:
                failover_in_progress = False

            if self.middleware.call_sync('failover.call_remote', 'failover.status') != 'BACKUP':
                time.sleep(5)
                continue
        except CallError as e:
            # transient network errors are expected while the peer reboots
            if e.errno in NETWORK_ERRORS:
                time.sleep(5)
                continue
            raise
        else:
            return True
    return False
@private
async def sync_keys_from_remote_node(self):
    """
    Sync ZFS encryption keys from the active node.

    Only meaningful on the BACKUP node; it asks the active node (which
    owns the authoritative key cache) to push its keys to us.
    """
    if not await self.middleware.call('failover.licensed'):
        return

    # only sync keys if we're the BACKUP node
    if (await self.middleware.call('failover.status')) != 'BACKUP':
        return

    # make sure we can contact the MASTER node
    try:
        assert (await self.middleware.call('failover.call_remote', 'core.ping')) == 'pong'
    except Exception:
        self.logger.error(
            'Failed to contact active controller when syncing encryption keys', exc_info=True
        )
        return

    try:
        await self.middleware.call('failover.call_remote', 'failover.sync_keys_to_remote_node')
    except Exception:
        self.logger.error(
            'Failed to sync keys from active controller when syncing encryption keys', exc_info=True
        )
@private
async def sync_keys_to_remote_node(self, lock=True):
    """
    Sync ZFS encryption keys to the standby node.

    `lock` should only be False when the caller already holds
    ENCRYPTION_CACHE_LOCK (see update/remove_encryption_keys).
    Failures raise one-shot alerts which are cleared again on the next
    successful sync.
    """
    if not await self.middleware.call('failover.licensed'):
        return

    # only sync keys if we're the MASTER node
    if (await self.middleware.call('failover.status')) != 'MASTER':
        return

    # make sure we can contact the BACKUP node
    try:
        assert (await self.middleware.call('failover.call_remote', 'core.ping')) == 'pong'
    except Exception:
        self.logger.error(
            'Failed to contact standby controller when syncing encryption keys', exc_info=True
        )
        return

    async with ENCRYPTION_CACHE_LOCK if lock else asyncnullcontext():
        try:
            keys = await self.encryption_keys()
            await self.middleware.call(
                'failover.call_remote', 'cache.put', ['failover_encryption_keys', keys]
            )
        except Exception as e:
            await self.middleware.call('alert.oneshot_create', 'FailoverKeysSyncFailed', None)
            self.logger.error('Failed to sync keys with standby controller: %s', str(e), exc_info=True)
        else:
            await self.middleware.call('alert.oneshot_delete', 'FailoverKeysSyncFailed', None)

    # the KMIP keys live outside the encryption-key cache, so they are
    # replicated without holding the cache lock
    try:
        kmip_keys = await self.middleware.call('kmip.kmip_memory_keys')
        await self.middleware.call(
            'failover.call_remote', 'kmip.update_memory_keys', [kmip_keys]
        )
    except Exception as e:
        await self.middleware.call(
            'alert.oneshot_create', 'FailoverKMIPKeysSyncFailed', {'error': str(e)}
        )
        self.logger.error(
            'Failed to sync KMIP keys with standby controller: %s', str(e), exc_info=True
        )
    else:
        await self.middleware.call('alert.oneshot_delete', 'FailoverKMIPKeysSyncFailed', None)
async def ha_permission(middleware, app):
    """Auto-login unauthenticated connections that originate from the HA peer."""
    try:
        if app.authenticated or not app.origin.is_ha_connection:
            return
        await AuthService.session_manager.login(app, TrueNasNodeSessionManagerCredentials())
    except AttributeError:
        # app/origin may not carry these attributes; treat as "not HA"
        pass
async def interface_pre_sync_hook(middleware):
    # Registered on 'interface.pre_sync': let the failover internal
    # interface prepare itself before the general interface sync runs.
    await middleware.call('failover.internal_interface.pre_sync')
async def hook_license_update(middleware, *args, **kwargs):
    # A new license can change HA capability: clear the class-level HA_MODE
    # cache (presumably forcing re-detection on next use) and refresh status.
    FailoverService.HA_MODE = None
    await middleware.call('failover.status_refresh')
async def hook_post_rollback_setup_ha(middleware, *args, **kwargs):
    """Resync the database to the standby node after a NIC rollback.

    This hook needs to be run after a NIC rollback operation and before
    an `interfaces.sync` operation on a TrueNAS HA system.
    """
    licensed = await middleware.call('failover.licensed')
    if not licensed:
        return

    # bail out early when the standby controller is unreachable
    try:
        await middleware.call('failover.call_remote', 'core.ping')
    except Exception:
        middleware.logger.debug('[HA] Failed to contact standby controller', exc_info=True)
    else:
        await middleware.call('failover.datastore.send')
        middleware.logger.debug('[HA] Successfully sent database to standby controller')
async def hook_setup_ha(middleware, *args, **kwargs):
    """Finish (or maintain) HA setup after interface/pool changes.

    Registered on interface.post_sync, pool.post_create_or_update and
    pool.post_import. No-ops unless licensed, a failover VIP exists and a
    pool is present.
    """
    if not await middleware.call('failover.licensed'):
        return

    if not await middleware.call('interface.query', [('failover_virtual_aliases', '!=', [])]):
        return

    if not await middleware.call('pool.query'):
        return

    # If we have reached this stage make sure status is up to date
    await middleware.call('failover.status_refresh')

    # if the peer reports anything other than SINGLE, HA was already set up;
    # an unreachable peer is treated as "not configured yet"
    try:
        ha_configured = await middleware.call(
            'failover.call_remote', 'failover.status'
        ) != 'SINGLE'
    except Exception:
        ha_configured = False

    if ha_configured:
        # Perform basic initialization of DLM, in case it is needed by iSCSI ALUA
        middleware.logger.debug('[HA] Initialize DLM')
        await middleware.call('dlm.create')

        # If HA is already configured and failover has been disabled,
        # and we have gotten to this point, then this means a few things could be happening.
        #    1. a new interface is being added
        #    2. an alias is being added to an already configured interface
        #    3. an interface is being modified (changing vhid/ip etc)
        #    4. an interface is being deleted

        # In the event #2 happens listed above, there is a race condition that
        # must be accounted for. When an alias is added to an already configured interface,
        # a CARP event will be triggered and the interface will go from MASTER to INIT->BACKUP->MASTER which
        # generates a devd event that is processed by the failover.event plugin.
        # It takes a few seconds for the kernel to transition the CARP interface from BACKUP->MASTER.
        # However, we refresh the failover.status while this interface is transitioning.
        # This means that failover.status returns 'ERROR'.
        # To work around this we check 2 things:
        #    1. if failover.status == 'MASTER' then we continue
        #    or
        #    2. the node in the chassis is marked as the master_node in the webUI
        #      (because failover has been disabled in the webUI)
        cur_status = await middleware.call('failover.status')
        config = await middleware.call('failover.config')
        if cur_status == 'MASTER' or (config['master'] and config['disabled']):
            # In the event HA is configured and the end-user deletes
            # an interface, we need to sync the database over to the
            # standby node before we call `interface.sync`
            middleware.logger.debug('[HA] Sending database to standby node')
            await middleware.call('failover.datastore.send')

            # Need to send the zpool cachefile to the other node so it matches
            # when a failover event occurs
            middleware.logger.debug('[HA] Sending zpool cachefile to standby node')
            await middleware.call('failover.send_small_file', ZPOOL_CACHE_FILE, ZPOOL_CACHE_FILE_OVERWRITE)
            await middleware.call('failover.call_remote', 'failover.zpool.cachefile.setup', ['SYNC'])

            middleware.logger.debug('[HA] Configuring network on standby node')
            await middleware.call('failover.call_remote', 'interface.sync')

        return

    # when HA is initially setup, we don't synchronize service states to the
    # standby controller. Minimally, however, it's nice to synchronize ssh
    # (if appropriate, of course)
    filters = [('srv_service', '=', 'ssh')]
    ssh_enabled = remote_ssh_started = False
    ssh = await middleware.call('datastore.query', 'services.services', filters)
    if ssh:
        if ssh[0]['srv_enable']:
            ssh_enabled = True
        if await middleware.call('failover.call_remote', 'service.started', ['ssh']):
            remote_ssh_started = True

    middleware.logger.debug('[HA] Setting up')

    middleware.logger.debug('[HA] Synchronizing database and files')
    await middleware.call('failover.sync_to_peer')

    middleware.logger.debug('[HA] Configuring network on standby node')
    await middleware.call('failover.call_remote', 'interface.sync')

    if ssh_enabled and not remote_ssh_started:
        middleware.logger.debug('[HA] Starting SSH on standby node')
        await middleware.call('failover.call_remote', 'service.start', ['ssh'])

    middleware.logger.debug('[HA] Refreshing failover status')
    await middleware.call('failover.status_refresh')

    middleware.logger.info('[HA] Setup complete')
    middleware.send_event('failover.setup', 'ADDED', fields={})
async def hook_pool_export(middleware, pool=None, *args, **kwargs):
    # Drop any cached (GELI) encryption key for an exported pool.
    # NOTE(review): with the default `pool=None` this passes [None] through;
    # remove_encryption_keys pops with a default so that is harmless, but
    # confirm callers always supply the pool name.
    await middleware.call('failover.remove_encryption_keys', {'pools': [pool]})
async def hook_pool_dataset_unlock(middleware, datasets):
    """Cache passphrases of freshly unlocked passphrase-encrypted datasets."""
    passphrase_datasets = []
    for ds in datasets:
        if ds['key_format'].upper() == 'PASSPHRASE':
            passphrase_datasets.append({'name': ds['name'], 'passphrase': ds['encryption_key']})
    if passphrase_datasets:
        await middleware.call('failover.update_encryption_keys', {'datasets': passphrase_datasets})
async def hook_pool_dataset_post_create(middleware, dataset_data):
    """Replicate keys of a newly created encrypted dataset to the standby node."""
    if not dataset_data['encrypted']:
        return
    if str(dataset_data['key_format']).upper() == 'PASSPHRASE':
        # passphrase keys are cached (and synced) via the failover key cache
        await middleware.call(
            'failover.update_encryption_keys', {
                'datasets': [{'name': dataset_data['name'], 'passphrase': dataset_data['encryption_key']}]
            }
        )
        return
    # non-passphrase keys only need syncing when KMIP manages ZFS keys
    kmip = await middleware.call('kmip.config')
    if kmip['enabled'] and kmip['manage_zfs_keys']:
        await middleware.call('failover.sync_keys_to_remote_node')
async def hook_pool_dataset_post_delete_lock(middleware, dataset):
    # A deleted or locked dataset no longer needs its passphrase cached.
    await middleware.call('failover.remove_encryption_keys', {'datasets': [dataset]})
async def hook_pool_dataset_change_key(middleware, dataset_data):
    """Keep the failover key cache in step with a dataset key change."""
    if dataset_data['key_format'] == 'PASSPHRASE':
        # new key is a passphrase: cache (and sync) it
        await middleware.call(
            'failover.update_encryption_keys', {
                'datasets': [{'name': dataset_data['name'], 'passphrase': dataset_data['encryption_key']}]
            }
        )
    elif dataset_data['old_key_format'] == 'PASSPHRASE':
        # passphrase was replaced by a non-passphrase key: drop the cached one
        await middleware.call('failover.remove_encryption_keys', {'datasets': [dataset_data['name']]})
    else:
        # neither side is a passphrase; only KMIP-managed keys need syncing
        kmip = await middleware.call('kmip.config')
        if kmip['enabled'] and kmip['manage_zfs_keys']:
            await middleware.call('failover.sync_keys_to_remote_node')
async def hook_pool_dataset_inherit_parent_encryption_root(middleware, dataset):
    # The dataset now inherits its parent's encryption root, so its own
    # cached passphrase (if any) is stale and must be dropped.
    await middleware.call('failover.remove_encryption_keys', {'datasets': [dataset]})
async def hook_kmip_sync(middleware, *args, **kwargs):
    # KMIP key material changed; push the current keys to the standby node.
    await middleware.call('failover.sync_keys_to_remote_node')
async def service_remote(middleware, service, verb, options):
    """
    Most of service actions need to be replicated to the standby node so we don't lose
    too much time during failover regenerating things (e.g. users database)

    This is the middleware side of what legacy UI did on service changes.
    """
    ignore = ('system', 'smartd', 'nfs', 'netdata', 'truecommand', 'docker')
    if not options['ha_propagate']:
        return
    if service in ignore or (service == 'nginx' and verb == 'stop'):
        return
    if await middleware.call('failover.status') != 'MASTER':
        return

    try:
        await middleware.call('failover.call_remote', 'core.bulk', [
            f'service.{verb}', [[service, options]]
        ], {'raise_connect_error': False})
    except Exception:
        middleware.logger.warning('Failed to run %s(%s)', verb, service, exc_info=True)
async def _event_system_ready(middleware, event_type, args):
    # called when system is ready to issue an event in case HA upgrade is pending.
    # NOTE(review): only the early-exit guard remains here; the code that
    # actually emitted the pending-upgrade event appears to have been removed.
    # Confirm whether this subscription is still needed.
    if await middleware.call('failover.status') in ('MASTER', 'SINGLE'):
        return
def remote_status_event(middleware, *args, **kwargs):
    # Callback for remote-node connect/disconnect and reboot/shutdown events:
    # refresh our cached failover status.
    middleware.call_sync('failover.status_refresh')
async def setup(middleware):
    """Register failover events, hooks and remote-subscription callbacks."""
    middleware.event_register('failover.setup', 'Sent when failover is being setup.')
    middleware.event_register('failover.status', 'Sent when failover status changes.', no_auth_required=True)
    middleware.event_subscribe('system.ready', _event_system_ready)
    middleware.register_hook('core.on_connect', ha_permission, sync=True)
    middleware.register_hook('interface.pre_sync', interface_pre_sync_hook, sync=True)
    middleware.register_hook('interface.post_sync', hook_setup_ha, sync=True)
    middleware.register_hook('interface.post_rollback', hook_post_rollback_setup_ha, sync=True)
    middleware.register_hook('pool.post_create_or_update', hook_setup_ha, sync=True)
    middleware.register_hook('pool.post_export', hook_pool_export, sync=True)
    middleware.register_hook('pool.post_import', hook_setup_ha, sync=True)
    middleware.register_hook('dataset.post_create', hook_pool_dataset_post_create, sync=True)
    middleware.register_hook('dataset.post_delete', hook_pool_dataset_post_delete_lock, sync=True)
    middleware.register_hook('dataset.post_lock', hook_pool_dataset_post_delete_lock, sync=True)
    middleware.register_hook('dataset.post_unlock', hook_pool_dataset_unlock, sync=True)
    middleware.register_hook('dataset.change_key', hook_pool_dataset_change_key, sync=True)
    middleware.register_hook(
        'dataset.inherit_parent_encryption_root', hook_pool_dataset_inherit_parent_encryption_root, sync=True
    )
    middleware.register_hook('kmip.sed_keys_sync', hook_kmip_sync, sync=True)
    middleware.register_hook('kmip.zfs_keys_sync', hook_kmip_sync, sync=True)
    middleware.register_hook('system.post_license_update', hook_license_update, sync=False)
    middleware.register_hook('service.pre_action', service_remote, sync=False)

    # Register callbacks to properly refresh HA status and send events on changes
    await middleware.call('failover.remote_subscribe', 'system.ready', remote_status_event)
    await middleware.call('failover.remote_subscribe', 'system.reboot', remote_status_event)
    await middleware.call('failover.remote_subscribe', 'system.shutdown', remote_status_event)
    await middleware.call('failover.remote_on_connect', remote_status_event)
    await middleware.call('failover.remote_on_disconnect', remote_status_event)

    if await middleware.call('system.ready'):
        # We add a delay here to give the standby node middleware a chance to boot up because
        # if we do it asap, it is highly likely that the standby node middleware is not ready
        # to make connection to the active node middleware.
        asyncio.get_event_loop().call_later(
            30, lambda: middleware.create_task(middleware.call('failover.sync_keys_from_remote_node'))
        )
| 52,952 | Python | .py | 1,099 | 37.020928 | 119 | 0.608229 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,467 | device.py | truenas_middleware/src/middlewared/middlewared/plugins/device.py | from middlewared.schema import accepts, Bool, Dict, Int, List, OROperator, returns, Str
from middlewared.service import Service
class DeviceService(Service):

    class Config:
        cli_namespace = 'system.device'

    @accepts(
        Dict(
            'data',
            Str('type', enum=['SERIAL', 'DISK', 'GPU'], required=True),
            Bool('get_partitions', required=False, default=False),
            Bool('serials_only', required=False, default=False),
        ),
        roles=['READONLY_ADMIN']
    )
    @returns(OROperator(
        List('serial_info', items=[Dict(
            'serial_info',
            Str('name', required=True),
            Str('location'),
            Str('drivername'),
            Str('start'),
            Int('size'),
            Str('description'),
        )]),
        List('gpu_info', items=[Dict(
            'gpu_info',
            Dict(
                'addr',
                Str('pci_slot', required=True),
                Str('domain', required=True),
                Str('bus', required=True),
                Str('slot', True),
            ),
            Str('description', required=True),
            List('devices', items=[Dict(
                'gpu_device',
                Str('pci_id', required=True),
                Str('pci_slot', required=True),
                Str('vm_pci_slot', required=True),
            )]),
            Str('vendor', required=True, null=True),
            Bool('available_to_host', required=True),
            Bool('uses_system_critical_devices', required=True),
            Str('critical_reason', required=True, null=True),
            additional_attrs=True,
        ),
        ]),
        Dict('disk_info', additional_attrs=True),
        name='device_info',
    ))
    async def get_info(self, data):
        """
        Get info for `data['type']` device.

        If `type` is "DISK":
            `get_partitions`: boolean, when set to True will query partition
            information for the disks. NOTE: this can be expensive on
            systems with a large number of disks present.

            `serials_only`: boolean, when set to True will query serial information
            _ONLY_ for the disks.
        """
        # dispatch to device.get_serials / device.get_disks / device.get_gpus
        device_type = data['type'].lower()
        endpoint = f'device.get_{device_type}s'
        if device_type == 'disk':
            # only the disk endpoint takes the extra arguments
            return await self.middleware.call(endpoint, data['get_partitions'], data['serials_only'])
        return await self.middleware.call(endpoint)
| 2,474 | Python | .py | 64 | 27.265625 | 99 | 0.534719 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,468 | migration.py | truenas_middleware/src/middlewared/middlewared/plugins/migration.py | import asyncio
import os
import sqlite3
from middlewared.service import Service
import middlewared.sqlalchemy as sa
from middlewared.utils.plugins import load_modules
from middlewared.utils.python import get_middlewared_dir
def load_migrations(middleware):
    """Discover data-migration modules and return them ordered by module name.

    `middleware` is accepted for call-site compatibility but is not used here.
    """
    migrations_path = os.path.join(get_middlewared_dir(), "migration")
    modules = load_modules(migrations_path)
    return sorted(modules, key=lambda module: module.__name__)
class MigrationModel(sa.Model):
    # ORM model for the table that records which one-off data migrations have
    # already been executed (by module name), so each runs at most once.
    __tablename__ = 'system_migration'

    id = sa.Column(sa.Integer(), primary_key=True)
    name = sa.Column(sa.String(255), unique=True)  # migration module __name__
class MigrationService(Service):
    """Runs pending one-off data migrations when the `run_migration` flag is set."""

    class Config:
        private = True

    async def run(self):
        """Execute every not-yet-run migration module, then clear the flag.

        A failing migration is logged and skipped (it is NOT recorded as
        executed, so it will be retried on the next run).
        """
        if await self.middleware.call("keyvalue.get", "run_migration", False):
            executed_migrations = {m["name"] for m in await self.middleware.call("datastore.query", "system.migration")}

            for module in load_migrations(self.middleware):
                name = module.__name__
                if name in executed_migrations:
                    continue

                self.middleware.logger.info("Running migration %s", name)
                try:
                    # Migration modules may expose either an async or a sync
                    # `migrate(middleware)`; sync ones run in a worker thread.
                    if asyncio.iscoroutinefunction(module.migrate):
                        await module.migrate(self.middleware)
                    else:
                        await self.middleware.run_in_thread(module.migrate, self.middleware)
                except Exception:
                    self.middleware.logger.error("Error running migration %s", name, exc_info=True)
                    continue

                # ha_sync=False: presumably these writes should not replicate
                # to the HA peer -- TODO confirm against datastore docs.
                await self.middleware.call("datastore.insert", "system.migration", {"name": name}, {"ha_sync": False})

            await self.middleware.call("keyvalue.set", "run_migration", False, {"ha_sync": False})
def on_config_upload(middleware, path):
    """Flag an uploaded configuration database so migrations re-run against it.

    Writes `run_migration = 'true'` into the uploaded database's
    `system_keyvalue` table; `MigrationService.run` checks this key on the
    next boot from that config.

    :param middleware: unused, required by the `config.on_upload` hook signature.
    :param path: filesystem path of the uploaded sqlite database.
    """
    conn = sqlite3.connect(path)
    try:
        conn.execute("REPLACE INTO system_keyvalue (key, value) VALUES ('run_migration', 'true')")
        # sqlite3 starts an implicit transaction for DML statements; without an
        # explicit commit the REPLACE is rolled back when the connection is
        # closed and the flag would never be persisted.
        conn.commit()
    finally:
        conn.close()
async def setup(middleware):
    # Mark any uploaded configuration database so migrations run against it;
    # sync=True registers the hook as synchronous (see register_hook).
    middleware.register_hook('config.on_upload', on_config_upload, sync=True)
| 2,078 | Python | .py | 42 | 39.642857 | 120 | 0.652454 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,469 | snmp.py | truenas_middleware/src/middlewared/middlewared/plugins/snmp.py | import middlewared.sqlalchemy as sa
import subprocess
import os
from contextlib import suppress
from middlewared.common.ports import ServicePortDelegate
from middlewared.plugins.snmp_.utils_snmp_user import (
SNMPSystem, _add_system_user,
add_snmp_user, delete_snmp_user, get_users_cmd
)
from middlewared.schema import Bool, Dict, Int, Password, Str
from middlewared.service import private, SystemServiceService, ValidationErrors
from middlewared.validators import Email, Match, Or, Range
class SNMPModel(sa.Model):
    # ORM model backing the SNMP service configuration (services.snmp
    # datastore, fields exposed without the 'snmp_' prefix).
    __tablename__ = 'services_snmp'

    id = sa.Column(sa.Integer(), primary_key=True)
    snmp_location = sa.Column(sa.String(255))
    snmp_contact = sa.Column(sa.String(120))
    snmp_traps = sa.Column(sa.Boolean(), default=False)
    snmp_v3 = sa.Column(sa.Boolean(), default=False)
    snmp_community = sa.Column(sa.String(120), default='public')
    snmp_v3_username = sa.Column(sa.String(20))
    snmp_v3_authtype = sa.Column(sa.String(3), default='SHA')
    snmp_v3_password = sa.Column(sa.EncryptedText())
    snmp_v3_privproto = sa.Column(sa.String(3), nullable=True)
    snmp_v3_privpassphrase = sa.Column(sa.EncryptedText(), nullable=True)
    snmp_options = sa.Column(sa.Text())
    snmp_loglevel = sa.Column(sa.Integer(), default=3)
    snmp_zilstat = sa.Column(sa.Boolean(), default=False)
class SNMPService(SystemServiceService):
    """Configuration service for the SNMP daemon, including SNMPv3 user management."""

    class Config:
        service = 'snmp'
        service_verb = 'restart'
        datastore = 'services.snmp'
        datastore_prefix = 'snmp_'
        cli_namespace = 'service.snmp'

    ENTRY = Dict(
        'snmp_entry',
        Str('location', required=True),
        Str('contact', required=True, validators=[Or(Email(), Match(r'^[-_a-zA-Z0-9\s]*$'))]),
        Bool('traps', required=True),
        Bool('v3', required=True),
        Str('community', validators=[Match(r'^[-_.a-zA-Z0-9\s]*$')], default='public', required=True),
        Str('v3_username', max_length=20, required=True),
        Str('v3_authtype', enum=['', 'MD5', 'SHA'], required=True),
        Password('v3_password', required=True),
        Str('v3_privproto', enum=[None, 'AES', 'DES'], null=True, required=True),
        Password('v3_privpassphrase', required=True, null=True),
        Int('loglevel', validators=[Range(min_=0, max_=7)], required=True),
        Str('options', max_length=None, required=True),
        Bool('zilstat', required=True),
        Int('id', required=True),
    )

    @private
    def get_snmp_users(self):
        """
        NOTE: This should be called with SNMP running
        Use snmpwalk and the SNMP system user to get the list
        """
        # Make sure we have the SNMP system user
        if not SNMPSystem.SYSTEM_USER['key']:
            self.middleware.call_sync('snmp.init_v3_user')

        users = []
        if cmd := get_users_cmd():
            try:
                # This call will timeout if SNMP is not running
                res = subprocess.run(cmd, capture_output=True)
                # Each output line ends with the (possibly quoted) user name.
                users = [x.split()[-1].strip('\"') for x in res.stdout.decode().splitlines()]
            except Exception:
                self.logger.warning("Failed to list snmp v3 users")
        else:
            self.logger.warning("SNMP system user is not configured. Stop and restart SNMP or reboot.")

        return users

    @private
    def get_defaults(self):
        """
        Get default config settings from the SNMPModel column definitions.
        Fixup nullable strings: columns with no default become '' unless
        they are nullable, in which case they become None.
        """
        SNMPModel_defaults = {}
        prefix = self._config.datastore_prefix
        for attrib in SNMPModel.__dict__.keys():
            if attrib.startswith(prefix):
                try:
                    val = getattr(getattr(SNMPModel, attrib), "default").arg
                except AttributeError:
                    # Column has no default: fall back based on nullability.
                    # NOTE(review): `isinstance(attrib, str)` is always True
                    # (dict keys here are strings), so this reduces to
                    # "" if not nullable else None.
                    nullable = getattr(getattr(SNMPModel, attrib), "nullable")
                    val = "" if not nullable and isinstance(attrib, str) else None
                if not callable(val):
                    # NOTE(review): lstrip() strips a *character set*, not a
                    # prefix; it is safe for the current column names (none
                    # start with s/n/m/p/_ after 'snmp_') but removeprefix()
                    # would be more robust.
                    SNMPModel_defaults[attrib.lstrip(prefix)] = val
        return SNMPModel_defaults

    @private
    async def _is_snmp_running(self):
        """ Internal helper function for use by this module """
        current_state = await self.middleware.call(
            'service.query', [["service", "=", "snmp"]], {"select": ["state"]}
        )
        return current_state[0]['state'] == 'RUNNING'

    @private
    async def init_v3_user(self):
        """
        Purpose: Make sure we have configured the snmpAuthUser
        This will generate the SNMP system user and, if needed, repair the v3 user.
        This will start and stop SNMP as needed.
        NOTE: This will raise CallError if SNMP is unable to be started.

        Process:
            1) Record current SNMP run state
            2) Stop SNMP and delete the private config file
            3) Start SNMP to regenerate a new config file without any v3 users
            4) Stop SNMP and add v3 user markers to the private config
            5) Start SNMP to integrate the v3 users
               - snmpd detects the 'markers', internally generates the user
                 and deletes the 'markers'.
            6) Restore SNMP to 'current' run state

        Process notes:
            - We delete the private config file to make sure we're starting with
              a pristine config file that contains no bogus user markers or other chaff.
        """
        config = await self.middleware.call('snmp.config')

        # 1) Record current SNMP run state
        snmp_service = await self.middleware.call("service.query", [("service", "=", "snmp")], {"get": True})

        # 2) Stop SNMP and delete the private config file
        await self.middleware.call("service.stop", "snmp")
        with suppress(FileNotFoundError):
            await self.middleware.run_in_thread(os.remove, SNMPSystem.PRIV_CONF)

        # 3) Start SNMP to regenerate a new config file without any v3 users
        await self.middleware.call('service.start', 'snmp')

        # 4) Stop SNMP and add v3 user markers to the private config
        await self.middleware.call("service.stop", "snmp")
        await self.middleware.run_in_thread(_add_system_user)

        # if configured, add the v3 user
        if config['v3_username']:
            await self.middleware.run_in_thread(add_snmp_user, config)

        # 5) Start SNMP to integrate the v3 users
        await self.middleware.call('service.start', 'snmp')

        # 6) Restore SNMP to 'current' run state
        if snmp_service['state'] == "STOPPED":
            await self.middleware.call("service.stop", "snmp")

    async def do_update(self, data):
        """
        Update SNMP Service Configuration.

        --- Rules ---
        Enabling v3:
            requires v3_username, v3_authtype and v3_password
        Disabling v3:
            By itself will retain the v3 user settings and config in the 'private' config,
            but remove the entry in the public config to block v3 access by that user.
        Disabling v3 and clearing the v3_username:
            This will do the actions described in 'Disabling v3' and take the extra step to
            remove the user from the 'private' config.

        The 'v3_*' settings are valid and enforced only when 'v3' is enabled.
        """
        # Make sure we have the SNMP system user
        if not SNMPSystem.SYSTEM_USER['key']:
            await self.init_v3_user()

        old = await self.config()
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # If not v3, then must have a community 'passcode'
        if not new['v3'] and not new['community']:
            verrors.add('snmp_update.community', 'This field is required when SNMPv3 is disabled')

        # If v3, then must supply a username, authtype and password
        if new['v3']:
            # All _nearly_ the same, but different field IDs.
            if not new['v3_username']:
                verrors.add('snmp_update.v3_username', 'This field is required when SNMPv3 is enabled')
            if not new['v3_authtype']:
                verrors.add('snmp_update.v3_authtype', 'This field is required when SNMPv3 is enabled')
            if not new['v3_password']:
                verrors.add('snmp_update.v3_password', 'This field is required when SNMPv3 is enabled')

        # Get the above fixed first
        verrors.check()

        if new['v3_password'] and len(new['v3_password']) < 8:
            verrors.add('snmp_update.v3_password', 'Password must contain at least 8 characters')

        if new['v3_privproto'] and not new['v3_privpassphrase']:
            verrors.add(
                'snmp_update.v3_privpassphrase', 'This field is required when SNMPv3 private protocol is specified'
            )

        verrors.check()

        # To delete the v3 user:
        #   From the UI: clear the username field, then uncheck the v3 checkbox
        #   From midclt: set {'v3': False, 'v3_username': ''}
        if not any([new['v3'], new['v3_username']]) and old['v3_username']:
            # v3 is disabled: Are we asked to delete the v3 user?
            # Process to delete the SNMPv3 user:
            # 1) SNMP must be running
            # 2) Delete the user with the snmpusm shell command
            # 3) Clear the v3 settings in the config
            # 4) Restore SNMP run state
            snmp_service = await self.middleware.call("service.query", [("service", "=", "snmp")], {"get": True})
            await self.middleware.call('service.start', 'snmp')
            try:
                await self.middleware.run_in_thread(delete_snmp_user, old['v3_username'])
            except Exception:
                # NOTE(review): ValidationErrors.add is called elsewhere with
                # (attribute, message); this single-argument call looks
                # incorrect -- verify it does not raise TypeError.
                verrors.add("Cannot delete user. Please stop and restart SNMP or reboot, then try again.")
            else:
                # Reset all v3_* settings back to their model defaults.
                config_default = self.get_defaults()
                default_v3_config = {k: v for (k, v) in config_default.items() if k.startswith('v3')}
                new.update(default_v3_config)

            # Restore original SNMP state
            if 'STOPPED' in snmp_service['state']:
                await self.middleware.call('service.stop', 'snmp')

        await self._update_service(old, new)

        # Manage update to SNMP v3 user
        if new['v3']:
            # v3 is enabled: Are there _any_ changes in the v3_* settings?
            new_set = set({k: v for k, v in new.items() if k.startswith('v3_')}.items())
            old_set = set({k: v for k, v in old.items() if k.startswith('v3_')}.items())
            v3_diffs = new_set ^ old_set
            if any(v3_diffs):
                await self.init_v3_user()

        return await self.config()
class SNMPServicePortDelegate(ServicePortDelegate):
    # Reports the ports the SNMP service occupies so the middleware's
    # port-usage framework can detect conflicts.
    name = 'snmp'
    namespace = 'snmp'
    title = 'SNMP Service'

    async def get_ports_bound_on_wildcards(self):
        # Ports considered bound on all interfaces by the SNMP service.
        return [160, 161]
async def setup(middleware):
    # Register SNMP's port usage with the port-conflict framework.
    await middleware.call('port.register_attachment_delegate', SNMPServicePortDelegate(middleware))
| 11,041 | Python | .py | 220 | 40.25 | 115 | 0.621531 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,470 | acme_protocol.py | truenas_middleware/src/middlewared/middlewared/plugins/acme_protocol.py | import josepy as jose
import json
import requests
from middlewared.api import api_method
from middlewared.api.current import (
ACMERegistrationCreateArgs, ACMERegistrationCreateResult, DNSAuthenticatorUpdateArgs, DNSAuthenticatorUpdateResult,
DNSAuthenticatorCreateArgs, DNSAuthenticatorCreateResult, DNSAuthenticatorDeleteArgs, DNSAuthenticatorDeleteResult,
ACMERegistrationEntry, ACMEDNSAuthenticatorEntry,
)
from middlewared.schema import ValidationErrors
from middlewared.service import CallError, CRUDService, private
import middlewared.sqlalchemy as sa
from acme import client, messages
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
# TODO: See what can be done to respect rate limits
class ACMERegistrationModel(sa.Model):
    # One row per ACME directory we have registered an account with;
    # stores the directory endpoints discovered from the server.
    __tablename__ = 'system_acmeregistration'

    id = sa.Column(sa.Integer(), primary_key=True)
    uri = sa.Column(sa.String(200))  # account URI returned by the server
    directory = sa.Column(sa.String(200), unique=True)  # directory endpoint
    tos = sa.Column(sa.String(200))  # terms-of-service URL
    new_account_uri = sa.Column(sa.String(200))
    new_nonce_uri = sa.Column(sa.String(200))
    new_order_uri = sa.Column(sa.String(200))
    revoke_cert_uri = sa.Column(sa.String(200))
class ACMERegistrationBodyModel(sa.Model):
    # Account details for a registration (contact, status and the JWK RSA
    # key serialized as JSON), linked to ACMERegistrationModel.
    __tablename__ = 'system_acmeregistrationbody'

    id = sa.Column(sa.Integer(), primary_key=True)
    contact = sa.Column(sa.String(254))
    status = sa.Column(sa.String(10))
    key = sa.Column(sa.Text())  # JWKRSA key, json_dumps() output
    acme_id = sa.Column(sa.ForeignKey('system_acmeregistration.id'), index=True)
class ACMERegistrationService(CRUDService):
    """Maintains account registrations with ACME servers (one per directory URI)."""

    class Config:
        datastore = 'system.acmeregistration'
        datastore_extend = 'acme.registration.register_extend'
        namespace = 'acme.registration'
        private = True
        entry = ACMERegistrationEntry

    @private
    async def register_extend(self, data):
        # Attach the matching registration body row (minus the back-reference)
        # under the 'body' key of each registration entry.
        data['body'] = {
            key: value for key, value in
            (await self.middleware.call(
                'datastore.query', 'system.acmeregistrationbody',
                [['acme', '=', data['id']]], {'get': True}
            )).items() if key != 'acme'
        }
        return data

    @private
    def get_directory(self, acme_directory_uri):
        """Fetch an ACME server's directory document and return it as a
        `messages.Directory` with the four endpoints this service uses.

        Raises CallError if the directory cannot be retrieved or parsed.
        """
        self.middleware.call_sync('network.general.will_perform_activity', 'acme')
        try:
            acme_directory_uri = acme_directory_uri.rstrip('/')
            response = requests.get(acme_directory_uri).json()
            return messages.Directory({
                key: response[key] for key in ['newAccount', 'newNonce', 'newOrder', 'revokeCert']
            })
        except (requests.ConnectionError, requests.Timeout, json.JSONDecodeError, KeyError) as e:
            raise CallError(f'Unable to retrieve directory : {e}')

    @api_method(ACMERegistrationCreateArgs, ACMERegistrationCreateResult)
    def do_create(self, data):
        """
        Register with ACME Server

        Create a registration for a specific ACME Server registering root user with it.
        `acme_directory_uri` is a directory endpoint for any ACME Server.

        .. examples(websocket)::

          Register with ACME Server

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "acme.registration.create",
                "params": [{
                    "tos": true,
                    "acme_directory_uri": "https://acme-staging-v02.api.letsencrypt.org/directory",
                    "JWK_create": {
                        "key_size": 2048,
                        "public_exponent": 65537
                    }
                }]
            }
        """
        # STEPS FOR CREATION
        # 1) CREATE KEY
        # 2) REGISTER CLIENT
        # 3) SAVE REGISTRATION OBJECT
        # 4) SAVE REGISTRATION BODY
        verrors = ValidationErrors()
        directory = self.get_directory(data['acme_directory_uri'])
        if not isinstance(directory, messages.Directory):
            verrors.add(
                'acme_registration_create.acme_directory_uri',
                f'System was unable to retrieve the directory with the specified acme_directory_uri: {directory}'
            )

        # Normalizing uri after directory call as let's encrypt staging api
        # does not accept a trailing slash right now
        data['acme_directory_uri'] += '/' if data['acme_directory_uri'][-1] != '/' else ''

        if not data['tos']:
            verrors.add(
                'acme_registration_create.tos',
                'Please agree to the terms of service'
            )

        # For now we assume that only root is responsible for certs issued under ACME protocol
        email = self.middleware.call_sync('mail.local_administrator_email')
        if not email:
            raise CallError(
                'Please configure an email address for any local administrator user which will be used with the ACME '
                'server'
            )

        # Only one registration per directory URI is allowed.
        if self.middleware.call_sync(
            'acme.registration.query', [['directory', '=', data['acme_directory_uri']]]
        ):
            verrors.add(
                'acme_registration_create.acme_directory_uri',
                'A registration with the specified directory uri already exists'
            )

        verrors.check()

        # 1) Create the account key with the requested RSA parameters.
        key = jose.JWKRSA(key=rsa.generate_private_key(
            public_exponent=data['JWK_create']['public_exponent'],
            key_size=data['JWK_create']['key_size'],
            backend=default_backend()
        ))
        # 2) Register the account with the ACME server.
        acme_client = client.ClientV2(directory, client.ClientNetwork(key))
        register = acme_client.new_account(
            messages.NewRegistration.from_data(
                email=email,
                terms_of_service_agreed=True
            )
        )
        # We have registered with the acme server
        # 3) Save registration object
        registration_id = self.middleware.call_sync(
            'datastore.insert',
            self._config.datastore,
            {
                'uri': register.uri,
                'tos': register.terms_of_service or '',
                'new_account_uri': directory.newAccount,
                'new_nonce_uri': directory.newNonce,
                'new_order_uri': directory.newOrder,
                'revoke_cert_uri': directory.revokeCert,
                'directory': data['acme_directory_uri']
            }
        )
        # 4) Save registration body
        self.middleware.call_sync(
            'datastore.insert',
            'system.acmeregistrationbody',
            {
                'contact': register.body.contact[0],
                'status': register.body.status,
                'key': key.json_dumps(),
                'acme': registration_id
            }
        )
        return self.middleware.call_sync(f'{self._config.namespace}.get_instance', registration_id)
class ACMEDNSAuthenticatorModel(sa.Model):
    # Stores DNS-provider credentials used to answer ACME DNS challenges;
    # `attributes` holds the provider-specific secrets (encrypted JSON).
    __tablename__ = 'system_acmednsauthenticator'

    id = sa.Column(sa.Integer(), primary_key=True)
    authenticator = sa.Column(sa.String(64))  # provider identifier, e.g. 'route53'
    name = sa.Column(sa.String(64), unique=True)
    attributes = sa.Column(sa.JSON(encrypted=True))
class DNSAuthenticatorService(CRUDService):
    """CRUD for DNS authenticators used to complete ACME DNS-01 challenges."""

    class Config:
        namespace = 'acme.dns.authenticator'
        datastore = 'system.acmednsauthenticator'
        cli_namespace = 'system.acme.dns_auth'
        entry = ACMEDNSAuthenticatorEntry

    @private
    async def common_validation(self, data, schema_name, old=None):
        """Validate name uniqueness and provider credentials.

        On success the provider may normalize `data['attributes']` in place.
        Raises ValidationErrors on failure.
        """
        verrors = ValidationErrors()
        # On update, exclude the record being updated from the uniqueness check.
        filters = [['name', '!=', old['name']]] if old else []
        filters.append(['name', '=', data['name']])
        if await self.query(filters):
            verrors.add(f'{schema_name}.name', 'Specified name is already in use')

        if data['authenticator'] not in await self.middleware.call('acme.dns.authenticator.get_authenticator_schemas'):
            verrors.add(
                f'{schema_name}.authenticator',
                f'System does not support {data["authenticator"]} as an Authenticator'
            )
        else:
            authenticator_obj = await self.middleware.call('acme.dns.authenticator.get_authenticator_internal', data)
            data['attributes'] = await authenticator_obj.validate_credentials(self.middleware, data['attributes'])

        verrors.check()

    @api_method(DNSAuthenticatorCreateArgs, DNSAuthenticatorCreateResult)
    async def do_create(self, data):
        """
        Create a DNS Authenticator

        Create a specific DNS Authenticator containing required authentication details for the said
        provider to successfully connect with it.

        .. examples(websocket)::

          Create a DNS Authenticator for Route53

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "acme.dns.authenticator.create",
                "params": [{
                    "name": "route53_authenticator",
                    "authenticator": "route53",
                    "attributes": {
                        "access_key_id": "AQX13",
                        "secret_access_key": "JKW90"
                    }
                }]
            }
        """
        await self.common_validation(data, 'dns_authenticator_create')

        id_ = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
        )

        return await self.get_instance(id_)

    @api_method(DNSAuthenticatorUpdateArgs, DNSAuthenticatorUpdateResult)
    async def do_update(self, id_, data):
        """
        Update DNS Authenticator of `id`

        .. examples(websocket)::

          Update a DNS Authenticator of `id`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "acme.dns.authenticator.update",
                "params": [
                    1,
                    {
                        "name": "route53_authenticator",
                        "attributes": {
                            "access_key_id": "AQX13",
                            "secret_access_key": "JKW90"
                        }
                    }
                ]
            }
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)

        await self.common_validation(new, 'dns_authenticator_update', old)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id_,
            new
        )

        return await self.get_instance(id_)

    @api_method(DNSAuthenticatorDeleteArgs, DNSAuthenticatorDeleteResult)
    async def do_delete(self, id_):
        """
        Delete DNS Authenticator of `id`

        .. examples(websocket)::

          Delete a DNS Authenticator of `id`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "acme.dns.authenticator.delete",
                "params": [
                    1
                ]
            }
        """
        # Detach any certificate domains that reference this authenticator first.
        await self.middleware.call('certificate.delete_domains_authenticator', id_)

        return await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id_
        )
async def setup(middleware):
    # Register the 'acme' network activity so outbound ACME requests can be
    # tracked/controlled by the network activity framework.
    await middleware.call('network.general.register_activity', 'acme', 'ACME')
| 11,650 | Python | .py | 274 | 31.145985 | 119 | 0.591303 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,471 | zettarepl.py | truenas_middleware/src/middlewared/middlewared/plugins/zettarepl.py | from collections import defaultdict
from contextlib import asynccontextmanager
from ctypes import c_bool
from datetime import time as _time, timedelta
import errno
import logging
import multiprocessing
import os
import pytz
import queue
import re
import setproctitle
import signal
import socket
import threading
import time
import types
import paramiko.ssh_exception
from truenas_api_client import Client, ClientException
from zettarepl.dataset.create import create_dataset
from zettarepl.dataset.list import list_datasets
from zettarepl.definition.definition import (
DefinitionErrors, PeriodicSnapshotTaskDefinitionError, ReplicationTaskDefinitionError, Definition
)
from zettarepl.observer import (
PeriodicSnapshotTaskStart, PeriodicSnapshotTaskSuccess, PeriodicSnapshotTaskError,
ReplicationTaskScheduled, ReplicationTaskStart, ReplicationTaskSnapshotStart, ReplicationTaskSnapshotProgress,
ReplicationTaskSnapshotSuccess,
ReplicationTaskDataProgress, ReplicationTaskSuccess, ReplicationTaskError,
)
from zettarepl.replication.task.dataset import get_target_dataset
from zettarepl.replication.task.name_pattern import compile_name_regex
from zettarepl.snapshot.list import multilist_snapshots, group_snapshots_by_datasets
from zettarepl.snapshot.name import parse_snapshots_names_with_multiple_schemas
from zettarepl.transport.create import create_transport
from zettarepl.transport.interface import ExecException
from zettarepl.transport.local import LocalShell
from zettarepl.transport.zfscli import get_properties_recursive
from zettarepl.utils.logging import (
LongStringsFilter, ReplicationTaskLoggingLevelFilter, logging_record_replication_task
)
from zettarepl.zettarepl import create_zettarepl
from middlewared.logger import setup_logging
from middlewared.service import CallError, Service
from middlewared.utils.cgroups import move_to_root_cgroups
from middlewared.utils.prctl import die_with_parent
from middlewared.utils.size import format_size
from middlewared.utils.string import make_sentence
from middlewared.utils.threading import start_daemon_thread
from middlewared.utils.time_utils import utc_now
# Dataset-name patterns this plugin treats as invalid: the boot pool (current
# and legacy names) and any pool's hidden `.system` dataset.
INVALID_DATASETS = (
    re.compile(r"boot-pool($|/)"),
    re.compile(r"freenas-boot($|/)"),
    re.compile(r"[^/]+/\.system($|/)")
)
def lifetime_timedelta(value, unit):
    """Translate a snapshot lifetime (value, unit) pair into a ``timedelta``.

    `unit` is one of 'HOUR', 'DAY', 'WEEK', 'MONTH' (30 days) or
    'YEAR' (365 days); any other unit raises ``ValueError``.
    """
    factories = {
        "HOUR": lambda v: timedelta(hours=v),
        "DAY": lambda v: timedelta(days=v),
        "WEEK": lambda v: timedelta(weeks=v),
        "MONTH": lambda v: timedelta(days=v * 30),
        "YEAR": lambda v: timedelta(days=v * 365),
    }
    try:
        factory = factories[unit]
    except KeyError:
        raise ValueError(f"Invalid lifetime unit: {unit!r}") from None
    return factory(value)
def timedelta_iso8601(timedelta):
    """Render a ``datetime.timedelta`` as an ISO-8601 duration in whole seconds.

    The parameter is named ``timedelta`` (shadowing the class) to keep the
    existing public signature unchanged.
    """
    total_seconds = int(timedelta.total_seconds())
    return "PT%dS" % total_seconds
def lifetime_iso8601(value, unit):
    """Convert a (value, unit) lifetime straight to an ISO-8601 duration string."""
    delta = lifetime_timedelta(value, unit)
    return timedelta_iso8601(delta)
def replication_task_exclude(replication_task):
    """Return the task's exclude list, augmented with `<pool>/.system` for each
    top-level source dataset of a recursive, non-full-replication task."""
    excluded = list(replication_task["exclude"])
    if replication_task["recursive"] and not replication_task["replicate"]:
        # Exclude all possible FreeNAS system datasets
        excluded.extend(
            f"{dataset}/.system"
            for dataset in replication_task["source_datasets"]
            if "/" not in dataset
        )
    return excluded
def zettarepl_schedule(schedule):
schedule = {k.replace("_", "-"): v for k, v in schedule.items()}
schedule["day-of-month"] = schedule.pop("dom")
schedule["day-of-week"] = schedule.pop("dow")
for k in ["begin", "end"]:
if k in schedule and isinstance(schedule[k], _time):
schedule[k] = str(schedule[k])[:5]
return schedule
class HoldReplicationTaskException(Exception):
    """Signals that a replication task should be held, carrying the
    human-readable hold reason."""

    def __init__(self, reason):
        super().__init__()
        self.reason = reason
class ReplicationTaskLog:
    """Observer-queue message carrying a single formatted log line for one
    replication task."""

    def __init__(self, task_id, log):
        self.log = log
        self.task_id = task_id
class ObserverQueueLoggingHandler(logging.Handler):
    """Logging handler that forwards log records belonging to a replication
    task onto the observer queue as ``ReplicationTaskLog`` messages."""

    def __init__(self, observer_queue):
        super().__init__()
        self.observer_queue = observer_queue

    def emit(self, record):
        task_id = logging_record_replication_task(record)
        if task_id is None:
            # Not associated with a replication task -- nothing to forward.
            return
        self.observer_queue.put(ReplicationTaskLog(task_id, self.format(record)))
class ZettareplProcess:
    """Callable run in a separate process that hosts the zettarepl engine.

    Receives commands over `command_queue` and reports observer messages /
    definition errors back over `observer_queue`.
    """

    def __init__(self, definition, debug_level, log_handler, command_queue, observer_queue, startup_error):
        self.definition = definition  # serialized zettarepl definition (dict)
        self.debug_level = debug_level
        self.log_handler = log_handler
        self.command_queue = command_queue  # middleware -> worker commands
        self.observer_queue = observer_queue  # worker -> middleware messages
        self.startup_error = startup_error  # shared c_bool flag set on startup failure
        self.zettarepl = None

        # Per-task contexts captured at snapshot start, consumed at end.
        self.vm_contexts = {}
        self.vmware_contexts = {}

    def __call__(self):
        try:
            setproctitle.setproctitle('middlewared (zettarepl)')

            die_with_parent()

            move_to_root_cgroups(os.getpid())

            # NOTE: logging.TRACE is a custom level, presumably added by
            # middlewared.logger -- not part of stdlib logging.
            if logging.getLevelName(self.debug_level) == logging.TRACE:
                # If we want TRACE then we want all debug from zettarepl
                default_level = logging.DEBUG
            elif logging.getLevelName(self.debug_level) == logging.DEBUG:
                # Regular development level. We don't need verbose debug from zettarepl
                default_level = logging.INFO
            else:
                default_level = logging.getLevelName(self.debug_level)
            setup_logging("", "DEBUG", self.log_handler)
            # Mirror per-task log records onto the observer queue.
            oqlh = ObserverQueueLoggingHandler(self.observer_queue)
            oqlh.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)-8s [%(threadName)s] [%(name)s] %(message)s',
                                                '%Y/%m/%d %H:%M:%S'))
            logging.getLogger("zettarepl").addHandler(oqlh)
            for handler in logging.getLogger("zettarepl").handlers:
                handler.addFilter(LongStringsFilter())
                handler.addFilter(ReplicationTaskLoggingLevelFilter(default_level))

            definition = Definition.from_data(self.definition, raise_on_error=False)
            self.observer_queue.put(DefinitionErrors(definition.errors))

            self.zettarepl = create_zettarepl(definition)
            self.zettarepl.set_observer(self._observer)
            self.zettarepl.set_tasks(definition.tasks)

            start_daemon_thread(target=self._process_command_queue)
        except Exception:
            logging.getLogger("zettarepl").error("Unhandled exception during zettarepl startup", exc_info=True)
            self.startup_error.value = True
            return

        # Keep the engine running forever; back off 10s after a crash.
        while True:
            try:
                self.zettarepl.run()
            except Exception:
                logging.getLogger("zettarepl").error("Unhandled exception", exc_info=True)
                time.sleep(10)

    def _observer(self, message):
        """Zettarepl observer callback: relay every message to middleware and
        run VM/VMware snapshot hooks around periodic snapshot tasks."""
        self.observer_queue.put(message)

        logger = logging.getLogger("middlewared.plugins.zettarepl")

        try:
            if isinstance(message, (PeriodicSnapshotTaskStart, PeriodicSnapshotTaskSuccess, PeriodicSnapshotTaskError)):
                # Task ids look like 'task_<N>'; extract the numeric id.
                task_id = int(message.task_id.split("_")[-1])

                if isinstance(message, PeriodicSnapshotTaskStart):
                    with Client() as c:
                        context = None
                        vm_context = None
                        if begin_context := c.call("vmware.periodic_snapshot_task_begin", task_id):
                            context = c.call("vmware.periodic_snapshot_task_proceed", begin_context, job=True)
                        if vm_context := c.call("vm.periodic_snapshot_task_begin", task_id):
                            c.call("vm.suspend_vms", list(vm_context))

                    self.vm_contexts[task_id] = vm_context
                    self.vmware_contexts[task_id] = context

                    if context and context["vmsynced"]:
                        # If there were no failures and we successfully took some VMWare snapshots
                        # set the ZFS property to show the snapshot has consistent VM snapshots
                        # inside it.
                        return message.response(properties={"freenas:vmsynced": "Y"})

                if isinstance(message, (PeriodicSnapshotTaskSuccess, PeriodicSnapshotTaskError)):
                    context = self.vmware_contexts.pop(task_id, None)
                    vm_context = self.vm_contexts.pop(task_id, None)
                    if context or vm_context:
                        with Client() as c:
                            if context:
                                c.call("vmware.periodic_snapshot_task_end", context, job=True)
                            if vm_context:
                                c.call("vm.resume_suspended_vms", list(vm_context))
        except ClientException as e:
            if e.error:
                logger.error("Unhandled exception in ZettareplProcess._observer: %r", e.error)
            if e.trace:
                logger.error("Unhandled exception in ZettareplProcess._observer:\n%s", e.trace["formatted"])
        except Exception:
            logger.error("Unhandled exception in ZettareplProcess._observer", exc_info=True)

    def _process_command_queue(self):
        """Daemon-thread loop applying 'config' / 'tasks' / 'run_task'
        commands sent by the middleware side."""
        logger = logging.getLogger("middlewared.plugins.zettarepl")

        while self.zettarepl is not None:
            command, args = self.command_queue.get()
            if command == "config":
                if "max_parallel_replication_tasks" in args:
                    self.zettarepl.max_parallel_replication_tasks = args["max_parallel_replication_tasks"]
                if "timezone" in args:
                    self.zettarepl.scheduler.tz_clock.timezone = pytz.timezone(args["timezone"])
            if command == "tasks":
                definition = Definition.from_data(args, raise_on_error=False)
                self.observer_queue.put(DefinitionErrors(definition.errors))

                self.zettarepl.set_tasks(definition.tasks)
            if command == "run_task":
                class_name, task_id = args
                for task in self.zettarepl.tasks:
                    if task.__class__.__name__ == class_name and task.id == task_id:
                        logger.debug("Running task %r", task)
                        self.zettarepl.scheduler.interrupt([task])
                        break
                else:
                    # Report failure so a waiting job does not hang forever.
                    logger.warning("Task %s(%r) not found", class_name, task_id)
                    self.observer_queue.put(ReplicationTaskError(task_id, "Task not found"))
class ZettareplService(Service):
    """Middleware-side supervisor for the zettarepl worker process: starts and
    stops it, feeds it configuration and task updates over a command queue and
    relays its observer messages back into middleware state."""

    class Config:
        private = True
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Serializes start/stop/process-replacement decisions.
        self.lock = threading.Lock()
        self.command_queue = None
        # Messages flowing back from the worker process.
        self.observer_queue = multiprocessing.Queue()
        self.observer_queue_reader = None
        # task id -> list of per-job queues waiting on that task's events.
        self.replication_jobs_channels = defaultdict(list)
        # job id -> ad-hoc replication task definition (while the job runs).
        self.onetime_replication_tasks = {}
        # Command queue to the worker (created on start()).
        self.queue = None
        self.process = None
        self.zettarepl = None
def is_running(self):
return self.process is not None and self.process.is_alive()
    def start(self):
        """Generate the current definition and (if needed) spawn the worker
        process, then publish the definition to interested parties.

        Raises CallError if the definition cannot be generated.
        """
        try:
            definition, hold_tasks = self.middleware.call_sync("zettarepl.get_definition")
        except Exception as e:
            self.logger.error("Error generating zettarepl definition", exc_info=True)
            self.middleware.call_sync("zettarepl.set_error", {
                "state": "ERROR",
                "datetime": utc_now(),
                "error": make_sentence(str(e)),
            })
            raise CallError(f"Internal error: {e!r}")
        else:
            self.middleware.call_sync("zettarepl.set_error", None)

        with self.lock:
            if not self.is_running():
                self.queue = multiprocessing.Queue()
                # Shared flag so _join can tell a startup failure from a crash.
                startup_error = multiprocessing.Value(c_bool, False)
                zettarepl_process = ZettareplProcess(
                    definition,
                    self.middleware.debug_level,
                    self.middleware.log_handler,
                    self.queue,
                    self.observer_queue,
                    startup_error,
                )
                self.process = multiprocessing.Process(name="zettarepl", target=zettarepl_process)
                self.process.start()
                # Watch for abnormal termination and restart if needed.
                start_daemon_thread(target=self._join, args=(self.process, startup_error))

            if self.observer_queue_reader is None:
                self.observer_queue_reader = start_daemon_thread(target=self._observer_queue_reader)

            self.middleware.call_sync("zettarepl.notify_definition", definition, hold_tasks)
    def stop(self):
        """Terminate the worker process, escalating to SIGKILL if it does not
        exit within five seconds."""
        with self.lock:
            if self.process:
                self.process.terminate()
                event = threading.Event()

                # Reap the child in a side thread so we can bound the wait.
                def target():
                    try:
                        os.waitpid(self.process.pid, 0)
                    except ChildProcessError:
                        pass
                    event.set()

                start_daemon_thread(target=target)
                if not event.wait(5):
                    self.logger.warning("Zettarepl was not joined in time, sending SIGKILL")
                    os.kill(self.process.pid, signal.SIGKILL)

            self.process = None
    def _join(self, process, startup_error):
        """Daemon thread: wait for the worker to exit; on abnormal termination
        fail all in-flight replication jobs and restart the service.

        No restart happens when startup failed (the worker flagged it) or when
        the process was replaced/cleared under the lock (deliberate stop).
        """
        process.join()

        if startup_error.value:
            return

        restart = False
        with self.lock:
            if process == self.process:
                # Still the current process => this exit was not requested.
                restart = True

        if restart:
            self.logger.error("Abnormal zettarepl process termination with code %r, restarting", process.exitcode)

            for k, v in self.middleware.call_sync("zettarepl.get_state").get("tasks", {}).items():
                if k.startswith("replication_") and v.get("state") in ("WAITING", "RUNNING"):
                    error = f"Abnormal zettarepl process termination with code {process.exitcode}."
                    self.middleware.call_sync("zettarepl.set_state", k, {
                        "state": "ERROR",
                        "datetime": utc_now(),
                        "error": error,
                    })

                    # Wake up any jobs waiting on this task.
                    task_id = k[len("replication_"):]
                    for channel in self.replication_jobs_channels[task_id]:
                        channel.put(ReplicationTaskError(task_id, error))

            self.middleware.call_sync("zettarepl.start")
def update_config(self, config):
if self.queue:
self.queue.put(("config", config))
    def update_tasks(self):
        """Regenerate the definition and push it to the worker; stop the
        worker entirely when the definition is empty. Errors are recorded in
        zettarepl state instead of being raised."""
        try:
            definition, hold_tasks = self.middleware.call_sync("zettarepl.get_definition")
        except Exception as e:
            self.logger.error("Error generating zettarepl definition", exc_info=True)
            self.middleware.call_sync("zettarepl.set_error", {
                "state": "ERROR",
                "datetime": utc_now(),
                "error": make_sentence(str(e)),
            })
            return
        else:
            self.middleware.call_sync("zettarepl.set_error", None)

        if self._is_empty_definition(definition):
            self.middleware.call_sync("zettarepl.stop")
        else:
            # Ensure the worker exists before handing it the new task set.
            self.middleware.call_sync("zettarepl.start")
            self.queue.put(("tasks", definition))

        self.middleware.call_sync("zettarepl.notify_definition", definition, hold_tasks)
async def run_periodic_snapshot_task(self, id_):
    """Ask the zettarepl worker to run periodic snapshot task `id_` immediately."""
    message = ("run_task", ("PeriodicSnapshotTask", f"task_{id_}"))
    try:
        self.queue.put(message)
    except Exception:
        # `self.queue` is None (or unusable) when the worker is not running.
        raise CallError("Replication service is not running")
def run_replication_task(self, id_, really_run, job):
    """Kick off replication task `id_` (when `really_run`) and attach `job` to it."""
    task_name = f"task_{id_}"
    if really_run:
        try:
            self.queue.put(("run_task", ("ReplicationTask", task_name)))
        except Exception:
            # `self.queue` is None (or unusable) when the worker is not running.
            raise CallError("Replication service is not running")
    # Always attach the job so it streams the task's progress/logs.
    self._run_replication_task_job(task_name, job)
def run_onetime_replication_task(self, job, task):
    """Run an ad-hoc (not persisted) replication task `task` under `job`.

    The task is temporarily registered in `onetime_replication_tasks` so that
    `get_definition` includes it, then removed and the definition regenerated
    once the run completes (in the `finally` block, even on failure).
    """
    self.onetime_replication_tasks[job.id] = task
    try:
        self.update_tasks()

        state = self.middleware.call_sync("zettarepl.get_state")
        if "error" in state:
            raise CallError(state["error"])

        task_state = state["tasks"].get(f"job_{job.id}")
        if task_state:
            if task_state["state"] == "ERROR":
                raise CallError(task_state["error"])
            if task_state["state"] == "HOLD":
                raise CallError(task_state["reason"])
            if task_state["state"] != "WAITING":
                # Unexpected state; surface it verbatim for debugging.
                raise CallError(task_state)

        self.queue.put(("run_task", ("ReplicationTask", f"job_{job.id}")))
        self._run_replication_task_job(f"job_{job.id}", job)
    finally:
        self.onetime_replication_tasks.pop(job.id)
        self.update_tasks()
def _run_replication_task_job(self, id_, job):
    """Attach `job` to replication task `id_` and block until it finishes.

    Consumes observer messages from a per-job channel, forwarding logs and
    progress into `job`. Returns on ReplicationTaskSuccess; raises CallError
    on ReplicationTaskError. The channel is always detached on exit.
    """
    channels = self.replication_jobs_channels[id_]
    channel = queue.Queue()
    channels.append(channel)

    # Most recent progress messages, combined into a single job progress line.
    snapshot_start_message = None
    snapshot_progress_message = None
    data_progress_message = None

    try:
        while True:
            message = channel.get()

            if isinstance(message, ReplicationTaskLog):
                job.logs_fd.write(message.log.encode("utf8", "ignore") + b"\n")

            if isinstance(message, ReplicationTaskSnapshotStart):
                snapshot_start_message = message
                # Byte progress of the previous snapshot no longer applies.
                snapshot_progress_message = None
                self._set_replication_task_progress(job, snapshot_start_message, snapshot_progress_message,
                                                    data_progress_message)

            if isinstance(message, ReplicationTaskSnapshotProgress):
                snapshot_progress_message = message
                self._set_replication_task_progress(job, snapshot_start_message, snapshot_progress_message,
                                                    data_progress_message)

            if isinstance(message, ReplicationTaskDataProgress):
                data_progress_message = message
                self._set_replication_task_progress(job, snapshot_start_message, snapshot_progress_message,
                                                    data_progress_message)

            if isinstance(message, ReplicationTaskSuccess):
                return

            if isinstance(message, ReplicationTaskError):
                raise CallError(make_sentence(message.error))
    finally:
        channels.remove(channel)
def _set_replication_task_progress(self, job, snapshot_start_message, snapshot_progress_message,
data_progress_message):
if snapshot_start_message is None:
return
if snapshot_progress_message is None:
message = snapshot_start_message
progress = 100 * (message.snapshots_sent / message.snapshots_total)
text = (
f"Sending {message.snapshots_sent + 1} of {message.snapshots_total}: "
f"{message.dataset}@{message.snapshot}"
)
else:
message = snapshot_progress_message
progress = 100 * (
(message.snapshots_sent + message.bytes_sent / (message.bytes_total or float("inf"))) /
message.snapshots_total
)
text = (
f"Sending {message.snapshots_sent + 1} of {message.snapshots_total}: "
f"{message.dataset}@{message.snapshot} ({format_size(message.bytes_sent)} / "
f"{format_size(message.bytes_total)})"
)
if data_progress_message is not None:
# Destination can result being larger than source
# Do this to avoid displaying progress like "[total 11.11 TiB out of 11.04 TiB]"
total = max(data_progress_message.dst_size, data_progress_message.src_size)
text += (
f" [total {format_size(data_progress_message.dst_size)} of "
f"{format_size(total)}]"
)
job.set_progress(progress, text)
async def list_datasets(self, transport, ssh_credentials=None):
    """List datasets reachable via `transport`, excluding internal ones.

    NOTE: the `list_datasets` passed to `run_in_thread` below is the
    module-level zettarepl helper, not this method. Datasets matching any
    INVALID_DATASETS regex are filtered out of the result.
    """
    async with self._handle_ssh_exceptions():
        async with self._get_zettarepl_shell(transport, ssh_credentials) as shell:
            datasets = await self.middleware.run_in_thread(list_datasets, shell)

    return [
        ds
        for ds in datasets
        if not any(r.match(ds) for r in INVALID_DATASETS)
    ]
async def create_dataset(self, dataset, transport, ssh_credentials=None):
    """Create `dataset` on the system reachable via `transport`.

    SSH failures are translated into CallError by `_handle_ssh_exceptions`.
    NOTE: `create_dataset` passed to `run_in_thread` is the module-level
    zettarepl helper, not this method.
    """
    async with self._handle_ssh_exceptions():
        async with self._get_zettarepl_shell(transport, ssh_credentials) as shell:
            return await self.middleware.run_in_thread(create_dataset, shell, dataset)
async def count_eligible_manual_snapshots(self, data):
    """Count snapshots on `data['datasets']` that match the given naming rules.

    Exactly one of `naming_schema` or `name_regex` must be provided.
    Returns {"total": <all snapshots found>, "eligible": <matching ones>}.
    """
    if data["naming_schema"] and data["name_regex"]:
        raise CallError("`naming_schema` and `name_regex` cannot be used simultaneously", errno.EINVAL)

    async with self._handle_ssh_exceptions():
        async with self._get_zettarepl_shell(data["transport"], data["ssh_credentials"]) as shell:
            snapshots = await self.middleware.run_in_thread(
                multilist_snapshots, shell, [(dataset, False) for dataset in data["datasets"]]
            )

    if data["naming_schema"]:
        parsed = parse_snapshots_names_with_multiple_schemas([s.name for s in snapshots], data["naming_schema"])
    elif data["name_regex"]:
        try:
            name_pattern = compile_name_regex(data["name_regex"])
        except Exception as e:
            raise CallError(f"Invalid `name_regex`: {e}")

        parsed = [s.name for s in snapshots if name_pattern.match(s.name)]
    else:
        raise CallError("Either `naming_schema` or `name_regex` must be specified", errno.EINVAL)

    return {
        "total": len(snapshots),
        "eligible": len(parsed),
    }
async def get_source_target_datasets_mapping(self, source_datasets, target_dataset):
    """Map each source dataset name to its corresponding target dataset name.

    Builds a minimal stand-in for a replication task object, since
    `get_target_dataset` only reads `source_datasets` and `target_dataset`.
    """
    fake_task = types.SimpleNamespace(
        source_datasets=source_datasets,
        target_dataset=target_dataset,
    )
    mapping = {}
    for source in source_datasets:
        mapping[source] = get_target_dataset(fake_task, source)
    return mapping
async def target_unmatched_snapshots(self, direction, source_datasets, target_dataset, transport, ssh_credentials):
    """Find snapshots that exist on target datasets but not on their sources.

    Returns a dict mapping target dataset name -> list of unmatched snapshot
    names; an empty dict means the targets contain no foreign snapshots.
    Any failure while talking to the shells is re-raised as CallError.
    """
    datasets = await self.get_source_target_datasets_mapping(source_datasets, target_dataset)

    try:
        local_shell = LocalShell()
        async with self._get_zettarepl_shell(transport, ssh_credentials) as remote_shell:
            if direction == "PUSH":
                source_shell = local_shell
                target_shell = remote_shell
            else:
                source_shell = remote_shell
                target_shell = local_shell

            # Only compare datasets that already exist on the target side.
            target_datasets = set(await self.middleware.run_in_thread(list_datasets, target_shell))
            datasets = {source_dataset: target_dataset
                        for source_dataset, target_dataset in datasets.items()
                        if target_dataset in target_datasets}

            source_snapshots = group_snapshots_by_datasets(await self.middleware.run_in_thread(
                multilist_snapshots, source_shell, [(dataset, False) for dataset in datasets.keys()]
            ))
            target_snapshots = group_snapshots_by_datasets(await self.middleware.run_in_thread(
                multilist_snapshots, target_shell, [(dataset, False) for dataset in datasets.values()]
            ))
    except Exception as e:
        raise CallError(repr(e))

    errors = {}
    for source_dataset, target_dataset in datasets.items():
        unmatched_snapshots = list(set(target_snapshots.get(target_dataset, [])) -
                                   set(source_snapshots.get(source_dataset, [])))
        if unmatched_snapshots:
            errors[target_dataset] = unmatched_snapshots

    return errors
async def datasets_have_encryption(self, datasets, recursive, transport, ssh_credentials=None):
    """Return the subset of `datasets` that have ZFS encryption enabled.

    Child datasets of an already-reported encrypted ancestor are omitted.
    Returns [] if the remote ZFS does not support the `encryption` property.
    """
    async with self._handle_ssh_exceptions():
        async with self._get_zettarepl_shell(transport, ssh_credentials) as shell:
            try:
                properties_result = await self.middleware.run_in_thread(
                    get_properties_recursive, shell, datasets, {"encryption": str}, recursive=recursive,
                )
            except ExecException as e:
                # Older ZFS without encryption support fails the property query.
                self.middleware.logger.debug("Encryption not supported on shell %r: %r (exit code = %d)",
                                             shell, e.stdout.split("\n")[0], e.returncode)
                return []

    result = []
    for dataset, properties in properties_result.items():
        if properties["encryption"] != "off":
            # Skip children whose encrypted ancestor is already reported.
            if any(dataset.startswith(f"{parent}/") for parent in result):
                continue

            result.append(dataset)

    return result
async def get_definition(self):
    """Build the complete zettarepl definition from database configuration.

    Returns a tuple (definition, hold_tasks) where `hold_tasks` maps task ids
    to HOLD state dicts for tasks that cannot run right now (missing/offline
    pool, disabled network activity, unusable transport, ...).
    """
    config = await self.middleware.call("replication.config.config")

    timezone = (await self.middleware.call("system.general.config"))["timezone"]

    pools = {pool["name"]: pool for pool in await self.middleware.call("pool.query")}

    hold_tasks = {}

    periodic_snapshot_tasks = {}
    for periodic_snapshot_task in await self.middleware.call("pool.snapshottask.query", [["enabled", "=", True]]):
        hold_task_reason = self._hold_task_reason(pools, periodic_snapshot_task["dataset"])
        if hold_task_reason:
            hold_tasks[f"periodic_snapshot_task_{periodic_snapshot_task['id']}"] = hold_task_reason
            continue

        periodic_snapshot_tasks[f"task_{periodic_snapshot_task['id']}"] = self.periodic_snapshot_task_definition(
            periodic_snapshot_task,
        )

    replication_tasks = {}
    for replication_task in await self.middleware.call("replication.query", [["enabled", "=", True]]):
        try:
            replication_tasks[f"task_{replication_task['id']}"] = await self._replication_task_definition(
                pools, replication_task
            )
        except HoldReplicationTaskException as e:
            hold_tasks[f"replication_task_{replication_task['id']}"] = e.reason

    # One-shot tasks registered by `run_onetime_replication_task`.
    for job_id, replication_task in self.onetime_replication_tasks.items():
        try:
            replication_tasks[f"job_{job_id}"] = await self._replication_task_definition(pools, replication_task)
        except HoldReplicationTaskException as e:
            hold_tasks[f"job_{job_id}"] = e.reason

    definition = {
        "max-parallel-replication-tasks": config["max_parallel_replication_tasks"],
        "timezone": timezone,
        "use-removal-dates": True,
        "periodic-snapshot-tasks": periodic_snapshot_tasks,
        "replication-tasks": replication_tasks,
    }

    # Test if does not cause exceptions
    Definition.from_data(definition, raise_on_error=False)

    hold_tasks = {
        task_id: {
            "state": "HOLD",
            "datetime": utc_now(),
            "reason": make_sentence(reason),
        }
        for task_id, reason in hold_tasks.items()
    }

    return definition, hold_tasks
def periodic_snapshot_task_definition(self, periodic_snapshot_task):
    """Convert a periodic snapshot task row into its zettarepl definition dict."""
    task = periodic_snapshot_task
    definition = {}
    definition["dataset"] = task["dataset"]
    definition["recursive"] = task["recursive"]
    definition["exclude"] = task["exclude"]
    # Lifetime is stored as (value, unit) and serialized to ISO 8601 duration.
    definition["lifetime"] = lifetime_iso8601(task["lifetime_value"], task["lifetime_unit"])
    definition["naming-schema"] = task["naming_schema"]
    definition["schedule"] = zettarepl_schedule(task["schedule"])
    definition["allow-empty"] = task["allow_empty"]
    return definition
async def _replication_task_definition(self, pools, replication_task):
    """Convert a replication task row into its zettarepl definition dict.

    Raises:
        HoldReplicationTaskException: when the task cannot run right now
            (source/target pool missing or offline, replication network
            activity disabled, or the transport cannot be defined).
    """
    # For PUSH the local pools hosting the sources must be usable;
    # for PULL the local pool hosting the target must be usable.
    if replication_task["direction"] == "PUSH":
        for source_dataset in replication_task["source_datasets"]:
            hold_task_reason = self._hold_task_reason(pools, source_dataset)
            if hold_task_reason:
                raise HoldReplicationTaskException(hold_task_reason)

    if replication_task["direction"] == "PULL":
        hold_task_reason = self._hold_task_reason(pools, replication_task["target_dataset"])
        if hold_task_reason:
            raise HoldReplicationTaskException(hold_task_reason)

    if replication_task["transport"] != "LOCAL":
        if not await self.middleware.call("network.general.can_perform_activity", "replication"):
            raise HoldReplicationTaskException("Replication network activity is disabled")

    try:
        transport = await self._define_transport(
            replication_task["transport"],
            (replication_task["ssh_credentials"] or {}).get("id"),
            replication_task["netcat_active_side"],
            replication_task["netcat_active_side_listen_address"],
            replication_task["netcat_active_side_port_min"],
            replication_task["netcat_active_side_port_max"],
            replication_task["netcat_passive_side_connect_address"],
            replication_task["sudo"],
        )
    except CallError as e:
        raise HoldReplicationTaskException(e.errmsg)

    # Never replicate mountpoint/sharing properties unless explicitly
    # overridden; `mountpoint` exclusion can be opted out of per task.
    properties_exclude = replication_task["properties_exclude"].copy()
    properties_override = replication_task["properties_override"].copy()
    for property_ in ["mountpoint", "sharenfs", "sharesmb"]:
        if property_ == "mountpoint" and not replication_task.get("exclude_mountpoint_property", True):
            continue

        if property_ not in properties_override:
            if property_ not in properties_exclude:
                properties_exclude.append(property_)

    definition = {
        "direction": replication_task["direction"].lower(),
        "transport": transport,
        "source-dataset": replication_task["source_datasets"],
        "target-dataset": replication_task["target_dataset"],
        "recursive": replication_task["recursive"],
        "exclude": replication_task_exclude(replication_task),
        "properties": replication_task["properties"],
        "properties-exclude": properties_exclude,
        "properties-override": properties_override,
        "replicate": replication_task["replicate"],
        "periodic-snapshot-tasks": [
            f"task_{periodic_snapshot_task['id']}"
            for periodic_snapshot_task in replication_task["periodic_snapshot_tasks"]
        ],
        "auto": replication_task["auto"],
        "only-matching-schedule": replication_task["only_matching_schedule"],
        "allow-from-scratch": replication_task["allow_from_scratch"],
        "only-from-scratch": replication_task.get("only_from_scratch", False),
        "readonly": replication_task["readonly"].lower(),
        "hold-pending-snapshots": replication_task["hold_pending_snapshots"],
        "retention-policy": replication_task["retention_policy"].lower(),
        "large-block": replication_task["large_block"],
        "embed": replication_task["embed"],
        "compressed": replication_task["compressed"],
        "retries": replication_task["retries"],
        "logging-level": (replication_task["logging_level"] or "NOTSET").lower(),
    }

    # Optional keys are only emitted when configured.
    if replication_task["encryption"]:
        if replication_task["encryption_inherit"]:
            definition["encryption"] = "inherit"
        else:
            definition["encryption"] = {
                "key": replication_task["encryption_key"],
                "key-format": replication_task["encryption_key_format"].lower(),
                "key-location": replication_task["encryption_key_location"],
            }
    if replication_task["naming_schema"]:
        definition["naming-schema"] = replication_task["naming_schema"]
    if replication_task["also_include_naming_schema"]:
        definition["also-include-naming-schema"] = replication_task["also_include_naming_schema"]
    if replication_task["name_regex"]:
        definition["name-regex"] = replication_task["name_regex"]
    if replication_task["schedule"] is not None:
        definition["schedule"] = zettarepl_schedule(replication_task["schedule"])
    if replication_task["restrict_schedule"] is not None:
        definition["restrict-schedule"] = zettarepl_schedule(replication_task["restrict_schedule"])
    if replication_task["lifetime_value"] is not None and replication_task["lifetime_unit"] is not None:
        definition["lifetime"] = lifetime_iso8601(replication_task["lifetime_value"],
                                                  replication_task["lifetime_unit"])
    if replication_task["lifetimes"]:
        definition["lifetimes"] = {
            f"lifetime_{i}": {
                "schedule": zettarepl_schedule(lifetime["schedule"]),
                "lifetime": lifetime_iso8601(lifetime["lifetime_value"], lifetime["lifetime_unit"]),
            }
            for i, lifetime in enumerate(replication_task["lifetimes"])
        }
    if replication_task["compression"] is not None:
        definition["compression"] = replication_task["compression"].lower()
    if replication_task["speed_limit"] is not None:
        definition["speed-limit"] = replication_task["speed_limit"]

    return definition
def _hold_task_reason(self, pools, dataset):
pool = dataset.split("/")[0]
if pool not in pools:
return f"Pool {pool} does not exist"
if pools[pool]["status"] == "OFFLINE":
return f"Pool {pool} is offline"
@asynccontextmanager
async def _handle_ssh_exceptions(self):
    """Translate SSH/paramiko failures into user-facing CallError(EACCES).

    Wrap any code that talks to a remote shell in this context manager so
    users get actionable error messages instead of raw tracebacks.
    """
    try:
        yield
    except paramiko.ssh_exception.BadHostKeyException as e:
        # Zero-pad each digest byte: `hex(c)[2:]` would render e.g. 0x0a as
        # "a" instead of "0a", producing a malformed fingerprint string.
        fingerprint = ":".join([f"{c:02x}" for c in e.key.get_fingerprint()])
        raise CallError(
            "Remote host identification has changed. Someone could be eavesdropping on you right now (man-in-the-"
            "middle attack)! It is also possible that a host key has just been changed. The fingerprint for the "
            f"RSA key sent by the remote host is {fingerprint}. Please edit your SSH connection and click "
            "\"Discover Remote Host Key\" to resolve this issue.",
            errno=errno.EACCES,
        )
    except (socket.timeout, paramiko.ssh_exception.NoValidConnectionsError, paramiko.ssh_exception.SSHException,
            IOError, OSError) as e:
        # paramiko often embeds a useless "[Errno None] " prefix; strip it.
        raise CallError(repr(e).replace("[Errno None] ", ""), errno=errno.EACCES)
@asynccontextmanager
async def _get_zettarepl_shell(self, transport, ssh_credentials):
    """Yield a connected zettarepl shell for `transport`, closing it on exit."""
    if transport != "LOCAL":
        # Raises if replication network activity is administratively disabled.
        await self.middleware.call("network.general.will_perform_activity", "replication")

    if transport == "SSH+NETCAT":
        # There is no difference shell-wise, but `_define_transport` for `SSH+NETCAT` will fail if we don't
        # supply `netcat_active_side` and other parameters which are totally unrelated here.
        transport = "SSH"

    transport_definition = await self._define_transport(transport, ssh_credentials)
    transport = create_transport(transport_definition)
    # `transport.shell` is the shell class for this transport type; it is
    # instantiated with the transport itself.
    shell = transport.shell(transport)
    try:
        yield shell
    finally:
        # shell.close may block; run it off the event loop.
        await self.middleware.run_in_thread(shell.close)
async def _define_transport(self, transport, ssh_credentials=None, netcat_active_side=None,
                            netcat_active_side_listen_address=None, netcat_active_side_port_min=None,
                            netcat_active_side_port_max=None, netcat_passive_side_connect_address=None,
                            sudo=False):
    """Build the zettarepl transport definition dict for `transport`.

    `ssh_credentials` is a keychain credential id; it is resolved and
    validated here. The netcat_* parameters only apply to SSH+NETCAT.

    Raises:
        CallError: if SSH credentials are required but missing or invalid.
    """
    if transport in ["SSH", "SSH+NETCAT"]:
        if ssh_credentials is None:
            raise CallError(f"You should pass SSH credentials for {transport} transport")

        ssh_credentials = await self.middleware.call("keychaincredential.get_of_type", ssh_credentials,
                                                     "SSH_CREDENTIALS")

        transport_definition = dict(type="ssh", **await self._define_ssh_transport(ssh_credentials), sudo=sudo)

        if transport == "SSH+NETCAT":
            transport_definition["type"] = "ssh+netcat"
            transport_definition["active-side"] = netcat_active_side.lower()
            # Optional netcat tuning keys are only emitted when provided.
            if netcat_active_side_listen_address is not None:
                transport_definition["active-side-listen-address"] = netcat_active_side_listen_address
            if netcat_active_side_port_min is not None:
                transport_definition["active-side-min-port"] = netcat_active_side_port_min
            if netcat_active_side_port_max is not None:
                transport_definition["active-side-max-port"] = netcat_active_side_port_max
            if netcat_passive_side_connect_address is not None:
                transport_definition["passive-side-connect-address"] = netcat_passive_side_connect_address
    else:
        transport_definition = dict(type="local")

    return transport_definition
async def _define_ssh_transport(self, credentials):
    """Translate resolved keychain SSH credentials into zettarepl ssh transport keys.

    Raises:
        CallError: if the referenced SSH key pair cannot be retrieved.
    """
    try:
        key_pair = await self.middleware.call("keychaincredential.get_of_type",
                                              credentials["attributes"]["private_key"], "SSH_KEY_PAIR")
    except CallError as e:
        raise CallError(f"Error while querying SSH key pair for credentials {credentials['id']}: {e!s}")

    transport = {
        "hostname": credentials["attributes"]["host"],
        "port": credentials["attributes"]["port"],
        "username": credentials["attributes"]["username"],
        "private-key": key_pair["attributes"]["private_key"],
        "host-key": credentials["attributes"]["remote_host_key"],
        "connect-timeout": credentials["attributes"]["connect_timeout"],
    }
    if (await self.middleware.call("system.security.config"))["enable_fips"]:
        # Restrict SSH ciphers to the FIPS-approved set when FIPS mode is on.
        transport["cipher"] = "fips"
    return transport
def _is_empty_definition(self, definition):
return not definition["periodic-snapshot-tasks"] and not definition["replication-tasks"]
def _observer_queue_reader(self):
    """Dispatch observer messages from the zettarepl worker forever.

    Runs in a dedicated thread: translates worker events into middleware
    task state (`zettarepl.set_state` etc.) and fans messages out to any
    job channels attached via `_run_replication_task_job`. A failure while
    handling one message is logged and the loop continues.
    """
    while True:
        message = self.observer_queue.get()

        try:
            self.logger.trace("Observer queue got %r", message)

            # Global events

            if isinstance(message, DefinitionErrors):
                definition_errors = {}
                for error in message.errors:
                    if isinstance(error, PeriodicSnapshotTaskDefinitionError):
                        definition_errors[f"periodic_snapshot_{error.task_id}"] = {
                            "state": "ERROR",
                            "datetime": utc_now(),
                            "error": make_sentence(str(error)),
                        }

                    if isinstance(error, ReplicationTaskDefinitionError):
                        definition_errors[f"replication_{error.task_id}"] = {
                            "state": "ERROR",
                            "datetime": utc_now(),
                            "error": make_sentence(str(error)),
                        }
                self.middleware.call_sync("zettarepl.set_definition_errors", definition_errors)

            # Periodic snapshot task

            if isinstance(message, PeriodicSnapshotTaskStart):
                self.middleware.call_sync("zettarepl.set_state", f"periodic_snapshot_{message.task_id}", {
                    "state": "RUNNING",
                    "datetime": utc_now(),
                })
            if isinstance(message, PeriodicSnapshotTaskSuccess):
                self.middleware.call_sync("zettarepl.set_state", f"periodic_snapshot_{message.task_id}", {
                    "state": "FINISHED",
                    "datetime": utc_now(),
                })
            if isinstance(message, PeriodicSnapshotTaskError):
                self.middleware.call_sync("zettarepl.set_state", f"periodic_snapshot_{message.task_id}", {
                    "state": "ERROR",
                    "datetime": utc_now(),
                    "error": make_sentence(message.error),
                })

            # Replication task events

            if isinstance(message, ReplicationTaskScheduled):
                # Do not demote an already RUNNING task back to WAITING.
                if (
                    (self.middleware.call_sync(
                        "zettarepl.get_state_internal", f"replication_{message.task_id}"
                    ) or {}).get("state") != "RUNNING"
                ):
                    self.middleware.call_sync("zettarepl.set_state", f"replication_{message.task_id}", {
                        "state": "WAITING",
                        "datetime": utc_now(),
                        "reason": message.waiting_reason,
                    })
            if isinstance(message, ReplicationTaskStart):
                self.middleware.call_sync("zettarepl.set_state", f"replication_{message.task_id}", {
                    "state": "RUNNING",
                    "datetime": utc_now(),
                })
                # Start fake job if none are already running
                if not self.replication_jobs_channels[message.task_id]:
                    # task_id has the form "task_<id>"; strip the prefix.
                    self.middleware.call_sync("replication.run", int(message.task_id[5:]), False)
            if isinstance(message, ReplicationTaskLog):
                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
            if isinstance(message, ReplicationTaskSnapshotStart):
                self.middleware.call_sync("zettarepl.set_state", f"replication_{message.task_id}", {
                    "state": "RUNNING",
                    "datetime": utc_now(),
                    "progress": {
                        "dataset": message.dataset,
                        "snapshot": message.snapshot,
                        "snapshots_sent": message.snapshots_sent,
                        "snapshots_total": message.snapshots_total,
                        "bytes_sent": 0,
                        "bytes_total": 0,
                        # legacy
                        "current": 0,
                        "total": 0,
                    }
                })
                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
            if isinstance(message, ReplicationTaskSnapshotProgress):
                self.middleware.call_sync("zettarepl.set_state", f"replication_{message.task_id}", {
                    "state": "RUNNING",
                    "datetime": utc_now(),
                    "progress": {
                        "dataset": message.dataset,
                        "snapshot": message.snapshot,
                        "snapshots_sent": message.snapshots_sent,
                        "snapshots_total": message.snapshots_total,
                        "bytes_sent": message.bytes_sent,
                        "bytes_total": message.bytes_total,
                        # legacy
                        "current": message.bytes_sent,
                        "total": message.bytes_total,
                    }
                })
                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
            if isinstance(message, ReplicationTaskSnapshotSuccess):
                self.middleware.call_sync("zettarepl.set_last_snapshot", f"replication_{message.task_id}",
                                          f"{message.dataset}@{message.snapshot}")
                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
            if isinstance(message, ReplicationTaskDataProgress):
                task_id = f"replication_{message.task_id}"
                try:
                    state = self.middleware.call_sync("zettarepl.get_internal_task_state", task_id)
                except KeyError:
                    # Task state may not exist yet; ignore this data point.
                    pass
                else:
                    if state["state"] == "RUNNING" and "progress" in state:
                        state["progress"].update({
                            "root_dataset": message.dataset,
                            "src_size": message.src_size,
                            "dst_size": message.dst_size,
                        })
                        self.middleware.call_sync("zettarepl.set_state", task_id, state)

                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
            if isinstance(message, ReplicationTaskSuccess):
                self.middleware.call_sync("zettarepl.set_state", f"replication_{message.task_id}", {
                    "state": "FINISHED",
                    "datetime": utc_now(),
                    "warnings": message.warnings,
                })
                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
            if isinstance(message, ReplicationTaskError):
                self.middleware.call_sync("zettarepl.set_state", f"replication_{message.task_id}", {
                    "state": "ERROR",
                    "datetime": utc_now(),
                    "error": make_sentence(message.error),
                })
                for channel in self.replication_jobs_channels[message.task_id]:
                    channel.put(message)
        except Exception:
            self.logger.warning("Unhandled exception in observer_queue_reader", exc_info=True)
async def terminate(self):
    """Service shutdown hook: persist task state, then stop the worker process."""
    await self.middleware.call("zettarepl.flush_state")
    # `stop` joins/kills the child process and may block; run off the loop.
    await self.middleware.run_in_thread(self.stop)
async def pool_configuration_change(middleware, *args, **kwargs):
    """Hook callback: regenerate zettarepl tasks after any pool state change.

    Extra hook arguments are accepted and ignored.
    """
    await middleware.call("zettarepl.update_tasks")
async def setup(middleware):
    """Load persisted zettarepl state, start the worker, and register pool hooks."""
    await middleware.call("zettarepl.load_state")

    try:
        await middleware.call("zettarepl.start")
    except Exception:
        # Startup failure must not prevent hook registration below.
        middleware.logger.error("Unhandled exception during zettarepl startup", exc_info=True)

    # Any pool topology/lock change may invalidate the current task definition.
    for hook_name in (
        "pool.post_import",
        "pool.post_export",
        "pool.post_lock",
        "pool.post_unlock",
        "pool.post_create_or_update",
    ):
        middleware.register_hook(hook_name, pool_configuration_change, sync=True)
| 48,569 | Python | .py | 889 | 39.813273 | 120 | 0.587476 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,472 | kerberos.py | truenas_middleware/src/middlewared/middlewared/plugins/kerberos.py | import asyncio
import base64
import errno
import gssapi
import os
import subprocess
import tempfile
import time
from middlewared.schema import accepts, Dict, Int, List, Patch, Str, OROperator, Password, Ref, Bool
from middlewared.service import CallError, ConfigService, CRUDService, job, periodic, private, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils import run
from middlewared.utils.directoryservices.constants import DSType
from middlewared.utils.directoryservices.krb5_constants import (
KRB_Keytab,
krb5ccache,
KRB_AppDefaults,
KRB_LibDefaults,
KRB_ETYPE,
KRB_TKT_CHECK_INTERVAL,
PERSISTENT_KEYRING_PREFIX,
)
from middlewared.utils.directoryservices.krb5 import (
gss_get_current_cred,
gss_acquire_cred_principal,
gss_acquire_cred_user,
gss_dump_cred,
extract_from_keytab,
keytab_services,
klist_impl,
ktutil_list_impl
)
from middlewared.utils.directoryservices.krb5_conf import KRB5Conf
from middlewared.utils.directoryservices.krb5_error import KRB5Error
class KerberosModel(sa.Model):
    """Singleton table holding free-form additions to krb5.conf sections."""
    __tablename__ = 'directoryservice_kerberossettings'

    id = sa.Column(sa.Integer(), primary_key=True)
    # Extra lines appended to the [appdefaults] section of krb5.conf
    ks_appdefaults_aux = sa.Column(sa.Text())
    # Extra lines appended to the [libdefaults] section of krb5.conf
    ks_libdefaults_aux = sa.Column(sa.Text())
class KerberosService(ConfigService):
class Config:
    # middlewared service wiring for the kerberos settings ConfigService
    service = "kerberos"
    datastore = 'directoryservice.kerberossettings'
    # database columns are prefixed with "ks_"; stripped from the API view
    datastore_prefix = "ks_"
    cli_namespace = "directory_service.kerberos.settings"
    role_prefix = 'DIRECTORY_SERVICE'
@accepts(Dict(
    'kerberos_settings_update',
    Str('appdefaults_aux', max_length=None),
    Str('libdefaults_aux', max_length=None),
    update=True
), audit='Kerberos configuration update')
async def do_update(self, data):
    """
    `appdefaults_aux` add parameters to "appdefaults" section of the krb5.conf file.
    `libdefaults_aux` add parameters to "libdefaults" section of the krb5.conf file.
    """
    verrors = ValidationErrors()

    old = await self.config()
    new = old.copy()
    new.update(data)
    # Validate the free-form aux strings before persisting anything.
    verrors.add_child(
        'kerberos_settings_update',
        await self._validate_appdefaults(new['appdefaults_aux'])
    )
    verrors.add_child(
        'kerberos_settings_update',
        await self._validate_libdefaults(new['libdefaults_aux'])
    )
    verrors.check()

    await self.middleware.call(
        'datastore.update', self._config.datastore, old['id'], new,
        {'prefix': self._config.datastore_prefix}
    )
    # Regenerate /etc/krb5.conf with the updated aux parameters.
    await self.middleware.call('etc.generate', 'kerberos')
    return await self.config()
@private
@accepts(Ref('kerberos-options'))
def ccache_path(self, data):
    """Return the credential cache path for the requested ccache type.

    For the USER ccache type the uid is appended to the base path so each
    user gets a separate cache.
    """
    krb_ccache = krb5ccache[data['ccache']]

    path_out = krb_ccache.value
    if krb_ccache == krb5ccache.USER:
        path_out += str(data['ccache_uid'])

    return path_out
@private
def generate_stub_config(self, realm, kdc=None, libdefaultsaux=None):
    """
    This method generates a temporary krb5.conf file that is used for the purpose
    of validating credentials and performing domain joins. During the domain join
    process it is important to hard-code a single KDC because our new account may
    not have replicated to other KDCs yet. Once we have joined a domain and inserted
    proper realm configuration this temporary config will be removed by a call
    to etc.generate kerberos.
    """
    aux = libdefaultsaux or []
    krbconf = KRB5Conf()
    libdefaults = {
        str(KRB_LibDefaults.DEFAULT_REALM): realm,
        str(KRB_LibDefaults.DNS_LOOKUP_REALM): 'false',
        str(KRB_LibDefaults.FORWARDABLE): 'true',
        # Per-uid ccaches in the kernel keyring
        str(KRB_LibDefaults.DEFAULT_CCACHE_NAME): PERSISTENT_KEYRING_PREFIX + '%{uid}'
    }
    realms = [{
        'realm': realm,
        'admin_server': [],
        'kdc': [],
        'kpasswd_server': []
    }]

    if kdc:
        # Pin the single KDC and disable DNS-based discovery so that all
        # traffic during the join goes to the server we just created our
        # account on.
        realms[0]['kdc'].append(kdc)
        libdefaults[str(KRB_LibDefaults.DNS_LOOKUP_KDC)] = 'false'
        libdefaults[str(KRB_LibDefaults.DNS_CANONICALIZE_HOSTNAME)] = 'false'

    krbconf.add_libdefaults(libdefaults, '\n'.join(aux))
    krbconf.add_realms(realms)

    # Writes the stub configuration to the krb5.conf location.
    krbconf.write()
@private
@accepts(
    Dict(
        'kerberos-options',
        Str('ccache', enum=[x.name for x in krb5ccache], default=krb5ccache.SYSTEM.name),
        Int('ccache_uid', default=0),
        register=True,
    ),
    Bool('raise_error', default=True)
)
def check_ticket(self, data, raise_error):
    """
    Perform very basic test that we have a valid kerberos ticket in the
    specified ccache.

    If `raise_error` is set (default), then a CallError is raised with
    errno set to ENOKEY if ticket cannot be read or if ticket is expired.

    returns True if ccache can be read and ticket is not expired, otherwise
    returns False
    """
    # NOTE(review): despite the docstring above, on success this returns the
    # dict produced by gss_dump_cred (truthy), not the literal True, and on
    # failure with raise_error unset it returns None, not False.
    krb_ccache = krb5ccache[data['ccache']]
    ccache_path = krb_ccache.value
    if krb_ccache is krb5ccache.USER:
        ccache_path += str(data['ccache_uid'])

    if (cred := gss_get_current_cred(ccache_path, False)) is not None:
        return gss_dump_cred(cred)

    if raise_error:
        raise CallError("Kerberos ticket is required.", errno.ENOKEY)

    return None
@private
async def _validate_param_type(self, data):
    """Validate a single krb5.conf parameter value against its declared type.

    `data` is {'ptype': <type name>, 'value': <raw string>}. Raises CallError
    on an invalid value; parameter types without a validator are accepted
    silently.
    """
    supported_validation_types = [
        'boolean',
        'cctype',
        'etypes',
        'keytab',
    ]
    if data['ptype'] not in supported_validation_types:
        return

    if data['ptype'] == 'boolean':
        if data['value'].upper() not in ['YES', 'TRUE', 'NO', 'FALSE']:
            raise CallError(f'[{data["value"]}] is not boolean')

    if data['ptype'] == 'etypes':
        # Space-separated list of kerberos encryption type names.
        for e in data['value'].split(' '):
            try:
                KRB_ETYPE(e)
            except Exception:
                raise CallError(f'[{e}] is not a supported encryption type')

    if data['ptype'] == 'cctype':
        available_types = ['FILE', 'MEMORY', 'DIR']
        if data['value'] not in available_types:
            raise CallError(f'[{data["value"]}] is an unsupported cctype. '
                            f'Available types are {", ".join(available_types)}. '
                            'This parameter is case-sensitive')

    if data['ptype'] == 'keytab':
        try:
            KRB_Keytab(data['value'])
        except Exception:
            raise CallError(f'{data["value"]} is an unsupported keytab path')
@private
async def _validate_appdefaults(self, appdefaults):
    """Validate user-provided lines for the [appdefaults] krb5.conf section.

    Only simple `param = value` lines are checked against KRB_AppDefaults;
    lines opening a nested block (value starting with '{') and lines that
    are not `key = value` pairs are passed through unvalidated, since
    appdefaults supports multi-line application blocks.

    Returns a ValidationErrors instance (possibly empty) for the caller to
    merge via `add_child`.
    """
    verrors = ValidationErrors()
    for line in appdefaults.splitlines():
        param = line.split('=')
        # Guard against an empty value ("param="): indexing '' [0] would
        # raise IndexError and escape as a 500 instead of a validation error.
        if len(param) == 2 and param[1].strip() and (param[1].strip())[0] != '{':
            validated_param = list(filter(
                lambda x: param[0].strip() in (x.value)[0], KRB_AppDefaults
            ))

            if not validated_param:
                verrors.add(
                    'kerberos_appdefaults',
                    f'{param[0]} is an invalid appdefaults parameter.'
                )
                continue

            try:
                await self._validate_param_type({
                    'ptype': (validated_param[0]).value[1],
                    'value': param[1].strip()
                })
            except Exception as e:
                verrors.add(
                    'kerberos_appdefaults',
                    f'{param[0]} has invalid value: {e.errmsg}.'
                )
                continue

    return verrors
@private
async def _validate_libdefaults(self, libdefaults):
    """Validate user-provided lines for the [libdefaults] krb5.conf section.

    Unlike appdefaults, every non-empty line must be a `key = value` pair
    known to KRB_LibDefaults; anything else is reported as invalid.

    Returns a ValidationErrors instance (possibly empty) for the caller to
    merge via `add_child`.
    """
    verrors = ValidationErrors()
    for line in libdefaults.splitlines():
        param = line.split('=')
        if len(param) == 2:
            validated_param = list(filter(
                lambda x: param[0].strip() in (x.value)[0], KRB_LibDefaults
            ))

            if not validated_param:
                verrors.add(
                    'kerberos_libdefaults',
                    f'{param[0]} is an invalid libdefaults parameter.'
                )
                continue

            try:
                await self._validate_param_type({
                    'ptype': (validated_param[0]).value[1],
                    'value': param[1].strip()
                })
            except Exception as e:
                verrors.add(
                    'kerberos_libdefaults',
                    f'{param[0]} has invalid value: {e.errmsg}.'
                )
        else:
            verrors.add('kerberos_libdefaults', f'{line} is an invalid libdefaults parameter.')

    return verrors
@private
@accepts(Dict(
    "get-kerberos-creds",
    Str("dstype", required=True, enum=[x.value for x in DSType]),
    OROperator(
        Dict(
            'ad_parameters',
            Str('bindname'),
            Str('bindpw'),
            Str('domainname'),
            Str('kerberos_principal')
        ),
        Dict(
            'ldap_parameters',
            Str('binddn'),
            Str('bindpw'),
            Int('kerberos_realm'),
            Str('kerberos_principal')
        ),
        name='conf',
        required=True
    )
))
async def get_cred(self, data):
    '''
    Get kerberos cred from directory services config to use for `do_kinit`.
    '''
    conf = data.get('conf', {})
    # A configured keytab principal always takes precedence over
    # username/password credentials.
    if conf.get('kerberos_principal'):
        return {'kerberos_principal': conf['kerberos_principal']}

    verrors = ValidationErrors()
    dstype = DSType(data['dstype'])
    if dstype is DSType.AD:
        for k in ['bindname', 'bindpw', 'domainname']:
            if not conf.get(k):
                verrors.add(f'conf.{k}', 'Parameter is required.')

        verrors.check()
        # AD principals are <user>@<REALM>; the realm is the upper-cased domain.
        return {
            'username': f'{conf["bindname"]}@{conf["domainname"].upper()}',
            'password': conf['bindpw']
        }

    for k in ['binddn', 'bindpw', 'kerberos_realm']:
        if not conf.get(k):
            verrors.add(f'conf.{k}', 'Parameter is required.')

    verrors.check()
    krb_realm = await self.middleware.call(
        'kerberos.realm.query',
        [('id', '=', conf['kerberos_realm'])],
        {'get': True}
    )
    # Derive the principal user from the leading RDN of the bind DN,
    # e.g. "cn=admin,dc=example,dc=com" -> "admin".
    bind_cn = (conf['binddn'].split(','))[0].split("=")
    return {
        'username': f'{bind_cn[1]}@{krb_realm["realm"]}',
        'password': conf['bindpw']
    }
@private
def _dump_current_cred(self, credential, ccache_path):
    """ returns dump of kerberos ccache if valid and not about to expire """
    if (current_cred := gss_get_current_cred(ccache_path, False)) is None:
        # No current credential in the ccache; nothing to reuse or destroy.
        return None

    if str(current_cred.name) == credential:
        # Only reuse the ticket if it will outlive two renewal-check
        # intervals; otherwise fall through and wipe the ccache so the
        # caller performs a fresh kinit.
        if current_cred.lifetime > KRB_TKT_CHECK_INTERVAL * 2:
            return gss_dump_cred(current_cred)

    # We need to pass through kdestroy because ccache is in kernel keyring
    kdestroy = subprocess.run(['kdestroy', '-c', ccache_path], check=False, capture_output=True)
    if kdestroy.returncode != 0:
        raise CallError(f'kdestroy failed with error: {kdestroy.stderr.decode()}')

    return None
@private
@accepts(Dict(
    'do_kinit',
    OROperator(
        Dict(
            'kerberos_username_password',
            Str('username', required=True),
            Password('password', required=True),
            register=True
        ),
        Dict(
            'kerberos_keytab',
            Str('kerberos_principal', required=True),
        ),
        name='krb5_cred',
        required=True,
    ),
    Patch(
        'kerberos-options',
        'kinit-options',
        ('add', {'name': 'renewal_period', 'type': 'int', 'default': 7}),
        ('add', {'name': 'lifetime', 'type': 'int', 'default': 0}),
        ('add', {
            'name': 'kdc_override',
            'type': 'dict',
            'args': [
                Str('domain', default=None),
                Str('kdc', default=None),
                List('libdefaults_aux', default=None)
            ]
        }),
    )
))
def do_kinit(self, data):
    """
    Obtain a kerberos credential using either a keytab principal or a
    username/password pair, writing it to the requested ccache.
    """
    ccache = krb5ccache[data['kinit-options']['ccache']]
    creds = data['krb5_cred']
    has_principal = 'kerberos_principal' in creds
    ccache_uid = data['kinit-options']['ccache_uid']
    ccache_path = self.ccache_path({
        'ccache': data['kinit-options']['ccache'],
        'ccache_uid': data['kinit-options']['ccache_uid']
    })

    if ccache == krb5ccache.USER:
        # Per-user ccaches are only allowed for password-based kinit on
        # non-root users.
        if has_principal:
            raise CallError('User-specific ccache not permitted with keytab-based kinit')

        if ccache_uid == 0:
            raise CallError('User-specific ccache not permitted for uid 0')

    if data['kinit-options']['kdc_override']['kdc'] is not None:
        # Write a stub krb5.conf pointing at the specified KDC.
        override = data['kinit-options']['kdc_override']
        if override['domain'] is None:
            raise CallError('Domain missing from KDC override')

        self.generate_stub_config(override['domain'], override['kdc'], override['libdefaults_aux'])

    if has_principal:
        principals = self.middleware.call_sync('kerberos.keytab.kerberos_principal_choices')
        if creds['kerberos_principal'] not in principals:
            self.logger.debug('Selected kerberos principal [%s] not available in keytab principals: %s. '
                              'Regenerating kerberos keytab from configuration file.',
                              creds['kerberos_principal'], ','.join(principals))
            self.middleware.call_sync('etc.generate', 'kerberos')

        if (current_cred := self._dump_current_cred(creds['kerberos_principal'], ccache_path)) is not None:
            # NOTE(review): this branch returns None while the password
            # branch below returns the dumped cred (and `current_cred` is
            # unused here) — confirm callers do not rely on a return value
            # for keytab-based kinit.
            return

        try:
            gss_acquire_cred_principal(
                creds['kerberos_principal'],
                ccache_path=ccache_path,
                lifetime=data['kinit-options']['lifetime'] or None
            )
        except gssapi.exceptions.BadNameError:
            raise CallError(
                f'{creds["kerberos_principal"]}: not a valid kerberos principal name',
                errno.EINVAL
            )
        except gssapi.exceptions.MissingCredentialsError as exc:
            if exc.min_code & 0xFF:
                # this is in krb5 lib error table
                raise KRB5Error(
                    gss_major=exc.maj_code,
                    gss_minor=exc.min_code,
                    errmsg=exc.gen_message()
                )

            # Error may be in different error table. Convert to CallError
            # for now, but we may special handling in future.
            raise CallError(str(exc))
        except Exception as exc:
            raise CallError(str(exc))
    else:
        if (current_cred := self._dump_current_cred(creds['username'], ccache_path)) is not None:
            # we already have a ticket skip unnecessary ccache manipulation
            return current_cred

        if not creds['password']:
            raise CallError('Password is required')

        try:
            gss_acquire_cred_user(
                creds['username'],
                creds['password'],
                ccache_path=ccache_path,
                lifetime=data['kinit-options']['lifetime'] or None
            )
        except gssapi.exceptions.BadNameError:
            raise CallError(
                f'{creds["username"]}: not a valid kerberos user name',
                errno.EINVAL
            )
        except gssapi.exceptions.MissingCredentialsError as exc:
            if exc.min_code & 0xFF:
                # this is in krb5 lib error table
                raise KRB5Error(
                    gss_major=exc.maj_code,
                    gss_minor=exc.min_code,
                    errmsg=exc.gen_message()
                )

            # Error may be in different error table. Convert to CallError
            # for now, but we may special handling in future.
            raise CallError(str(exc))
        except Exception as exc:
            raise CallError(str(exc))

    if ccache == krb5ccache.USER:
        # Hand ownership of the freshly-written ccache to the requesting user.
        os.chown(ccache_path, ccache_uid, -1)
@private
async def _kinit(self):
    """
    For now we only check for kerberos realms explicitly configured in AD and LDAP.
    """
    ad_config = await self.middleware.call('activedirectory.config')
    ldap_config = await self.middleware.call('ldap.config')
    await self.middleware.call('etc.generate', 'kerberos')

    payload = {}
    if ad_config['enable']:
        payload = {
            'dstype': DSType.AD.value,
            'conf': {
                'bindname': ad_config['bindname'],
                'bindpw': ad_config.get('bindpw', ''),
                'domainname': ad_config['domainname'],
                'kerberos_principal': ad_config['kerberos_principal'],
            }
        }

    if ldap_config['enable'] and ldap_config['kerberos_realm']:
        # An enabled LDAP + realm configuration replaces the AD payload.
        payload = {
            'dstype': DSType.LDAP.value,
            'conf': {
                'binddn': ldap_config['binddn'],
                'bindpw': ldap_config['bindpw'],
                'kerberos_realm': ldap_config['kerberos_realm'],
                'kerberos_principal': ldap_config['kerberos_principal'],
            }
        }

    if not payload:
        return

    cred = await self.get_cred(payload)
    return await self.middleware.call('kerberos.do_kinit', {'krb5_cred': cred})
@private
@accepts(Patch(
    'kerberos-options',
    'klist-options',
    ('add', {'name': 'timeout', 'type': 'int', 'default': 10}),
))
async def klist(self, data):
    """List tickets in the selected ccache, bounded by `timeout` seconds."""
    ccache_name = krb5ccache[data['ccache']].value
    try:
        return await asyncio.wait_for(
            self.middleware.run_in_thread(klist_impl, ccache_name),
            timeout=data['timeout'],
        )
    except asyncio.TimeoutError:
        raise CallError(f'Attempt to list kerberos tickets timed out after {data["timeout"]} seconds')
@private
@accepts(Ref('kerberos-options'))
async def kdestroy(self, data):
    """Destroy tickets held in the selected credential cache."""
    result = await run(['kdestroy', '-c', krb5ccache[data['ccache']].value], check=False)
    if result.returncode != 0:
        raise CallError(f'kdestroy failed with error: {result.stderr.decode()}')

    return
@private
async def stop(self):
    """Abort any running renewal-watch job, then destroy the ccache."""
    filters = [['method', '=', 'kerberos.wait_for_renewal'], ['state', '=', 'RUNNING']]
    if (running := await self.middleware.call('core.get_jobs', filters)):
        await self.middleware.call('core.job_abort', running[0]['id'])

    await self.kdestroy()
    return
@private
async def start(self, realm=None, kinit_timeout=30):
    """
    kinit can hang because it depends on DNS. If it has not returned within
    30 seconds, it is safe to say that it has failed.
    """
    await self.middleware.call('etc.generate', 'kerberos')
    kinit_task = self.middleware.create_task(self._kinit())
    try:
        cred = await asyncio.wait_for(kinit_task, timeout=kinit_timeout)
    except asyncio.TimeoutError:
        raise CallError(f'Timed out hung kinit after [{kinit_timeout}] seconds')

    await self.middleware.call('kerberos.wait_for_renewal')
    return cred
@private
@job(lock="kerberos_renew_watch", transient=True, lock_queue_size=1)
def wait_for_renewal(self, job):
    """
    Transient job that periodically checks the SYSTEM ccache and reruns
    kinit whenever the credential is missing / expired or within one
    check interval of expiring. Exits when directory services are
    disabled.
    """
    while True:
        if (cred := gss_get_current_cred(krb5ccache.SYSTEM.value, raise_error=False)) is None:
            # We don't have kerberos ticket or it has already expired
            # We can redo our kinit
            ds = self.middleware.call_sync('directoryservices.status')
            if ds['type'] is None:
                self.logger.debug(
                    'Directory services are disabled. Exiting job to wait '
                    'for renewal of kerberos ticket.'
                )
                break

            self.logger.debug('Kerberos ticket check failed, getting new ticket')
            self.middleware.call_sync('kerberos.start')
        elif cred.lifetime <= KRB_TKT_CHECK_INTERVAL:
            # Ticket is about to expire; renew proactively.
            self.middleware.call_sync('kerberos.start')

        time.sleep(KRB_TKT_CHECK_INTERVAL)
class KerberosRealmModel(sa.Model):
    # Kerberos realm table; realm names are unique (see index below).
    __tablename__ = 'directoryservice_kerberosrealm'

    id = sa.Column(sa.Integer(), primary_key=True)
    krb_realm = sa.Column(sa.String(120))
    # Server lists stored as space-delimited strings; converted to/from
    # lists by kerberos_extend / kerberos_compress.
    krb_kdc = sa.Column(sa.String(120))
    krb_admin_server = sa.Column(sa.String(120))
    krb_kpasswd_server = sa.Column(sa.String(120))

    __table_args__ = (
        sa.Index("directoryservice_kerberosrealm_krb_realm", "krb_realm", unique=True),
    )
class KerberosRealmService(CRUDService):
    """CRUD service for kerberos realm entries."""

    class Config:
        # Backed by the directoryservice_kerberosrealm table; the 'krb_'
        # column prefix is stripped for the public API.
        datastore = 'directoryservice.kerberosrealm'
        datastore_prefix = 'krb_'
        datastore_extend = 'kerberos.realm.kerberos_extend'
        namespace = 'kerberos.realm'
        cli_namespace = 'directory_service.kerberos.realm'
        role_prefix = 'DIRECTORY_SERVICE'
@private
async def kerberos_extend(self, data):
    """Convert space-delimited DB fields into lists for API consumers."""
    for key in ('kdc', 'admin_server', 'kpasswd_server'):
        data[key] = data[key].split(' ') if data[key] else []

    return data
@private
async def kerberos_compress(self, data):
    """Join list-valued fields back into space-delimited DB strings."""
    for key in ('kdc', 'admin_server', 'kpasswd_server'):
        data[key] = ' '.join(data[key])

    return data
# Entry schema is the create schema plus the database id.
ENTRY = Patch(
    'kerberos_realm_create', 'kerberos_realm_entry',
    ('add', Int('id')),
)

@accepts(
    Dict(
        'kerberos_realm_create',
        Str('realm', required=True),
        List('kdc'),
        List('admin_server'),
        List('kpasswd_server'),
        register=True
    ),
    audit='Kerberos realm create:',
    audit_extended=lambda data: data['realm']
)
async def do_create(self, data):
    """
    Create a new kerberos realm. This will be automatically populated during the
    domain join process in an Active Directory environment. Kerberos realm names
    are case-sensitive, but convention is to only use upper-case.

    Entries for kdc, admin_server, and kpasswd_server are not required.
    If they are unpopulated, then kerberos will use DNS srv records to
    discover the correct servers. The option to hard-code them is provided
    due to AD site discovery. Kerberos has no concept of Active Directory
    sites. This means that middleware performs the site discovery and
    sets the kerberos configuration based on the AD site.
    """
    verrors = ValidationErrors()
    verrors.add_child('kerberos_realm_create', await self._validate(data))
    verrors.check()

    # Lists -> space-delimited strings for storage.
    data = await self.kerberos_compress(data)
    id_ = await self.middleware.call(
        'datastore.insert', self._config.datastore, data,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call('etc.generate', 'kerberos')
    # NOTE(review): cron is restarted after realm creation — presumably
    # because a cron-driven task reads the kerberos config; confirm.
    await self.middleware.call('service.restart', 'cron')
    return await self.get_instance(id_)
@accepts(
    Int('id', required=True),
    Patch(
        "kerberos_realm_create",
        "kerberos_realm_update",
        ("attr", {"update": True})
    ),
    audit='Kerberos realm update:',
    audit_callback=True
)
async def do_update(self, audit_callback, id_, data):
    """
    Update a kerberos realm by id. This will be automatically populated during the
    domain join process in an Active Directory environment. Kerberos realm names
    are case-sensitive, but convention is to only use upper-case.
    """
    old = await self.get_instance(id_)
    audit_callback(old['realm'])
    new = old.copy()
    new.update(data)

    # kerberos_compress mutates `new` in place (lists -> space-delimited
    # strings) and returns it, so `new` below already holds the
    # compressed values.
    data = await self.kerberos_compress(new)
    id_ = await self.middleware.call(
        'datastore.update', self._config.datastore, id_, new,
        {'prefix': self._config.datastore_prefix}
    )

    await self.middleware.call('etc.generate', 'kerberos')
    return await self.get_instance(id_)
@accepts(Int('id'), audit='Kerberos realm delete:', audit_callback=True)
async def do_delete(self, audit_callback, id_):
    """
    Delete a kerberos realm by ID.
    """
    instance = await self.get_instance(id_)
    audit_callback(instance['realm'])

    await self.middleware.call('datastore.delete', self._config.datastore, id_)
    await self.middleware.call('etc.generate', 'kerberos')
@private
async def _validate(self, data):
    """Flag (case-insensitively) duplicate realm names; return ValidationErrors."""
    verrors = ValidationErrors()
    target = data['realm'].upper()
    for existing in await self.query():
        if existing['realm'].upper() == target:
            verrors.add('kerberos_realm', f'kerberos realm with name {existing["realm"]} already exists.')

    return verrors
class KerberosKeytabModel(sa.Model):
    # Uploaded keytabs; file contents are base64-encoded (see do_create)
    # and encrypted at rest via EncryptedText.
    __tablename__ = 'directoryservice_kerberoskeytab'

    id = sa.Column(sa.Integer(), primary_key=True)
    keytab_file = sa.Column(sa.EncryptedText())
    keytab_name = sa.Column(sa.String(120), unique=True)
class KerberosKeytabService(CRUDService):
    """CRUD service for uploaded kerberos keytabs."""

    class Config:
        datastore = 'directoryservice.kerberoskeytab'
        datastore_prefix = 'keytab_'
        namespace = 'kerberos.keytab'
        cli_namespace = 'directory_service.kerberos.keytab'
        role_prefix = 'DIRECTORY_SERVICE'

    # Entry schema is the create schema plus the database id.
    ENTRY = Patch(
        'kerberos_keytab_create', 'kerberos_keytab_entry',
        ('add', Int('id')),
    )
@accepts(
    Dict(
        'kerberos_keytab_create',
        Str('file', max_length=None, private=True),
        Str('name'),
        register=True
    ),
    audit='Kerberos keytab create:',
    audit_extended=lambda data: data['name']
)
async def do_create(self, data):
    """
    Create a kerberos keytab. Uploaded keytab files will be merged with the system
    keytab under /etc/krb5.keytab.

    `file` b64encoded kerberos keytab
    `name` name for kerberos keytab
    """
    verrors = ValidationErrors()
    # NOTE(review): errors attributed to 'kerberos_principal_create'
    # rather than 'kerberos_keytab_create' — confirm intended.
    verrors.add_child('kerberos_principal_create', await self._validate(data))
    verrors.check()

    id_ = await self.middleware.call(
        'datastore.insert', self._config.datastore, data,
        {'prefix': self._config.datastore_prefix}
    )
    # Regenerate /etc/krb5.keytab so the new entry is merged in.
    await self.middleware.call('etc.generate', 'kerberos')

    return await self.get_instance(id_)
@accepts(
    Int('id', required=True),
    Patch(
        'kerberos_keytab_create',
        'kerberos_keytab_update',
    ),
    audit='Kerberos keytab update:',
    audit_callback=True
)
async def do_update(self, audit_callback, id_, data):
    """
    Update kerberos keytab by id.
    """
    old = await self.get_instance(id_)
    audit_callback(old['name'])
    new = old.copy()
    new.update(data)

    verrors = ValidationErrors()
    # NOTE(review): errors attributed to 'kerberos_principal_update'
    # rather than 'kerberos_keytab_update' — confirm intended.
    verrors.add_child('kerberos_principal_update', await self._validate(new))
    verrors.check()

    await self.middleware.call(
        'datastore.update', self._config.datastore, id_, new,
        {'prefix': self._config.datastore_prefix}
    )
    # Regenerate /etc/krb5.keytab with the updated entry.
    await self.middleware.call('etc.generate', 'kerberos')

    return await self.get_instance(id_)
@accepts(Int('id'), audit='Kerberos keytab delete:', audit_callback=True)
async def do_delete(self, audit_callback, id_):
    """
    Delete kerberos keytab by id, and force regeneration of
    system keytab.
    """
    kt = await self.get_instance(id_)
    audit_callback(kt['name'])
    if kt['name'] == 'AD_MACHINE_ACCOUNT':
        # Removing the machine-account keytab would break an active AD
        # join, so refuse while AD is enabled; otherwise also clear the
        # stale principal reference in the AD config.
        ad_config = await self.middleware.call('activedirectory.config')
        if ad_config['enable']:
            raise CallError(
                'Active Directory machine account keytab may not be deleted while '
                'the Active Directory service is enabled.'
            )

        await self.middleware.call(
            'datastore.update', 'directoryservice.activedirectory',
            ad_config['id'], {'kerberos_principal': ''}, {'prefix': 'ad_'}
        )

    await self.middleware.call('datastore.delete', self._config.datastore, id_)
    await self.middleware.call('etc.generate', 'kerberos')
    await self._cleanup_kerberos_principals()
    # Restart kerberos; a failure to restart is logged but not fatal.
    await self.middleware.call('kerberos.stop')
    try:
        await self.middleware.call('kerberos.start')
    except Exception as e:
        self.logger.debug(
            'Failed to start kerberos service after deleting keytab entry: %s' % e
        )
@private
async def _cleanup_kerberos_principals(self):
    """Clear AD/LDAP principal settings that no longer exist in the keytab."""
    choices = await self.middleware.call('kerberos.keytab.kerberos_principal_choices')
    ad_config = await self.middleware.call('activedirectory.config')
    ldap_config = await self.middleware.call('ldap.config')

    if ad_config['kerberos_principal'] and ad_config['kerberos_principal'] not in choices:
        await self.middleware.call('activedirectory.update', {'kerberos_principal': ''})

    if ldap_config['kerberos_principal'] and ldap_config['kerberos_principal'] not in choices:
        await self.middleware.call('ldap.update', {'kerberos_principal': ''})
@private
def _validate_impl(self, data):
    """
    - synchronous validation -
    Decode `data['file']` from base64 and parse the result with
    ktutil_list_impl to confirm it is a usable kerberos keytab.
    Returns a ValidationErrors collection (empty when valid).
    """
    verrors = ValidationErrors()
    try:
        decoded = base64.b64decode(data['file'])
    except Exception as e:
        verrors.add("kerberos.keytab_create", f"Keytab is a not a properly base64-encoded string: [{e}]")
        return verrors

    with tempfile.NamedTemporaryFile() as f:
        # Write the decoded keytab to a temp file so the parser can read it.
        f.write(decoded)
        f.flush()

        try:
            ktutil_list_impl(f.name)
        except Exception as e:
            verrors.add("kerberos.keytab_create", f"Failed to validate keytab: [{e}]")

    return verrors
@private
async def _validate(self, data):
    """
    async wrapper for validate
    """
    # Validation does blocking file I/O; run it off the event loop.
    return await self.middleware.run_in_thread(self._validate_impl, data)
@private
async def ktutil_list(self, keytab_file=KRB_Keytab['SYSTEM'].value):
    """
    Return parsed entries of `keytab_file` (defaults to the system keytab).
    Failures are logged and yield an empty list rather than raising.
    """
    try:
        return await self.middleware.run_in_thread(ktutil_list_impl, keytab_file)
    except Exception as e:
        self.logger.warning("Failed to list kerberos keytab [%s]: %s",
                            keytab_file, e)

    return []
@private
async def kerberos_principal_choices(self):
    """
    Keytabs typically have multiple entries for same principal (differentiated by enc_type).
    Since the enctype isn't relevant in this situation, only show unique principal names.

    Return empty list if system keytab doesn't exist.
    """
    keytab_exists = await self.middleware.run_in_thread(os.path.exists, KRB_Keytab['SYSTEM'].value)
    if not keytab_exists:
        return []

    return sorted({entry['principal'] for entry in await self.ktutil_list()})
@private
def has_nfs_principal(self):
    """
    This method checks whether the kerberos keytab contains an nfs service principal
    """
    try:
        services = keytab_services(KRB_Keytab.SYSTEM.value)
    except FileNotFoundError:
        # No system keytab at all implies no nfs principal.
        return False

    return 'nfs' in services
@private
def store_ad_keytab(self):
    """
    libads automatically generates a system keytab during domain join process. This
    method parses the system keytab and inserts as the AD_MACHINE_ACCOUNT keytab.
    """
    if not os.path.exists(KRB_Keytab.SYSTEM.value):
        self.logger.warning('System keytab is missing. Unable to extract AD machine account keytab.')
        return

    ad = self.middleware.call_sync('activedirectory.config')
    # NOTE(review): the 'Crin' filter operator presumably selects
    # principals containing the netbios name case-insensitively — confirm
    # against extract_from_keytab's filter semantics.
    ad_kt_bytes = extract_from_keytab(KRB_Keytab.SYSTEM.value, [['principal', 'Crin', ad['netbiosname']]])
    keytab_file = base64.b64encode(ad_kt_bytes).decode()

    # Upsert the AD_MACHINE_ACCOUNT row.
    entry = self.middleware.call_sync('kerberos.keytab.query', [('name', '=', 'AD_MACHINE_ACCOUNT')])
    if not entry:
        self.middleware.call_sync(
            'datastore.insert', self._config.datastore,
            {'name': 'AD_MACHINE_ACCOUNT', 'file': keytab_file},
            {'prefix': self._config.datastore_prefix}
        )
    else:
        self.middleware.call_sync(
            'datastore.update', self._config.datastore, entry[0]['id'],
            {'name': 'AD_MACHINE_ACCOUNT', 'file': keytab_file},
            {'prefix': self._config.datastore_prefix}
        )

    if not ad['kerberos_principal']:
        # NOTE(review): AD config row id is hard-coded to 1 here — confirm.
        self.middleware.call_sync('datastore.update', 'directoryservice.activedirectory', 1, {
            'ad_kerberos_principal': f'{ad["netbiosname"]}$@{ad["domainname"]}'
        })
@periodic(3600)
@private
async def check_updated_keytab(self):
    """
    Check whether keytab needs updating. This currently checks for changes
    to the AD_MACHINE_ACCOUNT keytab due to the possibility that it can be
    changed by user playing around with `net ads` command from shell.

    When this happens, the last_password_change timestamp is altered in
    secrets.tdb and so we can base whether to update that keytab entry
    based on the timestamp rather than trying to evaluate the keytab itself.
    """
    if not await self.middleware.call('system.ready'):
        return

    ad_enabled = (await self.middleware.call('activedirectory.config'))['enable']
    if ad_enabled is False:
        return

    timestamps = await self.middleware.call('directoryservices.get_last_password_change')
    if timestamps['dbconfig'] == timestamps['secrets']:
        return

    self.logger.debug("Machine account password has changed. Stored copies of "
                      "kerberos keytab and directory services secrets will now "
                      "be updated.")

    await self.middleware.call('directoryservices.secrets.backup')
    await self.middleware.call('kerberos.keytab.store_ad_keytab')
# --- truenas_middleware/src/middlewared/middlewared/plugins/config.py ---
from datetime import datetime
import glob
import os
import pathlib
import re
import shutil
import subprocess
import tarfile
import tempfile
from middlewared.schema import accepts, Bool, Dict, returns
from middlewared.service import CallError, Service, job, pass_app, private
from middlewared.plugins.pwenc import PWENC_FILE_SECRET
from middlewared.utils.db import FREENAS_DATABASE
CONFIG_FILES = {
'pwenc_secret': PWENC_FILE_SECRET,
'admin_authorized_keys': '/home/admin/.ssh/authorized_keys',
'truenas_admin_authorized_keys': '/home/truenas_admin/.ssh/authorized_keys',
'root_authorized_keys': '/root/.ssh/authorized_keys',
}
RE_CONFIG_BACKUP = re.compile(r'.*(\d{4}-\d{2}-\d{2})-(\d+)\.db$')
UPLOADED_DB_PATH = '/data/uploaded.db'
PWENC_UPLOADED = '/data/pwenc_secret_uploaded'
ADMIN_KEYS_UPLOADED = '/data/admin_authorized_keys_uploaded'
TRUENAS_ADMIN_KEYS_UPLOADED = '/data/truenas_admin_authorized_keys_uploaded'
ROOT_KEYS_UPLOADED = '/data/root_authorized_keys_uploaded'
DATABASE_NAME = os.path.basename(FREENAS_DATABASE)
CONFIGURATION_UPLOAD_REBOOT_REASON = 'Configuration upload'
CONFIGURATION_RESET_REBOOT_REASON = 'Configuration reset'
class ConfigService(Service):
    """Save, upload, and reset the TrueNAS configuration database."""

    class Config:
        cli_namespace = 'system.config'
@private
def save_db_only(self, options, job):
    """Stream the bare sqlite database to the job's output pipe."""
    with open(FREENAS_DATABASE, 'rb') as src:
        shutil.copyfileobj(src, job.pipes.output.w)
@private
def save_tar_file(self, options, job):
    """
    Build a tar archive with the database plus, per `options`, the pwenc
    secret seed and ssh authorized_keys files; stream it to the job's
    output pipe.
    """
    with tempfile.NamedTemporaryFile(delete=True) as ntf:
        with tarfile.open(ntf.name, 'w') as tar:
            files = {'freenas-v1.db': FREENAS_DATABASE}
            if options['secretseed']:
                files['pwenc_secret'] = CONFIG_FILES['pwenc_secret']
            # NOTE(review): the single 'root_authorized_keys' option gates
            # all three authorized_keys files (admin, truenas_admin, root)
            # — confirm this is intentional.
            if options['root_authorized_keys'] and os.path.exists(CONFIG_FILES['admin_authorized_keys']):
                files['admin_authorized_keys'] = CONFIG_FILES['admin_authorized_keys']
            if options['root_authorized_keys'] and os.path.exists(CONFIG_FILES['truenas_admin_authorized_keys']):
                files['truenas_admin_authorized_keys'] = CONFIG_FILES['truenas_admin_authorized_keys']
            if options['root_authorized_keys'] and os.path.exists(CONFIG_FILES['root_authorized_keys']):
                files['root_authorized_keys'] = CONFIG_FILES['root_authorized_keys']

            for arcname, path in files.items():
                tar.add(path, arcname=arcname)

        with open(ntf.name, 'rb') as f:
            shutil.copyfileobj(f, job.pipes.output.w)
@accepts(Dict(
    'configsave',
    Bool('secretseed', default=False),
    Bool('pool_keys', default=False),
    Bool('root_authorized_keys', default=False),
))
@returns()
@job(pipes=["output"])
async def save(self, job, options):
    """
    Create a tar file of security-sensitive information. These options select which information
    is included in the tar file:

    `secretseed` bool: When true, include password secret seed.
    `pool_keys` bool: IGNORED and DEPRECATED as it does not apply on SCALE systems.
    `root_authorized_keys` bool: When true, include "/root/.ssh/authorized_keys" file for the root user.

    If none of these options are set, the tar file is not generated and the database file is returned.
    """
    options.pop('pool_keys')  # ignored, doesn't apply on SCALE

    # With no extra options selected, stream the bare db; otherwise a tar.
    method = self.save_db_only if not any(options.values()) else self.save_tar_file
    await self.middleware.run_in_thread(method, options, job)
@accepts()
@returns()
@job(pipes=["input"])
@pass_app(rest=True)
def upload(self, app, job):
    """
    Accepts a configuration file via job pipe.
    """
    chunk = 1024
    max_size = 10485760  # 10MB
    with tempfile.NamedTemporaryFile() as stf:
        with open(stf.name, 'wb') as f:
            # Spool the upload to disk, enforcing the size cap as we go.
            while True:
                data_in = job.pipes.input.r.read(chunk)
                if data_in == b'':
                    break
                else:
                    f.write(data_in)
                    if f.tell() > max_size:
                        raise CallError(f'Uploaded config is greater than maximum allowed size ({max_size} Bytes)')

        is_tar = tarfile.is_tarfile(stf.name)
        self.upload_impl(stf.name, is_tar_file=is_tar)

    # Reboot asynchronously so the API response can be delivered first.
    self.middleware.run_coroutine(
        self.middleware.call('system.reboot', CONFIGURATION_UPLOAD_REBOOT_REASON, {'delay': 10}, app=app),
        wait=False,
    )
@private
def upload_impl(self, file_or_tar, is_tar_file=False):
    """
    Validate and stage an uploaded config (bare .db file or tar archive)
    into /data, then run the upload hooks locally and on the failover
    peer when licensed.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        if is_tar_file:
            with tarfile.open(file_or_tar, 'r') as tar:
                tar.extractall(temp_dir)
        else:
            # if it's just the db then copy it to the same
            # temp directory to keep the logic simple(ish).
            # it's also important that we add a '.db' suffix
            # since we're assuming (since this is a single file)
            # that this is the database only and our logic below
            # assumes the name of the file is freenas-v1.db OR a
            # file that has a '.db' suffix
            shutil.copy2(file_or_tar, f'{temp_dir}/{DATABASE_NAME}')

        pathobj = pathlib.Path(temp_dir)
        found_db_file = None
        for i in pathobj.iterdir():
            if i.name == DATABASE_NAME or i.suffix == '.db':
                # when user saves their config, we put the db in the
                # archive using the same name as the db on the local
                # filesystem, however, in the past we did not do this
                # so the db was named in an unstructured mannner. We
                # already make the assumption that the user doesn't
                # change the name of the pwenc_secret file so we'll
                # make the assumption that the user can change the
                # name of the db but doesn't change the suffix.
                found_db_file = i
                break

        if found_db_file is None:
            raise CallError('Neither a valid tar or TrueNAS database file was provided.')

        # Run schema migrations against the uploaded db before accepting it.
        p = subprocess.run(['migrate', str(found_db_file.absolute())], capture_output=True, text=True)
        if p.returncode != 0:
            raise CallError(
                f'Uploaded TrueNAS database file is not valid:\n{p.stderr}'
            )

        # now copy uploaded files/dirs to respective location
        send_to_remote = []
        for i in pathobj.iterdir():
            abspath = str(i.absolute())
            if i.name == found_db_file.name:
                shutil.move(abspath, UPLOADED_DB_PATH)
                send_to_remote.append(UPLOADED_DB_PATH)

            if i.name == 'pwenc_secret':
                shutil.move(abspath, PWENC_UPLOADED)
                send_to_remote.append(PWENC_UPLOADED)

            if i.name == 'admin_authorized_keys':
                shutil.move(abspath, ADMIN_KEYS_UPLOADED)
                send_to_remote.append(ADMIN_KEYS_UPLOADED)

            if i.name == 'truenas_admin_authorized_keys':
                shutil.move(abspath, TRUENAS_ADMIN_KEYS_UPLOADED)
                send_to_remote.append(TRUENAS_ADMIN_KEYS_UPLOADED)

            if i.name == 'root_authorized_keys':
                shutil.move(abspath, ROOT_KEYS_UPLOADED)
                send_to_remote.append(ROOT_KEYS_UPLOADED)

    self.middleware.call_hook_sync('config.on_upload', UPLOADED_DB_PATH)
    if self.middleware.call_sync('failover.licensed'):
        try:
            # Replicate the staged files to the standby node, then run the
            # same upload hook there and schedule its reboot.
            for _file in send_to_remote:
                self.middleware.call_sync('failover.send_small_file', _file)

            self.middleware.call_sync(
                'failover.call_remote', 'core.call_hook', ['config.on_upload', [UPLOADED_DB_PATH]]
            )
            self.middleware.run_coroutine(
                self.middleware.call('failover.call_remote', 'system.reboot', [CONFIGURATION_UPLOAD_REBOOT_REASON]),
                wait=False,
            )
        except Exception as e:
            raise CallError(
                f'Config uploaded successfully, but remote node responded with error: {e}. '
                f'Please use Sync to Peer on the System/Failover page to perform a manual sync after reboot.',
                CallError.EREMOTENODEERROR,
            )
@accepts(Dict('options', Bool('reboot', default=True)))
@returns()
@job(lock='config_reset', logs=True)
@pass_app(rest=True)
def reset(self, app, job, options):
    """
    Reset database to configuration defaults.

    If `reboot` is true this job will reboot the system after its completed with a delay of 10
    seconds.
    """
    job.set_progress(0, 'Performing credential check')
    if job.credentials is None:
        raise CallError('Unable to check credentials')

    if job.credentials.is_user_session and 'SYS_ADMIN' not in job.credentials.user['account_attributes']:
        raise CallError('Configuration reset is limited to local SYS_ADMIN account ("root" or "truenas_admin")')

    job.set_progress(15, 'Replacing database file')
    shutil.copy('/data/factory-v1.db', FREENAS_DATABASE)

    job.set_progress(25, 'Running database upload hooks')
    self.middleware.call_hook_sync('config.on_upload', FREENAS_DATABASE)

    if self.middleware.call_sync('failover.licensed'):
        job.set_progress(35, 'Sending database to the other node')
        try:
            self.middleware.call_sync('failover.send_small_file', FREENAS_DATABASE)

            self.middleware.call_sync(
                'failover.call_remote', 'core.call_hook', ['config.on_upload', [FREENAS_DATABASE]],
            )

            if options['reboot']:
                # NOTE(review): the remote reboot reuses the upload reason
                # constant rather than CONFIGURATION_RESET_REBOOT_REASON —
                # confirm whether that is intentional.
                self.middleware.run_coroutine(
                    self.middleware.call(
                        'failover.call_remote', 'system.reboot', [CONFIGURATION_UPLOAD_REBOOT_REASON],
                    ),
                    wait=False,
                )
        except Exception as e:
            raise CallError(
                f'Config reset successfully, but remote node responded with error: {e}. '
                f'Please use Sync to Peer on the System/Failover page to perform a manual sync after reboot.',
                CallError.EREMOTENODEERROR,
            )

    job.set_progress(50, 'Updating initramfs')
    self.middleware.call_sync('boot.update_initramfs')

    if options['reboot']:
        job.set_progress(95, 'Will reboot in 10 seconds')
        self.middleware.run_coroutine(
            self.middleware.call('system.reboot', CONFIGURATION_RESET_REBOOT_REASON, {'delay': 10}, app=app),
            wait=False,
        )
@private
def backup(self):
    """
    Copy the current database into the config-backup directory on the
    system dataset, named by system version and date. Legacy-format
    backups (<date>-<seq>.db at the dataset root) are removed first.
    """
    systemdataset = self.middleware.call_sync('systemdataset.config')
    if not systemdataset or not systemdataset['path']:
        return

    # Legacy format
    for f in glob.glob(f'{systemdataset["path"]}/*.db'):
        if not RE_CONFIG_BACKUP.match(f):
            continue
        try:
            os.unlink(f)
        except OSError:
            pass

    today = datetime.now().strftime("%Y%m%d")
    newfile = os.path.join(
        systemdataset["path"],
        f'configs-{systemdataset["uuid"]}',
        self.middleware.call_sync('system.version'),
        f'{today}.db',
    )

    # exist_ok avoids the TOCTOU race between an exists() check and
    # makedirs() when two backups run concurrently.
    os.makedirs(os.path.dirname(newfile), exist_ok=True)
    shutil.copy(FREENAS_DATABASE, newfile)
def setup(middleware):
    """At startup, move any uploaded config artifacts into their live locations."""
    staged = (
        (UPLOADED_DB_PATH, FREENAS_DATABASE),
        (PWENC_UPLOADED, PWENC_FILE_SECRET),
        (ADMIN_KEYS_UPLOADED, CONFIG_FILES['admin_authorized_keys']),
        (TRUENAS_ADMIN_KEYS_UPLOADED, CONFIG_FILES['truenas_admin_authorized_keys']),
        (ROOT_KEYS_UPLOADED, CONFIG_FILES['root_authorized_keys']),
    )
    for src, dst in staged:
        if os.path.exists(src):
            shutil.move(src, dst)
# (end of config.py)
# --- truenas_middleware/src/middlewared/middlewared/plugins/network.py ---
import asyncio
import contextlib
import ipaddress
from collections import defaultdict
from itertools import zip_longest
from ipaddress import ip_address, ip_interface
import middlewared.sqlalchemy as sa
from middlewared.service import CallError, CRUDService, filterable, pass_app, private
from middlewared.utils import filter_list
from middlewared.schema import accepts, Bool, Dict, Int, IPAddr, List, Patch, returns, Str, ValidationErrors
from middlewared.validators import Range
from .interface.netif import netif
from .interface.interface_types import InterfaceType
from .interface.lag_options import XmitHashChoices, LacpduRateChoices
class NetworkAliasModel(sa.Model):
    # Additional addresses attached to an interface row; rows are
    # cascade-deleted with their parent interface.
    __tablename__ = 'network_alias'

    id = sa.Column(sa.Integer(), primary_key=True)
    alias_interface_id = sa.Column(sa.Integer(), sa.ForeignKey('network_interfaces.id', ondelete='CASCADE'), index=True)
    alias_address = sa.Column(sa.String(45), default='')
    alias_version = sa.Column(sa.Integer())
    alias_netmask = sa.Column(sa.Integer())
    # address_b / vip: presumably the standby-node address and virtual IP
    # on HA systems — TODO confirm against the alias handling code.
    alias_address_b = sa.Column(sa.String(45), default='')
    alias_vip = sa.Column(sa.String(45), default='')
class NetworkBridgeModel(sa.Model):
    # Bridge configuration: member interface list plus STP/learning flags.
    __tablename__ = 'network_bridge'

    id = sa.Column(sa.Integer(), primary_key=True)
    members = sa.Column(sa.JSON(list), default=[])
    interface_id = sa.Column(sa.ForeignKey('network_interfaces.id', ondelete='CASCADE'))
    stp = sa.Column(sa.Boolean())
    enable_learning = sa.Column(sa.Boolean(), default=True)
class NetworkInterfaceModel(sa.Model):
    """Base configuration row for every configured network interface.

    The `int_` columns map onto the public fields exposed by interface.query
    (see `iface_extend`): e.g. int_dhcp -> ipv4_dhcp, int_name -> description.
    """
    __tablename__ = 'network_interfaces'

    id = sa.Column(sa.Integer, primary_key=True)
    int_interface = sa.Column(sa.String(300), unique=True)  # kernel interface name (also the public `id`)
    int_name = sa.Column(sa.String(120))  # user-facing description
    int_dhcp = sa.Column(sa.Boolean(), default=False)  # IPv4 DHCP enabled
    int_address = sa.Column(sa.String(45), default='')  # primary (active-node) address
    int_address_b = sa.Column(sa.String(45), default='')  # standby-node address (HA)
    int_version = sa.Column(sa.Integer())  # IP version of int_address: 4 or 6
    int_netmask = sa.Column(sa.Integer())  # prefix length
    int_ipv6auto = sa.Column(sa.Boolean(), default=False)  # IPv6 autoconfiguration enabled
    int_vip = sa.Column(sa.String(45), nullable=True)  # virtual IP shared between HA nodes
    int_vhid = sa.Column(sa.Integer(), nullable=True)  # VRRP vhid (1-255 per interface.create validator)
    int_critical = sa.Column(sa.Boolean(), default=False)  # critical for failover
    int_group = sa.Column(sa.Integer(), nullable=True)  # failover group number
    int_mtu = sa.Column(sa.Integer(), nullable=True)  # MTU, None means driver default
class NetworkInterfaceLinkAddressModel(sa.Model):
    """Persisted link-layer (MAC, 17 chars: aa:bb:cc:dd:ee:ff) addresses per interface.

    NOTE(review): not referenced in this part of the file; `link_address_b` is
    presumably the standby controller's MAC on HA systems -- confirm against the
    interface link-address handling code.
    """
    __tablename__ = 'network_interface_link_address'

    id = sa.Column(sa.Integer, primary_key=True)
    interface = sa.Column(sa.String(300))  # interface name, matches network_interfaces.int_interface
    link_address = sa.Column(sa.String(17), nullable=True)
    link_address_b = sa.Column(sa.String(17), nullable=True)
class NetworkLaggInterfaceModel(sa.Model):
    """Link-aggregation settings for an interface row of type LINK_AGGREGATION."""
    __tablename__ = 'network_lagginterface'

    id = sa.Column(sa.Integer, primary_key=True)
    lagg_interface_id = sa.Column(sa.Integer(), sa.ForeignKey('network_interfaces.id', ondelete='CASCADE'))
    lagg_protocol = sa.Column(sa.String(120))  # stored lowercase: lacp/failover/loadbalance/...
    lagg_xmit_hash_policy = sa.Column(sa.String(8), nullable=True)  # only meaningful for lacp/loadbalance
    lagg_lacpdu_rate = sa.Column(sa.String(4), nullable=True)  # only meaningful for lacp
class NetworkLaggInterfaceMemberModel(sa.Model):
    """Member ports of a link-aggregation; deleted with the parent lagg row (CASCADE)."""
    __tablename__ = 'network_lagginterfacemembers'

    id = sa.Column(sa.Integer, primary_key=True)
    lagg_ordernum = sa.Column(sa.Integer())  # position of the port within the lagg
    lagg_physnic = sa.Column(sa.String(120), unique=True)  # a physical nic may belong to only one lagg
    lagg_interfacegroup_id = sa.Column(sa.ForeignKey('network_lagginterface.id', ondelete='CASCADE'), index=True)
class NetworkVlanModel(sa.Model):
    """VLAN settings for an interface row of type VLAN."""
    __tablename__ = 'network_vlan'

    id = sa.Column(sa.Integer(), primary_key=True)
    vlan_vint = sa.Column(sa.String(120))  # the vlan interface's own name
    vlan_pint = sa.Column(sa.String(300))  # parent interface name
    vlan_tag = sa.Column(sa.Integer())  # 802.1Q tag (1-4094 per interface.create validator)
    vlan_description = sa.Column(sa.String(120))
    vlan_pcp = sa.Column(sa.Integer(), nullable=True)  # priority code point (0-7), optional
class InterfaceService(CRUDService):
    class Config:
        # middlewared service configuration
        datastore_primary_key_type = 'string'  # entries are keyed by interface name, not an integer id
        namespace_alias = 'interfaces'
        cli_namespace = 'network.interface'
        role_prefix = 'NETWORK_INTERFACE'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # In-memory snapshot of the network tables taken before the first pending
        # change; non-empty means there are uncommitted/unchecked-in changes.
        self._original_datastores = {}
        # asyncio TimerHandle that auto-rolls-back committed-but-not-checked-in changes
        self._rollback_timer = None
ENTRY = Dict(
'interface_entry',
Str('id', required=True),
Str('name', required=True),
Bool('fake', required=True),
Str('type', required=True),
Dict(
'state',
Str('name', required=True),
Str('orig_name', required=True),
Str('description', required=True),
Int('mtu', required=True),
Bool('cloned', required=True),
List('flags', items=[Str('flag')], required=True),
List('nd6_flags', required=True),
List('capabilities', required=True),
Str('link_state', required=True),
Str('media_type', required=True),
Str('media_subtype', required=True),
Str('active_media_type', required=True),
Str('active_media_subtype', required=True),
List('supported_media', required=True),
List('media_options', required=True, null=True),
Str('link_address', required=True),
Str('permanent_link_address', required=True, null=True),
Str('hardware_link_address', required=True),
Int('rx_queues', required=True),
Int('tx_queues', required=True),
List('aliases', required=True, items=[Dict(
'alias',
Str('type', required=True),
Str('address', required=True),
Str('netmask'),
Str('broadcast'),
)]),
List('vrrp_config', null=True),
# lagg section
Str('protocol', null=True),
List('ports', items=[Dict(
'lag_ports',
Str('name'),
List('flags', items=[Str('flag')])
)]),
Str('xmit_hash_policy', default=None, null=True),
Str('lacpdu_rate', default=None, null=True),
# vlan section
Str('parent', null=True),
Int('tag', null=True),
Int('pcp', null=True),
required=True
),
List('aliases', required=True, items=[Dict(
'alias',
Str('type', required=True),
Str('address', required=True),
Str('netmask', required=True),
)]),
Bool('ipv4_dhcp', required=True),
Bool('ipv6_auto', required=True),
Str('description', required=True),
Int('mtu', null=True, required=True),
Str('vlan_parent_interface', null=True),
Int('vlan_tag', null=True),
Int('vlan_pcp', null=True),
Str('lag_protocol'),
List('lag_ports', items=[Str('lag_port')]),
List('bridge_members', items=[Str('member')]), # FIXME: Please document fields for HA Hardware
Bool('enable_learning'),
additional_attrs=True,
)
@private
async def query_names_only(self):
return [i['int_interface'] for i in await self.middleware.call('datastore.query', 'network.interfaces')]
@filterable
def query(self, filters, options):
"""
Query Interfaces with `query-filters` and `query-options`
"""
retrieve_names_only = options.get('extra', {}).get('retrieve_names_only')
data = {}
configs = {
i['int_interface']: i
for i in self.middleware.call_sync('datastore.query', 'network.interfaces')
}
ha_hardware = self.middleware.call_sync('system.is_ha_capable')
ignore = self.middleware.call_sync('interface.internal_interfaces')
# need to handle these platforms specially
fseries = hseries = False
platform = self.middleware.call_sync('truenas.get_chassis_hardware')
if platform.startswith('TRUENAS-F'):
fseries = True
elif platform.startswith('TRUENAS-H'):
hseries = True
for name, iface in netif.list_interfaces().items():
if (name in ignore) or (iface.cloned and name not in configs):
continue
elif any((fseries, hseries)) and iface.bus == 'usb':
# The {f/h}-series platforms will add a usb ethernet device to the system
# when someone opens up the ikvm html5 console. We need to hide this
# interface so users can't configure it
continue
if retrieve_names_only:
data[name] = {'name': name}
continue
try:
data[name] = self.iface_extend(iface.asdict(), configs, ha_hardware)
except OSError:
self.logger.warn('Failed to get interface state for %s', name, exc_info=True)
for name, config in filter(lambda x: x[0] not in data, configs.items()):
if retrieve_names_only:
data[name] = {'name': name}
else:
data[name] = self.iface_extend({
'name': config['int_interface'],
'orig_name': config['int_interface'],
'description': config['int_name'],
'aliases': [],
'link_address': '',
'permanent_link_address': None,
'hardware_link_address': '',
'cloned': True,
'mtu': 1500,
'flags': [],
'nd6_flags': [],
'capabilities': [],
'link_state': '',
'media_type': '',
'media_subtype': '',
'active_media_type': '',
'active_media_subtype': '',
'supported_media': [],
'media_options': [],
'vrrp_config': [],
}, configs, ha_hardware, fake=True)
return filter_list(list(data.values()), filters, options)
    @private
    def iface_extend(self, iface_state, configs, ha_hardware, fake=False):
        """Merge the kernel view (`iface_state`) of one interface with its database
        row (looked up in `configs`) into the public interface entry (see ENTRY).

        `ha_hardware` adds/fills the failover_* fields and vrrp_config;
        `fake=True` marks entries synthesized purely from the database.
        """
        itype = self.middleware.call_sync('interface.type', iface_state)
        iface = {
            'id': iface_state['name'],
            'name': iface_state['name'],
            'fake': fake,
            'type': itype.value,
            'state': iface_state,
            'aliases': [],
            # with an empty database, interfaces default to autoconfiguration
            'ipv4_dhcp': False if configs else True,
            'ipv6_auto': False if configs else True,
            'description': '',
            'mtu': None,
        }
        if ha_hardware:
            iface.update({
                'failover_critical': False,
                'failover_vhid': None,
                'failover_group': None,
                'failover_aliases': [],
                'failover_virtual_aliases': [],
            })
            iface['state']['vrrp_config'] = []
        config = configs.get(iface['name'])
        if not config:
            # no db row: return the defaults built above
            return iface
        iface.update({
            'ipv4_dhcp': config['int_dhcp'],
            'ipv6_auto': config['int_ipv6auto'],
            'description': config['int_name'],
            'mtu': config['int_mtu'],
        })
        if ha_hardware:
            # (type, host prefix length) for the virtual IP
            info = ('INET', 32) if config['int_version'] == 4 else ('INET6', 128)
            iface.update({
                'failover_critical': config['int_critical'],
                'failover_vhid': config['int_vhid'],
                'failover_group': config['int_group'],
            })
            if config['int_address_b']:
                iface['failover_aliases'].append({
                    'type': info[0],
                    'address': config['int_address_b'],
                    'netmask': config['int_netmask'],
                })
            if config['int_vip']:
                iface['failover_virtual_aliases'].append({
                    'type': info[0],
                    'address': config['int_vip'],
                    'netmask': info[1]
                })
                # MASTER if the VIP is live on this node, BACKUP otherwise
                for i in filter(lambda x: x['type'] != 'LINK', iface['state']['aliases']):
                    if i['address'] == config['int_vip']:
                        iface['state']['vrrp_config'].append({'address': config['int_vip'], 'state': 'MASTER'})
                        break
                else:
                    iface['state']['vrrp_config'].append({'state': 'BACKUP'})
        if itype == InterfaceType.BRIDGE:
            filters = [('interface', '=', config['id'])]
            if br := self.middleware.call_sync('datastore.query', 'network.bridge', filters):
                iface.update({
                    'bridge_members': br[0]['members'], 'stp': br[0]['stp'], 'enable_learning': br[0]['enable_learning']
                })
            else:
                iface.update({'bridge_members': [], 'stp': True, 'enable_learning': True})
        elif itype == InterfaceType.LINK_AGGREGATION:
            lag = self.middleware.call_sync(
                'datastore.query',
                'network.lagginterface',
                [('interface', '=', config['id'])],
                {'prefix': 'lagg_'}
            )
            if lag:
                lag = lag[0]
                if lag['protocol'] in ('lacp', 'loadbalance'):
                    iface.update({'xmit_hash_policy': (lag.get('xmit_hash_policy') or 'layer2+3').upper()})
                    if lag['protocol'] == 'lacp':
                        iface.update({'lacpdu_rate': (lag.get('lacpdu_rate') or 'slow').upper()})
                iface.update({'lag_protocol': lag['protocol'].upper(), 'lag_ports': []})
                for port in self.middleware.call_sync(
                    'datastore.query',
                    'network.lagginterfacemembers',
                    [('interfacegroup', '=', lag['id'])],
                    {'prefix': 'lagg_'}
                ):
                    iface['lag_ports'].append(port['physnic'])
            else:
                iface['lag_ports'] = []
        elif itype == InterfaceType.VLAN:
            vlan = self.middleware.call_sync(
                'datastore.query',
                'network.vlan',
                [('vint', '=', iface['name'])],
                {'prefix': 'vlan_'}
            )
            if vlan:
                vlan = vlan[0]
                iface.update({
                    'vlan_parent_interface': vlan['pint'],
                    'vlan_tag': vlan['tag'],
                    'vlan_pcp': vlan['pcp'],
                })
            else:
                iface.update({
                    'vlan_parent_interface': None,
                    'vlan_tag': None,
                    'vlan_pcp': None,
                })
        # primary address from the interface row itself
        if (not config['int_dhcp'] or not config['int_ipv6auto']) and config['int_address']:
            iface['aliases'].append({
                'type': 'INET' if config['int_version'] == 4 else 'INET6',
                'address': config['int_address'],
                'netmask': config['int_netmask'],
            })
        # plus any extra addresses from network.alias rows
        filters = [('alias_interface', '=', config['id'])]
        for alias in self.middleware.call_sync('datastore.query', 'network.alias', filters):
            _type = 'INET' if alias['alias_version'] == 4 else 'INET6'
            if alias['alias_address']:
                iface['aliases'].append({
                    'type': _type,
                    'address': alias['alias_address'],
                    'netmask': alias['alias_netmask'],
                })
            if ha_hardware:
                if alias['alias_address_b']:
                    iface['failover_aliases'].append({
                        'type': _type,
                        'address': alias['alias_address_b'],
                        'netmask': alias['alias_netmask'],
                    })
                if alias['alias_vip']:
                    iface['failover_virtual_aliases'].append({
                        'type': _type,
                        'address': alias['alias_vip'],
                        'netmask': 32 if _type == 'INET' else 128,
                    })
                    # same MASTER/BACKUP determination as for int_vip above
                    for i in filter(lambda x: x['type'] != 'LINK', iface['state']['aliases']):
                        if i['address'] == alias['alias_vip']:
                            iface['state']['vrrp_config'].append({'address': alias['alias_vip'], 'state': 'MASTER'})
                            break
                    else:
                        iface['state']['vrrp_config'].append({'state': 'BACKUP'})
        return iface
@accepts()
@returns(Bool())
def default_route_will_be_removed(self):
"""
On a fresh install of SCALE, dhclient is started for every interface so IP
addresses/routes could be installed via that program. However, when the
end-user goes to configure the first interface we tear down all other interfaces
configs AND delete the default route. We also remove the default route if the
configured gateway doesn't match the one currently installed in kernel.
"""
# FIXME: What about IPv6??
ifaces = self.middleware.call_sync('datastore.query', 'network.interfaces')
rtgw = netif.RoutingTable().default_route_ipv4
if not ifaces and rtgw:
return True
# we have a default route in kernel and we have a route specified in the db
# and they do not match
dbgw = self.middleware.call_sync('network.configuration.config')['ipv4gateway']
return rtgw and (dbgw != rtgw.gateway.exploded)
@accepts(IPAddr('gw', v6=False, required=True))
@returns()
async def save_default_route(self, gw):
"""
This method exists _solely_ to provide a "warning" and therefore
a path for remediation for when an end-user modifies an interface
and we rip the default gateway out from underneath them without
any type of warning.
NOTE: This makes 2 assumptions
1. interface.create/update/delete must have been called before
calling this method
2. this method must be called before `interface.sync` is called
This method exists for the predominant scenario for new users...
1. fresh install SCALE
2. all interfaces start DHCPv4 (v6 is ignored for now)
3. 1 of the interfaces receives an IP address
4. along with the IP, the kernel receives a default route
(by design, of course)
5. user goes to configure this interface as having a static
IP address
6. as we go through and "sync" the changes, we remove the default
route because it exists in the kernel FIB but doesn't exist
in the database.
7. IF the user is connecting via layer3, then they will lose all
access to the TrueNAS and never be able to finalize the changes
to the network because we ripped out the default route which
is how they were communicating to begin with.
In the above scenario, we're going to try and prevent this by doing
the following:
1. fresh install SCALE
2. all interfaces start DHCPv4
3. default route is received
4. user configures an interface
5. When user pushes "Test Changes" (interface.sync), webUI will call
network.configuration.default_route_will_be_removed BEFORE interface.sync
6. if network.configuration.default_route_will_be_removed returns True,
then webUI will open a new modal dialog that gives the end-user
ample warning/verbiage describing the situation. Furthermore, the
modal will allow the user to input a default gateway
7. if user gives gateway, webUI will call this method providing the info
and we'll validate accordingly
8. OR if user doesn't give gateway, they will need to "confirm" this is
desired
9. the default gateway provided to us (if given by end-user) will be stored
in the same in-memory cache that we use for storing the interface changes
and will be rolledback accordingly in this plugin just like everything else
There are a few other scenarios where this is beneficial, but the one listed above
is seen most often by end-users/support team.
"""
if not self._original_datastores:
raise CallError('There are no pending interface changes.')
gw = ip_address(gw)
defgw = {'gc_ipv4gateway': gw.exploded}
for iface in await self.middleware.call('datastore.query', 'network.interfaces'):
gw_reachable = False
try:
gw_reachable = gw in ip_interface(f'{iface["int_address"]}/{iface["int_netmask"]}').network
except ValueError:
# these can be "empty" interface entries so there will be no ip or netmask
# (i.e. when someone creates a VLAN whose parent doesn't have a "config" associated to it)
continue
else:
if gw_reachable:
await self.middleware.call('datastore.update', 'network.globalconfiguration', 1, defgw)
return
for iface in await self.middleware.call('datastore.query', 'network.alias'):
if gw in ip_interface(f'{iface["alias_address"]}/{iface["alias_netmask"]}').network:
await self.middleware.call('datastore.update', 'network.globalconfiguration', 1, defgw)
return
raise CallError(f'{str(gw)!r} is not reachable from any interface on the system.')
@private
async def get_datastores(self):
datastores = {}
datastores['interfaces'] = await self.middleware.call(
'datastore.query', 'network.interfaces'
)
datastores['alias'] = []
for i in await self.middleware.call('datastore.query', 'network.alias'):
i['alias_interface'] = i['alias_interface']['id']
datastores['alias'].append(i)
datastores['bridge'] = []
for i in await self.middleware.call('datastore.query', 'network.bridge'):
i['interface'] = i['interface']['id'] if i['interface'] else None
datastores['bridge'].append(i)
datastores['vlan'] = await self.middleware.call(
'datastore.query', 'network.vlan'
)
datastores['lagg'] = []
for i in await self.middleware.call('datastore.query', 'network.lagginterface'):
i['lagg_interface'] = i['lagg_interface']['id']
datastores['lagg'].append(i)
datastores['laggmembers'] = []
for i in await self.middleware.call('datastore.query', 'network.lagginterfacemembers'):
i['lagg_interfacegroup'] = i['lagg_interfacegroup']['id']
datastores['laggmembers'].append(i)
datastores['ipv4gateway'] = {
'gc_ipv4gateway': (
await self.middleware.call('datastore.query', 'network.globalconfiguration', [], {'get': True})
)['gc_ipv4gateway']
}
return datastores
async def __save_datastores(self):
"""
Save datastores states before performing any actions to interfaces.
This will make sure to be able to rollback configurations in case something
doesnt go as planned.
"""
if self._original_datastores:
return
self._original_datastores = await self.get_datastores()
    async def __restore_datastores(self):
        """Restore the network tables from the snapshot taken by __save_datastores.

        NOTE: the delete/insert order below matters because of FK cascades.
        """
        if not self._original_datastores:
            return

        # Deleting network.lagginterface because deleting network.interfaces won't cascade
        # (but network.lagginterface will cascade to network.lagginterfacemembers)
        await self.middleware.call('datastore.delete', 'network.lagginterface', [])
        # Deleting interfaces should cascade to network.alias and network.bridge
        await self.middleware.call('datastore.delete', 'network.interfaces', [])
        await self.middleware.call('datastore.delete', 'network.vlan', [])
        await self.middleware.call('datastore.update', 'network.globalconfiguration', 1, {'gc_ipv4gateway': ''})

        # re-insert parents before children so FKs resolve
        for i in self._original_datastores['interfaces']:
            await self.middleware.call('datastore.insert', 'network.interfaces', i)
        for i in self._original_datastores['alias']:
            await self.middleware.call('datastore.insert', 'network.alias', i)
        for i in self._original_datastores['bridge']:
            await self.middleware.call('datastore.insert', 'network.bridge', i)
        for i in self._original_datastores['vlan']:
            await self.middleware.call('datastore.insert', 'network.vlan', i)
        for i in self._original_datastores['lagg']:
            await self.middleware.call('datastore.insert', 'network.lagginterface', i)
        for i in self._original_datastores['laggmembers']:
            await self.middleware.call('datastore.insert', 'network.lagginterfacemembers', i)

        gw = self._original_datastores['ipv4gateway']
        await self.middleware.call('datastore.update', 'network.globalconfiguration', 1, gw)

        # snapshot consumed: no pending changes remain
        self._original_datastores.clear()
    @private
    async def get_original_datastores(self):
        """Return the in-memory snapshot of the network tables (empty dict when
        there are no pending interface changes)."""
        return self._original_datastores
@accepts(roles=['NETWORK_INTERFACE_WRITE'])
@returns(Bool())
async def has_pending_changes(self):
"""
Returns whether there are pending interfaces changes to be applied or not.
"""
return bool(self._original_datastores)
    @accepts(roles=['NETWORK_INTERFACE_WRITE'])
    @returns()
    async def rollback(self):
        """
        Rollback pending interfaces changes.
        """
        # cancel the auto-rollback timer: we are rolling back right now
        if self._rollback_timer:
            self._rollback_timer.cancel()
        self._rollback_timer = None
        # We do not check for failover disabled in here because we may be reverting
        # the first time HA is being set up and this was already checked during commit.
        await self.__restore_datastores()

        # All entries are deleted from the network tables on a rollback operation.
        # This breaks `failover.status` on TrueNAS HA systems.
        # Because of this, we need to manually sync the database to the standby
        # controller.
        await self.middleware.call_hook('interface.post_rollback')

        await self.sync()
@private
async def checkin_impl(self, clear_cache=True):
if self._rollback_timer:
self._rollback_timer.cancel()
self._rollback_timer = None
if clear_cache:
self._original_datastores = {}
@accepts(roles=['NETWORK_INTERFACE_WRITE'])
@returns()
async def checkin(self):
"""
If this method is called after interface changes have been committed and within the checkin timeout,
then the task that automatically rolls back any interface changes is cancelled and the in-memory snapshot
of database tables for the various interface tables will be cleared. The idea is that the end-user has
verified the changes work as intended and need to be committed permanently.
"""
return await self.checkin_impl(clear_cache=True)
@accepts(roles=['NETWORK_INTERFACE_WRITE'])
@returns()
async def cancel_rollback(self):
"""
If this method is called after interface changes have been committed and within the checkin timeout,
then the task that automatically rolls back any interface changes is cancelled and the in-memory snapshot
of database tables for the various interface tables will NOT be cleared.
"""
return await self.checkin_impl(clear_cache=False)
@accepts(roles=['NETWORK_INTERFACE_WRITE'])
@returns(Int('remaining_seconds', null=True))
async def checkin_waiting(self):
"""
Returns whether we are waiting user to check in the applied network changes
before they are rolled back.
Value is in number of seconds or null.
"""
if self._rollback_timer:
remaining = self._rollback_timer.when() - asyncio.get_event_loop().time()
if remaining > 0:
return int(remaining)
@accepts(Dict(
'options',
Bool('rollback', default=True),
Int('checkin_timeout', default=60),
), roles=['NETWORK_INTERFACE_WRITE'])
@returns()
async def commit(self, options):
"""
Commit/apply pending interfaces changes.
`rollback` as true (default) will roll back changes in case they fail to apply.
`checkin_timeout` is the time in seconds it will wait for the checkin call to acknowledge
the interfaces changes happened as planned from the user. If checkin does not happen
within this period of time the changes will get reverted.
"""
verrors = ValidationErrors()
schema = 'interface.commit'
await self.middleware.call('network.common.check_failover_disabled', schema, verrors)
await self.middleware.call('network.common.check_dhcp_or_aliases', schema, verrors)
verrors.check()
try:
await self.sync()
except Exception:
if options['rollback']:
await self.rollback()
raise
if options['rollback'] and options['checkin_timeout']:
loop = asyncio.get_event_loop()
self._rollback_timer = loop.call_later(
options['checkin_timeout'], lambda: self.middleware.create_task(self.rollback())
)
else:
self._original_datastores = {}
    @accepts(Dict(
        'interface_create',
        Str('name'),
        Str('description', default=''),
        Str('type', enum=['BRIDGE', 'LINK_AGGREGATION', 'VLAN'], required=True),
        Bool('ipv4_dhcp', default=False),
        Bool('ipv6_auto', default=False),
        List('aliases', unique=True, items=[
            Dict(
                'interface_alias',
                Str('type', required=True, default='INET', enum=['INET', 'INET6']),
                IPAddr('address', required=True),
                Int('netmask', required=True),
                register=True,
            ),
        ]),
        Bool('failover_critical', default=False),
        Int('failover_group', null=True),
        Int('failover_vhid', null=True, validators=[Range(min_=1, max_=255)]),
        List('failover_aliases', items=[
            Dict(
                'interface_failover_alias',
                Str('type', required=True, default='INET', enum=['INET', 'INET6']),
                IPAddr('address', required=True),
            )
        ]),
        List('failover_virtual_aliases', items=[
            Dict(
                'interface_virtual_alias',
                Str('type', required=True, default='INET', enum=['INET', 'INET6']),
                IPAddr('address', required=True),
            )
        ]),
        List('bridge_members'),
        Bool('enable_learning', default=True),
        Bool('stp', default=True),
        Str('lag_protocol', enum=['LACP', 'FAILOVER', 'LOADBALANCE', 'ROUNDROBIN', 'NONE']),
        Str('xmit_hash_policy', enum=[i.value for i in XmitHashChoices], default=None, null=True),
        Str('lacpdu_rate', enum=[i.value for i in LacpduRateChoices], default=None, null=True),
        List('lag_ports', items=[Str('interface')]),
        Str('vlan_parent_interface'),
        Int('vlan_tag', validators=[Range(min_=1, max_=4094)]),
        Int('vlan_pcp', validators=[Range(min_=0, max_=7)], null=True),
        Int('mtu', validators=[Range(min_=68, max_=9216)], default=None, null=True),
        register=True
    ))
    async def do_create(self, data):
        """
        Create virtual interfaces (Link Aggregation, VLAN)

        For BRIDGE `type` the following attribute is required: bridge_members.

        For LINK_AGGREGATION `type` the following attributes are required: lag_ports,
        lag_protocol.

        For VLAN `type` the following attributes are required: vlan_parent_interface,
        vlan_tag and vlan_pcp.

        .. examples(cli)::

        Create a bridge interface:

        > network interface create name=br0 type=BRIDGE bridge_members=enp0s3,enp0s4
            aliases="192.168.0.10"

        Create a link aggregation interface that has multiple IP addresses in multiple subnets:

        > network interface create name=bond0 type=LINK_AGGREGATION lag_protocol=LACP
            lag_ports=enp0s8,enp0s9 aliases="192.168.0.20/30","192.168.1.20/30"

        Create a DHCP-enabled VLAN interface

        > network interface create name=vlan0 type=VLAN vlan_parent_interface=enp0s10
            vlan_tag=10 vlan_pcp=4 ipv4_dhcp=true ipv6_auto=true
        """
        verrors = ValidationErrors()
        await self.middleware.call('network.common.check_failover_disabled', 'interface.create', verrors)

        # per-type required fields; `type` is schema-restricted to these three values
        if data['type'] == 'BRIDGE':
            required_attrs = ('bridge_members', )
        elif data['type'] == 'LINK_AGGREGATION':
            required_attrs = ('lag_protocol', 'lag_ports')
        elif data['type'] == 'VLAN':
            required_attrs = ('vlan_parent_interface', 'vlan_tag')

        for i in filter(lambda x: x not in data, required_attrs):
            verrors.add(f'interface_create.{i}', 'This field is required')
        verrors.check()

        await self._common_validation(verrors, 'interface_create', data, data['type'])
        verrors.check()

        # snapshot the network tables (once) so the change can be rolled back
        await self.__save_datastores()

        name = data.get('name')
        if name is None:
            # auto-generate the next free name for the given type
            if data['type'] == 'BRIDGE':
                prefix = 'br'
            elif data['type'] == 'LINK_AGGREGATION':
                prefix = 'bond'
            elif data['type'] == 'VLAN':
                prefix = 'vlan'
            else:
                # should never be reached because it means our schema is broken
                raise CallError(f'Invalid interface type: {data["type"]!r}')
            name = await self.get_next(prefix)

        interface_id = lag_id = None
        try:
            # the async generator yields the new network.interfaces row id, then
            # (after this loop body runs) inserts the alias rows
            async for interface_id in self.__create_interface_datastore(data, {'interface': name}):
                if data['type'] == 'BRIDGE':
                    await self.middleware.call('datastore.insert', 'network.bridge', {
                        'interface': interface_id, 'members': data['bridge_members'], 'stp': data['stp'],
                        'enable_learning': data['enable_learning']
                    })
                elif data['type'] == 'LINK_AGGREGATION':
                    lagports_ids = []
                    lag_proto = data['lag_protocol'].lower()
                    xmit = lacpdu_rate = None
                    if lag_proto in ('lacp', 'loadbalance'):
                        # Based on stress testing done by the performance team, we default to layer2+3
                        # because the system default is layer2 and with the system default outbound
                        # traffic did not use the other ports in the lagg. Using layer2+3 fixed it.
                        xmit = data['xmit_hash_policy'].lower() if data['xmit_hash_policy'] is not None else 'layer2+3'

                        if lag_proto == 'lacp':
                            # obviously, lacpdu_rate does not apply to any lagg mode except for lacp
                            lacpdu_rate = data['lacpdu_rate'].lower() if data['lacpdu_rate'] else 'slow'

                    lag_id = await self.middleware.call('datastore.insert', 'network.lagginterface', {
                        'lagg_interface': interface_id,
                        'lagg_protocol': lag_proto,
                        'lagg_xmit_hash_policy': xmit,
                        'lagg_lacpdu_rate': lacpdu_rate,
                    })
                    lagports_ids += await self.__set_lag_ports(lag_id, data['lag_ports'])
                elif data['type'] == 'VLAN':
                    await self.middleware.call('datastore.insert', 'network.vlan', {
                        'vlan_vint': name,
                        'vlan_pint': data['vlan_parent_interface'],
                        'vlan_tag': data['vlan_tag'],
                        'vlan_pcp': data.get('vlan_pcp'),
                    })
        except Exception:
            # undo the partial insert; deleting the lagg row cascades to its members
            if lag_id:
                with contextlib.suppress(Exception):
                    await self.middleware.call('datastore.delete', 'network.lagginterface', lag_id)
            if interface_id:
                await self.middleware.call('datastore.delete', 'network.interfaces', interface_id)
            raise

        return await self.get_instance(name)
@private
async def get_next(self, prefix, start=0):
number = start
ifaces = [
i['int_interface']
for i in await self.middleware.call(
'datastore.query',
'network.interfaces',
[('int_interface', '^', prefix)],
)
]
while f'{prefix}{number}' in ifaces:
number += 1
return f'{prefix}{number}'
    async def _common_validation(self, verrors, schema_name, data, itype, update=None):
        """Validation shared by interface create and update.

        Accumulates problems into `verrors`. `itype` is PHYSICAL, BRIDGE,
        LINK_AGGREGATION or VLAN; `update` is the existing entry when editing
        (its id is excluded from the duplicate checks). NOTE: on systems without
        a failover license this mutates `data` by popping the failover_* keys.
        """
        def _get_filters(key):
            # exclude the interface being updated from "already in use" queries
            return [[key, '!=', update['id']]] if update else []

        cant = ' cannot be changed.'
        required = ' is required when configuring HA.'
        validation_attrs = {
            'aliases': ['Active node IP address', cant, required],
            'failover_aliases': ['Standby node IP address', cant, required],
            'failover_virtual_aliases': ['Virtual IP address', cant, required],
            'failover_group': ['Failover group number', cant, required],
            'mtu': ['MTU', cant],
            'ipv4_dhcp': ['DHCP', cant],
            'ipv6_auto': ['Autoconfig for IPv6', cant],
        }

        ifaces = {i['name']: i for i in await self.middleware.call('interface.query', _get_filters('id'))}
        ds_ifaces = await self.middleware.call('datastore.query', 'network.interfaces', _get_filters('int_interface'))

        if 'name' in data and data['name'] in ifaces:
            verrors.add(f'{schema_name}.name', 'Interface name is already in use.')

        # only one interface may run DHCP / IPv6 autoconfiguration
        if data.get('ipv4_dhcp') and any(
            filter(lambda x: x['int_dhcp'] and not ifaces[x['int_interface']]['fake'], ds_ifaces)
        ):
            verrors.add(f'{schema_name}.ipv4_dhcp', 'Only one interface can be used for DHCP.')

        if data.get('ipv6_auto') and any(
            filter(lambda x: x['int_ipv6auto'] and not ifaces[x['int_interface']]['fake'], ds_ifaces)
        ):
            verrors.add(
                f'{schema_name}.ipv6_auto',
                'Only one interface can have IPv6 autoconfiguration enabled.'
            )

        # subnet-overlap checks use the ipaddress module (blocking) -> thread
        await self.middleware.run_in_thread(self.__validate_aliases, verrors, schema_name, data, ifaces)

        # map each port/parent to the bridge/vlan/lagg that already uses it
        bridge_used = {}
        vlan_used = {}
        lag_used = {}
        for k, v in ifaces.items():
            if k.startswith('br'):
                for port in (v.get('bridge_members') or []):
                    bridge_used[port] = k
            elif k.startswith('vlan'):
                vlan_used[k] = v['vlan_parent_interface']
            elif k.startswith('bond'):
                for port in (v.get('lag_ports') or []):
                    lag_used[port] = k

        if itype == 'PHYSICAL':
            # a physical nic enslaved to a lagg cannot carry its own configuration
            if data['name'] in lag_used:
                lag_name = lag_used.get(data['name'])
                for k, v in validation_attrs.items():
                    if data.get(k):
                        verrors.add(
                            f'{schema_name}.{k}',
                            f'Interface in use by {lag_name}. {str(v[0]) + str(v[1])}'
                        )
        elif itype == 'BRIDGE':
            if 'name' in data:
                try:
                    await self.middleware.call('interface.validate_name', InterfaceType.BRIDGE, data['name'])
                except ValueError as e:
                    verrors.add(f'{schema_name}.name', str(e))
            for i, member in enumerate(data.get('bridge_members') or []):
                if member not in ifaces:
                    verrors.add(f'{schema_name}.bridge_members.{i}', 'Not a valid interface.')
                    continue
                if member in bridge_used:
                    verrors.add(
                        f'{schema_name}.bridge_members.{i}',
                        f'Interface {member} is currently in use by {bridge_used[member]}.',
                    )
                elif member in lag_used:
                    verrors.add(
                        f'{schema_name}.bridge_members.{i}',
                        f'Interface {member} is currently in use by {lag_used[member]}.',
                    )
        elif itype == 'LINK_AGGREGATION':
            if 'name' in data:
                try:
                    await self.middleware.call('interface.validate_name', InterfaceType.LINK_AGGREGATION, data['name'])
                except ValueError as e:
                    verrors.add(f'{schema_name}.name', str(e))
            if data['lag_protocol'] not in await self.middleware.call('interface.lag_supported_protocols'):
                verrors.add(
                    f'{schema_name}.lag_protocol',
                    f'TrueNAS SCALE does not support LAG protocol {data["lag_protocol"]}',
                )
            lag_ports = data.get('lag_ports')
            if not lag_ports:
                verrors.add(f'{schema_name}.lag_ports', 'This field cannot be empty.')
            ds_ifaces_set = {i['int_interface'] for i in ds_ifaces}
            for i, member in enumerate(lag_ports):
                _schema = f'{schema_name}.lag_ports.{i}'
                if member not in ifaces:
                    verrors.add(_schema, f'"{member}" is not a valid interface.')
                elif member in lag_used:
                    verrors.add(_schema, f'Interface {member} is currently in use by {lag_used[member]}.')
                elif member in bridge_used:
                    verrors.add(_schema, f'Interface {member} is currently in use by {bridge_used[member]}.')
                elif member in vlan_used:
                    verrors.add(_schema, f'Interface {member} is currently in use by {vlan_used[member]}.')
                elif member in ds_ifaces_set:
                    # member has its own db configuration; it cannot be enslaved
                    verrors.add(_schema, f'Interface {member} is currently in use')
        elif itype == 'VLAN':
            if 'name' in data:
                try:
                    await self.middleware.call('interface.validate_name', InterfaceType.VLAN, data['name'])
                except ValueError as e:
                    verrors.add(f'{schema_name}.name', str(e))
            parent = data.get('vlan_parent_interface')
            if parent not in ifaces:
                verrors.add(f'{schema_name}.vlan_parent_interface', 'Not a valid interface.')
            elif parent in lag_used:
                verrors.add(
                    f'{schema_name}.vlan_parent_interface',
                    f'Interface {parent} is currently in use by {lag_used[parent]}.',
                )
            elif parent.startswith('br'):
                verrors.add(
                    f'{schema_name}.vlan_parent_interface',
                    'Bridge interfaces are not allowed.',
                )
            else:
                parent_iface = ifaces[parent]
                mtu = data.get('mtu')
                if mtu and mtu > (parent_iface.get('mtu') or 1500):
                    verrors.add(
                        f'{schema_name}.mtu',
                        'VLAN MTU cannot be bigger than parent interface.',
                    )

        aliases = data.get('aliases', []).copy()
        aliases.extend(data.get('failover_aliases', []).copy())
        aliases.extend(data.get('failover_virtual_aliases', []).copy())
        mtu = data.get('mtu')
        if mtu and mtu < 1280 and any(i['type'] == 'INET6' for i in aliases):
            # we set the minimum MTU to 68 for IPv4 (per https://tools.ietf.org/html/rfc791)
            # however, the minimum MTU for IPv6 is 1280 (per https://tools.ietf.org/html/rfc2460)
            # so we need to make sure that if a IPv6 address is provided the minimum isn't
            # smaller than 1280.
            verrors.add(
                f'{schema_name}.mtu',
                'When specifying an IPv6 address, the MTU cannot be smaller than 1280'
            )

        if not await self.middleware.call('failover.licensed'):
            # failover settings are meaningless without an HA license
            data.pop('failover_critical', None)
            data.pop('failover_group', None)
            data.pop('failover_aliases', None)
            data.pop('failover_vhid', None)
            data.pop('failover_virtual_aliases', None)
        else:
            failover = await self.middleware.call('failover.config')
            ha_configured = await self.middleware.call('failover.status') != 'SINGLE'
            if ha_configured and not failover['disabled']:
                raise CallError(
                    'Failover needs to be disabled to perform network configuration changes.'
                )

            # have to make sure that active, standby and virtual ip addresses are equal
            active_node_ips = len(data.get('aliases', []))
            standby_node_ips = len(data.get('failover_aliases', []))
            virtual_node_ips = len(data.get('failover_virtual_aliases', []))
            are_equal = active_node_ips == standby_node_ips == virtual_node_ips
            if not are_equal:
                verrors.add(
                    f'{schema_name}.failover_aliases',
                    'The number of active, standby and virtual IP addresses must be the same.'
                )

            if not update:
                # on create: setting any failover attribute requires setting all of them
                failover_attrs = set(
                    [k for k, v in validation_attrs.items() if k not in ('mtu', 'ipv4_dhcp', 'ipv6_auto')]
                )
                configured_attrs = set([i for i in failover_attrs if data.get(i)])
                if configured_attrs:
                    for i in failover_attrs - configured_attrs:
                        verrors.add(
                            f'{schema_name}.{i}',
                            f'{str(validation_attrs[i][0]) + str(validation_attrs[i][2])}',
                        )
            else:
                if data.get('failover_critical') and data.get('failover_group') is None:
                    verrors.add(
                        f'{schema_name}.failover_group',
                        'A failover group is required when configuring a critical failover interface.'
                    )

            # creating a "failover" lagg interface on HA systems and trying
            # to mark it "critical for failover" isn't allowed as it can cause
            # delays in the failover process. (Sometimes failure entirely.)
            # However, using this type of lagg interface for "non-critical"
            # workloads (i.e. webUI management) is acceptable.
            if itype == 'LINK_AGGREGATION':
                # there is a chance that we have failover lagg ints marked critical
                # for failover in the db so to prevent the webUI from disallowing
                # the user to update those interfaces, we'll only enforce this on
                # newly created laggs.
                if not update:
                    if data.get('failover_critical') and data.get('lag_protocol') == 'FAILOVER':
                        msg = 'A lagg interface using the "Failover" protocol '
                        msg += 'is not allowed to be marked critical for failover.'
                        verrors.add(f'{schema_name}.failover_critical', msg)
def __validate_aliases(self, verrors, schema_name, data, ifaces):
used_networks_ipv4 = []
used_networks_ipv6 = []
for iface in ifaces.values():
for iface_alias in filter(lambda x: x['type'] in ('INET', 'INET6'), iface['aliases']):
network = ipaddress.ip_network(f'{iface_alias["address"]}/{iface_alias["netmask"]}', strict=False)
if iface_alias['type'] == 'INET':
used_networks_ipv4.append(network)
else:
used_networks_ipv6.append(network)
for i, alias in enumerate(data.get('aliases') or []):
alias_network = ipaddress.ip_network(f'{alias["address"]}/{alias["netmask"]}', strict=False)
if alias_network.version == 4:
used_networks = ((used_networks_ipv4, 'another interface'),)
else:
used_networks = ((used_networks_ipv6, 'another interface'),)
for network_cidrs, message in used_networks:
for used_network in network_cidrs:
if used_network.overlaps(alias_network):
verrors.add(
f'{schema_name}.aliases.{i}',
f'The network {alias_network} is already in use by {message}.'
)
break
async def __convert_interface_datastore(self, data):
return {
'name': data['description'],
'dhcp': data['ipv4_dhcp'],
'ipv6auto': data['ipv6_auto'],
'vhid': data.get('failover_vhid'),
'critical': data.get('failover_critical') or False,
'group': data.get('failover_group'),
'mtu': data.get('mtu') or None,
}
    async def __create_interface_datastore(self, data, attrs):
        """Insert a new `network.interfaces` row (plus alias rows) for `data`.

        Async generator: yields the new interface row id right after the primary
        insert so the caller can record it (e.g. for rollback on failure), then
        finishes by inserting the remaining alias rows.
        """
        # Split the requested IPs into primary-row fields and extra alias rows.
        interface_attrs, aliases = self.convert_aliases_to_datastore(data)
        interface_attrs.update(attrs)

        interface_id = await self.middleware.call(
            'datastore.insert',
            'network.interfaces',
            dict(**(await self.__convert_interface_datastore(data)), **interface_attrs),
            {'prefix': 'int_'},
        )
        # Hand the id to the caller before alias inserts begin.
        yield interface_id

        for alias in aliases:
            alias['interface'] = interface_id
            await self.middleware.call(
                'datastore.insert', 'network.alias', dict(**alias), {'prefix': 'alias_'}
            )
@private
def convert_aliases_to_datastore(self, data):
da = data['aliases']
dfa = data.get('failover_aliases', [])
dfva = data.get('failover_virtual_aliases', [])
aliases = []
iface = {'address': '', 'address_b': '', 'netmask': '', 'version': '', 'vip': ''}
for idx, (a, fa, fva) in enumerate(zip_longest(da, dfa, dfva, fillvalue={})):
netmask = a['netmask']
ipa = a['address']
ipb = fa.get('address', '')
ipv = fva.get('address', '')
version = ipaddress.ip_interface(ipa).version
if idx == 0:
# first IP address is always written to `network_interface` table
iface['address'] = ipa
iface['address_b'] = ipb
iface['netmask'] = netmask
iface['version'] = version
iface['vip'] = ipv
else:
# this means it's the 2nd (or more) ip address
# on a singular interface so we need to write
# this entry to the alias table
aliases.append({
'address': ipa,
'address_b': ipb,
'netmask': netmask,
'version': version,
'vip': ipv,
})
return iface, aliases
    async def __set_lag_ports(self, lag_id, lag_ports):
        """Attach `lag_ports` as members of lagg row `lag_id`, wiping each member's old config.

        Returns the list of created `network.lagginterfacemembers` row ids.
        """
        lagports_ids = []
        for idx, i in enumerate(lag_ports):
            # `ordernum` preserves the user-specified member ordering.
            lagports_ids.append(
                await self.middleware.call(
                    'datastore.insert',
                    'network.lagginterfacemembers',
                    {'interfacegroup': lag_id, 'ordernum': idx, 'physnic': i},
                    {'prefix': 'lagg_'},
                )
            )

            """
            If the link aggregation member was configured we need to reset it,
            including removing all its IP addresses.
            """
            portinterface = await self.middleware.call(
                'datastore.query',
                'network.interfaces',
                [('interface', '=', i)],
                {'prefix': 'int_'},
            )
            if portinterface:
                portinterface = portinterface[0]
                # Clear all addressing and failover settings from the member row.
                portinterface.update({
                    'dhcp': False,
                    'address': '',
                    'address_b': '',
                    'netmask': 0,
                    'ipv6auto': False,
                    'vip': '',
                    'vhid': None,
                    'critical': False,
                    'group': None,
                    'mtu': None,
                })
                await self.middleware.call(
                    'datastore.update',
                    'network.interfaces',
                    portinterface['id'],
                    portinterface,
                    {'prefix': 'int_'},
                )
                # Drop any extra alias rows that belonged to the member.
                await self.middleware.call(
                    'datastore.delete',
                    'network.alias',
                    [('alias_interface', '=', portinterface['id'])],
                )
        return lagports_ids
    @accepts(
        Str('id'),
        Patch(
            'interface_create',
            'interface_update',
            ('rm', {'name': 'type'}),
            ('attr', {'update': True}),
        )
    )
    async def do_update(self, oid, data):
        """
        Update Interface of `id`.

        .. examples(cli)::

        Update network interface static IP:

        > network interface update enp0s3 aliases="192.168.0.10"
        """
        verrors = ValidationErrors()
        await self.middleware.call('network.common.check_failover_disabled', 'interface.update', verrors)

        # Merge the requested changes over the current state for validation.
        iface = await self.get_instance(oid)
        new = iface.copy()
        new.update(data)

        await self._common_validation(
            verrors, 'interface_update', new, iface['type'], update=iface
        )
        if await self.middleware.call('failover.licensed') and (new.get('ipv4_dhcp') or new.get('ipv6_auto')):
            verrors.add('interface_update.dhcp', 'Enabling DHCPv4/v6 on HA systems is unsupported.')
        verrors.check()

        # Snapshot datastores so a failed apply can be rolled back.
        await self.__save_datastores()

        interface_id = None
        try:
            config = await self.middleware.call(
                'datastore.query', 'network.interfaces', [('int_interface', '=', oid)]
            )
            if not config:
                # No row yet for this interface: create one (yields the new id
                # so the except-branch below can roll it back on failure).
                async for i in self.__create_interface_datastore(new, {
                    'interface': iface['name'],
                }):
                    interface_id = i
                config = (await self.middleware.call(
                    'datastore.query', 'network.interfaces', [('id', '=', interface_id)]
                ))[0]
            else:
                # Interface row already exists; rename it if requested.
                config = config[0]
                if config['int_interface'] != new['name']:
                    await self.middleware.call(
                        'datastore.update',
                        'network.interfaces',
                        config['id'],
                        {'int_interface': new['name']},
                    )

            if iface['type'] == 'PHYSICAL':
                # Record the NIC's hardware MAC; on iX HA hardware the B node
                # stores it in the link_address_b column instead.
                link_address_update = {'link_address': iface['state']['hardware_link_address']}
                if await self.middleware.call('truenas.is_ix_hardware'):
                    if await self.middleware.call('failover.node') == 'B':
                        link_address_update = {'link_address_b': iface['state']['hardware_link_address']}

                link_address_row = await self.middleware.call(
                    'datastore.query', 'network.interface_link_address', [['interface', '=', new['name']]],
                )
                if link_address_row:
                    await self.middleware.call(
                        'datastore.update', 'network.interface_link_address', link_address_row[0]['id'],
                        link_address_update,
                    )
                else:
                    await self.middleware.call(
                        'datastore.insert', 'network.interface_link_address', {
                            'interface': new['name'],
                            'link_address': None,
                            'link_address_b': None,
                            **link_address_update,
                        },
                    )

            if iface['type'] == 'BRIDGE':
                # Only touch bridge options explicitly present in the request.
                options = {}
                if 'bridge_members' in data:
                    options['members'] = data['bridge_members']
                for key in filter(lambda k: k in data, ('stp', 'enable_learning')):
                    options[key] = data[key]
                if options:
                    filters = [('interface', '=', config['id'])]
                    await self.middleware.call('datastore.update', 'network.bridge', filters, options)
            elif iface['type'] == 'LINK_AGGREGATION':
                # xmit hash only applies to LACP/LOADBALANCE; lacpdu rate to LACP only.
                xmit = lacpdu = None
                if new['lag_protocol'] in ('LACP', 'LOADBALANCE'):
                    xmit = new.get('xmit_hash_policy', 'layer2+3')
                    if new['lag_protocol'] == 'LACP':
                        lacpdu = new.get('lacpdu_rate', 'slow')

                lag_id = await self.middleware.call(
                    'datastore.update',
                    'network.lagginterface',
                    [('lagg_interface', '=', config['id'])],
                    {
                        'lagg_protocol': new['lag_protocol'].lower(),
                        'lagg_xmit_hash_policy': xmit,
                        'lagg_lacpdu_rate': lacpdu,
                    },
                )
                if 'lag_ports' in data:
                    # Replace the member set wholesale: delete then re-add.
                    await self.middleware.call(
                        'datastore.delete',
                        'network.lagginterfacemembers',
                        [('lagg_interfacegroup', '=', lag_id)],
                    )
                    await self.__set_lag_ports(lag_id, data['lag_ports'])
            elif iface['type'] == 'VLAN':
                await self.middleware.call(
                    'datastore.update',
                    'network.vlan',
                    [('vlan_vint', '=', iface['name'])],
                    {
                        'vint': new['name'],
                        'pint': new['vlan_parent_interface'],
                        'tag': new['vlan_tag'],
                        'pcp': new['vlan_pcp'],
                    },
                    {'prefix': 'vlan_'},
                )

            if not interface_id:
                # Existing interface: reconcile the primary row and alias rows
                # against the requested set of addresses.
                interface_attrs, new_aliases = self.convert_aliases_to_datastore(new)
                await self.middleware.call(
                    'datastore.update', 'network.interfaces', config['id'],
                    dict(**(await self.__convert_interface_datastore(new)), **interface_attrs),
                    {'prefix': 'int_'}
                )

                filters = [('interface', '=', config['id'])]
                prefix = {'prefix': 'alias_'}
                for curr in await self.middleware.call('datastore.query', 'network.alias', filters, prefix):
                    if curr['address'] not in [i['address'] for i in new_aliases]:
                        # being deleted
                        await self.middleware.call('datastore.delete', 'network.alias', curr['id'])
                    else:
                        # Iterate over a copy so pops don't disturb iteration.
                        for idx, new_alias in enumerate(new_aliases[:]):
                            if curr['address'] == new_alias['address']:
                                for i in new_alias.keys():
                                    if curr[i] != new_alias[i]:
                                        # it's being updated
                                        await self.middleware.call(
                                            'datastore.update', 'network.alias', curr['id'], new_alias, prefix
                                        )
                                        new_aliases.pop(idx)
                                        break
                                else:
                                    # nothing has changed but was included in the response
                                    # so ignore it and remove from list
                                    new_aliases.pop(idx)

                # getting here means the remainder of the entries in `new_aliases` are actually
                # new aliases being added
                for new_alias in new_aliases:
                    await self.middleware.call(
                        'datastore.insert',
                        'network.alias',
                        dict(interface=config['id'], **new_alias),
                        {'prefix': 'alias_'}
                    )
        except Exception:
            # Roll back a row we created in this call; a best-effort cleanup.
            if interface_id:
                with contextlib.suppress(Exception):
                    await self.middleware.call(
                        'datastore.delete', 'network.interfaces', interface_id
                    )
            raise

        return await self.get_instance(new['name'])
@accepts(Str('id'))
@returns(Str('interface_id'))
async def do_delete(self, oid):
"""
Delete Interface of `id`.
"""
verrors = ValidationErrors()
schema = 'interface.delete'
await self.middleware.call('network.common.check_failover_disabled', schema, verrors)
if iface := await self.get_instance(oid):
if iface['type'] == 'LINK_AGGREGATION':
filters = [('type', '=', 'VLAN'), ('vlan_parent_interface', '=', iface['id'])]
if vlans := ', '.join([i['name'] for i in await self.middleware.call('interface.query', filters)]):
verrors.add(schema, f'The following VLANs depend on this interface: {vlans}')
verrors.check()
await self.__save_datastores()
await self.delete_network_interface(oid)
return oid
@private
async def delete_network_interface(self, oid):
for lagg in await self.middleware.call(
'datastore.query', 'network.lagginterface', [('lagg_interface__int_interface', '=', oid)]
):
for lagg_member in await self.middleware.call(
'datastore.query', 'network.lagginterfacemembers', [('lagg_interfacegroup', '=', lagg['id'])]
):
await self.delete_network_interface(lagg_member['lagg_physnic'])
await self.middleware.call('datastore.delete', 'network.lagginterface', lagg['id'])
await self.middleware.call(
'datastore.delete', 'network.vlan', [('vlan_pint', '=', oid)]
)
await self.middleware.call(
'datastore.delete', 'network.vlan', [('vlan_vint', '=', oid)]
)
await self.middleware.call(
'datastore.delete', 'network.interfaces', [('int_interface', '=', oid)]
)
return oid
@accepts()
@returns(IPAddr(null=True))
@pass_app()
async def websocket_local_ip(self, app):
"""Returns the local ip address for this websocket session."""
try:
return app.origin.loc_addr
except AttributeError:
pass
    @accepts()
    @returns(Str(null=True))
    @pass_app()
    async def websocket_interface(self, app):
        """
        Returns the interface this websocket is connected to.
        """
        # Without the session's local IP there is nothing to match against.
        local_ip = await self.websocket_local_ip(app)
        if local_ip is None:
            return
        for iface in await self.middleware.call('interface.query'):
            # Match against both the configured aliases and the live (state) aliases.
            for _ in filter(lambda x: x['address'] == local_ip, iface['aliases'] + iface['state']['aliases']):
                # NOTE(review): this returns the whole interface dict although
                # @returns declares Str — confirm whether iface['name'] was intended.
                return iface
@accepts()
@returns(Dict(*[Str(i.value, enum=[i.value]) for i in XmitHashChoices]))
async def xmit_hash_policy_choices(self):
"""
Available transmit hash policies for the LACP or LOADBALANCE
lagg type interfaces.
"""
return {i.value: i.value for i in XmitHashChoices}
@accepts()
@returns(Dict(*[Str(i.value, enum=[i.value]) for i in LacpduRateChoices]))
async def lacpdu_rate_choices(self):
"""
Available lacpdu rate policies for the LACP lagg type interfaces.
"""
return {i.value: i.value for i in LacpduRateChoices}
@accepts(Dict(
'options',
Bool('bridge_members', default=False),
Bool('lag_ports', default=False),
Bool('vlan_parent', default=True),
List('exclude', default=['epair', 'tap', 'vnet']),
List('exclude_types', items=[Str('type', enum=[type_.name for type_ in InterfaceType])]),
List('include'),
))
@returns(Dict('available_interfaces', additional_attrs=True))
async def choices(self, options):
"""
Choices of available network interfaces.
`bridge_members` will include BRIDGE members.
`lag_ports` will include LINK_AGGREGATION ports.
`vlan_parent` will include VLAN parent interface.
`exclude` is a list of interfaces prefix to remove.
`include` is a list of interfaces that should not be removed.
"""
interfaces = await self.middleware.call('interface.query')
choices = {i['name']: i['description'] or i['name'] for i in interfaces}
for interface in interfaces:
if interface['description'] and interface['description'] != interface['name']:
choices[interface['name']] = f'{interface["name"]}: {interface["description"]}'
if any(interface['name'].startswith(exclude) for exclude in options['exclude']):
choices.pop(interface['name'], None)
if interface['type'] in options['exclude_types']:
choices.pop(interface['name'], None)
if not options['lag_ports']:
if interface['type'] == 'LINK_AGGREGATION':
for port in interface['lag_ports']:
if port not in options['include']:
choices.pop(port, None)
if not options['bridge_members']:
if interface['type'] == 'BRIDGE':
for member in interface['bridge_members']:
if member not in options['include']:
choices.pop(member, None)
if not options['vlan_parent']:
if interface['type'] == 'VLAN':
choices.pop(interface['vlan_parent_interface'], None)
return choices
@accepts(Str('id', null=True, default=None))
@returns(Dict(additional_attrs=True))
async def bridge_members_choices(self, id_):
"""
Return available interface choices that can be added to a `br` (bridge) interface.
`id` is name of existing bridge interface on the system that will have its member
interfaces included.
"""
exclude = {}
include = {}
for interface in await self.middleware.call('interface.query'):
if interface['type'] == 'BRIDGE':
if id_ and id_ == interface['id']:
# means this is an existing br interface that is being updated so we need to
# make sure and return the interfaces members
include.update({i: i for i in interface['bridge_members']})
exclude.update({interface['id']: interface['id']})
else:
# exclude interfaces that are already part of another bridge
exclude.update({i: i for i in interface['bridge_members']})
# adding a bridge as a member to another bridge is not allowed
exclude.update({interface['id']: interface['id']})
elif interface['type'] == 'LINK_AGGREGATION':
# exclude interfaces that are already part of a bond interface
exclude.update({i: i for i in interface['lag_ports']})
# add the interface to inclusion list and it will be discarded
# if it was also added to the exclusion list
include.update({interface['id']: interface['id']})
return {k: v for k, v in include.items() if k not in exclude}
@accepts(Str('id', null=True, default=None))
@returns(Dict(additional_attrs=True))
async def lag_ports_choices(self, id_):
"""
Return available interface choices that can be added to a `bond` (lag) interface.
`id` is name of existing bond interface on the system that will have its member
interfaces included.
"""
exclude = {}
include = {}
configured_ifaces = await self.middleware.call('interface.query_names_only')
for interface in await self.middleware.call('interface.query'):
if interface['type'] == 'LINK_AGGREGATION':
if id_ and id_ == interface['id']:
# means this is an existing bond interface that is being updated so we need to
# make sure and return the interfaces members
include.update({i: i for i in interface['lag_ports']})
exclude.update({interface['id']: interface['id']})
else:
# exclude interfaces that are already part of another bond
exclude.update({i: i for i in interface['lag_ports']})
# it's perfectly normal to add a bond as a member interface to another bond
include.update({interface['id']: interface['id']})
elif interface['type'] == 'VLAN':
# adding a vlan or the vlan's parent interface to a bond is not allowed
exclude.update({interface['id']: interface['id']})
exclude.update({interface['vlan_parent_interface']: interface['vlan_parent_interface']})
elif interface['type'] == 'BRIDGE':
# adding a br interface to a bond is not allowed
exclude.update({interface['id']: interface['id']})
# exclude interfaces that are already part of a bridge interface
exclude.update({i: i for i in interface['bridge_members']})
elif interface['id'] in configured_ifaces:
# only remaining type of interface is PHYSICAL but if this is
# an interface that has already been configured then we obviously
# don't want to allow it to be added to a bond (user will need
# to wipe the config of said interface before it can be added)
exclude.update({interface['id']: interface['id']})
# add the interface to inclusion list and it will be discarded
# if it was also added to the exclusion list
include.update({interface['id']: interface['id']})
return {k: v for k, v in include.items() if k not in exclude}
@accepts()
@returns(Dict(additional_attrs=True))
async def vlan_parent_interface_choices(self):
"""
Return available interface choices for `vlan_parent_interface` attribute.
"""
return await self.middleware.call('interface.choices', {
'bridge_members': True,
'lag_ports': False,
'vlan_parent': True,
'exclude_types': [InterfaceType.BRIDGE.value, InterfaceType.VLAN.value],
})
    @private
    async def sync(self, wait_dhcp=False):
        """
        Sync interfaces configured in database to the OS.

        Ordering matters throughout: laggs are created before vlans, vlans get
        their MTU last, and bridges are synced after their members.
        """
        await self.middleware.call_hook('interface.pre_sync')

        # The VRRP event thread just reads directly from the database
        # so there is no reason to actually configure the interfaces
        # on the OS first. We can update the thread since the db has
        # already been updated by the time this is called.
        await self.middleware.call('vrrpthread.set_non_crit_ifaces')

        interfaces = [i['int_interface'] for i in (await self.middleware.call('datastore.query', 'network.interfaces'))]
        cloned_interfaces = []
        parent_interfaces = []
        sync_interface_opts = defaultdict(dict)

        # First of all we need to create the virtual interfaces
        # LAGG comes first and then VLAN
        laggs = await self.middleware.call('datastore.query', 'network.lagginterface')
        for lagg in laggs:
            name = lagg['lagg_interface']['int_interface']
            members = await self.middleware.call('datastore.query', 'network.lagginterfacemembers',
                                                 [('lagg_interfacegroup_id', '=', lagg['id'])],
                                                 {'order_by': ['lagg_ordernum']})
            cloned_interfaces.append(name)
            try:
                await self.middleware.call(
                    'interface.lag_setup', lagg, members, parent_interfaces, sync_interface_opts
                )
            except Exception:
                # keep going: one broken lagg must not block the rest of the sync
                self.logger.error('Error setting up LAG %s', name, exc_info=True)

        vlans = await self.middleware.call('datastore.query', 'network.vlan')
        for vlan in vlans:
            cloned_interfaces.append(vlan['vlan_vint'])
            try:
                await self.middleware.call('interface.vlan_setup', vlan, parent_interfaces)
            except Exception:
                self.logger.error('Error setting up VLAN %s', vlan['vlan_vint'], exc_info=True)

        run_dhcp = []
        # Set VLAN interfaces MTU last as they are restricted by underlying interfaces MTU
        for interface in sorted(
            filter(lambda i: not i.startswith('br'), interfaces), key=lambda x: x.startswith('vlan')
        ):
            try:
                if await self.sync_interface(interface, sync_interface_opts[interface]):
                    run_dhcp.append(interface)
            except Exception:
                self.logger.error('Failed to configure {}'.format(interface), exc_info=True)

        bridges = await self.middleware.call('datastore.query', 'network.bridge')
        for bridge in bridges:
            name = bridge['interface']['int_interface']
            cloned_interfaces.append(name)
            try:
                await self.middleware.call('interface.bridge_setup', bridge, parent_interfaces)
            except Exception:
                self.logger.error('Error setting up bridge %s', name, exc_info=True)
            # Finally sync bridge interface
            try:
                if await self.sync_interface(name, sync_interface_opts[name]):
                    run_dhcp.append(name)
            except Exception:
                self.logger.error('Failed to configure {}'.format(name), exc_info=True)

        if run_dhcp:
            # update dhclient.conf before we run dhclient to ensure the hostname/fqdn
            # and/or the supersede routers config options are set properly
            await self.middleware.call('etc.generate', 'dhclient')
            await asyncio.wait([
                self.middleware.create_task(self.run_dhcp(interface, wait_dhcp)) for interface in run_dhcp
            ])

        self.logger.info('Interfaces in database: {}'.format(', '.join(interfaces) or 'NONE'))

        internal_interfaces = tuple(await self.middleware.call('interface.internal_interfaces'))
        dhclient_aws = []
        for name, iface in await self.middleware.run_in_thread(lambda: list(netif.list_interfaces().items())):
            # Skip internal interfaces
            if name.startswith(internal_interfaces):
                continue

            # If there are no interfaces configured we start DHCP on all
            if not interfaces:
                # We should unconfigure interface first before doing autoconfigure. This can be required for cases
                # like the following:
                # 1) Fresh install with system having 1 NIC
                # 2) Configure static ip for the NIC leaving dhcp checked
                # 3) Test changes
                # 4) Do not save changes and wait for time out
                # 5) Rollback happens where the only nic is removed from database
                # 6) If we don't unconfigure, autoconfigure is called which is supposed to start dhclient on the
                #    interface. However this will result in the static ip still being set.
                await self.middleware.call('interface.unconfigure', iface, cloned_interfaces, parent_interfaces)
                if not iface.cloned:
                    # We only autoconfigure physical interfaces because if this is a delete operation
                    # and the interface that was deleted is a "clone" (vlan/br/bond) interface, then
                    # interface.unconfigure deletes the interface. Physical interfaces can't be "deleted"
                    # like virtual interfaces.
                    dhclient_aws.append(asyncio.ensure_future(
                        self.middleware.call('interface.autoconfigure', iface, wait_dhcp)
                    ))
            else:
                # Destroy interfaces which are not in database

                # Skip interfaces in database
                if name in interfaces:
                    continue

                await self.middleware.call('interface.unconfigure', iface, cloned_interfaces, parent_interfaces)

        if wait_dhcp and dhclient_aws:
            await asyncio.wait(dhclient_aws, timeout=30)

        # first interface that is configured, we kill dhclient on _all_ interfaces
        # but dhclient could have added items to /etc/resolv.conf. To "fix" this
        # we run dns.sync which will wipe the contents of resolv.conf and it is
        # expected that the end-user fills this out via the network global webUI page
        # OR if this is a system that has been freshly migrated from CORE to SCALE
        # then we need to make sure that if the user didn't have network configured
        # but left interfaces configured as DHCP only, then we need to generate the
        # /etc/resolv.conf here. In practice, this is a potential race condition
        # here because dhclient could not have received a lease from the dhcp server
        # for all the interfaces that have dhclient running. There is, currently,
        # no better solution unless we redesigned significant portions of our network
        # API to account for this...
        await self.middleware.call('dns.sync')

        try:
            # static routes explicitly defined by the user need to be setup
            await self.middleware.call('staticroute.sync')
        except Exception:
            self.logger.info('Failed to sync static routes', exc_info=True)

        try:
            # We may need to set up routes again as they may have been removed while changing IPs
            await self.middleware.call('route.sync')
        except Exception:
            self.logger.info('Failed to sync routes', exc_info=True)

        await self.middleware.call_hook('interface.post_sync')
@private
async def sync_interface(self, name, options=None):
options = options or {}
try:
data = await self.middleware.call(
'datastore.query', 'network.interfaces',
[('int_interface', '=', name)], {'get': True}
)
except IndexError:
return
aliases = await self.middleware.call(
'datastore.query', 'network.alias',
[('alias_interface_id', '=', data['id'])]
)
return await self.middleware.call('interface.configure', data, aliases, options)
    @private
    async def run_dhcp(self, name, wait_dhcp):
        """Start dhclient for interface `name`, logging (never raising) on failure."""
        self.logger.debug('Starting dhclient for {}'.format(name))
        try:
            await self.middleware.call('interface.dhclient_start', name, wait_dhcp)
        except Exception:
            # best-effort: a DHCP failure on one interface must not abort the sync
            self.logger.error('Failed to run DHCP for {}'.format(name), exc_info=True)
    @accepts(
        Dict(
            'ips',
            Bool('ipv4', default=True),
            Bool('ipv6', default=True),
            Bool('ipv6_link_local', default=False),
            Bool('loopback', default=False),
            Bool('any', default=False),
            Bool('static', default=False),
        )
    )
    @returns(List('in_use_ips', items=[Dict(
        'in_use_ip',
        Str('type', required=True),
        IPAddr('address', required=True),
        Int('netmask', required=True),
        Str('broadcast'),
    )]))
    def ip_in_use(self, choices):
        """
        Get all IPv4 / Ipv6 from all valid interfaces, excluding tap and epair.

        `loopback` will return loopback interface addresses.

        `any` will return wildcard addresses (0.0.0.0 and ::).

        `static` when enabled will ensure we only return static ip's configured.

        Returns a list of dicts - eg -

        [
            {
                "type": "INET6",
                "address": "fe80::5054:ff:fe16:4aac",
                "netmask": 64
            },
            {
                "type": "INET",
                "address": "192.168.122.148",
                "netmask": 24,
                "broadcast": "192.168.122.255"
            },
        ]

        """
        list_of_ip = []
        static_ips = {}
        if choices['static']:
            # Build the set of statically configured addresses; on HA-licensed
            # systems the virtual (VIP) aliases are the meaningful ones.
            licensed = self.middleware.call_sync('failover.licensed')
            for i in self.middleware.call_sync('interface.query'):
                if licensed:
                    for alias in i.get('failover_virtual_aliases') or []:
                        static_ips[alias['address']] = alias['address']
                else:
                    for alias in i['aliases']:
                        static_ips[alias['address']] = alias['address']

        if choices['any']:
            # Wildcard pseudo-addresses requested by the caller.
            if choices['ipv4']:
                list_of_ip.append({
                    'type': 'INET',
                    'address': '0.0.0.0',
                    'netmask': 0,
                    'broadcast': '255.255.255.255',
                })

            if choices['ipv6']:
                list_of_ip.append({
                    'type': 'INET6',
                    'address': '::',
                    'netmask': 0,
                    'broadcast': 'ff02::1',
                })

        ignore_nics = self.middleware.call_sync('interface.internal_interfaces')
        if choices['loopback']:
            # assumes 'lo' is always present in the internal interface list —
            # .remove() would raise otherwise (TODO confirm)
            ignore_nics.remove('lo')
            static_ips['127.0.0.1'] = '127.0.0.1'
            static_ips['::1'] = '::1'

        ignore_nics = tuple(ignore_nics)
        for iface in filter(lambda x: not x.orig_name.startswith(ignore_nics), list(netif.list_interfaces().values())):
            try:
                aliases_list = iface.asdict()['aliases']
            except FileNotFoundError:
                # This happens on freebsd where we have a race condition when the interface
                # might no longer possibly exist when we try to retrieve data from it
                pass
            else:
                for alias_dict in filter(lambda d: not choices['static'] or d['address'] in static_ips, aliases_list):

                    if choices['ipv4'] and alias_dict['type'] == 'INET':
                        list_of_ip.append(alias_dict)

                    if choices['ipv6'] and alias_dict['type'] == 'INET6':
                        if not choices['ipv6_link_local']:
                            if ipaddress.ip_address(alias_dict['address']) in ipaddress.ip_network('fe80::/64'):
                                # link-local addresses excluded unless asked for
                                continue
                        list_of_ip.append(alias_dict)

        return list_of_ip
async def configure_http_proxy(middleware, *args, **kwargs):
    """
    Configure the `http_proxy` and `https_proxy` environment vars
    from the database.
    """
    config = await middleware.call('datastore.config', 'network.globalconfiguration')
    proxy = config['gc_httpproxy']
    # both http and https traffic flow through the same configured proxy
    await middleware.call('core.environ_update', {'http_proxy': proxy, 'https_proxy': proxy})
async def attach_interface(middleware, iface):
    """Handle a hot-plugged NIC: re-apply its db config and start DHCP when needed."""
    platform, node_position = await middleware.call('failover.ha_mode')
    if iface == 'ntb0' and platform == 'LAJOLLA2' and node_position == 'B':
        # The f-series platform is an AMD system. This means it's using a different
        # driver for the ntb heartbeat interface (AMD vs Intel). The AMD ntb driver
        # operates subtly differently than the Intel driver. If the A controller
        # is rebooted, the B controllers ntb0 interface is hot-plugged (i.e. removed).
        # When the A controller comes back online, the ntb0 interface is hot-plugged
        # (i.e. added). For this platform we need to re-add the ip address.
        await middleware.call('failover.internal_interface.sync', 'ntb0', '169.254.10.2')
        return

    internal = await middleware.call('interface.internal_interfaces')
    if any(name.startswith(iface) for name in internal):
        # internal interfaces are managed elsewhere
        return

    if await middleware.call('interface.sync_interface', iface):
        await middleware.call('interface.run_dhcp', iface, False)
async def udevd_ifnet_hook(middleware, data):
    """
    This hook is called on udevd interface type events. It's purpose
    is to:
        1. if this is a physical interface being added
            (all other interface types are ignored)
        2. remove any IPs on said interface if they dont
            exist in the db and/or start dhcp on it
        3. OR add any IPs on said interface if they exist
            in the db
    """
    # Only "net"-subsystem "add" events are of interest. The previous check used
    # `and`, which only bailed out when BOTH fields were wrong — so e.g. a
    # SUBSYSTEM == 'net' event with ACTION == 'remove' fell through and was
    # treated as a hot-plug add. De Morgan: reject when either field mismatches.
    if data.get('SUBSYSTEM') != 'net' or data.get('ACTION') != 'add':
        return

    iface = data.get('INTERFACE')
    ignore = netif.CLONED_PREFIXES + netif.INTERNAL_INTERFACES
    if iface is None or iface.startswith(ignore):
        # if the udevd event for the interface doesn't have a name (doubt this happens on SCALE)
        # or if the interface startswith CLONED_PREFIXES, then we return since we only care about
        # physical interfaces that are hot-plugged into the system.
        return

    await attach_interface(middleware, iface)
async def __activate_service_announcements(middleware, event_type, args):
    """On system.ready, (re)apply the configured service announcement settings."""
    config = await middleware.call("network.configuration.config")
    await middleware.call("network.configuration.toggle_announcement", config["service_announcement"])
async def setup(middleware):
    """Plugin entry point: register events/hooks and prime network-dependent state."""
    middleware.event_register('network.config', 'Sent on network configuration changes.')

    # Configure http proxy on startup and on network.config events
    middleware.create_task(configure_http_proxy(middleware))
    middleware.event_subscribe('network.config', configure_http_proxy)

    # (Re)announce services once the system is fully up.
    middleware.event_subscribe('system.ready', __activate_service_announcements)

    # React to hot-plugged NICs.
    middleware.register_hook('udev.net', udevd_ifnet_hook)

    # Only run DNS sync in the first run. This avoids calling the routine again
    # on middlewared restart.
    if not await middleware.call('system.ready'):
        try:
            await middleware.call('dns.sync')
        except Exception:
            middleware.logger.error('Failed to setup DNS', exc_info=True)
| 87,457 | Python | .py | 1,748 | 36.589245 | 120 | 0.560426 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,475 | cron.py | truenas_middleware/src/middlewared/middlewared/plugins/cron.py | import contextlib
import errno
import syslog
from middlewared.schema import accepts, Bool, Cron, Dict, Int, Patch, returns, Str
from middlewared.service import CallError, CRUDService, job, private, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.user_context import run_command_with_user_context
class CronJobModel(sa.Model):
    # SQLAlchemy model backing the `tasks.cronjob` datastore table.
    __tablename__ = 'tasks_cronjob'

    id = sa.Column(sa.Integer(), primary_key=True)
    # schedule columns hold raw crontab field strings (e.g. "*/5", "1-5")
    cron_minute = sa.Column(sa.String(100), default="00")
    cron_hour = sa.Column(sa.String(100), default="*")
    cron_daymonth = sa.Column(sa.String(100), default="*")
    cron_month = sa.Column(sa.String(100), default='*')
    cron_dayweek = sa.Column(sa.String(100), default="*")
    cron_user = sa.Column(sa.String(60))
    cron_command = sa.Column(sa.Text())
    cron_description = sa.Column(sa.String(200))
    cron_enabled = sa.Column(sa.Boolean(), default=True)
    # per the service docstring, True means "suppress this stream" (> /dev/null)
    cron_stdout = sa.Column(sa.Boolean(), default=True)
    cron_stderr = sa.Column(sa.Boolean(), default=False)
class CronJobService(CRUDService):
    class Config:
        # CRUD wiring: backing table, the `cron_` column prefix, and the extend
        # method that converts raw rows into the public representation.
        datastore = 'tasks.cronjob'
        datastore_prefix = 'cron_'
        datastore_extend = 'cronjob.cron_extend'
        namespace = 'cronjob'
        cli_namespace = 'task.cron_job'

    # Public entry schema: everything accepted at creation, plus the row id.
    ENTRY = Patch(
        'cron_job_create', 'cron_job_entry',
        ('add', Int('id')),
    )
    @private
    def cron_extend(self, data):
        """Datastore extend hook: fold raw cron_* columns into a `schedule` dict in place."""
        Cron.convert_db_format_to_schedule(data)
        return data
@private
async def construct_cron_command(self, schedule, user, command, stdout=True, stderr=True):
return list(
filter(
bool, (
schedule['minute'], schedule['hour'], schedule['dom'], schedule['month'],
schedule['dow'], user,
'PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:/root/bin"',
command.replace('\n', ''),
'> /dev/null' if stdout else '', '2> /dev/null' if stderr else ''
)
)
)
@private
async def validate_data(self, data, schema):
verrors = ValidationErrors()
user = data.get('user')
if user:
# Windows users can have spaces in their usernames
# http://www.freebsd.org/cgi/query-pr.cgi?pr=164808
if ' ' in user:
verrors.add(
f'{schema}.user',
'Usernames cannot have spaces'
)
else:
user_data = None
with contextlib.suppress(KeyError):
user_data = await self.middleware.call('user.get_user_obj', {'username': user})
if not user_data:
verrors.add(
f'{schema}.user',
'Specified user does not exist'
)
command = data.get('command')
if not command:
verrors.add(
f'{schema}.command',
'Please specify a command for cronjob task.'
)
return verrors, data
@accepts(
Dict(
'cron_job_create',
Bool('enabled'),
Bool('stderr', default=False),
Bool('stdout', default=True),
Cron(
'schedule',
defaults={'minute': '00'}
),
Str('command', required=True),
Str('description'),
Str('user', required=True),
register=True
)
)
async def do_create(self, data):
"""
Create a new cron job.
`stderr` and `stdout` are boolean values which if `true`, represent that we would like to suppress
standard error / standard output respectively.
.. examples(websocket)::
Create a cron job which executes `touch /tmp/testfile` after every 5 minutes.
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "cronjob.create",
"params": [{
"enabled": true,
"schedule": {
"minute": "5",
"hour": "*",
"dom": "*",
"month": "*",
"dow": "*"
},
"command": "touch /tmp/testfile",
"description": "Test command",
"user": "root",
"stderr": true,
"stdout": true
}]
}
"""
verrors, data = await self.validate_data(data, 'cron_job_create')
verrors.check()
Cron.convert_schedule_to_db_format(data)
data['id'] = await self.middleware.call(
'datastore.insert',
self._config.datastore,
data,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('service.restart', 'cron')
return await self.get_instance(data['id'])
async def do_update(self, id_, data):
"""
Update cronjob of `id`.
"""
task_data = await self.query(filters=[('id', '=', id_)], options={'get': True})
original_data = task_data.copy()
task_data.update(data)
verrors, task_data = await self.validate_data(task_data, 'cron_job_update')
verrors.check()
Cron.convert_schedule_to_db_format(task_data)
Cron.convert_schedule_to_db_format(original_data)
if len(set(task_data.items()) ^ set(original_data.items())) > 0:
await self.middleware.call(
'datastore.update',
self._config.datastore,
id_,
task_data,
{'prefix': self._config.datastore_prefix}
)
await self.middleware.call('service.restart', 'cron')
return await self.get_instance(id_)
async def do_delete(self, id_):
"""
Delete cronjob of `id`.
"""
response = await self.middleware.call(
'datastore.delete',
self._config.datastore,
id_
)
await self.middleware.call('service.restart', 'cron')
return response
@accepts(
Int('id'),
Bool('skip_disabled', default=False),
)
@returns()
@job(lock=lambda args: f'cron_job_run_{args[0]}', logs=True, lock_queue_size=1)
def run(self, job, id_, skip_disabled):
"""
Job to run cronjob task of `id`.
"""
def __cron_log(line):
job.logs_fd.write(line)
syslog.syslog(syslog.LOG_INFO, line.decode())
cron_task = self.middleware.call_sync('cronjob.get_instance', id_)
if skip_disabled and not cron_task['enabled']:
raise CallError('Cron job is disabled', errno.EINVAL)
cron_cmd = ' '.join(
self.middleware.call_sync(
'cronjob.construct_cron_command', cron_task['schedule'], cron_task['user'],
cron_task['command'], cron_task['stdout'], cron_task['stderr']
)[7:]
)
job.set_progress(
10,
'Executing Cron Task'
)
syslog.openlog('cron', facility=syslog.LOG_CRON)
syslog.syslog(syslog.LOG_INFO, f'({cron_task["user"]}) CMD ({cron_cmd})')
cp = run_command_with_user_context(
cron_cmd, cron_task['user'], callback=__cron_log,
)
syslog.closelog()
job.set_progress(
85,
'Executed Cron Task'
)
if cp.stdout:
email = (
self.middleware.call_sync('user.query', [['username', '=', cron_task['user']]], {'get': True})
)['email']
stdout = cp.stdout.decode()
if email:
mail_job = self.middleware.call_sync(
'mail.send', {
'subject': 'CronTask Run',
'text': stdout,
'to': [email]
}
)
job.set_progress(
95,
'Sending mail for Cron Task output'
)
mail_job.wait_sync()
if mail_job.error:
job.logs_fd.write(f'Failed to send email for CronTask run: {mail_job.error}'.encode())
else:
job.set_progress(
95,
'Email for root user not configured. Skipping sending mail.'
)
job.logs_fd.write(f'Executed CronTask - {cron_cmd}: {stdout}'.encode())
if cp.returncode:
raise CallError(f'CronTask "{cron_cmd}" exited with {cp.returncode} (non-zero) exit status.')
job.set_progress(
100,
'Execution of Cron Task complete.'
)
| 9,023 | Python | .py | 233 | 26.429185 | 110 | 0.51865 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,476 | ssh.py | truenas_middleware/src/middlewared/middlewared/plugins/ssh.py | import base64
import hashlib
import os
import subprocess
import syslog
import middlewared.sqlalchemy as sa
from middlewared.async_validators import validate_port
from middlewared.common.ports import ServicePortDelegate
from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, returns, Str, ValidationErrors
from middlewared.service import private, SystemServiceService
from middlewared.validators import Range
class SSHModel(sa.Model):
    """SQLAlchemy model for the single-row SSH service configuration.

    Host private keys are stored encrypted (EncryptedText); public keys and
    certificates are stored in plain text.
    """
    __tablename__ = 'services_ssh'

    id = sa.Column(sa.Integer(), primary_key=True)
    ssh_bindiface = sa.Column(sa.MultiSelectField(), default=[])
    ssh_tcpport = sa.Column(sa.Integer(), default=22)
    ssh_password_login_groups = sa.Column(sa.JSON(list))
    ssh_passwordauth = sa.Column(sa.Boolean(), default=False)
    ssh_kerberosauth = sa.Column(sa.Boolean(), default=False)
    ssh_tcpfwd = sa.Column(sa.Boolean(), default=False)
    ssh_compression = sa.Column(sa.Boolean(), default=False)
    ssh_privatekey = sa.Column(sa.EncryptedText())
    ssh_sftp_log_level = sa.Column(sa.String(20))
    ssh_sftp_log_facility = sa.Column(sa.String(20))
    ssh_host_dsa_key = sa.Column(sa.EncryptedText(), nullable=True)
    ssh_host_dsa_key_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_dsa_key_cert_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_ecdsa_key = sa.Column(sa.EncryptedText(), nullable=True)
    ssh_host_ecdsa_key_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_ecdsa_key_cert_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_ed25519_key = sa.Column(sa.EncryptedText(), nullable=True)
    ssh_host_ed25519_key_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_ed25519_key_cert_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_key = sa.Column(sa.EncryptedText(), nullable=True)
    ssh_host_key_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_rsa_key = sa.Column(sa.EncryptedText(), nullable=True)
    ssh_host_rsa_key_pub = sa.Column(sa.Text(), nullable=True)
    ssh_host_rsa_key_cert_pub = sa.Column(sa.Text(), nullable=True)
    ssh_weak_ciphers = sa.Column(sa.JSON(list))
    ssh_options = sa.Column(sa.Text())
class SSHService(SystemServiceService):
    """Configuration service for the SSH daemon, including host key
    generation, persistence and cleanup."""

    class Config:
        datastore = "services.ssh"
        service = "ssh"
        datastore_prefix = "ssh_"
        cli_namespace = 'service.ssh'

    ENTRY = Dict(
        'ssh_entry',
        List('bindiface', items=[Str('iface')], required=True),
        Int('tcpport', validators=[Range(min_=1, max_=65535)], required=True),
        List('password_login_groups', items=[Str('group')], required=True),
        Bool('passwordauth', required=True),
        Bool('kerberosauth', required=True),
        Bool('tcpfwd', required=True),
        Bool('compression', required=True),
        Str(
            'sftp_log_level', enum=['', 'QUIET', 'FATAL', 'ERROR', 'INFO', 'VERBOSE', 'DEBUG', 'DEBUG2', 'DEBUG3'],
            required=True
        ),
        Str(
            'sftp_log_facility', enum=[
                '', 'DAEMON', 'USER', 'AUTH', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3', 'LOCAL4',
                'LOCAL5', 'LOCAL6', 'LOCAL7'
            ], required=True
        ),
        List('weak_ciphers', items=[Str('cipher', enum=['AES128-CBC', 'NONE'])], required=True),
        Str('options', max_length=None, required=True),
        Str('privatekey', required=True, max_length=None),
        Str('host_dsa_key', required=True, max_length=None, null=True),
        Str('host_dsa_key_pub', required=True, max_length=None, null=True),
        Str('host_dsa_key_cert_pub', required=True, max_length=None, null=True),
        Str('host_ecdsa_key', required=True, max_length=None, null=True),
        Str('host_ecdsa_key_pub', required=True, max_length=None, null=True),
        Str('host_ecdsa_key_cert_pub', required=True, max_length=None, null=True),
        Str('host_ed25519_key', required=True, max_length=None, null=True),
        Str('host_ed25519_key_pub', required=True, max_length=None, null=True),
        Str('host_ed25519_key_cert_pub', required=True, max_length=None, null=True),
        Str('host_key', required=True, max_length=None, null=True),
        Str('host_key_pub', required=True, max_length=None, null=True),
        Str('host_rsa_key', required=True, max_length=None, null=True),
        Str('host_rsa_key_pub', required=True, max_length=None, null=True),
        Str('host_rsa_key_cert_pub', required=True, max_length=None, null=True),
        Int('id', required=True),
    )

    @accepts()
    @returns(Dict('ssh_bind_interfaces_choices', additional_attrs=True))
    def bindiface_choices(self):
        """
        Available choices for the bindiface attribute of SSH service.
        """
        return self.middleware.call_sync('interface.choices')

    @accepts(
        Patch(
            'ssh_entry', 'ssh_update',
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'privatekey'}),
            ('rm', {'name': 'host_dsa_key'}),
            ('rm', {'name': 'host_dsa_key_pub'}),
            ('rm', {'name': 'host_dsa_key_cert_pub'}),
            ('rm', {'name': 'host_ecdsa_key'}),
            ('rm', {'name': 'host_ecdsa_key_pub'}),
            ('rm', {'name': 'host_ecdsa_key_cert_pub'}),
            ('rm', {'name': 'host_ed25519_key'}),
            ('rm', {'name': 'host_ed25519_key_pub'}),
            ('rm', {'name': 'host_ed25519_key_cert_pub'}),
            ('rm', {'name': 'host_key'}),
            ('rm', {'name': 'host_key_pub'}),
            ('rm', {'name': 'host_rsa_key'}),
            ('rm', {'name': 'host_rsa_key_pub'}),
            ('rm', {'name': 'host_rsa_key_cert_pub'}),
            ('attr', {'update': True}),
        ), audit='Update SSH configuration',
    )
    async def do_update(self, data):
        """
        Update settings of SSH daemon service.

        If `bindiface` is empty it will listen for all available addresses.

        .. examples(websocket)::

          Make sshd listen only to igb0 interface.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "ssh.update",
                "params": [{
                    "bindiface": ["igb0"]
                }]
            }
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        if new['bindiface']:
            iface_choices = await self.middleware.call('ssh.bindiface_choices')
            invalid_ifaces = list(filter(lambda x: x not in iface_choices, new['bindiface']))
            if invalid_ifaces:
                verrors.add(
                    'ssh_update.bindiface',
                    f'The following interfaces are not valid: {", ".join(invalid_ifaces)}',
                )

        verrors.extend(await validate_port(self.middleware, 'ssh_update.tcpport', new['tcpport'], 'ssh'))
        verrors.check()

        await self._update_service(old, new)

        # Log the ECDSA host key fingerprint so it can be verified out of band.
        # NOTE(review): path is the historical FreeBSD location — confirm it is
        # still populated on current systems.
        keyfile = "/usr/local/etc/ssh/ssh_host_ecdsa_key.pub"
        if os.path.exists(keyfile):
            with open(keyfile, "rb") as f:
                pubkey = f.read().strip().split(None, 3)[1]
            decoded_key = base64.b64decode(pubkey)
            key_digest = hashlib.sha256(decoded_key).digest()
            # OpenSSH-style SHA256 fingerprint: base64 without '=' padding.
            ssh_fingerprint = (b"SHA256:" + base64.b64encode(key_digest).replace(b"=", b"")).decode("utf-8")

            syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
            syslog.syslog(syslog.LOG_ERR, 'ECDSA Fingerprint of the SSH KEY: ' + ssh_fingerprint)
            syslog.closelog()

        return await self.config()

    # (on-disk path, datastore column) pairs for every host key artifact.
    keys = [
        (
            os.path.join("/etc/ssh", i),
            i.replace(".", "_",).replace("-", "_")
        )
        for i in [
            "ssh_host_key",
            "ssh_host_key.pub",
            "ssh_host_dsa_key",
            "ssh_host_dsa_key.pub",
            "ssh_host_dsa_key-cert.pub",
            "ssh_host_ecdsa_key",
            "ssh_host_ecdsa_key.pub",
            "ssh_host_ecdsa_key-cert.pub",
            "ssh_host_rsa_key",
            "ssh_host_rsa_key.pub",
            "ssh_host_rsa_key-cert.pub",
            "ssh_host_ed25519_key",
            "ssh_host_ed25519_key.pub",
            "ssh_host_ed25519_key-cert.pub",
        ]
    ]

    @private
    def cleanup_keys(self):
        """Remove on-disk host key files that have no corresponding value in
        the database (e.g. after a config restore)."""
        config = self.middleware.call_sync("datastore.config", "services.ssh")
        for path, column in self.keys:
            if not config[column] and os.path.exists(path):
                self.middleware.logger.warning("Removing irrelevant SSH host key %r", path)
                os.unlink(path)

    @private
    def generate_keys(self):
        """Generate any missing SSH host keys via `ssh-keygen -A`."""
        self.middleware.logger.debug("Generating SSH host keys")
        p = subprocess.run(
            # For each of the key types (rsa, dsa, ecdsa and ed25519) for which host keys do not exist,
            # generate the host keys with the default key file path, an empty passphrase, default bits
            # for the key type, and default comment.
            ["ssh-keygen", "-A"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            encoding="utf-8",
            errors="ignore"
        )
        if p.returncode != 0:
            self.middleware.logger.error("Error generating SSH host keys: %s", p.stdout)

    @private
    def save_keys(self):
        """Persist on-disk host keys into the database (base64-encoded),
        writing only columns whose content changed."""
        update = {}
        old = self.middleware.call_sync('datastore.query', 'services_ssh', [], {'get': True})
        for path, column in self.keys:
            if os.path.exists(path):
                with open(path, "rb") as f:
                    data = base64.b64encode(f.read()).decode("ascii")

                if data != old[column]:
                    update[column] = data

        if update:
            # ha_sync=False: key sync between HA nodes is handled elsewhere.
            self.middleware.call_sync('datastore.update', 'services.ssh', old['id'], update, {'ha_sync': False})
class SSHServicePortDelegate(ServicePortDelegate):
    """Port attachment delegate so the port subsystem knows which TCP port
    (config field 'tcpport') the SSH service occupies."""
    name = 'ssh'
    namespace = 'ssh'
    port_fields = ['tcpport']
    title = 'SSH Service'
async def setup(middleware):
    """Plugin setup: register the port delegate and, on first boot, drop
    stale host keys before generating any missing ones (order matters —
    cleanup must precede generation)."""
    await middleware.call('port.register_attachment_delegate', SSHServicePortDelegate(middleware))
    if await middleware.call('core.is_starting_during_boot'):
        await middleware.call('ssh.cleanup_keys')
        await middleware.call('ssh.generate_keys')
| 10,368 | Python | .py | 221 | 37.266968 | 115 | 0.594127 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,477 | etc.py | truenas_middleware/src/middlewared/middlewared/plugins/etc.py | import asyncio
from collections import defaultdict
import imp
import os
from mako import exceptions
from middlewared.service import CallError, Service
from middlewared.utils.io import write_if_changed, FileChanges
from middlewared.utils.mako import get_template
# Defaults applied to generated /etc files when a GROUPS entry does not
# specify its own 'mode', 'owner' or 'group'.
DEFAULT_ETC_PERMS = 0o644
DEFAULT_ETC_XID = 0  # uid/gid 0 == root
class FileShouldNotExist(Exception):
    """Raised by a renderer to signal that the target configuration file
    must be removed from /etc instead of being (re)written."""
class MakoRenderer(object):
    """Renders a Mako template from etc_files/ into a configuration file body."""

    def __init__(self, service):
        # service: the owning EtcService instance (provides middleware/logger).
        self.service = service

    async def render(self, path, ctx):
        """Render the template at `path` (without '.mako' suffix) and return
        the resulting text.  `ctx` is passed through as render_ctx.
        May raise FileShouldNotExist to request file removal."""
        try:
            # Mako is not asyncio friendly so run it within a thread
            def do():
                # Get the template by its relative path
                tmpl = get_template(os.path.relpath(path, os.path.dirname(os.path.dirname(__file__))) + ".mako")

                # Render the template
                return tmpl.render(
                    middleware=self.service.middleware,
                    service=self.service,
                    FileShouldNotExist=FileShouldNotExist,
                    render_ctx=ctx
                )

            return await self.service.middleware.run_in_thread(do)
        except FileShouldNotExist:
            # Not an error: propagate so the caller removes the target file.
            raise
        except Exception:
            # Mako's own traceback renderer gives template line context.
            self.service.logger.debug('Failed to render mako template: {0}'.format(
                exceptions.text_error_template().render()
            ))
            raise
class PyRenderer(object):
    """Renders a configuration file by importing and calling a Python module's
    render() function from etc_files/."""

    def __init__(self, service):
        # service: the owning EtcService instance.
        self.service = service

    async def render(self, path, ctx):
        # NOTE(review): the `imp` module is deprecated since Python 3.4 and
        # removed in 3.12 — migrate to importlib.util when feasible.
        name = os.path.basename(path)
        find = imp.find_module(name, [os.path.dirname(path)])
        mod = imp.load_module(name, *find)
        args = [self.service, self.service.middleware]
        # ctx is only passed for groups that declare a 'ctx' section.
        if ctx is not None:
            args.append(ctx)
        # render() may be sync or async; run sync renderers off the event loop.
        if asyncio.iscoroutinefunction(mod.render):
            return await mod.render(*args)
        else:
            return await self.service.middleware.run_in_thread(mod.render, *args)
class EtcService(Service):
    """Generates configuration files under /etc from templates in etc_files/.

    GROUPS maps a group name to either:
      * a list of entry dicts, or
      * a dict with 'ctx' (middleware calls whose results are passed to the
        renderers) and 'entries'.
    Each entry dict supports: 'type' ('mako' or 'py'), 'path' (target path
    relative to /etc; a leading 'local/' is stripped from the target),
    optional 'local_path' (template path when it differs from 'path'),
    optional 'mode'/'owner'/'group' and optional 'checkpoint' (defaults to
    'initial'; None means the entry is never run via generate_checkpoint).
    """

    GROUPS = {
        'docker': [
            {'type': 'py', 'path': 'docker/daemon.json'},
        ],
        'truenas_nvdimm': [
            {'type': 'py', 'path': 'truenas_nvdimm', 'checkpoint': 'post_init'},
        ],
        'shadow': {
            'ctx': [
                {'method': 'user.query', 'args': [[['local', '=', True]]]},
            ],
            'entries': [
                {'type': 'mako', 'path': 'shadow', 'group': 'shadow', 'mode': 0o0640},
            ]
        },
        'user': {
            'ctx': [
                {'method': 'user.query', 'args': [[['local', '=', True]]]},
                {'method': 'group.query', 'args': [[['local', '=', True]]]},
            ],
            'entries': [
                {'type': 'mako', 'path': 'group'},
                {'type': 'mako', 'path': 'passwd', 'local_path': 'master.passwd'},
                {'type': 'mako', 'path': 'shadow', 'group': 'shadow', 'mode': 0o0640},
                {'type': 'mako', 'path': 'local/sudoers', 'mode': 0o440},
                {'type': 'mako', 'path': 'aliases', 'local_path': 'mail/aliases'},
                {'type': 'py', 'path': 'web_ui_root_login_alert'},
            ]
        },
        'netdata': [
            {'type': 'mako', 'path': 'netdata/netdata.conf', 'checkpoint': 'pool_import'},
            {'type': 'mako', 'path': 'netdata/charts.d/exclude_netdata.conf', 'checkpoint': 'pool_import'},
            {'type': 'mako', 'path': 'netdata/exporting.conf'},
            {'type': 'mako', 'path': 'netdata/python.d/smart_log.conf'},
        ],
        'fstab': [
            {'type': 'mako', 'path': 'fstab'},
            {'type': 'py', 'path': 'fstab_configure', 'checkpoint': 'post_init'}
        ],
        'ipa': {
            'ctx': [
                {'method': 'directoryservices.status'}
            ],
            'entries': [
                {'type': 'py', 'path': 'ipa/default_conf'},
                {'type': 'py', 'path': 'ipa/ca.crt'},
                {'type': 'py', 'path': 'ipa/smb.keytab', 'mode': 0o600}
            ]
        },
        'kerberos': {
            'ctx': [
                {'method': 'directoryservices.status'},
                {'method': 'kerberos.config'},
                {'method': 'kerberos.realm.query'}
            ],
            'entries': [
                {'type': 'py', 'path': 'krb5.conf', 'mode': 0o644},
                {'type': 'py', 'path': 'krb5.keytab', 'mode': 0o600},
            ]
        },
        'cron': [
            {'type': 'mako', 'path': 'cron.d/middlewared', 'checkpoint': 'pool_import'},
        ],
        'grub': [
            {'type': 'py', 'path': 'grub', 'checkpoint': 'post_init'},
        ],
        'fips': [
            {'type': 'py', 'path': 'fips', 'checkpoint': None},
        ],
        'keyboard': [
            {'type': 'mako', 'path': 'default/keyboard'},
            {'type': 'mako', 'path': 'vconsole.conf'},
        ],
        'ldap': [
            {'type': 'mako', 'path': 'local/openldap/ldap.conf'},
            {'type': 'mako', 'path': 'sssd/sssd.conf', 'mode': 0o0600},
        ],
        'dhclient': [
            {'type': 'mako', 'path': 'dhcp/dhclient.conf', 'local_path': 'dhclient.conf'},
        ],
        'nfsd': {
            'ctx': [
                {
                    'method': 'sharing.nfs.query',
                    'args': [
                        [('enabled', '=', True), ('locked', '=', False)],
                        {'extra': {'use_cached_locked_datasets': False}}
                    ],
                },
                {'method': 'nfs.config'},
            ],
            'entries': [
                {'type': 'mako', 'path': 'nfs.conf.d/local.conf'},
                {'type': 'mako', 'path': 'default/rpcbind'},
                {'type': 'mako', 'path': 'idmapd.conf'},
                {'type': 'mako', 'path': 'exports', 'checkpoint': 'interface_sync'},
            ]
        },
        'pam': {
            'ctx': [
                {'method': 'activedirectory.config'},
                {'method': 'ldap.config'},
            ],
            'entries': [
                {'type': 'mako', 'path': 'pam.d/common-account'},
                {'type': 'mako', 'path': 'pam.d/common-auth'},
                {'type': 'mako', 'path': 'pam.d/common-password'},
                {'type': 'mako', 'path': 'pam.d/common-session-noninteractive'},
                {'type': 'mako', 'path': 'pam.d/common-session'},
                {'type': 'mako', 'path': 'security/pam_winbind.conf'},
            ]
        },
        'pam_middleware': {
            'ctx': [
                {'method': 'datastore.config', 'args': ['system.settings']},
                {'method': 'api_key.query', 'args': [[['revoked', '=', False]]]}
            ],
            'entries': [
                {'type': 'mako', 'path': 'pam.d/middleware'},
                {'type': 'mako', 'path': 'pam.d/middleware-api-key'},
                {'type': 'py', 'path': 'pam_tdb'},
            ]
        },
        'ftp': {
            'ctx': [
                {'method': 'ftp.config'},
                {'method': 'user.query', 'args': [[["builtin", "=", True], ["username", "!=", "ftp"]]]},
                {'method': 'network.configuration.config'}
            ],
            'entries': [
                {'type': 'mako', 'path': 'proftpd/proftpd.conf'},
                {'type': 'mako', 'path': 'proftpd/proftpd.motd'},
                {'type': 'mako', 'path': 'proftpd/tls.conf'},
                {'type': 'mako', 'path': 'ftpusers'},
            ],
        },
        'kdump': [
            {'type': 'mako', 'path': 'default/kdump-tools'},
        ],
        'rc': [
            {'type': 'py', 'path': 'systemd'},
        ],
        'sysctl': [
            {'type': 'mako', 'path': 'sysctl.d/tunables.conf'},
        ],
        'smartd': [
            {'type': 'mako', 'path': 'default/smartmontools'},
            {'type': 'py', 'path': 'smartd'},
        ],
        'ssl': [
            {'type': 'py', 'path': 'generate_ssl_certs'},
        ],
        'scst': [
            {'type': 'mako', 'path': 'scst.conf', 'checkpoint': 'pool_import', 'mode': 0o600},
            {'type': 'mako', 'path': 'scst.env', 'checkpoint': 'pool_import', 'mode': 0o744},
        ],
        'scst_targets': [
            {'type': 'mako', 'path': 'initiators.allow', 'checkpoint': 'pool_import'},
            {'type': 'mako', 'path': 'initiators.deny', 'checkpoint': 'pool_import'},
        ],
        'udev': [
            {'type': 'py', 'path': 'udev'},
        ],
        'nginx': [
            {'type': 'mako', 'path': 'local/nginx/nginx.conf', 'checkpoint': 'interface_sync'}
        ],
        'keepalived': [
            {
                'type': 'mako',
                'path': 'keepalived/keepalived.conf',
                'user': 'root', 'group': 'root', 'mode': 0o644,
                'local_path': 'keepalived.conf',
            },
        ],
        'motd': [
            {'type': 'mako', 'path': 'motd'}
        ],
        'mdns': {
            'ctx': [
                {'method': 'interface.query'},
                {'method': 'smb.config'},
                {'method': 'ups.config'},
                {'method': 'system.general.config'},
                {'method': 'service.started_or_enabled', 'args': ['cifs']},
                {'method': 'service.started_or_enabled', 'args': ['ups'], 'ctx_prefix': 'ups'}
            ],
            'entries': [
                {'type': 'mako', 'path': 'local/avahi/avahi-daemon.conf', 'checkpoint': None},
                {'type': 'py', 'path': 'local/avahi/services/ADISK.service', 'checkpoint': None},
                {'type': 'py', 'path': 'local/avahi/services/DEV_INFO.service', 'checkpoint': None},
                {'type': 'py', 'path': 'local/avahi/services/HTTP.service', 'checkpoint': None},
                {'type': 'py', 'path': 'local/avahi/services/SMB.service', 'checkpoint': None},
                {'type': 'py', 'path': 'local/avahi/services/nut.service', 'checkpoint': None},
            ]
        },
        'nscd': [
            {'type': 'mako', 'path': 'nscd.conf'},
        ],
        'nss': [
            {'type': 'mako', 'path': 'nsswitch.conf'},
        ],
        'wsd': [
            {'type': 'mako', 'path': 'local/wsdd.conf', 'checkpoint': 'post_init'},
        ],
        'ups': [
            {'type': 'py', 'path': 'local/nut/ups_config'},
            {'type': 'mako', 'path': 'local/nut/ups.conf', 'owner': 'root', 'group': 'nut', 'mode': 0o440},
            {'type': 'mako', 'path': 'local/nut/upsd.conf', 'owner': 'root', 'group': 'nut', 'mode': 0o440},
            {'type': 'mako', 'path': 'local/nut/upsd.users', 'owner': 'root', 'group': 'nut', 'mode': 0o440},
            {'type': 'mako', 'path': 'local/nut/upsmon.conf', 'owner': 'root', 'group': 'nut', 'mode': 0o440},
            {'type': 'mako', 'path': 'local/nut/upssched.conf', 'owner': 'root', 'group': 'nut', 'mode': 0o440},
            {'type': 'mako', 'path': 'local/nut/nut.conf', 'owner': 'root', 'group': 'nut', 'mode': 0o440},
            {'type': 'py', 'path': 'local/nut/ups_perms'}
        ],
        'smb': {
            'ctx': [
                {'method': 'smb.generate_smb_configuration'},
            ],
            'entries': [
                {'type': 'mako', 'path': 'local/smb4.conf'},
            ]
        },
        'snmpd': [
            {'type': 'mako', 'path': 'snmp/snmpd.conf',
             'local_path': 'local/snmpd.conf', 'owner': 'root', 'group': 'Debian-snmp', 'mode': 0o640
             },
        ],
        'syslogd': {
            'ctx': [
                {'method': 'system.advanced.config'},
                {'method': 'nfs.config'},
            ],
            'entries': [
                {'type': 'mako', 'path': 'syslog-ng/syslog-ng.conf'},
                {'type': 'mako', 'path': 'syslog-ng/conf.d/tndestinations.conf'},
                {'type': 'mako', 'path': 'syslog-ng/conf.d/tnfilters.conf'},
                {'type': 'mako', 'path': 'syslog-ng/conf.d/tnaudit.conf', 'mode': 0o600},
            ]
        },
        'hosts': [{'type': 'mako', 'path': 'hosts', 'mode': 0o644, 'checkpoint': 'pre_interface_sync'}],
        'hostname': [{'type': 'py', 'path': 'hostname', 'checkpoint': 'pre_interface_sync'}],
        'ssh': {
            "ctx": [
                {'method': 'ssh.config'},
                {'method': 'activedirectory.config'},
                {'method': 'ldap.config'},
                {'method': 'auth.twofactor.config'},
                {'method': 'interface.query'},
                {'method': 'system.advanced.login_banner'},
            ],
            "entries": [
                {'type': 'mako', 'path': 'local/ssh/sshd_config', 'checkpoint': 'interface_sync'},
                {'type': 'mako', 'path': 'pam.d/sshd', 'local_path': 'pam.d/sshd_linux'},
                {'type': 'mako', 'path': 'local/users.oath', 'mode': 0o0600, 'checkpoint': 'pool_import'},
                {'type': 'py', 'path': 'local/ssh/config'},
                {'type': 'mako', 'path': 'login_banner', 'mode': 0o600},
            ]
        },
        'ntpd': [
            {'type': 'mako', 'path': 'chrony/chrony.conf'}
        ],
        'localtime': [
            {'type': 'py', 'path': 'localtime_config'}
        ],
        'kmip': [
            {'type': 'mako', 'path': 'pykmip/pykmip.conf'}
        ],
        'truecommand': [
            {'type': 'mako', 'path': 'wireguard/ix-truecommand.conf'},
        ],
        'libvirt': [
            {'type': 'py', 'path': 'libvirt', 'checkpoint': None},
        ],
        'libvirt_guests': [
            {'type': 'mako', 'path': 'default/libvirt-guests', 'checkpoint': None},
        ],
        'subids': [
            {'type': 'mako', 'path': 'subuid', 'checkpoint': None},
            {'type': 'mako', 'path': 'subgid', 'checkpoint': None},
        ],
    }
    # Per-group locks so the same group is never generated concurrently.
    LOCKS = defaultdict(asyncio.Lock)

    checkpoints = ['initial', 'interface_sync', 'post_init', 'pool_import', 'pre_interface_sync']

    class Config:
        private = True

    def __init__(self, *args, **kwargs):
        super(EtcService, self).__init__(*args, **kwargs)
        # Absolute path to the bundled etc_files/ template directory.
        self.files_dir = os.path.realpath(
            os.path.join(os.path.dirname(__file__), '..', 'etc_files')
        )
        self._renderers = {
            'mako': MakoRenderer(self),
            'py': PyRenderer(self),
        }

    async def gather_ctx(self, methods):
        """Execute each declared ctx method and return results keyed by
        method name (optionally prefixed with 'ctx_prefix')."""
        rv = {}
        for m in methods:
            method = m['method']
            args = m.get('args', [])
            prefix = m.get('ctx_prefix', None)
            key = f'{prefix}.{method}' if prefix else method
            rv[key] = await self.middleware.call(method, *args)

        return rv

    def get_perms_and_ownership(self, entry):
        """Resolve an entry's owner/group names to builtin uid/gid (root by
        default) and return kwargs for write_if_changed."""
        user_name = entry.get('owner')
        group_name = entry.get('group')
        mode = entry.get('mode', DEFAULT_ETC_PERMS)
        uid = self.middleware.call_sync('user.get_builtin_user_id', user_name) if user_name else DEFAULT_ETC_XID
        gid = self.middleware.call_sync('group.get_builtin_group_id', group_name) if group_name else DEFAULT_ETC_XID
        return {'uid': uid, 'gid': gid, 'perms': mode}

    def make_changes(self, full_path, entry, rendered):
        """Write `rendered` to `full_path` with the entry's mode/ownership.

        Returns a FileChanges bitmask of what changed (0 on failure or when
        nothing changed); logs loudly if anything other than file contents
        changed, since that may indicate tampering.
        """
        mode = entry.get('mode', DEFAULT_ETC_PERMS)

        # NOTE(review): 'opener' appears to be unused — write_if_changed is
        # called without it; candidate for removal.
        def opener(path, flags):
            return os.open(path, os.O_CREAT | os.O_RDWR, mode=mode)

        outfile_dirname = os.path.dirname(full_path)
        if outfile_dirname != '/etc':
            os.makedirs(outfile_dirname, exist_ok=True)

        payload = self.get_perms_and_ownership(entry)
        try:
            changes = write_if_changed(full_path, rendered, **payload)
        except Exception:
            changes = 0
            self.logger.warning('%s: failed to write changes to configuration file', full_path, exc_info=True)

        if (unexpected_changes := changes & ~FileChanges.CONTENTS):
            self.logger.error(
                '%s: unexpected changes [%s] were made to configuration file that may '
                'allow unauthorized user to alter service behavior', full_path,
                ', '.join(FileChanges.dump(unexpected_changes))
            )

        return changes

    async def generate(self, name, checkpoint=None):
        """Generate all files of group `name`.

        When `checkpoint` is given, only entries whose checkpoint (default
        'initial') matches are processed.  Returns a list of
        {'path', 'status', 'changes'} dicts for files that changed or were
        removed; render failures are logged and skipped.
        """
        group = self.GROUPS.get(name)
        if group is None:
            raise ValueError('{0} group not found'.format(name))

        output = []

        async with self.LOCKS[name]:
            if isinstance(group, dict):
                ctx = await self.gather_ctx(group['ctx'])
                entries = group['entries']
            else:
                ctx = None
                entries = group
            for entry in entries:
                renderer = self._renderers.get(entry['type'])
                if renderer is None:
                    raise ValueError(f'Unknown type: {entry["type"]}')

                if checkpoint:
                    entry_checkpoint = entry.get('checkpoint', 'initial')
                    if entry_checkpoint != checkpoint:
                        continue

                # Template source lives in etc_files/; target path drops a
                # leading 'local/' component.
                path = os.path.join(self.files_dir, entry.get('local_path') or entry['path'])
                entry_path = entry['path']
                if entry_path.startswith('local/'):
                    entry_path = entry_path[len('local/'):]
                outfile = f'/etc/{entry_path}'

                try:
                    rendered = await renderer.render(path, ctx)
                except FileShouldNotExist:
                    # Renderer asked for the target file to be removed.
                    try:
                        await self.middleware.run_in_thread(os.unlink, outfile)
                        self.logger.debug(f'{entry["type"]}:{entry["path"]} file removed.')
                        output.append({
                            'path': outfile,
                            'status': 'REMOVED',
                            'changes': FileChanges.dump(FileChanges.CONTENTS)
                        })
                    except FileNotFoundError:
                        # Nothing to log
                        pass

                    continue
                except Exception:
                    self.logger.error(f'Failed to render {entry["type"]}:{entry["path"]}', exc_info=True)
                    continue

                if rendered is None:
                    # TODO: scripts that write config files internally should be refacorted
                    # to return bytes or str so that we can properly monitor for changes
                    continue

                changes = await self.middleware.run_in_thread(self.make_changes, outfile, entry, rendered)

                if not changes:
                    self.logger.trace('No new changes for %s', outfile)
                else:
                    output.append({
                        'path': outfile,
                        'status': 'CHANGED',
                        'changes': FileChanges.dump(changes)
                    })

        return output

    async def generate_checkpoint(self, checkpoint):
        """Generate every group, limited to entries bound to `checkpoint`."""
        if checkpoint not in await self.get_checkpoints():
            raise CallError(f'"{checkpoint}" not recognised')

        for name in self.GROUPS.keys():
            try:
                await self.generate(name, checkpoint)
            except Exception:
                self.logger.error(f'Failed to generate {name} group', exc_info=True)

    async def get_checkpoints(self):
        """Return the list of valid checkpoint names."""
        return self.checkpoints
async def __event_system_ready(middleware, event_type, args):
    # Fire-and-forget generation of 'post_init' checkpoint files once the
    # system signals it is fully booted.
    middleware.create_task(middleware.call('etc.generate_checkpoint', 'post_init'))
async def pool_post_import(middleware, pool):
    """Regenerate 'pool_import' checkpoint files after the boot-time bulk
    pool import (the hook fires with pool=None in that case); per-pool
    imports are ignored."""
    if pool is not None:
        return
    await middleware.call('etc.generate_checkpoint', 'pool_import')
async def setup(middleware):
    """Plugin setup: wire checkpoint generation to boot/import events."""
    middleware.event_subscribe('system.ready', __event_system_ready)
    # Generate `etc` files before executing other post-boot-time-pool-import actions.
    # There are no explicit requirements for that, we are just preserving execution order
    # when moving checkpoint generation to pool.post_import hook.
    middleware.register_hook('pool.post_import', pool_post_import, order=-1000)
| 20,137 | Python | .py | 459 | 31.023965 | 116 | 0.482012 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,478 | pwenc.py | truenas_middleware/src/middlewared/middlewared/plugins/pwenc.py | import base64
import fcntl
import os
import threading
from contextlib import contextmanager
from Cryptodome.Cipher import AES
from Cryptodome.Util import Counter
from middlewared.service import Service
from middlewared.utils.path import pathref_open
# AES-256 key size and the padding block size used by encrypt()/decrypt().
PWENC_BLOCK_SIZE = 32
# Location of the on-disk secret; overridable via environment for testing.
PWENC_FILE_SECRET = os.environ.get('FREENAS_PWENC_SECRET', '/data/pwenc_secret')
PWENC_FILE_SECRET_MODE = 0o600
# Padding character appended to plaintext; stripped again on decrypt.
PWENC_PADDING = b'{'
# Known plaintext stored encrypted in system.settings to verify the secret.
PWENC_CHECK = 'Donuts!'
class PWEncService(Service):
    """Manages the password-encryption secret file and provides
    encrypt/decrypt wrappers around the module-level functions."""

    # Class-level cache of the secret bytes; None means "not loaded yet".
    secret = None
    secret_path = PWENC_FILE_SECRET
    lock = threading.RLock()

    class Config:
        private = True

    def file_secret_path(self):
        """Return the on-disk path of the secret file."""
        return self.secret_path

    def _reset_passwords(self):
        # After the secret is rotated, stored passwords can no longer be
        # decrypted — blank them so services fail cleanly.
        for table, field in (
            ('directoryservice_ldap', 'ldap_bindpw'),
            ('services_ups', 'ups_monpwd'),
            ('system_email', 'em_pass'),
        ):
            self.middleware.call_sync('datastore.sql', f'UPDATE {table} SET {field} = \'\'')

    @staticmethod
    def _secret_opener(path, flags):
        # Open relative to a verified directory fd to avoid symlink tricks on
        # the secret's parent directory.
        with pathref_open(os.path.dirname(path), force=True, mode=0o755) as secret_path:
            return os.open(os.path.basename(path), flags, dir_fd=secret_path)

    def _reset_pwenc_check_field(self):
        # Re-encrypt the known check value with the (possibly new) secret.
        settings = self.middleware.call_sync('datastore.config', 'system.settings')
        self.middleware.call_sync('datastore.update', 'system.settings', settings['id'], {
            'stg_pwenc_check': self.encrypt(PWENC_CHECK),
        })

    @contextmanager
    def _lock_secrets(self, fd):
        # Thread lock plus a non-blocking advisory file lock on the secret.
        with self.lock:
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            try:
                yield fd
            finally:
                fcntl.lockf(fd, fcntl.LOCK_UN)

    def _read_secret(self):
        with open(self.secret_path, 'rb', opener=self._secret_opener) as f:
            self.secret = f.read()

    def _write_secret(self, secret, reset_passwords):
        """Atomically-ish persist a new secret, then refresh the check field
        and optionally blank now-undecryptable stored passwords."""
        with open(self.secret_path, 'wb', opener=self._secret_opener) as f:
            with self._lock_secrets(f.fileno()):
                os.fchmod(f.fileno(), PWENC_FILE_SECRET_MODE)
                f.write(secret)
                f.flush()
                os.fsync(f.fileno())

        self.reset_secret_cache()
        self._reset_pwenc_check_field()
        if reset_passwords:
            self._reset_passwords()

    def generate_secret(self, reset_passwords=True):
        """Generate and install a fresh random secret."""
        self._write_secret(os.urandom(PWENC_BLOCK_SIZE), reset_passwords)

    def check(self):
        """Return True if the stored check value decrypts correctly with the
        current secret (i.e. secret and DB are in sync)."""
        try:
            settings = self.middleware.call_sync('datastore.config', 'system.settings')
        except IndexError:
            self.middleware.call_sync('datastore.insert', 'system.settings', {})
            settings = self.middleware.call_sync('datastore.config', 'system.settings')

        return self.decrypt(settings['stg_pwenc_check']) == PWENC_CHECK

    @classmethod
    def get_secret(cls):
        # NOTE(review): _read_secret is an instance method invoked with the
        # class object as `self` — works because it only touches class-level
        # attributes, but worth tidying.
        if cls.secret is None:
            cls._read_secret(cls)

        return cls.secret

    @classmethod
    def reset_secret_cache(cls):
        """Drop the cached secret so the next use re-reads it from disk."""
        cls.secret = None

    def encrypt(self, data):
        return encrypt(data)

    def decrypt(self, encrypted, _raise=False):
        return decrypt(encrypted, _raise)
async def setup(middleware):
    """Plugin setup: create a secret on first boot or when the stored check
    value no longer decrypts (missing/corrupt secret)."""
    if not await middleware.call('pwenc.check'):
        middleware.logger.debug('Generating new pwenc secret')
        await middleware.call('pwenc.generate_secret')
def encrypt(data):
    """Encrypt a unicode string with AES-256-CTR using the pwenc secret.

    Output format (base64): 8-byte random nonce || ciphertext of the
    '{'-padded UTF-8 plaintext.  The wire format is compatibility-locked:
    existing database values depend on it.
    """
    data = data.encode('utf8')

    def pad(x):
        # Pad to a multiple of PWENC_BLOCK_SIZE with '{' bytes.
        return x + (PWENC_BLOCK_SIZE - len(x) % PWENC_BLOCK_SIZE) * PWENC_PADDING

    nonce = os.urandom(8)
    enc_service = PWEncService
    cipher = AES.new(enc_service.get_secret(), AES.MODE_CTR, counter=Counter.new(64, prefix=nonce))
    encoded = base64.b64encode(nonce + cipher.encrypt(pad(data)))

    return encoded.decode()
def decrypt(encrypted, _raise=False):
    """Inverse of encrypt(); returns '' for falsy input.

    On any failure, returns '' unless `_raise` is True.

    NOTE(review): rstrip(PWENC_PADDING) also removes *legitimate* trailing
    '{' characters from the plaintext — a long-standing quirk that cannot be
    fixed without breaking already-stored ciphertexts.
    """
    if not encrypted:
        return ''

    enc_service = PWEncService
    try:
        encrypted = base64.b64decode(encrypted)
        nonce = encrypted[:8]
        encrypted = encrypted[8:]

        cipher = AES.new(enc_service.get_secret(), AES.MODE_CTR, counter=Counter.new(64, prefix=nonce))
        return cipher.decrypt(encrypted).rstrip(PWENC_PADDING).decode('utf8')
    except Exception:
        if _raise:
            raise
        return ''
| 4,342 | Python | .py | 108 | 32.055556 | 103 | 0.640305 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,479 | init_shutdown_script.py | truenas_middleware/src/middlewared/middlewared/plugins/init_shutdown_script.py | import asyncio
import os
import stat
import subprocess
from middlewared.schema import Bool, Dict, Int, Patch, Str, ValidationErrors, accepts
from middlewared.service import CRUDService, job, private
from middlewared.service_exception import CallError
import middlewared.sqlalchemy as sa
from middlewared.utils import run
from middlewared.validators import Range
class InitShutdownScriptModel(sa.Model):
    """ORM model backing init/shutdown script tasks (table tasks_initshutdown)."""
    __tablename__ = 'tasks_initshutdown'

    id = sa.Column(sa.Integer(), primary_key=True)
    # 'command' or 'script' (stored lower-case; API exposes upper-case)
    ini_type = sa.Column(sa.String(15), default='command')
    ini_command = sa.Column(sa.String(300))
    ini_script = sa.Column(sa.String(255), nullable=True)
    # 'preinit', 'postinit' or 'shutdown'
    ini_when = sa.Column(sa.String(15))
    ini_enabled = sa.Column(sa.Boolean(), default=True)
    # per-task execution timeout in seconds
    ini_timeout = sa.Column(sa.Integer(), default=10)
    ini_comment = sa.Column(sa.String(255))
class InitShutdownScriptService(CRUDService):
    """CRUD service for commands/scripts executed at boot (pre/post init) and shutdown."""

    class Config:
        datastore = 'tasks.initshutdown'
        datastore_prefix = 'ini_'
        datastore_extend = 'initshutdownscript.init_shutdown_script_extend'
        cli_namespace = 'system.init_shutdown_script'

    ENTRY = Patch(
        'init_shutdown_script_create', 'init_shutdown_script_entry',
        ('add', Int('id', required=True)),
    )

    @accepts(Dict(
        'init_shutdown_script_create',
        Str('type', enum=['COMMAND', 'SCRIPT'], required=True),
        Str('command', null=True, default=''),
        Str('script', null=True, default=''),
        Str('when', enum=['PREINIT', 'POSTINIT', 'SHUTDOWN'], required=True),
        Bool('enabled', default=True),
        Int('timeout', default=10),
        Str('comment', default='', validators=[Range(max_=255)]),
        register=True,
    ))
    async def do_create(self, data):
        """
        Create an initshutdown script task.

        `type` indicates if a command or script should be executed at `when`.

        There are three choices for `when`:

        1) PREINIT - This is early in the boot process before all the services have started
        2) POSTINIT - This is late in the boot process when most of the services have started
        3) SHUTDOWN - This is on shutdown

        `timeout` is an integer value which indicates time in seconds which the system should wait for the execution
        of script/command. It should be noted that a hard limit for a timeout is configured by the base OS, so when
        a script/command is set to execute on SHUTDOWN, the hard limit configured by the base OS is changed adding
        the timeout specified by script/command so it can be ensured that it executes as desired and is not interrupted
        by the base OS's limit.
        """
        await self.validate(data, 'init_shutdown_script_create')

        await self.init_shutdown_script_compress(data)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        return await self.get_instance(data['id'])

    async def do_update(self, id_, data):
        """
        Update initshutdown script task of `id`.
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)

        await self.validate(new, 'init_shutdown_script_update')

        await self.init_shutdown_script_compress(new)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id_,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        return await self.get_instance(new['id'])

    async def do_delete(self, id_):
        """
        Delete init/shutdown task of `id`.
        """
        return await self.middleware.call('datastore.delete', self._config.datastore, id_)

    @private
    async def init_shutdown_script_extend(self, data):
        # DB stores the enums lower-case; the API exposes them upper-case.
        data['type'] = data['type'].upper()
        data['when'] = data['when'].upper()
        return data

    @private
    async def init_shutdown_script_compress(self, data):
        # Inverse of init_shutdown_script_extend, applied before datastore writes.
        data['type'] = data['type'].lower()
        data['when'] = data['when'].lower()
        return data

    @private
    async def validate(self, data, schema_name):
        """Validate a create/update payload, raising ValidationErrors on failure."""
        verrors = ValidationErrors()
        if data['type'] == 'COMMAND' and not data.get('command'):
            verrors.add(f'{schema_name}.command', 'This field is required')
        elif data['type'] == 'SCRIPT':
            if not data.get('script'):
                verrors.add(f'{schema_name}.script', 'This field is required')
            else:
                try:
                    obj = await self.middleware.call('filesystem.stat', data['script'])
                except CallError as e:
                    verrors.add(f'{schema_name}.script', e.errmsg, e.errno)
                except Exception as e:
                    verrors.add(f'{schema_name}.script', str(e))
                else:
                    if obj['type'] != 'FILE':
                        # FIX: message was missing its f-prefix and showed the
                        # literal text {obj["type"]!r} to the user.
                        verrors.add(f'{schema_name}.script', f'Script must be a regular file not {obj["type"]!r}')
                    elif not bool(obj['mode'] & stat.S_IXUSR):
                        verrors.add(f'{schema_name}.script', 'Script must have execute bit set for the user')

        verrors.check()

    @private
    def get_cmd(self, task):
        """Return the shell command line for `task`, or None when there is nothing to run."""
        if task['type'] == 'COMMAND':
            return task['command']
        elif task['type'] == 'SCRIPT' and os.path.exists(task['script']):
            return f'exec {task["script"]}'

    @private
    async def execute_task(self, task):
        """Run a single task, logging (never raising) any failure."""
        cmd = await self.middleware.run_in_thread(self.get_cmd, task)
        if not cmd:
            return

        try:
            proc = await run(['sh', '-c', cmd], stderr=subprocess.STDOUT, check=False)
            if proc.returncode:
                self.logger.debug('Failed to execute %r with error %r', cmd, proc.stdout.decode())
        except Exception:
            self.logger.debug('Unexpected failure executing %r', cmd, exc_info=True)

    @private
    @accepts(Str('when'))
    @job()
    async def execute_init_tasks(self, job, when):
        """Run all enabled tasks registered for `when`, each bounded by its own timeout."""
        tasks = await self.middleware.call('initshutdownscript.query', [
            ['enabled', '=', True],
            ['when', '=', when]
        ])
        tasks_len = len(tasks)

        for idx, task in enumerate(tasks):
            cmd = task['command'] if task['type'] == 'COMMAND' else task['script']
            try:
                await asyncio.wait_for(self.middleware.create_task(self.execute_task(task)), timeout=task['timeout'])
            except asyncio.TimeoutError:
                self.logger.debug('Timed out running %s: %r', task['type'], cmd)
            finally:
                # Progress is proportional to tasks completed (loop body never
                # runs when tasks_len == 0, so no division by zero).
                job.set_progress((100 / tasks_len) * (idx + 1))

        job.set_progress(100, f'Completed tasks for {when}')
| 6,877 | Python | .py | 152 | 35.763158 | 119 | 0.611194 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,480 | nvidia.py | truenas_middleware/src/middlewared/middlewared/plugins/nvidia.py | import contextlib
import os
import shutil
import subprocess
import tempfile
import requests
from middlewared.service import job, Service
from middlewared.utils.gpu import get_gpus
from middlewared.utils.rootfs import ReadonlyRootfsManager
HEADERS = {"User-Agent": "curl/7.88.1"}
class NvidiaService(Service):
    """Downloads and installs the NVIDIA driver and container toolkit on demand."""

    class Config:
        private = True

    def present(self):
        """Return True if at least one non-isolated NVIDIA GPU is attached."""
        adv_config = self.middleware.call_sync("system.advanced.config")
        for gpu in get_gpus():
            if gpu["addr"]["pci_slot"] in adv_config["isolated_gpu_pci_ids"]:
                # Isolated GPUs are reserved (e.g. for VM passthrough) and not
                # usable by the host.
                continue

            if gpu["vendor"] == "NVIDIA":
                return True

        return False

    def installed(self):
        """Return True if the nvidia kernel module is currently loaded."""
        with open("/proc/modules") as f:
            for line in f:
                if line.split()[0] == "nvidia":
                    return True

        return False

    @job(lock="nvidia", description=lambda *args: "Installing NVIDIA drivers")
    def install(self, job, start_docker=None):
        """Install the latest NVIDIA driver plus container toolkit, then restart docker if needed."""
        if self.installed():
            return

        self.middleware.call_sync("network.general.will_perform_activity", "catalog")

        with ReadonlyRootfsManager() as rrm:
            job.set_progress(0, "Temporarily making root filesystem writeable")
            rrm.make_writeable()

            job.set_progress(1, "Adding NVIDIA repository")
            self._add_nvidia_repository()
            with self._install_packages(
                job,
                ["gcc", "make", "pkg-config"],
                ["libvulkan1", "nvidia-container-toolkit", "vulkan-validationlayers"],
            ):
                # `/tmp` is `nonexec`, we'll have to use another directory
                with tempfile.TemporaryDirectory(dir="/root") as td:
                    path = self._download(job, td)
                    self._install_driver(job, td, path)

        if start_docker or self.middleware.call_sync("service.started", "docker"):
            job.set_progress(90, "Restarting docker")
            self.middleware.call_sync("service.restart", "docker")

    def _add_nvidia_repository(self):
        """Install NVIDIA's apt signing key (once) and (re)write the repo definition."""
        if not os.path.exists("/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg"):
            r = requests.get("https://nvidia.github.io/libnvidia-container/gpgkey")
            r.raise_for_status()

            subprocess.run(["gpg", "--dearmor", "-o", "/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg"],
                           input=r.content, capture_output=True, check=True)

        with open("/etc/apt/sources.list.d/nvidia-container-toolkit.list", "w") as f:
            f.write("deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] "
                    "https://nvidia.github.io/libnvidia-container/stable/deb/$(ARCH) /")

    @contextlib.contextmanager
    def _install_packages(self, job, temporary, permanent):
        """Install apt packages; `temporary` ones are removed again when the context exits."""
        kwargs = dict(capture_output=True, check=True, text=True)
        try:
            job.set_progress(5, "Updating apt cache")
            subprocess.run(["apt", "update"], **kwargs)

            try:
                job.set_progress(10, "Installing apt packages")
                subprocess.run(["apt", "-y", "install"] + temporary + permanent, **kwargs)

                yield
            finally:
                job.set_progress(80, "Removing apt packages")
                subprocess.run(["apt", "-y", "remove"] + temporary, **kwargs)
                subprocess.run(["apt", "-y", "autoremove"], **kwargs)
        finally:
            # The apt cache is only needed during install; keep the image lean.
            shutil.rmtree("/var/cache/apt", ignore_errors=True)

    def _download(self, job, path):
        """Download the latest driver installer into `path` and return its local file path."""
        prefix = "https://download.nvidia.com/XFree86/Linux-x86_64"

        r = requests.get(f"{prefix}/latest.txt", headers=HEADERS, timeout=10)
        r.raise_for_status()
        version = r.text.split()[0]

        filename = f"NVIDIA-Linux-x86_64-{version}-no-compat32.run"
        # FIX: `filename` was computed but unused — the literal text "(unknown)"
        # was interpolated into both the URL and the local path, so the download
        # targeted a nonexistent file.
        result = f"{path}/{filename}"
        with requests.get(f"{prefix}/{version}/{filename}", headers=HEADERS, stream=True, timeout=10) as r:
            r.raise_for_status()

            progress = 0
            total = int(r.headers["Content-Length"])
            with open(result, "wb") as f:
                for chunk in r.iter_content(chunk_size=24 * 1024 * 1024):
                    # Download phase covers the 10-20% progress window.
                    job.set_progress(
                        10 + int(progress / total * 10),
                        "Downloading drivers",
                    )
                    progress += len(chunk)
                    f.write(chunk)

        os.chmod(result, 0o755)
        return result

    def _install_driver(self, job, td, path):
        """Run the NVIDIA installer silently, using `td` as scratch space."""
        job.set_progress(20, "Installing driver")
        subprocess.run([path, "--tmpdir", td, "-s"], capture_output=True, check=True, text=True)
| 4,783 | Python | .py | 99 | 36.636364 | 114 | 0.589677 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,481 | smb.py | truenas_middleware/src/middlewared/middlewared/plugins/smb.py | import asyncio
import codecs
import errno
import middlewared.sqlalchemy as sa
import os
import re
from pathlib import Path
import uuid
import unicodedata
from copy import deepcopy
from middlewared.common.attachment import LockableFSAttachmentDelegate
from middlewared.common.listen import SystemServiceListenMultipleDelegate
from middlewared.schema import Bool, Dict, IPAddr, List, NetbiosName, NetbiosDomain, Ref, returns, SID, Str, Int, Patch
from middlewared.schema import Path as SchemaPath
# List schema defaults to [], supplying NOT_PROVIDED avoids having audit update that
# defaults for ignore_list or watch_list from overrwriting previous value
from middlewared.schema.utils import NOT_PROVIDED
from middlewared.service import accepts, job, pass_app, private, SharingService
from middlewared.service import ConfigService, ValidationError, ValidationErrors
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.plugins.smb_.constants import (
NETIF_COMPLETE_SENTINEL,
CONFIGURED_SENTINEL,
SMB_AUDIT_DEFAULTS,
INVALID_SHARE_NAME_CHARACTERS,
LOGLEVEL_MAP,
RESERVED_SHARE_NAMES,
SMBHAMODE,
SMBCmd,
SMBPath,
SMBSharePreset
)
from middlewared.plugins.smb_.constants import SMBBuiltin # noqa (imported so may be imported from here)
from middlewared.plugins.smb_.sharesec import remove_share_acl
from middlewared.plugins.smb_.util_param import smbconf_getparm, lpctx_validate_global_parm
from middlewared.plugins.smb_.util_net_conf import reg_delshare, reg_listshares, reg_setparm
from middlewared.plugins.smb_.util_smbconf import generate_smb_conf_dict
from middlewared.plugins.smb_.utils import apply_presets, is_time_machine_share, smb_strip_comments
from middlewared.plugins.idmap_.idmap_constants import SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX
from middlewared.utils import filter_list, run
from middlewared.utils.directoryservices.constants import DSStatus, DSType
from middlewared.utils.mount import getmnttree
from middlewared.utils.path import FSLocation, path_location, is_child_realpath
from middlewared.utils.privilege import credential_has_full_admin
from middlewared.utils.tdb import TDBError
class SMBModel(sa.Model):
    """ORM model backing the global SMB service configuration (table services_cifs)."""
    __tablename__ = 'services_cifs'

    id = sa.Column(sa.Integer(), primary_key=True)
    cifs_srv_netbiosname = sa.Column(sa.String(120))
    # space-separated list; split into a Python list by smb_extend
    cifs_srv_netbiosalias = sa.Column(sa.String(120), nullable=True)
    cifs_srv_workgroup = sa.Column(sa.String(120))
    cifs_srv_description = sa.Column(sa.String(120))
    cifs_srv_unixcharset = sa.Column(sa.String(120), default="UTF-8")
    # numeric level; mapped to/from names via LOGLEVEL_MAP
    cifs_srv_loglevel = sa.Column(sa.String(120), default="0")
    cifs_srv_syslog = sa.Column(sa.Boolean(), default=False)
    cifs_srv_aapl_extensions = sa.Column(sa.Boolean(), default=False)
    cifs_srv_localmaster = sa.Column(sa.Boolean(), default=False)
    cifs_srv_guest = sa.Column(sa.String(120), default="nobody")
    cifs_srv_filemask = sa.Column(sa.String(120))
    cifs_srv_dirmask = sa.Column(sa.String(120))
    cifs_srv_smb_options = sa.Column(sa.Text())
    cifs_srv_bindip = sa.Column(sa.MultiSelectField())
    # machine SID for this server (no cifs_srv_ prefix)
    cifs_SID = sa.Column(sa.String(120), nullable=True)
    cifs_srv_ntlmv1_auth = sa.Column(sa.Boolean(), default=False)
    cifs_srv_enable_smb1 = sa.Column(sa.Boolean(), default=False)
    cifs_srv_admin_group = sa.Column(sa.String(120), nullable=True, default="")
    cifs_srv_next_rid = sa.Column(sa.Integer(), nullable=False)
    # encrypted blob of directory-service secrets; stripped by smb_extend
    cifs_srv_secrets = sa.Column(sa.EncryptedText(), nullable=True)
    cifs_srv_multichannel = sa.Column(sa.Boolean, default=False)
    # NULL means 'DEFAULT' (see smb_extend/compress)
    cifs_srv_encryption = sa.Column(sa.String(120), nullable=True)
class SMBService(ConfigService):
class Config:
service = 'cifs'
service_verb = 'restart'
datastore = 'services.cifs'
datastore_extend = 'smb.smb_extend'
datastore_prefix = 'cifs_srv_'
cli_namespace = 'service.smb'
role_prefix = 'SHARING_SMB'
    @private
    def is_configured(self):
        # True once smb.configure has completed and dropped its sentinel file.
        return os.path.exists(CONFIGURED_SENTINEL)
    @private
    def set_configured(self):
        # Create the sentinel file marking the SMB service as fully configured.
        with open(CONFIGURED_SENTINEL, "w"):
            pass
    @private
    async def smb_extend(self, smb):
        """Extend smb for netbios."""
        # Normalize the DB row for API consumers: split aliases into a list,
        # map the numeric loglevel to its name, default encryption, and never
        # expose the stored secrets blob.
        smb['netbiosname_local'] = smb['netbiosname']
        smb['netbiosalias'] = (smb['netbiosalias'] or '').split()
        smb['loglevel'] = LOGLEVEL_MAP.get(smb['loglevel'])
        smb['encryption'] = smb['encryption'] or 'DEFAULT'
        smb.pop('secrets', None)
        return smb
    @accepts()
    async def unixcharset_choices(self):
        # Common charsets are listed explicitly; generate_choices appends the
        # remaining iconv-supported encodings after them.
        return await self.generate_choices(
            ['UTF-8', 'ISO-8859-1', 'ISO-8859-15', 'GB2312', 'EUC-JP', 'ASCII']
        )
    @private
    def generate_smb_configuration(self):
        """Collect all state needed to render smb4.conf and return the conf dict."""
        if self.middleware.call_sync('failover.status') not in ('SINGLE', 'MASTER'):
            # Passive HA node: minimal placeholder config.
            return {'netbiosname': 'TN_STANDBY'}

        if (ds_type := self.middleware.call_sync('directoryservices.status')['type']) is not None:
            ds_type = DSType(ds_type)

        # Gather directory-service specific configuration.
        match ds_type:
            case DSType.AD:
                ds_config = self.middleware.call_sync('activedirectory.config')
            case DSType.IPA:
                ds_config = self.middleware.call_sync('ldap.config')
                try:
                    ipa_domain = self.middleware.call_sync('directoryservices.connection.ipa_get_smb_domain_info')
                except Exception:
                    # Best-effort: IPA SMB domain info is optional.
                    ipa_domain = None

                try:
                    ipa_config = self.middleware.call_sync('ldap.ipa_config')
                except Exception:
                    self.middleware.logger.warning(
                        'Failed to retrieve IPA configuration. Disabling IPA SMB support',
                        exc_info=True
                    )
                    ipa_config = {}
                    ipa_domain = None

                ds_config |= {'ipa_domain': ipa_domain, 'ipa_config': ipa_config}
            case DSType.LDAP:
                ds_config = self.middleware.call_sync('ldap.config')
            case _:
                ds_config = None

        idmap_config = self.middleware.call_sync('idmap.query')
        smb_config = self.middleware.call_sync('smb.config')
        smb_shares = self.middleware.call_sync('sharing.smb.query')
        bind_ip_choices = self.middleware.call_sync('smb.bindip_choices')
        is_enterprise = self.middleware.call_sync('system.is_enterprise')

        return generate_smb_conf_dict(
            ds_type, ds_config, smb_config, smb_shares, bind_ip_choices, idmap_config, is_enterprise
        )
    @private
    async def generate_choices(self, initial):
        """Return a {name: name} mapping of charsets with `initial` entries last."""
        def key_cp(encoding):
            # Sort "<NAME><NUM>"-style charsets numerically by codepage number;
            # anything else sorts last (infinity).
            cp = re.compile(r"(?P<name>CP|GB|ISO-8859-|UTF-)(?P<num>\d+)").match(encoding)
            if cp:
                return tuple((cp.group('name'), int(cp.group('num'), 10)))
            else:
                return tuple((encoding, float('inf')))

        charset = await self.common_charset_choices()
        return {
            v: v for v in [
                c for c in sorted(charset, key=key_cp) if c not in initial
            ] + initial
        }
    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the SMB service.
        Addresses assigned by DHCP are excluded from the results.
        """
        choices = {}

        ha_mode = await self.get_smb_ha_mode()
        if ha_mode == 'UNIFIED':
            # HA: only virtual (failover) addresses are valid bind targets.
            master, backup, init = await self.middleware.call('failover.vip.get_states')
            for master_iface in await self.middleware.call('interface.query', [["id", "in", master + backup]]):
                for i in master_iface['failover_virtual_aliases']:
                    choices[i['address']] = i['address']

            return choices

        for i in await self.middleware.call('interface.ip_in_use'):
            choices[i['address']] = i['address']

        return choices
    @accepts()
    async def domain_choices(self):
        """
        List of domains visible to winbindd. Returns empty list if winbindd is
        stopped.
        """
        domains = await self.middleware.call('idmap.known_domains')
        return [dom['netbios_domain'] for dom in domains]
    @private
    async def common_charset_choices(self):
        """Return the set of charsets supported by both iconv and Python's codecs."""
        def check_codec(encoding):
            # Only offer encodings that Python itself can look up.
            try:
                return encoding.upper() if codecs.lookup(encoding) else False
            except LookupError:
                return False

        proc = await run(['/usr/bin/iconv', '-l'], check=False)
        output = proc.stdout.decode()

        encodings = set()
        for line in output.splitlines():
            enc = [e for e in line.split() if check_codec(e)]
            if enc:
                cp = enc[0]
                for e in enc:
                    # Prefer a canonical name when iconv lists several aliases
                    # on one line.
                    if e in ('UTF-8', 'ASCII', 'GB2312', 'HZ-GB-2312', 'CP1361'):
                        cp = e
                        break

                encodings.add(cp)

        return encodings
@private
async def store_ldap_admin_password(self):
"""
This is required if the LDAP directory service is enabled. The ldap admin dn and
password are stored in private/secrets.tdb file.
"""
ldap = await self.middleware.call('ldap.config')
if not ldap['enable']:
return True
set_pass = await run([SMBCmd.SMBPASSWD.value, '-w', ldap['bindpw']], check=False)
if set_pass.returncode != 0:
self.logger.debug(f"Failed to set set ldap bindpw in secrets.tdb: {set_pass.stdout.decode()}")
return False
return True
    @private
    def getparm(self, parm, section):
        """
        Get a parameter from the smb4.conf file. This is more reliable than
        'testparm --parameter-name'. testparm will fail in a variety of
        conditions without returning the parameter's value.
        """
        # Delegates to the util_param helper which reads via loadparm.
        return smbconf_getparm(parm, section)
@private
async def get_next_rid(self, objtype, id_):
base_rid = 20000 if objtype == 'USER' else 200000
return base_rid + id_
    @private
    async def setup_directories(self):
        """Ensure all samba runtime paths exist with root ownership, correct modes and trivial ACLs."""
        def create_dirs(spec, path):
            # Fix mode/ownership of an existing path, or create it if missing.
            try:
                os.chmod(path, spec.mode())
                if os.stat(path).st_uid != 0:
                    self.logger.warning("%s: invalid owner for path. Correcting.", path)
                    os.chown(path, 0, 0)
            except FileNotFoundError:
                if spec.is_dir():
                    os.mkdir(path, spec.mode())

        await self.reset_smb_ha_mode()
        await self.middleware.call('etc.generate', 'smb')

        for p in SMBPath:
            if p == SMBPath.STUBCONF:
                continue

            path = p.platform()
            try:
                if not await self.middleware.call('filesystem.acl_is_trivial', path):
                    self.logger.warning("Inappropriate ACL detected on path [%s] stripping ACL", path)
                    stripacl = await run(['setfacl', '-b', path], check=False)
                    if stripacl.returncode != 0:
                        self.logger.warning("Failed to strip ACL from path %s: %s", path,
                                            stripacl.stderr.decode())
            except CallError:
                # Currently only time CallError is raise here is on ENOENT, which may be expected
                pass

            await self.middleware.run_in_thread(create_dirs, p, path)
    @private
    async def import_conf_to_registry(self):
        """Replace samba's registry share configuration with the generated conf file."""
        drop = await run([SMBCmd.NET.value, 'conf', 'drop'], check=False)
        if drop.returncode != 0:
            self.logger.warning('failed to drop existing share config: %s',
                                drop.stderr.decode())
        load = await run([SMBCmd.NET.value, 'conf', 'import',
                          SMBPath.SHARECONF.platform()], check=False)

        if load.returncode != 0:
            self.logger.warning('failed to load share config: %s',
                                load.stderr.decode())
@private
async def netif_wait(self, timeout=120):
"""
Wait for for the ix-netif sentinel file
and confirm connectivity with some targeted tests.
All must be completed before the timeout.
"""
found_sentinel = False
while timeout >= 0 and not found_sentinel:
if await self.middleware.run_in_thread(os.path.exists, NETIF_COMPLETE_SENTINEL):
found_sentinel = True
timeout -= 1
if timeout <= 0:
self.logger.warning('Failed to get netif completion sentinal.')
elif not found_sentinel:
await asyncio.sleep(1)
"""
Confirm at least one network interface is UP
"""
while timeout >= 0:
if any((
i['state']['link_state'] == 'LINK_STATE_UP' for i in await self.middleware.call('interface.query')
)):
break
else:
timeout -= 1
if timeout <= 0:
self.logger.warning('Failed to detect any connected network interfaces.')
else:
await asyncio.sleep(1)
    @private
    @job(lock="smb_configure")
    async def configure(self, job, create_paths=True):
        """
        Many samba-related tools will fail if they are unable to initialize
        a messaging context, which will happen if the samba-related directories
        do not exist or have incorrect permissions.
        """
        job.set_progress(0, 'Setting up SMB directories.')
        if create_paths:
            await self.setup_directories()

        job.set_progress(10, 'Generating stub SMB config.')
        await self.middleware.call('etc.generate', 'smb')

        """
        smb4.conf registry setup. The smb config is split between five
        different middleware plugins (smb, idmap, ad, ldap, sharing.smb).
        This initializes them in the above order so that configuration errors
        do not occur.
        """
        job.set_progress(25, 'generating SMB, idmap, and directory service config.')
        # NOTE(review): etc.generate 'smb' is invoked again here (and progress
        # drops from 25 back to 20 below) — looks redundant/out of order;
        # confirm intent before changing.
        await self.middleware.call('etc.generate', 'smb')

        """
        We cannot continue without network.
        Wait here until we see the ix-netif completion sentinel.
        """
        job.set_progress(20, 'Wait for ix-netif completion.')
        await self.netif_wait()

        """
        Since some NSS modules will default to setting home directory to /var/empty,
        verify that this path is immutable during setup for SMB service (prior to
        initializing directory services).
        """
        try:
            is_immutable = await self.middleware.call('filesystem.is_immutable', '/var/empty')
            if not is_immutable:
                await self.middleware.call('filesystem.set_immutable', True, '/var/empty')
        except Exception:
            self.logger.warning("Failed to set immutable flag on /var/empty", exc_info=True)

        job.set_progress(30, 'Setting up server SID.')
        await self.middleware.call('smb.set_system_sid')

        job.set_progress(40, 'Synchronizing passdb and groupmap.')
        await self.middleware.call('etc.generate', 'user')
        pdb_job = await self.middleware.call("smb.synchronize_passdb", True)
        grp_job = await self.middleware.call("smb.synchronize_group_mappings", True)
        await pdb_job.wait()
        await grp_job.wait()

        """
        The following steps ensure that we cleanly import our SMB shares
        into the registry.
        """
        await self.middleware.call('smb.set_configured')
        job.set_progress(60, 'generating SMB share configuration.')
        await self.middleware.call('sharing.smb.sync_registry')

        """
        It is possible that system dataset was migrated or an upgrade
        wiped our secrets.tdb file. Re-import directory service secrets
        if they are missing from the current running configuration.
        """
        job.set_progress(65, 'Initializing directory services')
        ad_enabled = (await self.middleware.call('activedirectory.config'))['enable']
        if ad_enabled:
            ldap_enabled = False
        else:
            ldap_enabled = (await self.middleware.call('ldap.config'))['enable']

        ds_job = await self.middleware.call(
            "directoryservices.initialize",
            {"activedirectory": ad_enabled, "ldap": ldap_enabled}
        )
        await ds_job.wait()

        job.set_progress(70, 'Checking SMB server status.')
        if await self.middleware.call("service.started_or_enabled", "cifs"):
            job.set_progress(80, 'Restarting SMB service.')
            await self.middleware.call("service.restart", "cifs", {"ha_propagate": False})

        # Ensure that winbind is running once we configure SMB service
        await self.middleware.call('service.restart', 'idmap', {'ha_propagate': False})
        job.set_progress(100, 'Finished configuring SMB.')
    @private
    async def configure_wait(self):
        """
        This method is possibly called by cifs service and idmap service start
        depending on whether system dataset setup was successful. Although
        a partially configured system dataset is a somewhat undefined state,
        it's best to at least try to get the SMB service working properly.

        Callers use response here to determine whether to make the start / restart
        operation a no-op.
        """
        if await self.middleware.call("smb.is_configured"):
            return True

        # Another smb.configure job already running: let it finish on its own.
        in_progress = await self.middleware.call("core.get_jobs", [
            ["method", "=", "smb.configure"],
            ["state", "=", "RUNNING"]
        ])
        if in_progress:
            return False

        # Cannot configure without a system dataset.
        if not await self.middleware.call('systemdataset.sysdataset_path'):
            return False

        self.logger.warning(
            "SMB service was not properly initialized. "
            "Attempting to configure SMB service."
        )
        conf_job = await self.middleware.call("smb.configure")
        await conf_job.wait(raise_error=True)
        return True
    @private
    async def get_smb_ha_mode(self):
        """Return (and cache) the HA mode: UNIFIED on HA-licensed systems, else STANDALONE."""
        try:
            return await self.middleware.call('cache.get', 'SMB_HA_MODE')
        except KeyError:
            # Cache miss: compute and store below.
            pass

        if await self.middleware.call('failover.licensed'):
            hamode = SMBHAMODE['UNIFIED'].name
        else:
            hamode = SMBHAMODE['STANDALONE'].name

        await self.middleware.call('cache.put', 'SMB_HA_MODE', hamode)
        return hamode
    @private
    async def reset_smb_ha_mode(self):
        # Invalidate the cached HA mode and recompute it.
        await self.middleware.call('cache.pop', 'SMB_HA_MODE')
        return await self.get_smb_ha_mode()
    @private
    async def apply_aapl_changes(self):
        """Re-apply registry config for every share after toggling AAPL extensions."""
        shares = await self.middleware.call('sharing.smb.query')
        for share in shares:
            diff = await self.middleware.call(
                'sharing.smb.diff_middleware_and_registry', share['name'], share
            )

            if diff is None:
                self.logger.warning("Share [%s] does not exist in registry.",
                                    share['name'])
                continue

            # Home shares live in the special 'homes' registry section.
            share_name = share['name'] if not share['home'] else 'homes'
            await self.middleware.call('sharing.smb.apply_conf_diff',
                                       share_name, diff)
    @private
    async def validate_smb(self, new, verrors):
        """Validate an smb_update payload, accumulating errors into `verrors`."""
        try:
            await self.middleware.call('sharing.smb.validate_aux_params',
                                       new['smb_options'],
                                       'smb_update.smb_options')
        except ValidationErrors as errs:
            verrors.add_child('smb_update.smb_options', errs)

        if new.get('unixcharset') and new['unixcharset'] not in await self.unixcharset_choices():
            verrors.add(
                'smb_update.unixcharset',
                'Please provide a valid value for unixcharset'
            )

        if new['enable_smb1'] and new['encryption'] == 'REQUIRED':
            verrors.add(
                'smb_update.encryption',
                'Encryption may not be set to REQUIRED while SMB1 support is enabled.'
            )

        for i in ('workgroup', 'netbiosname', 'netbiosalias'):
            """
            There are two cases where NetBIOS names must be rejected:
            1. They contain invalid characters for NetBIOS protocol
            2. The name is identical to the NetBIOS workgroup.
            """
            if not new.get(i):
                """
                Skip validation on NULL or empty string. If parameter is required for
                the particular server configuration, then a separate validation error
                will be added in a later validation step.
                """
                continue

            if i == 'netbiosalias':
                for idx, item in enumerate(new[i]):
                    if item.casefold() == new['workgroup'].casefold():
                        verrors.add(
                            f'smb_update.{i}.{idx}',
                            f'NetBIOS alias [{item}] conflicts with workgroup name.'
                        )
            else:
                if i != 'workgroup' and new[i].casefold() == new['workgroup'].casefold():
                    verrors.add(
                        f'smb_update.{i}',
                        f'NetBIOS name [{new[i]}] conflicts with workgroup name.'
                    )

        if new['guest'] is not None:
            if new['guest'] == 'root':
                verrors.add('smb_update.guest', '"root" is not a permitted guest account')

            try:
                await self.middleware.call("user.get_user_obj", {"username": new["guest"]})
            except KeyError:
                verrors.add('smb_update.guest', f'{new["guest"]}: user does not exist')

        if new.get('bindip'):
            bindip_choices = list((await self.bindip_choices()).keys())
            for idx, item in enumerate(new['bindip']):
                if item not in bindip_choices:
                    verrors.add(
                        f'smb_update.bindip.{idx}',
                        f'IP address [{item}] is not a configured address for this server'
                    )

        if not new.get('workgroup'):
            verrors.add('smb_update.workgroup', 'workgroup field is required.')

        if not new.get('netbiosname'):
            verrors.add('smb_update.netbiosname', 'NetBIOS name is required.')

        for i in ('filemask', 'dirmask'):
            if not new[i]:
                continue
            try:
                # Masks are octal; only permission + setuid/sticky bits allowed.
                if int(new[i], 8) & ~0o11777:
                    raise ValueError('Not an octet')
            except (ValueError, TypeError):
                verrors.add(f'smb_update.{i}', 'Not a valid mask')

        if not new['aapl_extensions']:
            # AAPL extensions cannot be disabled while shares depend on them.
            filters = [['OR', [['afp', '=', True], ['timemachine', '=', True]]]]
            if await self.middleware.call(
                'sharing.smb.query', filters, {'count': True, 'select': ['afp', 'timemachine']}
            ):
                verrors.add(
                    'smb_update.aapl_extensions',
                    'This option must be enabled when AFP or time machine shares are present'
                )

        if new['enable_smb1']:
            # SMB1 is incompatible with share auditing.
            if audited_shares := await self.middleware.call(
                'sharing.smb.query', [['audit.enable', '=', True]], {'select': ['audit', 'name']}
            ):
                verrors.add(
                    'smb_update.enable_smb1',
                    f'The following SMB shares have auditing enabled: {", ".join([x["name"] for x in audited_shares])}'
                )
    @accepts(Dict(
        'smb_update',
        NetbiosName('netbiosname', max_length=15),
        NetbiosName('netbiosname_b', max_length=15),
        List('netbiosalias', items=[NetbiosName('netbios_alias')]),
        NetbiosDomain('workgroup'),
        Str('description'),
        Bool('enable_smb1'),
        Str('unixcharset'),
        Str('loglevel', enum=['NONE', 'MINIMUM', 'NORMAL', 'FULL', 'DEBUG']),
        Bool('syslog'),
        Bool('aapl_extensions'),
        Bool('localmaster'),
        Str('guest'),
        Str('admin_group', required=False, default=None, null=True),
        Str('filemask'),
        Str('dirmask'),
        Bool('ntlmv1_auth'),
        Bool('multichannel', default=False),
        Str('encryption', enum=['DEFAULT', 'NEGOTIATE', 'DESIRED', 'REQUIRED']),
        List('bindip', items=[IPAddr('ip')]),
        Str('smb_options', max_length=None),
        update=True,
    ), audit='Update SMB configuration')
    @pass_app(rest=True)
    async def do_update(self, app, data):
        """
        Update SMB Service Configuration.

        `netbiosname` defaults to the original hostname of the system.

        `netbiosalias` a list of netbios aliases. If Server is joined to an AD domain, additional Kerberos
        Service Principal Names will be generated for these aliases.

        `workgroup` specifies the NetBIOS workgroup to which the TrueNAS server belongs. This will be
        automatically set to the correct value during the process of joining an AD domain.
        NOTE: `workgroup` and `netbiosname` should have different values.

        `enable_smb1` allows legacy SMB clients to connect to the server when enabled.

        `aapl_extensions` enables support for SMB2 protocol extensions for MacOS clients. This is not a
        requirement for MacOS support, but is currently a requirement for time machine support.

        `localmaster` when set, determines if the system participates in a browser election.

        `guest` attribute is specified to select the account to be used for guest access. It defaults to "nobody".

        The group specified as the SMB `admin_group` will be automatically added as a foreign group member
        of S-1-5-32-544 (builtin\\admins). This will afford the group all privileges granted to a local admin.
        Any SMB group may be selected (including AD groups).

        `ntlmv1_auth` enables a legacy and insecure authentication method, which may be required for legacy or
        poorly-implemented SMB clients.

        `encryption` set global server behavior with regard to SMB encrpytion. Options are DEFAULT (which
        follows the upstream defaults -- currently identical to NEGOTIATE), NEGOTIATE encrypts SMB transport
        only if requested by the SMB client, DESIRED encrypts SMB transport if supported by the SMB client,
        REQUIRED only allows encrypted transport to the SMB server. Mandatory SMB encryption is not
        compatible with SMB1 server support in TrueNAS.

        `smb_options` smb.conf parameters that are not covered by the above supported configuration options may be
        added as an smb_option. Not all options are tested or supported, and behavior of smb_options may change
        between releases. Stability of smb.conf options is not guaranteed.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        # Skip this check if we're joining AD
        ds = await self.middleware.call('directoryservices.status')
        if ds['type'] == DSType.AD.value and ds['status'] in (
            DSStatus.HEALTHY.name, DSStatus.FAULTED.name
        ):
            for i in ('workgroup', 'netbiosname', 'netbiosalias'):
                if old[i] != new[i]:
                    verrors.add(f'smb_update.{i}',
                                'This parameter may not be changed after joining Active Directory (AD). '
                                'If it must be changed, the proper procedure is to leave the AD domain '
                                'and then alter the parameter before re-joining the domain.')

        if app and not credential_has_full_admin(app.authenticated_credentials):
            if old['smb_options'] != new['smb_options']:
                verrors.add(
                    'smb_update.smb_options',
                    'Changes to auxiliary parameters for the SMB service are restricted '
                    'to users with full administrative privileges.'
                )

        await self.validate_smb(new, verrors)
        verrors.check()

        await self.compress(new)

        await self.middleware.call(
            'datastore.update', self._config.datastore,
            new['id'], new, {'prefix': 'cifs_srv_'}
        )

        await self.middleware.call('etc.generate', 'smb')
        new_config = await self.config()
        await self.reset_smb_ha_mode()

        """
        Toggling aapl_extensions will require changes to all shares
        on server (enabling vfs_fruit and possibly changing catia params).
        """
        if old['aapl_extensions'] != new['aapl_extensions']:
            await self.apply_aapl_changes()

        if old['netbiosname_local'] != new_config['netbiosname_local']:
            # Server identity changed: new SID, passdb domain and mDNS records.
            await self.middleware.call('smb.set_system_sid')

            # we need to update domain field in passdb.tdb
            pdb_job = await self.middleware.call('smb.synchronize_passdb')
            await pdb_job.wait()

            await self.middleware.call('idmap.gencache.flush')
            srv = (await self.middleware.call("network.configuration.config"))["service_announcement"]
            await self.middleware.call("network.configuration.toggle_announcement", srv)

        if new['admin_group'] and new['admin_group'] != old['admin_group']:
            job = await self.middleware.call('smb.synchronize_group_mappings')
            await job.wait()

        await self._service_change(self._config.service, 'restart')
        return new_config
@private
async def compress(self, data):
    """Convert an SMB service config payload into its datastore representation."""
    # 'DEFAULT' encryption is persisted as NULL.
    if data['encryption'] == 'DEFAULT':
        data['encryption'] = None

    # Strip runtime-derived keys that are never persisted.
    for derived_key in ('netbiosname_local', 'netbiosname_b'):
        data.pop(derived_key, None)

    # 'next_rid' is expected to always be present; a KeyError here would
    # indicate a programming error upstream.
    del data['next_rid']

    # Store the numeric log level and the alias list as a single string.
    data['loglevel'] = LOGLEVEL_MAP.inv.get(data['loglevel'], 1)
    data['netbiosalias'] = ' '.join(data['netbiosalias'])
    return data
class SharingSMBModel(sa.Model):
    # Datastore model backing SMB share configuration.
    __tablename__ = 'sharing_cifs_share'

    id = sa.Column(sa.Integer(), primary_key=True)
    cifs_purpose = sa.Column(sa.String(120))
    # nullable: some shares may be stored without an explicit path
    # (e.g. the LDAP [homes] edge case described in validate()).
    cifs_path = sa.Column(sa.String(255), nullable=True)
    cifs_path_suffix = sa.Column(sa.String(255), nullable=False)
    cifs_home = sa.Column(sa.Boolean(), default=False)
    cifs_name = sa.Column(sa.String(120))
    cifs_comment = sa.Column(sa.String(120))
    cifs_ro = sa.Column(sa.Boolean(), default=False)
    cifs_browsable = sa.Column(sa.Boolean(), default=True)
    cifs_recyclebin = sa.Column(sa.Boolean(), default=False)
    cifs_guestok = sa.Column(sa.Boolean(), default=False)
    # Host allow/deny lists are stored as whitespace-delimited text;
    # they are split/joined in extend()/compress().
    cifs_hostsallow = sa.Column(sa.Text())
    cifs_hostsdeny = sa.Column(sa.Text())
    cifs_auxsmbconf = sa.Column(sa.Text())
    cifs_aapl_name_mangling = sa.Column(sa.Boolean())
    cifs_abe = sa.Column(sa.Boolean())
    cifs_acl = sa.Column(sa.Boolean())
    cifs_durablehandle = sa.Column(sa.Boolean())
    cifs_streams = sa.Column(sa.Boolean())
    cifs_timemachine = sa.Column(sa.Boolean(), default=False)
    cifs_timemachine_quota = sa.Column(sa.Integer(), default=0)
    # Per-share UUID generated at creation time (see do_create).
    cifs_vuid = sa.Column(sa.String(36))
    cifs_shadowcopy = sa.Column(sa.Boolean())
    cifs_fsrvp = sa.Column(sa.Boolean())
    cifs_enabled = sa.Column(sa.Boolean(), default=True)
    cifs_share_acl = sa.Column(sa.Text())
    cifs_afp = sa.Column(sa.Boolean())
    cifs_audit = sa.Column(sa.JSON(dict), default=SMB_AUDIT_DEFAULTS)
class SharingSMBService(SharingService):
    share_task_type = 'SMB'
    # Shares are addressed by their resolved local path field.
    path_field = 'path_local'
    # NOTE(review): EXTERNAL paths appear to be DFS proxy targets
    # (see validate_external_path) — confirm.
    allowed_path_types = [FSLocation.EXTERNAL, FSLocation.LOCAL]

    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'
        cli_namespace = 'sharing.smb'
        role_prefix = 'SHARING_SMB'
@accepts(
    Dict(
        'sharingsmb_create',
        Str('purpose', enum=[x.name for x in SMBSharePreset], default=SMBSharePreset.DEFAULT_SHARE.name),
        SchemaPath('path', required=True),
        Str('path_suffix', default=''),
        Bool('home', default=False),
        Str('name', max_length=80, required=True),
        Str('comment', default=''),
        Bool('ro', default=False),
        Bool('browsable', default=True),
        Bool('timemachine', default=False),
        Int('timemachine_quota', default=0),
        Bool('recyclebin', default=False),
        Bool('guestok', default=False),
        Bool('abe', default=False),
        List('hostsallow'),
        List('hostsdeny'),
        Bool('aapl_name_mangling', default=False),
        Bool('acl', default=True),
        Bool('durablehandle', default=True),
        Bool('shadowcopy', default=True),
        Bool('streams', default=True),
        Bool('fsrvp', default=False),
        Str('auxsmbconf', max_length=None, default=''),
        Bool('enabled', default=True),
        Bool('afp', default=False),
        Dict(
            'audit',
            Bool('enable'),
            List('watch_list', default=NOT_PROVIDED),
            List('ignore_list', default=NOT_PROVIDED)
        ),
        register=True
    ),
    audit='SMB share create', audit_extended=lambda data: data['name']
)
@pass_app(rest=True)
async def do_create(self, app, data):
    """
    Create a SMB Share.

    `purpose` applies common configuration presets depending on intended purpose.

    `path` path to export over the SMB protocol.

    `timemachine` when set, enables Time Machine backups for this share.

    `ro` when enabled, prohibits write access to the share.

    `guestok` when enabled, allows access to this share without a password.

    `hostsallow` is a list of hostnames / IP addresses which have access to this share.

    `hostsdeny` is a list of hostnames / IP addresses which are not allowed access to this share. If a handful
    of hostnames are to be only allowed access, `hostsdeny` can be passed "ALL" which means that it will deny
    access to ALL hostnames except for the ones which have been listed in `hostsallow`.

    `acl` enables support for storing the SMB Security Descriptor as a Filesystem ACL.

    `streams` enables support for storing alternate datastreams as filesystem extended attributes.

    `fsrvp` enables support for the filesystem remote VSS protocol. This allows clients to create
    ZFS snapshots through RPC.

    `shadowcopy` enables support for the volume shadow copy service.

    `audit` object contains configuration parameters related to SMB share auditing. It contains the
    following keys: `enable`, `watch_list` and `ignore_list`. Enable is boolean and controls whether
    audit messages will be generated for the share. `watch_list` is a list of groups for which to
    generate audit messages (defaults to all groups). `ignore_list` is a list of groups to ignore
    when auditing. If conflict arises between watch_list and ignore_list (based on user group
    membership), then watch_list will take precedence and ops will be audited.
    NOTE: auditing may not be enabled if SMB1 support is enabled for the server.

    `auxsmbconf` is a string of additional smb4.conf parameters not covered by the system's API.
    """
    # Overlay user-supplied audit settings on top of the defaults so the
    # stored audit config always carries every key.
    # NOTE(review): data.get('audit') returns None if the key is somehow
    # absent, which would raise TypeError here — presumably the accepts
    # schema always supplies the dict (do_update uses .get('audit', {})).
    audit_info = deepcopy(SMB_AUDIT_DEFAULTS) | data.get('audit')
    data['audit'] = audit_info

    verrors = ValidationErrors()

    # Auxiliary smb.conf parameters are restricted to full administrators.
    if app and not credential_has_full_admin(app.authenticated_credentials):
        if data['auxsmbconf']:
            verrors.add(
                'smb_update.auxsmbconf',
                'Changes to auxiliary parameters for SMB shares are restricted '
                'to users with full administrative privileges.'
            )

    await self.add_path_local(data)
    await self.validate(data, 'sharingsmb_create', verrors)
    await self.legacy_afp_check(data, 'sharingsmb_create', verrors)
    verrors.check()

    data['vuid'] = str(uuid.uuid4())
    compressed = await self.compress(data)

    data['id'] = await self.middleware.call(
        'datastore.insert', self._config.datastore, compressed,
        {'prefix': self._config.datastore_prefix})

    data['auxsmbconf'] = smb_strip_comments(data['auxsmbconf'])

    try:
        await self.middleware.call('sharing.smb.reg_addshare', data)
    except CallError as e:
        if e.errno != errno.EINVAL:
            raise e from None

        # Share contained garbage auxiliary parameters treat as ValidationError.
        # Roll back the datastore insert before surfacing the error.
        await self.middleware.call('datastore.delete', self._config.datastore, data['id'])
        raise ValidationError('sharingsmb_create.auxsmbconf', e.errmsg)

    do_global_reload = await self.must_reload_globals(data)
    if do_global_reload:
        ds = await self.middleware.call('directoryservices.status')
        # Restart (rather than reload) when joined to a healthy AD domain.
        if ds['type'] == DSType.AD.value and ds['status'] == DSStatus.HEALTHY.name:
            if data['home']:
                # [homes] affects generated smb.conf and idmap cache contents.
                await self.middleware.call('etc.generate', 'smb')
                await self.middleware.call('idmap.clear_idmap_cache')

            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

    if is_time_machine_share(data):
        # Advertise the new time machine target over mDNS.
        await self.middleware.call('service.reload', 'mdns', {'ha_propagate': False})

    return await self.get_instance(data['id'])
@private
async def apply_share_changes(self, old_is_locked, new_is_locked, oldname, newname, old, new):
    """
    Propagate an updated share configuration to the running registry config,
    taking the locked state of the underlying dataset (old and new) into account.
    """
    if oldname != newname:
        # Presumably share_info entries are keyed by name; flush on rename.
        await self.middleware.call('smb.sharesec.flush_share_info')

    if not old_is_locked and not new_is_locked:
        if oldname != newname:
            # This is disruptive change. Share is actually being removed and replaced.
            # Forcibly closes any existing SMB sessions.
            await self.toggle_share(oldname, False)
            try:
                await self.middleware.run_in_thread(reg_delshare, oldname)
            except MatchNotFound:
                pass
            except Exception:
                self.logger.warning('Failed to remove stale share [%s]',
                                    old['name'], exc_info=True)
            await self.middleware.call('sharing.smb.reg_addshare', new)
        else:
            # Same name: apply only the delta between db and registry.
            diff = await self.middleware.call(
                'sharing.smb.diff_middleware_and_registry', new['name'], new
            )
            if diff is None:
                # This is special return when share doesn't exist
                await self.middleware.call('sharing.smb.reg_addshare', new)
            else:
                share_name = new['name'] if not new['home'] else 'homes'
                await self.middleware.call('sharing.smb.apply_conf_diff',
                                           share_name, diff)
    elif old_is_locked and not new_is_locked:
        """
        Since the old share was not in our running configuration, we need
        to add it.
        """
        await self.toggle_share(newname, True)
    elif not old_is_locked and new_is_locked:
        await self.toggle_share(newname, False)

    if new['enabled'] != old['enabled']:
        if not new['enabled']:
            await self.toggle_share(newname, False)
@accepts(
    Int('id'),
    Patch(
        'sharingsmb_create',
        'sharingsmb_update',
        ('attr', {'update': True})
    ),
    audit='SMB share update',
    audit_callback=True,
)
@pass_app(rest=True)
async def do_update(self, app, audit_callback, id_, data):
    """
    Update SMB Share of `id`.
    """
    old = await self.get_instance(id_)
    audit_callback(old['name'])
    verrors = ValidationErrors()

    old_audit = old['audit']
    new = old.copy()
    new.update(data)
    # Merge audit settings rather than replace so omitted keys keep their
    # previous values.
    new['audit'] = old_audit | data.get('audit', {})
    # Homes shares are exported under the reserved section name "homes".
    oldname = 'homes' if old['home'] else old['name']
    newname = 'homes' if new['home'] else new['name']

    await self.add_path_local(new)
    await self.validate(new, 'sharingsmb_update', verrors, old=old)
    await self.legacy_afp_check(new, 'sharingsmb_update', verrors)
    check_mdns = False

    # Auxiliary smb.conf parameters are restricted to full administrators.
    if app and not credential_has_full_admin(app.authenticated_credentials):
        if old['auxsmbconf'] != new['auxsmbconf']:
            verrors.add(
                'smb_update.auxsmbconf',
                'Changes to auxiliary parameters for SMB shares are restricted '
                'to users with full administrative privileges.'
            )

    verrors.check()

    guest_changed = old['guestok'] != new['guestok']

    old_is_locked = (await self.get_instance(id_))['locked']
    if old['path'] != new['path']:
        new_is_locked = await self.middleware.call('pool.dataset.path_in_locked_datasets', new['path'])
    else:
        new_is_locked = old_is_locked

    compressed = await self.compress(new)
    await self.middleware.call(
        'datastore.update', self._config.datastore, id_, compressed,
        {'prefix': self._config.datastore_prefix})

    new['auxsmbconf'] = smb_strip_comments(new['auxsmbconf'])

    if not new_is_locked:
        """
        Enabling AAPL SMB2 extensions globally affects SMB shares. If this
        happens, the SMB service _must_ be restarted. Skip this step if dataset
        underlying the new path is encrypted.
        """
        do_global_reload = guest_changed or await self.must_reload_globals(new)
    else:
        do_global_reload = False

    if old_is_locked and new_is_locked:
        """
        Configuration change only impacts a locked SMB share. From standpoint of
        running config, this is a no-op. No need to restart or reload service.
        """
        return await self.get_instance(id_)

    try:
        await self.apply_share_changes(old_is_locked, new_is_locked, oldname, newname, old, new)
    except CallError as e:
        if e.errno != errno.EINVAL:
            raise e from None

        # Bad auxiliary parameters: roll the datastore back to the previous
        # configuration before surfacing the validation error.
        compressed = await self.compress(old)
        await self.middleware.call(
            'datastore.update', self._config.datastore, id_, compressed,
            {'prefix': self._config.datastore_prefix}
        )
        raise ValidationError('sharingsmb_update.auxsmbconf', e.errmsg)

    if new['enabled'] != old['enabled']:
        check_mdns = True

    # Homes shares require pam restrictions to be enabled (global setting)
    # so that we auto-generate the home directory via pam_mkhomedir.
    # Hence, we need to redo the global settings after changing homedir.
    if new.get('home') is not None and old['home'] != new['home']:
        do_global_reload = True

    if do_global_reload:
        ds = await self.middleware.call('directoryservices.status')
        if ds['type'] == DSType.AD.value and ds['status'] == DSStatus.HEALTHY.name:
            if new['home'] or old['home']:
                await self.middleware.call('idmap.clear_idmap_cache')

            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

    if check_mdns or old['timemachine'] != new['timemachine']:
        await self.middleware.call('service.reload', 'mdns')

    return await self.get_instance(id_)
@accepts(Int('id'), audit='SMB share delete', audit_callback=True)
async def do_delete(self, audit_callback, id_):
    """
    Delete SMB Share of `id`. This will forcibly disconnect SMB clients
    that are accessing the share.
    """
    share = await self.get_instance(id_)
    audit_callback(share['name'])
    result = await self.middleware.call('datastore.delete', self._config.datastore, id_)

    # Homes shares are exported under the reserved section name "homes".
    share_name = 'homes' if share['home'] else share['name']
    share_list = await self.middleware.run_in_thread(reg_listshares)
    if share_name in share_list:
        # Disconnect clients, then remove the share ACL and registry entry.
        await self.toggle_share(share_name, False)
        try:
            await self.middleware.run_in_thread(remove_share_acl, share_name)
        except RuntimeError as e:
            # TDB library sets arg0 to TDB errno and arg1 to TDB strerr
            if e.args[0] != TDBError.NOEXIST:
                self.logger.warning('%s: Failed to remove share ACL', share_name, exc_info=True)
        except Exception:
            self.logger.debug('Failed to delete share ACL for [%s].', share_name, exc_info=True)

        try:
            await self.middleware.run_in_thread(reg_delshare, share_name)
        except MatchNotFound:
            pass
        except Exception:
            # BUGFIX: logger.warn() is a deprecated alias of logger.warning().
            self.logger.warning('Failed to remove registry entry for [%s].', share_name, exc_info=True)

    if is_time_machine_share(share):
        # Advertised time machine targets changed; refresh mDNS records.
        await self.middleware.call('service.reload', 'mdns', {'ha_propagate': False})

    if share_name == 'homes':
        # Regenerate smb.conf so the [homes] section is removed.
        await self.middleware.call('etc.generate', 'smb')

    return result
@private
async def legacy_afp_check(self, data, schema, verrors):
    """
    Ensure the legacy-AFP compatibility flag of this share agrees with any
    enabled legacy-AFP share exporting an overlapping path.
    """
    target = Path(data['path']).resolve(strict=False)
    filters = [
        ("afp", "=", True),
        ("enabled", "=", True),
        ("id", "!=", data.get("id"))
    ]
    for other in await self.query(filters):
        if other['afp'] == data['afp']:
            # Settings already agree; nothing to verify for this share.
            continue

        # Locked shares may reference paths that do not currently exist,
        # so only resolve strictly when the share is unlocked.
        other_path = Path(other['path']).resolve(strict=(not other['locked']))
        if not (other_path.is_relative_to(target) or target.is_relative_to(other_path)):
            continue

        verrors.add(
            f"{schema}.afp",
            "Compatibility settings for legacy AFP shares (paths that once hosted "
            "AFP shares that have been converted to SMB shares) must be "
            "consistent with the legacy AFP compatibility settings of any existing SMB "
            f"share that exports the same paths. The new share [{data['name']}] conflicts "
            f"with share [{other['name']}] on path [{other['path']}]."
        )
@private
async def must_reload_globals(self, data):
    """
    Check whether the combination of payload and current SMB settings requires
    that we reconfigure the SMB server globally. There are currently two situations
    where this will happen:

    1) guest access is enabled on a share for the first time. In this case, the SMB
       server must be reconfigured to allow mapping of bad users to the guest account.

    2) the share is a homes share, which is de-facto global in scope.
    """
    if data['guestok']:
        # Guest access requires "map to guest = Bad User" in the running
        # configuration; reload if it is not already set.
        guest_mapping = await self.middleware.call('smb.getparm', 'map to guest', 'GLOBAL')
        if guest_mapping != 'Bad User':
            return True

    return bool(data['home'])
@private
async def close_share(self, share_name):
    """Force-close `share_name` on the running smbd, disconnecting its clients."""
    c = await run([SMBCmd.SMBCONTROL.value, 'smbd', 'close-share', share_name], check=False)
    if c.returncode != 0:
        # Decode once; used both for the pid check and the log message.
        stderr = c.stderr.decode()
        if "Can't find pid" in stderr:
            # smbd is not running. Don't log error message.
            return

        # BUGFIX: logger.warn() is a deprecated alias of logger.warning().
        self.logger.warning('Failed to close smb share [%s]: [%s]',
                            share_name, stderr.strip())
@private
async def toggle_share(self, share_name, available):
    """Set the registry 'available' parameter for `share_name`; close it first when disabling."""
    if not available:
        # Force-disconnect clients before marking the share unavailable.
        await self.close_share(share_name)

    payload = {
        'service': share_name,
        'parameters': {'available': {'parsed': available}}
    }
    await self.middleware.run_in_thread(reg_setparm, payload)
@private
async def validate_aux_params(self, data, schema_name):
    """
    libsmbconf expects to be provided with key-value pairs.

    Raises ValidationErrors (via verrors.check()) if any line is malformed,
    blacklisted, references an unsupported VFS object, or fails libsmbconf
    validation (global options only).
    """
    verrors = ValidationErrors()
    # Parameters whose misconfiguration can prevent smbd from starting are
    # rejected outright.
    aux_blacklist = [
        'state directory',
        'private directory',
        'lock directory',
        'lock dir',
        'config backend',
        'private dir',
        'log level',
        'cache directory',
        'clustering',
        'ctdb socket',
        'socket options',
        'include',
        'wide links',
        'insecure wide links',
        'zfs_core:zfs_block_cloning',
        'zfs_core:zfs_integrity_streams',
    ]
    # VFS objects that existed on FreeBSD-based TrueNAS but are not
    # permitted on SCALE (see error message below).
    freebsd_vfs_objects = [
        'noacl',
    ]
    for entry in data.splitlines():
        # Skip blank lines and comments (both '#' and ';' styles).
        if entry == '' or entry.startswith(('#', ';')):
            continue
        kv = entry.split('=', 1)
        if len(kv) != 2:
            verrors.add(
                f'{schema_name}.auxsmbconf',
                f'Auxiliary parameters must be in the format of "key = value": {entry}'
            )
            continue

        if kv[0].strip() in aux_blacklist:
            """
            This one checks our ever-expanding enumeration of badness.
            Parameters are blacklisted if incorrect values can prevent smbd from starting.
            """
            verrors.add(
                f'{schema_name}.auxsmbconf',
                f'{kv[0]} is a blacklisted auxiliary parameter. Changes to this parameter '
                'are not permitted.'
            )

        if kv[0].strip() == 'vfs objects':
            for i in kv[1].split():
                if i in freebsd_vfs_objects:
                    verrors.add(
                        f'{schema_name}.auxsmbconf',
                        f'[{i}] is not a permitted VFS object on SCALE.'
                    )

        # Only global (service-level) options without a colon can be fed to
        # lib/param for validation.
        if schema_name == 'smb_update.smb_options' and ':' not in kv[0]:
            """
            lib/param doesn't validate params containing a colon.
            """
            param = kv[0].strip()
            value = kv[1].strip()
            try:
                await self.middleware.run_in_thread(
                    lpctx_validate_global_parm, param, value
                )
            except RuntimeError:
                verrors.add(
                    f'{schema_name}.auxsmbconf',
                    f'{param}: unable to set parameter to value: [{value}]'
                )

    verrors.check()
@private
def validate_mount_info(self, verrors, schema, path):
    """
    Validate that `path` and all child mountpoints are suitable for SMB
    sharing: ZFS-backed, with extended attribute support, and with a
    consistent ACL type throughout the tree.

    Issues are recorded on `verrors` under `schema`.
    """
    def get_acl_type(sb_info):
        # Derive the effective acltype from superblock mount options.
        if 'NFS4ACL' in sb_info:
            return 'NFSV4'
        if 'POSIXACL' in sb_info:
            return 'POSIX'
        return 'OFF'

    def validate_child(mnt):
        if '@' in mnt['mount_source']:
            # '@' in the source indicates a ZFS snapshot mount; skip it.
            return

        child_acltype = get_acl_type(mnt['super_opts'])
        if child_acltype != current_acltype:
            verrors.add(
                schema,
                f'ACL type mismatch with child mountpoint at {mnt["mountpoint"]}: '
                f'{this_mnt["mount_source"]} - {current_acltype}, {mnt["mount_source"]} - {child_acltype}'
            )

        if mnt['fs_type'] != 'zfs':
            verrors.add(
                schema, f'{mnt["mountpoint"]}: child mount is not a ZFS dataset.'
            )

        if 'XATTR' not in mnt['super_opts']:
            verrors.add(
                schema, f'{mnt["mountpoint"]}: extended attribute support is disabled on child mount.'
            )

        for c in mnt['children']:
            validate_child(c)

    st = self.middleware.call_sync('filesystem.stat', path)
    if st['type'] == 'SYMLINK':
        verrors.add(schema, f'{path}: is symbolic link.')
        return

    this_mnt = getmnttree(st['mount_id'])
    if this_mnt['fs_type'] != 'zfs':
        # BUGFIX: key was previously misspelled as "fstype", which raised
        # KeyError here instead of reporting the validation error.
        verrors.add(schema, f'{this_mnt["fs_type"]}: path is not a ZFS dataset')

    if not is_child_realpath(path, this_mnt['mountpoint']):
        verrors.add(
            schema,
            f'Mountpoint {this_mnt["mountpoint"]} not within path {path}. '
            'This may indicate that the path of the SMB share contains a '
            'symlink component.'
        )

    if 'XATTR' not in this_mnt['super_opts']:
        verrors.add(schema, 'Extended attribute support is required for SMB shares')

    current_acltype = get_acl_type(this_mnt['super_opts'])
    for child in this_mnt['children']:
        validate_child(child)
@private
async def get_path_field(self, data):
    """Return the share's local path, deriving it from 'path' when not yet present."""
    try:
        return data[self.path_field]
    except KeyError:
        # Derive path_local on demand for payloads that lack it.
        resolved = await self.add_path_local({'path': data['path']})
        return resolved[self.path_field]
@private
async def validate_external_path(self, verrors, name, path):
    """
    Validate an external (DFS proxy) path: a comma-separated list of
    SERVER\\SHARE entries. Issues are recorded on `verrors` under `name`.
    """
    # BUGFIX: str.split(',') never returns an empty list (''.split(',')
    # yields ['']), so the old `len(proxy_list) == 0` check was dead code.
    # Reject an empty path explicitly instead.
    if not path:
        verrors.add(name, 'At least one DFS proxy must be specified')
        return

    for proxy in path.split(','):
        # Exactly one backslash separating server and share, and neither
        # side may be empty.
        if len(proxy.split('\\')) != 2:
            verrors.add(name, f'{proxy}: DFS proxy must be of format SERVER\\SHARE')

        if proxy.startswith('\\') or proxy.endswith('\\'):
            verrors.add(name, f'{proxy}: DFS proxy must be of format SERVER\\SHARE')
@private
async def validate_local_path(self, verrors, name, path):
    # Run the generic SharingService path validation first.
    await super().validate_local_path(verrors, name, path)
    """
    This is a very rough check is to prevent users from sharing unsupported
    filesystems over SMB as behavior with our default VFS options in such
    a situation is undefined.
    """
    try:
        await self.middleware.run_in_thread(
            self.validate_mount_info, verrors, name, path
        )
    except CallError as e:
        # A missing path surfaces as ENOENT with this exact message
        # (presumably raised by filesystem.stat inside validate_mount_info);
        # convert it to a validation error instead of a hard failure.
        if e.errno == errno.ENOENT and e.errmsg == f'Path {path} not found':
            verrors.add(name, 'Path does not exist.')
        else:
            raise
@private
async def validate_share_name(self, name, schema_name, verrors, exist_ok=True):
    """
    Validate `name` against SMB share naming rules.

    Standards for SMB share names are defined in MS-FSCC 2.1.6; we are
    slightly more strict in that we blacklist all unicode control
    characters. When `exist_ok` is False, an existing share with the same
    (case-insensitive) name is also reported as an error.
    """
    if name.lower() in RESERVED_SHARE_NAMES:
        verrors.add(
            f'{schema_name}.name',
            f'{name} is a reserved section name, please select another one'
        )

    if not name:
        verrors.add(
            f'{schema_name}.name',
            'Share name may not be an empty string.'
        )

    bad_chars = INVALID_SHARE_NAME_CHARACTERS & set(name)
    if bad_chars:
        verrors.add(
            f'{schema_name}.name',
            f'Share name contains the following invalid characters: {", ".join(bad_chars)}'
        )

    has_control_chars = any(unicodedata.category(ch) == 'Cc' for ch in name)
    if has_control_chars:
        verrors.add(
            f'{schema_name}.name', 'Share name contains unicode control characters.'
        )

    # 'C=' performs a case-insensitive name comparison.
    if not exist_ok and await self.query([['name', 'C=', name]], {'select': ['name']}):
        verrors.add(
            f'{schema_name}.name', 'Share with this name already exists.', errno.EEXIST
        )
@private
async def validate(self, data, schema_name, verrors, old=None):
    """
    Path is a required key in almost all cases. There is a special edge case for LDAP
    [homes] shares. In this case we allow an empty path. Samba interprets this to mean
    that the path should be dynamically set to the user's home directory on the LDAP server.
    Local user auth to SMB shares is prohibited when LDAP is enabled with a samba schema.
    """
    if await self.home_exists(data['home'], schema_name, verrors, old):
        verrors.add(f'{schema_name}.home',
                    'Only one share is allowed to be a home share.')

    # 'C=' performs a case-insensitive comparison; exclude our own id on update.
    if await self.query([['name', 'C=', data['name']], ['id', '!=', data.get('id', 0)]]):
        verrors.add(f'{schema_name}.name', 'Share names are case-insensitive and must be unique')

    await self.validate_path_field(data, schema_name, verrors)

    if data['auxsmbconf']:
        try:
            await self.validate_aux_params(data['auxsmbconf'],
                                           f'{schema_name}.auxsmbconf')
        except ValidationErrors as errs:
            verrors.add_child(f'{schema_name}.auxsmbconf', errs)

    # Disabling ACL support while a non-trivial ACL exists on disk is rejected.
    if not data['acl'] and not await self.middleware.call('filesystem.acl_is_trivial', data['path']):
        verrors.add(
            f'{schema_name}.acl',
            f'ACL detected on {data["path"]}. ACLs must be stripped prior to creation '
            'of SMB share.'
        )

    if data.get('name') is not None:
        await self.validate_share_name(data['name'], schema_name, verrors)

    if data.get('path_suffix') and len(data['path_suffix'].split('/')) > 2:
        verrors.add(f'{schema_name}.name',
                    'Path suffix may not contain more than two components.')

    # Time machine targets are discovered by clients via mDNS.
    if data['timemachine'] and data['enabled']:
        ngc = await self.middleware.call('network.configuration.config')
        if not ngc['service_announcement']['mdns']:
            verrors.add(
                f'{schema_name}.timemachine',
                'mDNS must be enabled in order to use an SMB share as a time machine target.'
            )

    smb_config = await self.middleware.call('smb.config')

    if data['audit']['enable']:
        if smb_config['enable_smb1']:
            verrors.add(
                f'{schema_name}.audit.enable',
                'SMB auditing is not supported if SMB1 protocol is enabled'
            )
        # Audit watch/ignore lists must reference existing groups.
        for key in ['watch_list', 'ignore_list']:
            for idx, group in enumerate(data['audit'][key]):
                try:
                    await self.middleware.call('group.get_group_obj', {'groupname': group})
                except KeyError:
                    verrors.add(f'{schema_name}.audit.{key}.{idx}',
                                f'{group}: group does not exist.')

    # Legacy AFP compatibility and time machine both depend on the Apple
    # SMB2/3 protocol extensions being enabled globally.
    if data['afp'] and not smb_config['aapl_extensions']:
        verrors.add(
            f'{schema_name}.afp',
            'Apple SMB2/3 protocol extension support is required by this parameter. '
            'This feature may be enabled in the general SMB server configuration.'
        )

    if data['timemachine'] or data['purpose'] in ('TIMEMACHINE', 'ENHANCED_TIMEMACHINE'):
        if not smb_config['aapl_extensions']:
            verrors.add(
                f'{schema_name}.timemachine',
                'Apple SMB2/3 protocol extension support is required by this parameter. '
                'This feature may be enabled in the general SMB server configuration.'
            )
@private
@accepts(Dict('share_validate_payload', Str('name')), roles=['READONLY_ADMIN'])
async def share_precheck(self, data):
    """
    Lightweight pre-creation check: verify SMB authentication is possible
    (AD joined, or at least one local SMB user) and, when a name is given,
    that it is valid and not already in use.
    """
    verrors = ValidationErrors()

    ad_config = await self.middleware.call('activedirectory.config')
    if not ad_config['enable']:
        # Without AD, at least one local SMB user must exist for the share
        # to be accessible at all.
        smb_user_filters = [['smb', '=', True], ['local', '=', True]]
        smb_user_cnt = await self.middleware.call('user.query', smb_user_filters, {'count': True})
        if smb_user_cnt == 0:
            verrors.add(
                'sharing.smb.share_precheck',
                'TrueNAS server must be joined to Active Directory or have '
                'at least one local SMB user before creating an SMB share.'
            )

    if data.get('name') is not None:
        await self.validate_share_name(data['name'], 'sharing.smb.share_precheck', verrors, False)

    verrors.check()
@private
async def home_exists(self, home, schema_name, verrors, old=None):
    """
    Return the datastore rows of any other share configured as a home share
    (truthy if one exists), or None when `home` is not requested.
    """
    if not home:
        return

    filters = [('home', '=', True)]
    if old:
        # On update the share being edited does not conflict with itself.
        filters.append(('id', '!=', old['id']))

    return await self.middleware.call(
        'datastore.query', self._config.datastore,
        filters, {'prefix': self._config.datastore_prefix}
    )
@private
async def add_path_local(self, data):
    """Populate the derived 'path_local' field from 'path' and return `data`."""
    data.update({'path_local': data['path']})
    return data
@private
async def extend(self, data):
    """Expand a datastore row into the API representation of an SMB share."""
    # Host lists are persisted as whitespace-delimited strings.
    data['hostsallow'] = data['hostsallow'].split()
    data['hostsdeny'] = data['hostsdeny'].split()
    if data['fsrvp']:
        # FSRVP implies shadow copy support; reflect that in the output.
        data['shadowcopy'] = True

    # share_acl is managed through the dedicated setacl/getacl endpoints
    # and is not exposed here. pop() with a default replaces the old
    # "if 'share_acl' in data" membership test.
    data.pop('share_acl', None)

    # Ensure the audit object always carries the full set of keys.
    for key, val in (('enable', False), ('watch_list', []), ('ignore_list', [])):
        data['audit'].setdefault(key, val)

    if data['purpose'] in ('TIMEMACHINE', 'ENHANCED_TIMEMACHINE'):
        # backstop to ensure all checks for time machine being enabled succeed
        data['timemachine'] = True

    return await self.add_path_local(data)
@private
async def compress(self, data_in):
    """Convert an API-level share payload into its datastore representation."""
    # Preserve the caller's auxsmbconf across apply_presets(), which may
    # alter it.
    saved_aux = data_in['auxsmbconf']
    data = apply_presets(data_in)

    # Host lists are persisted as whitespace-delimited strings.
    for host_key in ('hostsallow', 'hostsdeny'):
        data[host_key] = ' '.join(data[host_key])

    # Strip runtime-derived fields that are not datastore columns.
    for derived in (self.locked_field, 'path_local'):
        data.pop(derived, None)

    data['auxsmbconf'] = saved_aux
    return data
@accepts(roles=['SHARING_SMB_READ'])
async def presets(self):
    """
    Retrieve pre-defined configuration sets for specific use-cases. These parameter
    combinations are often non-obvious, but beneficial in these scenarios.
    """
    out = {}
    for preset in SMBSharePreset:
        out[preset.name] = preset.value

    return out
@accepts(Dict(
'smb_share_acl',
Str('share_name', required=True),
List('share_acl', items=[
Dict(
'aclentry',
SID('ae_who_sid', default=None),
Dict(
'ae_who_id',
Str('id_type', enum=['USER', 'GROUP', 'BOTH']),
Int('id')
),
Str('ae_perm', enum=['FULL', 'CHANGE', 'READ'], required=True),
Str('ae_type', enum=['ALLOWED', 'DENIED'], required=True)
),
], default=[{'ae_who_sid': 'S-1-1-0', 'ae_perm': 'FULL', 'ae_type': 'ALLOWED'}]),
register=True
), roles=['SHARING_SMB_WRITE'], audit='Setacl SMB share', audit_extended=lambda data: data['share_name'])
@returns(Ref('smb_share_acl'))
async def setacl(self, data):
"""
Set an ACL on `share_name`. This only impacts access through the SMB protocol.
Either ae_who_sid, ae_who_id must, ae_who_str be specified for each ACL entry in the
share_acl. If multiple are specified, preference is in the following order: SID,
unix id, name.
`share_name` the name of the share
`share_acl` a list of ACL entries (dictionaries) with the following keys:
`ae_who_sid` who the ACL entry applies to expressed as a Windows SID
`ae_who_id` Unix ID information for user or group to which the ACL entry applies.
`ae_perm` string representation of the permissions granted to the user or group.
FULL - grants read, write, execute, delete, write acl, and change owner.
CHANGE - grants read, write, execute, and delete.
READ - grants read and execute.
`ae_type` can be ALLOWED or DENIED.
"""
verrors = ValidationErrors()
normalized_acl = []
for idx, entry in enumerate(data['share_acl']):
sid = None
normalized_entry = {
'ae_perm': entry['ae_perm'],
'ae_type': entry['ae_type'],
'ae_who_sid': entry.get('ae_who_sid')
}
if not set(entry.keys()) & set(['ae_who_str', 'ae_who_id']):
verrors.add(
f'sharing_smb_setacl.share_acl.{idx}.sid',
'Either a SID or Unix ID must be specified for ACL entry.'
)
continue
if normalized_entry['ae_who_sid']:
if normalized_entry['ae_who_sid'].startswith((SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX)):
verrors.add(
f'sharing_smb_setacl.share_acl.{idx}.sid',
'SID entries for SMB Share ACLs may not be specially-encoded Unix User IDs or Groups.'
)
else:
normalized_acl.append(normalized_entry)
continue
match entry['ae_who_id']['id_type']:
case 'USER':
method = 'user.query'
key = 'uid'
case 'GROUP' | 'BOTH':
method = 'group.query'
key = 'gid'
case _:
raise ValueError(f'{entry["ae_who_id"]["id_type"]}: unexpected ID type')
sid = (await self.middleware.call(method, [[key, '=', entry['ae_who_id']['id']]], {'get': True}))['sid']
if sid is None:
verrors.add(
f'sharing_smb_setacl.share_acl.{idx}.ae_who_id',
'User or group does must exist and be an SMB account.'
)
continue
if sid.startswith((SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX)):
verrors.add(
f'sharing_smb_setacl.share_acl.{idx}.ae_who_id',
'User or group must be explicitly configured as an SMB '
'account in order to be used in an SMB share ACL.'
)
normalized_entry['ae_who_sid'] = sid
normalized_acl.append(normalized_entry)
if data['share_name'].upper() == 'HOMES':
share_filter = [['home', '=', True]]
else:
share_filter = [['name', 'C=', data['share_name']]]
try:
await self.middleware.call(
'sharing.smb.query', share_filter, {'get': True, 'select': ['home', 'name']}
)
except MatchNotFound:
verrors.add(
'smb_share_acl.share_name',
'Share does not exist'
)
verrors.check()
if not normalized_acl:
try:
await self.middleware.run_in_thread(remove_share_acl, data['share_name'])
except RuntimeError as e:
# TDB library sets arg0 to TDB errno and arg1 to TDB strerr
if e.args[0] != TDBError.NOEXIST:
raise
else:
await self.middleware.call('smb.sharesec.setacl', {
'share_name': data['share_name'],
'share_acl': normalized_acl
})
return await self.getacl({'share_name': data['share_name']})
@accepts(Dict(
    'smb_getacl',
    Str('share_name', required=True)
), roles=['SHARING_SMB_READ'], audit='Getacl SMB share', audit_extended=lambda data: data['share_name'])
@returns(Ref('smb_share_acl'))
async def getacl(self, data):
    """
    Return the SMB share ACL for `share_name`, annotating each entry with
    Unix ID and name information where a SID-to-Unix mapping exists.
    """
    verrors = ValidationErrors()

    # The homes share is stored with home=True, not under the name "HOMES".
    if data['share_name'].upper() == 'HOMES':
        share_filter = [['home', '=', True]]
    else:
        share_filter = [['name', 'C=', data['share_name']]]

    try:
        await self.middleware.call(
            'sharing.smb.query', share_filter, {'get': True, 'select': ['home', 'name']}
        )
    except MatchNotFound:
        verrors.add(
            'sharing_smb_getacl.share_name',
            'Share does not exist'
        )

    verrors.check()

    acl = await self.middleware.call('smb.sharesec.getacl', data['share_name'])
    # S-1-1-0 (Everyone) needs no SID -> Unix conversion.
    sids = set([x['ae_who_sid'] for x in acl['share_acl'] if x['ae_who_sid'] != 'S-1-1-0'])
    if sids:
        try:
            conv = await self.middleware.call('idmap.convert_sids', list(sids))
        except CallError as e:
            # ENOTCONN means that winbindd is not running
            if e.errno != errno.ENOTCONN:
                raise

            conv = {'mapped': {}}
    else:
        conv = None

    for entry in acl['share_acl']:
        if entry.get('ae_who_sid') == 'S-1-1-0':
            entry['ae_who_id'] = None
            entry['ae_who_str'] = 'everyone@'
            continue

        # Unmapped SIDs are returned with null Unix ID / name fields.
        if not (unix_entry := conv['mapped'].get(entry['ae_who_sid'])):
            entry['ae_who_id'] = None
            entry['ae_who_str'] = None
            continue

        entry['ae_who_id'] = {
            'id_type': unix_entry['id_type'],
            'id': unix_entry['id']
        }
        entry['ae_who_str'] = await self.middleware.call(
            'idmap.id_to_name',
            unix_entry['id'],
            unix_entry['id_type']
        )

    return acl
@private
@job(lock='sync_smb_registry')
async def sync_registry(self, job):
    """
    Synchronize registry config with the share configuration in the truenas config
    file. This method simply reconciles lists of shares, removing from and adding to
    the registry as-needed.
    """
    if not await self.middleware.run_in_thread(os.path.exists, SMBPath.GLOBALCONF.platform()):
        # BUGFIX: the two message fragments previously ran together with no
        # separating space ("synchronization.This may indicate ...").
        self.logger.warning("smb.conf does not exist. Skipping registry synchronization. "
                            "This may indicate that SMB service has not completed initialization.")
        return

    db_shares = await self.query()
    for share in db_shares:
        if share['home']:
            # Homes shares are registered under the reserved name "HOMES".
            share['name'] = 'HOMES'

    # Only unlocked, enabled shares with a path belong in the registry.
    active_shares = filter_list(db_shares, [
        ('locked', '=', False),
        ('enabled', '=', True),
        ('path', '!=', '')
    ])

    registry_shares = await self.middleware.run_in_thread(reg_listshares)

    # SMB share names are case-insensitive; compare casefolded sets.
    cf_db = set([x['name'].casefold() for x in db_shares])
    cf_active = set([x['name'].casefold() for x in active_shares])
    cf_reg = set([x.casefold() for x in registry_shares])

    to_add = cf_active - cf_reg
    to_del = cf_reg - cf_active
    # Registry entries still present in the db (locked / disabled shares)
    # are disabled rather than deleted.
    to_preserve = cf_db & to_del
    to_sync = cf_active - to_add

    for share in to_add:
        share_conf = filter_list(active_shares, [['name', 'C=', share]])
        if path_location(share_conf[0][self.path_field]) is FSLocation.LOCAL:
            if not await self.middleware.run_in_thread(os.path.exists, share_conf[0]['path']):
                self.logger.warning("Path [%s] for share [%s] does not exist. "
                                    "Refusing to add share to SMB configuration.",
                                    share_conf[0]['path'], share_conf[0]['name'])
                continue

        try:
            await self.middleware.call('sharing.smb.reg_addshare', share_conf[0])
        except ValueError:
            self.logger.warning("Share [%s] has invalid configuration.", share, exc_info=True)
        except Exception:
            self.logger.warning("Failed to add SMB share [%s] while synchronizing registry config",
                                share, exc_info=True)

    for share in to_del:
        await self.middleware.call('sharing.smb.toggle_share', share, False)
        if share in to_preserve:
            continue

        try:
            await self.middleware.run_in_thread(reg_delshare, share)
        except Exception:
            self.middleware.logger.warning('Failed to remove stale share [%s]',
                                           share, exc_info=True)

    for share in to_sync:
        share_conf = filter_list(active_shares, [['name', 'C=', share]])
        conf_diff = await self.middleware.call('sharing.smb.diff_middleware_and_registry', share, share_conf[0])
        try:
            await self.middleware.call('sharing.smb.apply_conf_diff', share, conf_diff)
        except Exception:
            self.middleware.logger.warning('Failed to sync configuration for share %s', share, exc_info=True)
async def pool_post_import(middleware, pool):
    """
    Makes sure to reload SMB if a pool is imported and there are shares configured for it.
    """
    if pool is None:
        # Boot-time import of all pools: by the time this hook fires,
        # smb.configure has already completed and initialized the SMB
        # service, so kick off a registry sync straight away.
        await middleware.call('smb.disable_acl_if_trivial')
        middleware.create_task(middleware.call('sharing.smb.sync_registry'))
        return

    if not await middleware.call("smb.is_configured"):
        middleware.logger.warning(
            "Skipping SMB share config sync because SMB service "
            "has not been fully initialized."
        )
        return

    pool_mountpoint = f'/mnt/{pool["name"]}'
    # Only resync the registry when at least one share lives on (or under)
    # the freshly imported pool.
    shares_on_pool = await middleware.call('sharing.smb.query', [
        ('OR', [
            ('path', '=', pool_mountpoint),
            ('path', '^', f'{pool_mountpoint}/'),
        ])
    ], {'extra': {'use_cached_locked_datasets': False}})
    if shares_on_pool:
        await middleware.call('smb.disable_acl_if_trivial')
        middleware.create_task(middleware.call('sharing.smb.sync_registry'))
class SMBFSAttachmentDelegate(LockableFSAttachmentDelegate):
    # Attachment delegate so dataset lock/unlock/delete operations can find
    # and toggle SMB shares that live on the affected paths.
    name = 'smb'
    title = 'SMB Share'
    service = 'cifs'
    service_class = SharingSMBService

    async def restart_reload_services(self, attachments):
        """
        libsmbconf will handle any required notifications to clients if
        shares are added or deleted.
        mDNS may need to be reloaded if a time machine share is located on
        the share being attached.
        """
        await self.middleware.call('smb.disable_acl_if_trivial')
        smb_is_configured = await self.middleware.call("smb.is_configured")
        if not smb_is_configured:
            self.logger.warning(
                "Skipping SMB share config sync because SMB service "
                "has not been fully initialized."
            )
            return
        # sync_registry is a job; wait for it to finish before reloading mDNS
        # so advertised shares reflect the new state.
        reg_sync = await self.middleware.call('sharing.smb.sync_registry')
        await reg_sync.wait()
        await self.middleware.call('service.reload', 'mdns')

    async def is_child_of_path(self, resource, path, check_parent, exact_match):
        # Shares without a path can never be children of a dataset path.
        return await super().is_child_of_path(resource, path, check_parent, exact_match) if resource.get(
            self.path_field
        ) else False
async def setup(middleware):
    """Register SMB listen/attachment delegates and the pool post-import hook."""
    listen_delegate = SystemServiceListenMultipleDelegate(middleware, 'smb', 'bindip')
    await middleware.call('interface.register_listen_delegate', listen_delegate)
    await middleware.call(
        'pool.dataset.register_attachment_delegate',
        SMBFSAttachmentDelegate(middleware),
    )
    middleware.register_hook('pool.post_import', pool_post_import, sync=True)
| 76,742 | Python | .py | 1,589 | 36.261171 | 119 | 0.584202 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,482 | cloud_sync.py | truenas_middleware/src/middlewared/middlewared/plugins/cloud_sync.py | from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, OneShotAlertClass
from middlewared.api import api_method
from middlewared.api.current import (CloudCredentialEntry,
CloudCredentialCreateArgs, CloudCredentialCreateResult,
CloudCredentialUpdateArgs, CloudCredentialUpdateResult,
CloudCredentialDeleteArgs, CloudCredentialDeleteResult,
CloudCredentialVerifyArgs, CloudCredentialVerifyResult)
from middlewared.common.attachment import LockableFSAttachmentDelegate
from middlewared.plugins.cloud.crud import CloudTaskServiceMixin
from middlewared.plugins.cloud.model import CloudTaskModelMixin, cloud_task_schema
from middlewared.plugins.cloud.path import get_remote_path, check_local_path
from middlewared.plugins.cloud.remotes import REMOTES, remote_classes
from middlewared.rclone.remote.storjix import StorjIxError
from middlewared.schema import accepts, Bool, Cron, Dict, Int, Password, Patch, Str
from middlewared.service import (
CallError, CRUDService, ValidationError, ValidationErrors, item_method, job, pass_app, private, TaskPathService,
)
import middlewared.sqlalchemy as sa
from middlewared.utils import Popen, run
from middlewared.utils.lang import undefined
from middlewared.utils.path import FSLocation
from middlewared.utils.service.task_state import TaskStateMixin
from middlewared.utils.time_utils import utc_now
from middlewared.validators import validate_schema
import aiorwlock
import asyncio
import base64
import codecs
from collections import namedtuple
import configparser
from Cryptodome import Random
from Cryptodome.Cipher import AES
from Cryptodome.Util import Counter
import enum
import json
import logging
import os
import re
import shlex
import subprocess
import tempfile
# Progress lines emitted by rclone's periodic "--stats" output; two formats
# are matched to cover differing rclone versions.
RE_TRANSF1 = re.compile(r"Transferred:\s*(?P<progress_1>.+), (?P<progress>[0-9]+)%$")
RE_TRANSF2 = re.compile(r"Transferred:\s*(?P<progress_1>.+, )(?P<progress>[0-9]+)%, (?P<progress_2>.+)$")
# "Checks:" stats line; NOTE the percentage group is optional.
RE_CHECKS = re.compile(r"Checks:\s*(?P<checks>[0-9 /]+)(, (?P<progress>[0-9]+)%)?$")
OAUTH_URL = "https://www.truenas.com/oauth"
# (config_path, remote_path, extra_args) as produced by RcloneConfig.__aenter__.
RcloneConfigTuple = namedtuple("RcloneConfigTuple", ["config_path", "remote_path", "extra_args"])
logger = logging.getLogger(__name__)
class RcloneConfig:
    """
    Async context manager that renders a temporary rclone config file (and an
    optional filter file) for a cloud sync task or bare credential, yielding
    an RcloneConfigTuple; temp files are removed on exit.
    """
    def __init__(self, cloud_sync):
        self.cloud_sync = cloud_sync
        self.provider = REMOTES[self.cloud_sync["credentials"]["provider"]]
        self.config = None
        self.tmp_file = None
        self.tmp_file_filter = None

    async def __aenter__(self):
        self.tmp_file = tempfile.NamedTemporaryFile(mode="w+")
        # Make sure only root can read it as there is sensitive data
        os.chmod(self.tmp_file.name, 0o600)
        config = dict(self.cloud_sync["credentials"]["attributes"], type=self.provider.rclone_type)
        config = dict(config, **await self.provider.get_credentials_extra(self.cloud_sync["credentials"]))
        if "pass" in config:
            # rclone expects obscured passwords in its config file.
            config["pass"] = rclone_encrypt_password(config["pass"])
        remote_path = None
        extra_args = await self.provider.get_task_extra_args(self.cloud_sync)
        # Only full tasks carry "attributes"; bare credential verification
        # (e.g. CredentialsService.verify) does not.
        if "attributes" in self.cloud_sync:
            config.update(dict(self.cloud_sync["attributes"], **await self.provider.get_task_extra(self.cloud_sync)))
            # Drop keys explicitly marked as undefined by the provider.
            for k, v in list(config.items()):
                if v is undefined:
                    config.pop(k)
            remote_path = get_remote_path(self.provider, self.cloud_sync["attributes"])
            remote_path = f"remote:{remote_path}"
            if self.cloud_sync["encryption"]:
                # Wrap the remote in an rclone "crypt" backend.
                self.tmp_file.write("[encrypted]\n")
                self.tmp_file.write("type = crypt\n")
                self.tmp_file.write(f"remote = {remote_path}\n")
                self.tmp_file.write("filename_encryption = {}\n".format(
                    "standard" if self.cloud_sync["filename_encryption"] else "off"))
                self.tmp_file.write("password = {}\n".format(
                    rclone_encrypt_password(self.cloud_sync["encryption_password"])))
                if self.cloud_sync["encryption_salt"]:
                    self.tmp_file.write("password2 = {}\n".format(
                        rclone_encrypt_password(self.cloud_sync["encryption_salt"])))
                remote_path = "encrypted:/"
            # Always exclude the ZFS control directory.
            rclone_filter = [
                "- .zfs",
                "- .zfs/**",
            ]
            if self.cloud_sync.get("path"):
                # When syncing a pool root, keep app datasets out of the sync.
                if os.path.dirname(self.cloud_sync.get("path").rstrip("/")) == "/mnt":
                    rclone_filter.extend([
                        "- /ix-applications",
                        "- /ix-apps",
                        "- /ix-applications/**",
                        "- /ix-apps/**",
                    ])
            for item in self.cloud_sync.get("exclude") or []:
                rclone_filter.append(f"- {item}")
            if self.cloud_sync.get("include"):
                for item in self.cloud_sync["include"]:
                    rclone_filter.append(f"+ {item}")
                # With explicit includes, everything else is excluded.
                rclone_filter.append("- *")
            self.tmp_file_filter = tempfile.NamedTemporaryFile(mode="w+")
            self.tmp_file_filter.write("\n".join(rclone_filter))
            self.tmp_file_filter.flush()
            extra_args.extend(["--filter-from", self.tmp_file_filter.name])
        self.tmp_file.write("[remote]\n")
        for k, v in config.items():
            # rclone wants JSON-style true/false for booleans.
            if isinstance(v, bool):
                v = json.dumps(v)
            self.tmp_file.write(f"{k} = {v}\n")
        self.tmp_file.flush()
        self.config = config
        return RcloneConfigTuple(self.tmp_file.name, remote_path, extra_args)

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.config is not None:
            # Give the provider a chance to revoke/clean up ephemeral state.
            await self.provider.cleanup(self.cloud_sync, self.config)
        if self.tmp_file:
            self.tmp_file.close()
        if self.tmp_file_filter:
            self.tmp_file_filter.close()
async def rclone(middleware, job, cloud_sync, dry_run):
    """
    Execute one cloud sync task with rclone, reporting progress through *job*.

    Builds the rclone command line from task settings, optionally snapshots
    the source dataset for PUSH tasks, runs pre/post scripts, and writes any
    refreshed provider credentials back to the datastore.

    Raises CallError on non-zero rclone exit; re-raises CancelledError after
    cleanup when the job is aborted.
    """
    await middleware.call("network.general.will_perform_activity", "cloud_sync")
    path = cloud_sync["path"]
    await check_local_path(middleware, path)
    # Use a temporary file to store rclone file
    async with RcloneConfig(cloud_sync) as config:
        args = [
            "rclone",
            "--config", config.config_path,
            "-v",
            "--stats", "1s",
        ]
        if cloud_sync["attributes"].get("fast_list"):
            args.append("--fast-list")
        if cloud_sync["follow_symlinks"]:
            args.extend(["-L"])
        if cloud_sync["transfers"]:
            args.extend(["--transfers", str(cloud_sync["transfers"])])
        if cloud_sync["bwlimit"]:
            # rclone bandwidth schedule: "HH:MM,rate" pairs, "off" = unlimited.
            args.extend(["--bwlimit", " ".join([
                f"{limit['time']},{str(limit['bandwidth']) + 'b' if limit['bandwidth'] else 'off'}"
                for limit in cloud_sync["bwlimit"]
            ])])
        if dry_run:
            args.extend(["--dry-run"])
        args += config.extra_args
        args += shlex.split(cloud_sync["args"])
        args += [cloud_sync["transfer_mode"].lower()]
        if cloud_sync["create_empty_src_dirs"]:
            args.extend(["--create-empty-src-dirs"])
        snapshot = None
        if cloud_sync["direction"] == "PUSH":
            if cloud_sync["snapshot"]:
                # Snapshot the source dataset and upload from the
                # .zfs/snapshot directory so rclone sees a stable
                # point-in-time copy of the data.
                dataset, recursive = get_dataset_recursive(
                    await middleware.call("zfs.dataset.query", [["type", "=", "FILESYSTEM"]]),
                    cloud_sync["path"],
                )
                snapshot_name = (
                    f"cloud_sync-{cloud_sync.get('id', 'onetime')}-{utc_now().strftime('%Y%m%d%H%M%S')}"
                )
                snapshot = {"dataset": dataset["name"], "name": snapshot_name}
                await middleware.call("zfs.snapshot.create", dict(snapshot, recursive=recursive))
                relpath = os.path.relpath(path, dataset["properties"]["mountpoint"]["value"])
                path = os.path.normpath(os.path.join(
                    dataset["properties"]["mountpoint"]["value"], ".zfs", "snapshot", snapshot_name, relpath
                ))
            args.extend([path, config.remote_path])
        else:
            args.extend([config.remote_path, path])
        # Expose task + credential settings to pre/post scripts as
        # CLOUD_SYNC_* environment variables (bools become "0"/"1").
        env = {}
        for k, v in (
            [(k, v) for (k, v) in cloud_sync.items()
             if k in ["id", "description", "direction", "transfer_mode", "encryption", "filename_encryption",
                      "encryption_password", "encryption_salt", "snapshot"]] +
            list(cloud_sync["credentials"]["attributes"].items()) +
            list(cloud_sync["attributes"].items())
        ):
            if type(v) in (bool,):
                env[f"CLOUD_SYNC_{k.upper()}"] = str(int(v))
            if type(v) in (int, str):
                env[f"CLOUD_SYNC_{k.upper()}"] = str(v)
        env["CLOUD_SYNC_PATH"] = path
        await run_script(job, env, cloud_sync["pre_script"], "Pre-script")
        job.middleware.logger.trace("Running %r", args)
        proc = await Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        # Consume rclone output concurrently for progress reporting / logs.
        check_cloud_sync = asyncio.ensure_future(rclone_check_progress(job, proc))
        cancelled_error = None
        try:
            try:
                await proc.wait()
            except asyncio.CancelledError as e:
                # Job abort: remember the cancellation, kill rclone, then let
                # the finally block clean up before re-raising below.
                cancelled_error = e
                try:
                    await middleware.call("service.terminate_process", proc.pid)
                except CallError as e:
                    job.middleware.logger.warning(f"Error terminating rclone on cloud sync abort: {e!r}")
        finally:
            await asyncio.wait_for(check_cloud_sync, None)
            if snapshot:
                await middleware.call("zfs.snapshot.delete", f"{snapshot['dataset']}@{snapshot['name']}")
        if cancelled_error is not None:
            raise cancelled_error
        if proc.returncode != 0:
            # Include the last few captured output lines in the error.
            message = "".join(job.internal_data.get("messages", []))
            if message and proc.returncode != 1:
                if message and not message.endswith("\n"):
                    message += "\n"
                message += f"rclone failed with exit code {proc.returncode}"
            raise CallError(message)
        await run_script(job, env, cloud_sync["post_script"], "Post-script")
        # Some providers rotate tokens during a run; persist any values rclone
        # wrote back into its config file.
        refresh_credentials = REMOTES[cloud_sync["credentials"]["provider"]].refresh_credentials
        if refresh_credentials:
            credentials_attributes = cloud_sync["credentials"]["attributes"].copy()
            updated = False
            ini = configparser.ConfigParser()
            ini.read(config.config_path)
            for key, value in ini["remote"].items():
                if (key in refresh_credentials and
                        key in credentials_attributes and
                        credentials_attributes[key] != value):
                    logger.debug("Updating credentials attributes key %r", key)
                    credentials_attributes[key] = value
                    updated = True
            if updated:
                await middleware.call("cloudsync.credentials.update", cloud_sync["credentials"]["id"], {
                    "attributes": credentials_attributes
                })
async def run_script(job, env, hook, script_name):
    """
    Run a user-supplied pre/post script, streaming its output into job logs.

    Empty scripts are a no-op. Raises CallError when the script exits non-zero.
    """
    hook = hook.strip()
    if not hook:
        return
    if hook.startswith("#!"):
        # Honor the script's own shebang interpreter; default to bash.
        shebang = shlex.split(hook.splitlines()[0][2:].strip())
    else:
        shebang = ["/bin/bash"]
    # It is ok to do synchronous I/O here since we are operating in ramfs which will never block
    with tempfile.NamedTemporaryFile("w+") as f:
        os.chmod(f.name, 0o700)
        f.write(hook)
        f.flush()
        proc = await Popen(
            shebang + [f.name],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=dict(os.environ, **env),
        )
        # Forward script output to the job log while waiting for it to exit.
        future = asyncio.ensure_future(run_script_check(job, proc, script_name))
        await proc.wait()
        await asyncio.wait_for(future, None)
        if proc.returncode != 0:
            raise CallError(f"{script_name} failed with exit code {proc.returncode}")
async def run_script_check(job, proc, name):
    """Copy each line of *proc*'s stdout into the job log, tagged with *name*."""
    tag = f"[{name}] ".encode("utf-8")
    # readline() returns b"" only at EOF; any real line is truthy.
    while line := await proc.stdout.readline():
        await job.logs_fd_write(tag + line)
# Prevents clogging job logs with progress reports every second
class RcloneVerboseLogCutter:
    """
    Stateful filter for rclone's periodic multi-line progress reports.

    Feed lines through notify(); it returns text to append to the log or
    None to drop the line. Only every `interval`-th complete progress report
    is let through. Call flush() at EOF to emit any partially-read report.
    """
    # PREFIXES[0] matches the "INFO  :" header that starts a report; the rest
    # match the known body lines of a report.
    PREFIXES = (
        re.compile(r"([0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} |<6>)INFO {2}:\s*$"),
        re.compile(r"Transferred:\s+"),
        re.compile(r"Errors:\s+"),
        re.compile(r"Checks:\s+"),
        re.compile(r"Elapsed time:\s+"),
        re.compile(r"Transferring:"),
        re.compile(r" * .+"),
    )

    def __init__(self, interval):
        self.interval = interval
        self.buffer = []    # lines of the progress report currently being read
        self.counter = 0    # number of complete reports seen so far

    def notify(self, line):
        if self.buffer:
            # We are currently reading progress message
            self.buffer.append(line)
            if line.rstrip("\n"):
                # We are still reading message
                matches = any(prefix.match(line) for prefix in self.PREFIXES)
                if matches:
                    # Good, consuming this line to buffer and yet not writing it to logs
                    return None
                else:
                    # This was unexpected form of progress message (or not a progress message at all)
                    new_buffer = []
                    if self.PREFIXES[0].match(line):
                        # This line can be start of new progress message, ejecting it from buffer
                        self.buffer = self.buffer[:-1]
                        # And adding to new buffer
                        new_buffer = [line]
                    # Writing buffer to logs
                    try:
                        return self.flush()
                    finally:
                        self.buffer = new_buffer
            else:
                # This message ends with newline
                try:
                    if self.counter % self.interval == 0:
                        # Every {counter} times we still write this buffer to logs
                        return "".join(self.buffer)
                    else:
                        return None
                finally:
                    # Resetting state, ready to consume next line
                    self.buffer = []
                    self.counter += 1
        else:
            # We are not reading progress message
            if self.PREFIXES[0].match(line):
                # This is the first line of progress message
                self.buffer.append(line)
                return None
            else:
                return line

    def flush(self):
        # Emit whatever is buffered and reset; used at EOF and on unexpected lines.
        try:
            return "".join(self.buffer)
        finally:
            self.buffer = []
async def rclone_check_progress(job, proc):
    """
    Consume rclone stdout until EOF: rate-limit progress spam into job logs,
    keep the last few lines in job.internal_data["messages"] for error
    reporting, and update job progress from "Transferred:"/"Checks:" lines.

    Also detects Dropbox "restricted content" failures and appends an
    explanatory message to the job log.
    """
    cutter = RcloneVerboseLogCutter(300)
    dropbox__restricted_content = False
    try:
        progress1 = None
        transferred1 = None
        progress2 = None
        transferred2 = None
        progress3 = None
        checks = None
        while True:
            read = (await proc.stdout.readline()).decode("utf-8", "ignore")
            if read == "":
                break
            # Keep a rolling window of the last 5 lines for error messages.
            job.internal_data.setdefault("messages", [])
            job.internal_data["messages"] = job.internal_data["messages"][-4:] + [read]
            if "failed to open source object: path/restricted_content/" in read:
                job.internal_data["dropbox__restricted_content"] = True
                dropbox__restricted_content = True
            result = cutter.notify(read)
            if result:
                await job.logs_fd_write(result.encode("utf-8", "ignore"))
            if reg := RE_TRANSF1.search(read):
                progress1 = int(reg.group("progress"))
                transferred1 = reg.group("progress_1")
            if reg := RE_TRANSF2.search(read):
                progress2 = int(reg.group("progress"))
                transferred2 = reg.group("progress_1") + reg.group("progress_2")
            if reg := RE_CHECKS.search(read):
                # FIX: the percentage group in RE_CHECKS is optional, so it may
                # be None (e.g. "Checks: 1/2"); the previous int(None) raised
                # TypeError and killed progress reporting.
                if reg.group("progress") is not None:
                    progress3 = int(reg.group("progress"))
                checks = f'checks: {reg.group("checks")}'
            # Report the most conservative (lowest) of the known percentages.
            progresses = list(filter(lambda v: v is not None, [progress1, progress2, progress3]))
            if progresses:
                job.set_progress(min(progresses), ', '.join(filter(None, [transferred1, transferred2, checks])))
    finally:
        result = cutter.flush()
        if result:
            await job.logs_fd_write(result.encode("utf-8", "ignore"))
        if dropbox__restricted_content:
            message = (
                "Dropbox sync failed due to restricted content being present in one of the folders. This may include\n"
                "copyrighted content or the DropBox manual PDF that appears in the home directory after signing up.\n"
                "All other files were synchronized, but no deletions were performed as synchronization is considered\n"
                "unsuccessful. Please inspect logs to determine which files are considered restricted and exclude them\n"
                "from your synchronization. If you think that files are restricted erroneously, contact\n"
                "Dropbox Support: https://www.dropbox.com/support\n"
            )
            job.internal_data["messages"] = [message]
            await job.logs_fd_write(("\n" + message).encode("utf-8", "ignore"))
def rclone_encrypt_password(password):
    """
    Obscure *password* the way "rclone obscure" does: AES-256-CTR with
    rclone's well-known static key, a random IV prepended, then urlsafe
    base64 without padding. This is config-file obfuscation, not security.
    """
    # Static key hard-coded in rclone itself for config password obscuring.
    secret_key = bytes([
        0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
        0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
        0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
        0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
    ])
    nonce = Random.new().read(AES.block_size)
    # CTR counter seeded from the IV, matching rclone's scheme.
    ctr = Counter.new(128, initial_value=int(codecs.encode(nonce, "hex"), 16))
    cipher = AES.new(secret_key, AES.MODE_CTR, counter=ctr)
    blob = nonce + cipher.encrypt(password.encode("utf-8"))
    return base64.urlsafe_b64encode(blob).decode("ascii").rstrip("=")
def get_dataset_recursive(datasets, directory):
    """
    Pick the mounted dataset whose mountpoint contains *directory*.

    Returns ``(dataset, recursive)`` where ``recursive`` tells whether any
    *other* mounted dataset lives underneath *directory* (so a recursive
    snapshot is needed to capture the whole tree).
    """
    target = directory + "/"
    mounted = []
    for ds in datasets:
        mountpoint = ds["properties"]["mountpoint"]["value"]
        if mountpoint == "none":
            continue
        # Rank candidates by how much leading path they share with the
        # target; the deepest (longest) match is the containing dataset.
        shared = os.path.commonprefix([mountpoint + "/", target])
        mounted.append(dict(ds, prefixlen=len(os.path.dirname(shared))))
    candidates = sorted(
        [ds for ds in mounted if target.startswith(ds["properties"]["mountpoint"]["value"] + "/")],
        key=lambda ds: ds["prefixlen"],
        reverse=True,
    )
    containing = candidates[0]
    has_children = any(
        (ds["properties"]["mountpoint"]["value"] + "/").startswith(target)
        for ds in mounted
        if ds != containing
    )
    return containing, has_children
class _FsLockCore(aiorwlock._RWLockCore):
    # Extends aiorwlock's internal lock core so the owning FsLockManager drops
    # its bookkeeping entry once the last reader/writer releases the lock.
    def _release(self, lock_type):
        # NOTE(review): relies on aiorwlock private internals
        # (_r_state/_w_state/_release); may break across aiorwlock versions.
        if self._r_state == 0 and self._w_state == 0:
            self._fs_manager._remove_lock(self._fs_path)
        return super()._release(lock_type)
class _FsLock(aiorwlock.RWLock):
    # RWLock whose core unregisters itself from FsLockManager when fully released.
    core = _FsLockCore
class FsLockDirection(enum.Enum):
    # Access mode requested for a path lock; maps to aiorwlock's
    # reader_lock/writer_lock in FsLockManager._choose_lock.
    READ = 0
    WRITE = 1
class FsLockManager:
    """
    Hands out per-path read/write locks. Paths that are ancestors or
    descendants of an already-locked path share that lock, so overlapping
    trees cannot be read and written concurrently.
    """
    _lock = _FsLock

    def __init__(self):
        self.locks = {}

    def lock(self, path, direction):
        path = os.path.normpath(path)
        # Reuse an existing lock if it covers an ancestor or descendant path.
        for existing in self.locks:
            if os.path.commonpath([existing, path]) in [existing, path]:
                return self._choose_lock(self.locks[existing], direction)
        new_lock = self._lock()
        # Let the lock core call back into _remove_lock on final release.
        new_lock._reader_lock._lock._fs_manager = self
        new_lock._reader_lock._lock._fs_path = path
        self.locks[path] = new_lock
        return self._choose_lock(new_lock, direction)

    def _choose_lock(self, lock, direction):
        if direction == FsLockDirection.READ:
            return lock.reader_lock
        if direction == FsLockDirection.WRITE:
            return lock.writer_lock
        raise ValueError(direction)

    def _remove_lock(self, path):
        self.locks.pop(path)
class CloudSyncTaskFailedAlertClass(AlertClass, OneShotAlertClass):
    """One-shot alert raised when a cloud sync task fails; keyed by task id."""
    category = AlertCategory.TASKS
    level = AlertLevel.ERROR
    title = "Cloud Sync Task Failed"
    text = "Cloud sync task \"%(name)s\" failed."

    async def create(self, args):
        # One alert per task id.
        return Alert(CloudSyncTaskFailedAlertClass, args, key=args["id"])

    async def delete(self, alerts, query):
        # Drop the alert whose key matches the deleted task's id.
        return [alert for alert in alerts if alert.key != str(query)]

    async def load(self, alerts):
        # On load, keep only alerts whose task still exists.
        task_ids = {str(task["id"]) for task in await self.middleware.call("cloudsync.query")}
        return [alert for alert in alerts if alert.key in task_ids]
def lsjson_error_excerpt(error):
    """Reduce verbose rclone "lsjson" stderr to a short, user-facing excerpt."""
    first_line = error.split("\n")[0]
    # Strip the leading "YYYY/MM/DD HH:MM:SS " timestamp rclone prefixes.
    excerpt = re.sub(r"^[0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} ", "", first_line)
    # Remove well-known rclone boilerplate around the actual cause.
    for boilerplate in (
        "Failed to create file system for \"remote:\": ",
        "ERROR : : error listing: ",
    ):
        excerpt = excerpt.replace(boilerplate, "")
    return excerpt
class CloudCredentialModel(sa.Model):
    # Datastore table for cloud credentials; `attributes` holds the
    # provider-specific secrets and is encrypted at rest.
    __tablename__ = 'system_cloudcredentials'
    id = sa.Column(sa.Integer(), primary_key=True)
    name = sa.Column(sa.String(100))
    provider = sa.Column(sa.String(50))
    attributes = sa.Column(sa.JSON(encrypted=True))
class CredentialsService(CRUDService):
    """CRUD service managing the cloud credentials used by cloud sync tasks."""

    class Config:
        namespace = "cloudsync.credentials"
        datastore = "system.cloudcredentials"
        cli_namespace = "task.cloud_sync.credential"
        role_prefix = "CLOUD_SYNC"
        entry = CloudCredentialEntry

    @api_method(CloudCredentialVerifyArgs, CloudCredentialVerifyResult, roles=["CLOUD_SYNC_WRITE"])
    async def verify(self, data):
        """
        Verify if `attributes` provided for `provider` are authorized by the `provider`.
        """
        await self.middleware.call("network.general.will_perform_activity", "cloud_sync")
        data = dict(data, name="")
        await self._validate("cloud_sync_credentials_create", data)
        # Attempt a cheap remote listing with short timeouts to prove the
        # credentials actually work against the provider.
        async with RcloneConfig({"credentials": data}) as config:
            proc = await run(["rclone", "--config", config.config_path, "--contimeout", "15s", "--timeout", "30s",
                              "lsjson", "remote:"],
                             check=False, encoding="utf8")
            if proc.returncode == 0:
                return {"valid": True}
            else:
                return {"valid": False, "error": proc.stderr, "excerpt": lsjson_error_excerpt(proc.stderr)}

    @api_method(CloudCredentialCreateArgs, CloudCredentialCreateResult)
    async def do_create(self, data):
        """
        Create Cloud Sync Credentials.
        `attributes` is a dictionary of valid values which will be used to authorize with the `provider`.
        """
        await self._validate("cloud_sync_credentials_create", data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            "system.cloudcredentials",
            data,
        )
        return data

    @api_method(CloudCredentialUpdateArgs, CloudCredentialUpdateResult)
    async def do_update(self, id_, data):
        """
        Update Cloud Sync Credentials of `id`.
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)
        await self._validate("cloud_sync_credentials_update", new, id_)
        await self.middleware.call(
            "datastore.update",
            "system.cloudcredentials",
            id_,
            new,
        )
        data["id"] = id_
        return data

    @api_method(CloudCredentialDeleteArgs, CloudCredentialDeleteResult)
    async def do_delete(self, id_):
        """
        Delete Cloud Sync Credentials of `id`.
        """
        # Refuse deletion while any cloud sync task still references this credential.
        tasks = await self.middleware.call(
            "cloudsync.query", [["credentials.id", "=", id_]], {"select": ["id", "credentials", "description"]}
        )
        if tasks:
            raise CallError(f"This credential is used by cloud sync task {tasks[0]['description'] or tasks[0]['id']}")
        return await self.middleware.call(
            "datastore.delete",
            "system.cloudcredentials",
            id_,
        )

    async def _validate(self, schema_name, data, id_=None):
        # Enforce unique credential name and the provider-specific attribute schema.
        verrors = ValidationErrors()
        await self._ensure_unique(verrors, schema_name, "name", data["name"], id_)
        if data["provider"] not in REMOTES:
            verrors.add(f"{schema_name}.provider", "Invalid provider")
        else:
            provider = REMOTES[data["provider"]]
            attributes_verrors = validate_schema(provider.credentials_schema, data["attributes"])
            verrors.add_child(f"{schema_name}.attributes", attributes_verrors)
        verrors.check()
class CloudSyncModel(CloudTaskModelMixin, sa.Model):
    # Cloud sync task table; common columns (path, credential, schedule, ...)
    # come from CloudTaskModelMixin.
    __tablename__ = 'tasks_cloudsync'
    direction = sa.Column(sa.String(10))
    transfer_mode = sa.Column(sa.String(20))
    encryption = sa.Column(sa.Boolean())
    filename_encryption = sa.Column(sa.Boolean())
    encryption_password = sa.Column(sa.EncryptedText())
    encryption_salt = sa.Column(sa.EncryptedText())
    create_empty_src_dirs = sa.Column(sa.Boolean())
    follow_symlinks = sa.Column(sa.Boolean())
class CloudSyncService(TaskPathService, CloudTaskServiceMixin, TaskStateMixin):
    # Per-path advisory lock managers shared by all tasks so overlapping
    # local/remote paths cannot be read and written concurrently.
    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()
    share_task_type = 'CloudSync'
    allowed_path_types = [FSLocation.CLUSTER, FSLocation.LOCAL]
    task_state_methods = ['cloudsync.sync', 'cloudsync.restore']

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync.extend"
        datastore_extend_context = "cloudsync.extend_context"
        cli_namespace = "task.cloud_sync"
        role_prefix = "CLOUD_SYNC"

    # Entry schema: create schema plus computed fields (id, expanded
    # credentials with private attributes, running job, locked state).
    ENTRY = Patch(
        'cloud_sync_create',
        'cloud_sync_entry',
        ('add', Int('id')),
        ("replace", Dict("credentials", additional_attrs=True, private_keys=["attributes"])),
        ("add", Dict("job", additional_attrs=True, null=True)),
        ("add", Bool("locked")),
    )
    @private
    async def extend_context(self, rows, extra):
        # Fetched once per query; used by `extend` to attach running-job info.
        return {
            "task_state": await self.get_task_state_context(),
        }
    @private
    async def extend(self, cloud_sync, context):
        # Normalize a raw datastore row into API shape (see _compress for the inverse).
        cloud_sync["credentials"] = cloud_sync.pop("credential")
        if job := await self.get_task_state_job(context["task_state"], cloud_sync["id"]):
            cloud_sync["job"] = job
        Cron.convert_db_format_to_schedule(cloud_sync)
        return cloud_sync
    @private
    async def _compress(self, cloud_sync):
        # Inverse of `extend`: convert API shape back into a datastore row.
        cloud_sync["credential"] = cloud_sync.pop("credentials")
        Cron.convert_schedule_to_db_format(cloud_sync)
        # Computed fields that must not be written back to the datastore.
        cloud_sync.pop('job', None)
        cloud_sync.pop(self.locked_field, None)
        return cloud_sync
    @private
    async def _basic_validate(self, verrors, name, data):
        # A password is mandatory whenever encryption is requested.
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(f"{name}.encryption_password", "This field is required when encryption is enabled")
        await super()._basic_validate(verrors, name, data)
    @private
    async def _validate(self, app, verrors, name, data):
        await super()._validate(app, verrors, name, data)
        # Snapshot-based sync is only valid for PUSH tasks with a
        # non-destructive transfer mode.
        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot", "This option can only be enabled for PUSH tasks")
            if data["transfer_mode"] == "MOVE":
                verrors.add(f"{name}.snapshot", "This option can not be used for MOVE transfer mode")
    @private
    async def _validate_folder(self, verrors, name, data):
        # PULL: ensure the configured remote folder exists and is a directory
        # by listing its parent and looking for its basename.
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(dict(
                    credentials=data["credentials"],
                    encryption=data["encryption"],
                    filename_encryption=data["filename_encryption"],
                    encryption_password=data["encryption_password"],
                    encryption_salt=data["encryption_salt"],
                    attributes=dict(data["attributes"], folder=folder_parent),
                    args=data["args"],
                ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder", "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder", "Directory does not exist")
        # PUSH: reject read-only providers as sync targets.
        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])
            provider = REMOTES[credentials["provider"]]
            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")
    @accepts(Dict(
        "cloud_sync_create",
        *cloud_task_schema,
        Str("direction", enum=["PUSH", "PULL"], required=True),
        Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Password("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Bool("create_empty_src_dirs", default=False),
        Bool("follow_symlinks", default=False),
        register=True,
    ))
    @pass_app(rest=True)
    async def do_create(self, app, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync using amazon s3 attributes, which is supposed to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """
        verrors = ValidationErrors()
        await self._validate(app, verrors, "cloud_sync_create", cloud_sync)
        verrors.check()
        # Folder validation contacts the remote, so only run it once local
        # validation has passed.
        await self._validate_folder(verrors, "cloud_sync_create", cloud_sync)
        verrors.check()
        cloud_sync = await self._compress(cloud_sync)
        cloud_sync["id"] = await self.middleware.call("datastore.insert", "tasks.cloudsync", cloud_sync)
        # cron drives scheduled runs; restart so the new task is picked up.
        await self.middleware.call("service.restart", "cron")
        return await self.get_instance(cloud_sync["id"])
    @accepts(Int("id"), Patch("cloud_sync_create", "cloud_sync_update", ("attr", {"update": True})))
    @pass_app(rest=True)
    async def do_update(self, app, id_, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self.get_instance(id_)
        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]
        cloud_sync.update(data)
        verrors = ValidationErrors()
        await self._validate(app, verrors, "cloud_sync_update", cloud_sync)
        verrors.check()
        # Folder validation contacts the remote, so it runs second.
        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)
        verrors.check()
        cloud_sync = await self._compress(cloud_sync)
        await self.middleware.call("datastore.update", "tasks.cloudsync", id_, cloud_sync)
        await self.middleware.call("service.restart", "cron")
        return await self.get_instance(id_)
    @accepts(Int("id"))
    async def do_delete(self, id_):
        """
        Deletes cloud_sync entry `id`.
        """
        # Stop any running sync and clear its failure alert before removal.
        await self.middleware.call("cloudsync.abort", id_)
        await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", id_)
        rv = await self.middleware.call("datastore.delete", "tasks.cloudsync", id_)
        await self.middleware.call("service.restart", "cron")
        return rv
    @accepts(Int("credentials_id"), Str("name"), roles=["CLOUD_SYNC_WRITE"])
    async def create_bucket(self, credentials_id, name):
        """
        Creates a new bucket `name` using `credentials_id`.
        """
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")
        provider = REMOTES[credentials["provider"]]
        if not provider.can_create_bucket:
            raise CallError("This provider can't create buckets")
        try:
            await provider.create_bucket(credentials, name)
        except StorjIxError as e:
            # Surface Storj-specific failures as a validation error on this method.
            raise ValidationError("cloudsync.create_bucket", e.errmsg, e.errno)
    @accepts(Int("credentials_id"), roles=["CLOUD_SYNC_WRITE"])
    async def list_buckets(self, credentials_id):
        """List buckets available to `credentials_id`, in lsjson-like format."""
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")
        provider = REMOTES[credentials["provider"]]
        if not provider.buckets:
            raise CallError("This provider does not use buckets")
        if provider.custom_list_buckets:
            # Provider supplies richer bucket metadata than a plain root listing.
            return [
                {
                    "Path": bucket["name"],
                    "Name": bucket["name"],
                    "Size": -1,
                    "MimeType": "inode/directory",
                    "ModTime": bucket["time"],
                    "IsDir": True,
                    "IsBucket": True,
                    "Enabled": bucket["enabled"],
                }
                for bucket in await provider.list_buckets(credentials)
            ]
        # Fall back to listing the remote root with rclone.
        return await self.ls({"credentials": credentials}, "")
    @accepts(Dict(
        "cloud_sync_ls",
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Dict("attributes", required=True, additional_attrs=True),
        Str("args", default=""),
    ), roles=["CLOUD_SYNC_WRITE"])
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If remote supports buckets, path is constructed by two keys "bucket"/"folder" in `attributes`.
        If remote does not support buckets, path is constructed using "folder" key only in `attributes`.
        "folder" is directory name and "bucket" is bucket name for remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`

        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()
        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))
        verrors.check()
        credentials = await self._get_credentials(cloud_sync["credentials"])
        path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])
        return await self.ls(dict(cloud_sync, credentials=credentials), path)
    @private
    async def ls(self, config, path):
        """Run `rclone lsjson` on remote `path`; optionally decrypt filenames."""
        await self.middleware.call("network.general.will_perform_activity", "cloud_sync")
        decrypt_filenames = config.get("encryption") and config.get("filename_encryption")
        async with RcloneConfig(config) as config:
            proc = await run(["rclone", "--config", config.config_path, "lsjson", "remote:" + path],
                             check=False, encoding="utf8", errors="ignore")
            if proc.returncode == 0:
                result = json.loads(proc.stdout)
                for item in result:
                    item["Enabled"] = True
                if decrypt_filenames:
                    if result:
                        # Map encrypted names back to plaintext via cryptdecode;
                        # output format is "encrypted \t decrypted" per line.
                        decrypted_names = {}
                        proc = await run((["rclone", "--config", config.config_path, "cryptdecode", "encrypted:"] +
                                          [item["Name"] for item in result]),
                                         check=False, encoding="utf8", errors="ignore")
                        for line in proc.stdout.splitlines():
                            try:
                                encrypted, decrypted = line.rstrip("\r\n").split(" \t ", 1)
                            except ValueError:
                                continue
                            if decrypted != "Failed to decrypt":
                                decrypted_names[encrypted] = decrypted
                        for item in result:
                            if item["Name"] in decrypted_names:
                                item["Decrypted"] = decrypted_names[item["Name"]]
                return result
            else:
                raise CallError(proc.stderr, extra={"excerpt": lsjson_error_excerpt(proc.stderr)})
@item_method
@accepts(
    Int("id"),
    Dict(
        "cloud_sync_sync_options",
        Bool("dry_run", default=False),
        register=True,
    ),
    roles=["CLOUD_SYNC_WRITE"],
)
@job(lock=lambda args: "cloud_sync:{}".format(args[-1]), lock_queue_size=1, logs=True, abortable=True)
async def sync(self, job, id_, options):
    """
    Run the cloud_sync job `id`, syncing the local data to remote.
    """
    cloud_sync = await self.get_instance(id_)
    # Refuse to run against a locked (encrypted, not unlocked) dataset and
    # raise a one-shot alert so the user can see why the task did not run.
    if cloud_sync["locked"]:
        await self.middleware.call("cloudsync.generate_locked_alert", id_)
        raise CallError("Dataset is locked")
    await self._sync(cloud_sync, options, job)
@accepts(
    Patch("cloud_sync_create", "cloud_sync_sync_onetime"),
    Patch("cloud_sync_sync_options", "cloud_sync_sync_onetime_options"),
    roles=["CLOUD_SYNC_WRITE"],
)
@job(logs=True, abortable=True)
async def sync_onetime(self, job, cloud_sync, options):
    """
    Run cloud sync task without creating it.
    """
    verrors = ValidationErrors()
    # Forbid unprivileged users to execute scripts as root this way.
    for k in ["pre_script", "post_script"]:
        if cloud_sync[k]:
            verrors.add(
                f"cloud_sync_sync_onetime.{k}",
                "This option may not be used for onetime cloud sync operations",
            )
    await self._validate(None, verrors, "cloud_sync_sync_onetime", cloud_sync)
    # Check before folder validation: _validate_folder needs valid credentials/attributes
    verrors.check()
    await self._validate_folder(verrors, "cloud_sync_sync_onetime", cloud_sync)
    verrors.check()
    cloud_sync["credentials"] = await self._get_credentials(cloud_sync["credentials"])
    await self._sync(cloud_sync, options, job)
async def _sync(self, cloud_sync, options, job):
    """
    Execute a (persisted or onetime) cloud sync task while holding both the
    local-path and remote-path filesystem locks, and maintain the
    CloudSyncTaskFailed one-shot alert for persisted tasks.
    """
    credentials = cloud_sync["credentials"]

    local_path = cloud_sync["path"]
    # PUSH reads from the local path; otherwise (PULL) it is written to.
    local_direction = FsLockDirection.READ if cloud_sync["direction"] == "PUSH" else FsLockDirection.WRITE

    remote_path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])
    # Mirror image of the local direction: PULL reads the remote, PUSH writes it.
    remote_direction = FsLockDirection.READ if cloud_sync["direction"] == "PULL" else FsLockDirection.WRITE

    directions = {
        FsLockDirection.READ: "reading",
        FsLockDirection.WRITE: "writing",
    }

    job.set_progress(0, f"Locking local path {local_path!r} for {directions[local_direction]}")
    async with self.local_fs_lock_manager.lock(local_path, local_direction):
        job.set_progress(0, f"Locking remote path {remote_path!r} for {directions[remote_direction]}")
        # Remote locks are namespaced per credential id so different accounts do not contend.
        async with self.remote_fs_lock_manager.lock(f"{credentials['id']}/{remote_path}", remote_direction):
            job.set_progress(0, "Starting")
            try:
                await rclone(self.middleware, job, cloud_sync, options["dry_run"])
                # Onetime tasks carry no "id"; only persisted tasks manage the alert.
                if "id" in cloud_sync:
                    await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", cloud_sync["id"])
            except Exception:
                if "id" in cloud_sync:
                    await self.middleware.call("alert.oneshot_create", "CloudSyncTaskFailed", {
                        "id": cloud_sync["id"],
                        "name": cloud_sync["description"],
                    })
                raise
@item_method
@accepts(Int("id"), roles=["CLOUD_SYNC_WRITE"])
async def abort(self, id_):
    """
    Aborts cloud sync task.
    """
    task = await self.get_instance(id_)
    job = task["job"]
    # Nothing to abort unless a job exists and has not finished yet.
    if job is None or job["state"] not in ["WAITING", "RUNNING"]:
        return False
    await self.middleware.call("core.job_abort", job["id"])
    return True
@accepts(roles=["CLOUD_SYNC_READ"])
async def providers(self):
    """
    Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

    `credentials_schema` is JSON schema for credentials attributes.

    `task_schema` is JSON schema for task attributes.

    `buckets` is a boolean value which is set to "true" if provider supports buckets.

    Example of a single provider:

    [
        {
            "name": "AMAZON_CLOUD_DRIVE",
            "title": "Amazon Cloud Drive",
            "credentials_schema": [
                {
                    "property": "client_id",
                    "schema": {
                        "title": "Amazon Application Client ID",
                        "_required_": true,
                        "type": "string"
                    }
                },
                {
                    "property": "client_secret",
                    "schema": {
                        "title": "Application Key",
                        "_required_": true,
                        "type": "string"
                    }
                }
            ],
            "credentials_oauth": null,
            "buckets": false,
            "bucket_title": "Bucket",
            "task_schema": []
        }
    ]
    """
    providers = []
    for provider in REMOTES.values():
        if provider.credentials_oauth:
            oauth_url = f"{OAUTH_URL}/{(provider.credentials_oauth_name or provider.name.lower())}"
        else:
            oauth_url = None
        providers.append({
            "name": provider.name,
            "title": provider.title,
            "credentials_schema": [
                {
                    "property": field.name,
                    "schema": field.to_json_schema()
                }
                for field in provider.credentials_schema
            ],
            "credentials_oauth": oauth_url,
            "buckets": provider.buckets,
            "bucket_title": provider.bucket_title,
            "task_schema": [
                {
                    "property": field.name,
                    "schema": field.to_json_schema()
                }
                for field in provider.task_schema + self._common_task_schema(provider)
            ],
        })
    providers.sort(key=lambda provider: provider["title"].lower())
    return providers
# Attach each provider's extra methods (e.g. OAuth helpers) onto
# CloudSyncService under a "<providername>_<method>" name so they are
# exposed as regular service methods.
for cls in remote_classes:
    for method_name in cls.extra_methods:
        setattr(CloudSyncService, f"{cls.name.lower()}_{method_name}", getattr(cls, method_name))
class CloudSyncFSAttachmentDelegate(LockableFSAttachmentDelegate):
    """Ties cloud sync tasks to the pool datasets referenced by their `path`."""
    name = 'cloudsync'
    title = 'CloudSync Task'
    service_class = CloudSyncService
    resource_name = 'path'

    async def restart_reload_services(self, attachments):
        # Cloud sync schedules are driven by cron; restart it after attach/detach.
        await self.middleware.call('service.restart', 'cron')
async def setup(middleware):
    # Plugin initialization: register the dataset attachment delegate and the
    # "cloud sync" network activity, then hook job completion so task state is
    # persisted when sync jobs finish.
    await middleware.call('pool.dataset.register_attachment_delegate', CloudSyncFSAttachmentDelegate(middleware))
    await middleware.call('network.general.register_activity', 'cloud_sync', 'Cloud sync')
    await middleware.call('cloudsync.persist_task_state_on_job_complete')
| 45,460 | Python | .py | 975 | 34.628718 | 118 | 0.575449 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,483 | account.py | truenas_middleware/src/middlewared/middlewared/plugins/account.py | from collections import defaultdict
import errno
import glob
import json
import os
import pam
import shlex
import shutil
import stat
import wbclient
from pathlib import Path
from collections import defaultdict
from contextlib import suppress
from sqlalchemy.orm import relationship
from dataclasses import asdict
from middlewared.api import api_method
from middlewared.api.current import *
from middlewared.service import (
CallError, CRUDService, ValidationErrors, no_auth_required, no_authz_required, pass_app, private, filterable, job
)
from middlewared.service_exception import MatchNotFound
import middlewared.sqlalchemy as sa
from middlewared.utils import run, filter_list
from middlewared.utils.crypto import generate_nt_hash, sha512_crypt
from middlewared.utils.directoryservices.constants import DSType, DSStatus
from middlewared.utils.filesystem.copy import copytree, CopyTreeConfig
from middlewared.utils.nss import pwd, grp
from middlewared.utils.nss.nss_common import NssModule
from middlewared.utils.privilege import credential_has_full_admin, privileges_group_mapping
from middlewared.async_validators import check_path_resides_within_volume
from middlewared.utils.sid import db_id_to_rid, DomainRid
from middlewared.plugins.account_.constants import (
ADMIN_UID, ADMIN_GID, SKEL_PATH, DEFAULT_HOME_PATH, DEFAULT_HOME_PATHS
)
from middlewared.plugins.smb_.constants import SMBBuiltin
from middlewared.plugins.idmap_.idmap_constants import (
BASE_SYNTHETIC_DATASTORE_ID,
IDType,
)
from middlewared.plugins.idmap_ import idmap_winbind
from middlewared.plugins.idmap_ import idmap_sss
def pw_checkname(verrors, attribute, name):
    """
    Makes sure the provided `name` is a valid unix name, adding any failures
    to `verrors` under `attribute`.

    Rules enforced:
    - must not begin with "-"
    - "$" is only allowed as the final character
    - a set of shell/NSS-special punctuation characters and any non-ASCII
      character are rejected
    """
    if name.startswith('-'):
        verrors.add(
            attribute,
            'Name must begin with an alphanumeric character and not a '
            '"-".'
        )
    if name.find('$') not in (-1, len(name) - 1):
        verrors.add(
            attribute,
            'The character $ is only allowed as the final character.'
        )
    invalid_chars = ' ,\t:+&#%^()!@~*?<>=|\\/"'
    invalids = []
    for char in name:
        # Neither the special characters above nor non-ASCII characters are
        # allowed. The previous check used `ord(char) & 0x80`, which misses
        # any non-ASCII codepoint whose bit 7 happens to be clear (e.g.
        # U+0100 or most CJK); compare against the ASCII range instead.
        # Deduplicate so each offending character is reported once.
        if (char in invalid_chars or ord(char) > 0x7f) and char not in invalids:
            invalids.append(char)

    if invalids:
        verrors.add(
            attribute,
            f'name contains invalid characters: {", ".join(invalids)}'
        )
def crypted_password(cleartext, algo='SHA512'):
    """Hash `cleartext` for /etc/shadow storage; only SHA512 is supported."""
    if algo != 'SHA512':
        raise ValueError(f'{algo} is unsupported')
    return sha512_crypt(cleartext)
def unixhash_is_valid(unixhash):
    """Return True if `unixhash` is a real hash rather than the "x"/"*" placeholders."""
    return unixhash != "x" and unixhash != "*"
def nt_password(cleartext):
    # Produce the NT hash of the password for SMB authentication (stored in smbhash).
    return generate_nt_hash(cleartext)
def validate_sudo_commands(commands):
    """
    Validate a list of sudo command entries and return a ValidationErrors
    instance (empty when every entry is acceptable).

    'ALL' is only allowed as the sole entry; every other entry must start with
    a normalized absolute path that matches at least one executable on disk.
    """
    verrors = ValidationErrors()
    if 'ALL' in commands and len(commands) != 1:
        verrors.add(str(commands.index('ALL')), 'ALL cannot be used with other commands')
        return verrors

    for i, command in enumerate(commands):
        try:
            # Only the first shell word (the executable) is validated;
            # any remaining words are treated as arguments.
            executable = shlex.split(command)[0]

            if executable == 'ALL':
                continue

            if not executable.startswith('/'):
                raise ValueError('Executable must be an absolute path')

            if os.path.normpath(executable).rstrip('/') != executable.rstrip('/'):
                raise ValueError('Executable path must be normalized')

            # The executable may contain glob characters; validate against
            # whatever currently exists on disk.
            paths = glob.glob(executable)
            if not paths:
                raise ValueError(f'No paths matching {executable!r} exist')

            # A trailing slash skips the executability check — presumably the
            # sudoers "directory of commands" syntax; confirm against sudoers(5).
            if not executable.endswith('/'):
                for item in paths:
                    if os.path.isfile(item) and os.access(item, os.X_OK):
                        break
                else:
                    raise ValueError(f'None of the paths matching {executable!r} is executable')
        except ValueError as e:
            verrors.add(f'{i}', str(e))

    return verrors
def filters_include_ds_accounts(filters):
    """ Check for filters limiting to local accounts """
    for entry in filters:
        if len(entry) < 3:
            # Not a simple <field, op, value> triple (e.g. an OR clause);
            # assume its evaluation may include directory services accounts.
            continue

        field, op, value = entry[0], entry[1], entry[2]

        # Directory services never provide builtin accounts, and `local`
        # explicitly means "not provided by a directory service".
        if field not in ('local', 'builtin'):
            continue
        if op == '=' and value is True:
            return False
        if op == '!=' and value is False:
            return False

    return True
class GroupMembershipModel(sa.Model):
    """Join table linking local users to their auxiliary local groups."""
    __tablename__ = 'account_bsdgroupmembership'

    id = sa.Column(sa.Integer(), primary_key=True)
    bsdgrpmember_group_id = sa.Column(sa.Integer(), sa.ForeignKey("account_bsdgroups.id", ondelete="CASCADE"))
    bsdgrpmember_user_id = sa.Column(sa.Integer(), sa.ForeignKey("account_bsdusers.id", ondelete="CASCADE"))
class UserModel(sa.Model):
    """SQLAlchemy model backing the account.bsdusers datastore (local users)."""
    __tablename__ = 'account_bsdusers'

    id = sa.Column(sa.Integer(), primary_key=True)
    bsdusr_uid = sa.Column(sa.Integer())
    bsdusr_username = sa.Column(sa.String(16), default='User &', unique=True)
    # Unix (crypt) password hash; "*" means no valid password is set
    bsdusr_unixhash = sa.Column(sa.String(128), default='*')
    # NT hash for SMB auth; stored encrypted at rest
    bsdusr_smbhash = sa.Column(sa.EncryptedText(), default='*')
    bsdusr_home = sa.Column(sa.String(255), default=DEFAULT_HOME_PATH)
    bsdusr_shell = sa.Column(sa.String(120), default='/bin/csh')
    bsdusr_full_name = sa.Column(sa.String(120))
    bsdusr_builtin = sa.Column(sa.Boolean(), default=False)
    bsdusr_smb = sa.Column(sa.Boolean(), default=True)
    bsdusr_password_disabled = sa.Column(sa.Boolean(), default=False)
    bsdusr_ssh_password_enabled = sa.Column(sa.Boolean(), default=False)
    bsdusr_locked = sa.Column(sa.Boolean(), default=False)
    bsdusr_sudo_commands = sa.Column(sa.JSON(list))
    bsdusr_sudo_commands_nopasswd = sa.Column(sa.JSON(list))
    # Primary group (FK); auxiliary groups come from the membership join table
    bsdusr_group_id = sa.Column(sa.ForeignKey('account_bsdgroups.id'), index=True)
    bsdusr_email = sa.Column(sa.String(254), nullable=True)
    bsdusr_groups = relationship('GroupModel', secondary=lambda: GroupMembershipModel.__table__)
class UserService(CRUDService):
"""
Manage local users
"""
class Config:
    # CRUDService configuration: local users live in the account.bsdusers
    # datastore; rows are extended with computed fields via user.user_extend.
    datastore = 'account.bsdusers'
    datastore_extend = 'user.user_extend'
    datastore_extend_context = 'user.user_extend_context'
    datastore_prefix = 'bsdusr_'
    cli_namespace = 'account.user'
    role_prefix = 'ACCOUNT'
    entry = UserEntry
@private
async def user_extend_context(self, rows, extra):
    """
    Build the context shared by user_extend across all rows: the local SMB
    server SID, per-user 2FA status, per-user API key ids and the mapping
    of local group id -> roles.
    """
    group_roles = await self.middleware.call('group.query', [['local', '=', True]], {'select': ['id', 'roles']})

    user_api_keys = defaultdict(list)
    for key in await self.middleware.call('api_key.query'):
        if not key['local']:
            continue

        user_api_keys[key['username']].append(key['id'])

    return {
        'server_sid': await self.middleware.call('smb.local_server_sid'),
        # user datastore id -> whether a 2FA secret is configured
        'user_2fa_mapping': ({
            entry['user']['id']: bool(entry['secret']) for entry in await self.middleware.call(
                'datastore.query', 'account.twofactor_user_auth', [['user_id', '!=', None]]
            )
        }),
        'user_api_keys': user_api_keys,
        'roles_mapping': {i['id']: i['roles'] for i in group_roles}
    }
@private
def _read_authorized_keys(self, homedir):
    """Return the user's authorized_keys contents, or None if absent/unreadable."""
    keys_path = f'{homedir}/.ssh/authorized_keys'
    with suppress(FileNotFoundError), open(keys_path) as f:
        try:
            return f.read().strip()
        except UnicodeDecodeError:
            self.logger.warning('Invalid encoding detected in authorized_keys file')
@private
async def user_extend(self, user, ctx):
    """Convert a raw datastore row into the full user.query entry using `ctx`."""
    # Flatten auxiliary group membership to a list of group ids
    user['groups'] = [g['id'] for g in user['groups']]

    # Normalize email, empty is really null
    if user['email'] == '':
        user['email'] = None

    # Get authorized keys
    user['sshpubkey'] = await self.middleware.run_in_thread(self._read_authorized_keys, user['home'])

    # Builtin accounts and the local admin account are treated as immutable
    user['immutable'] = user['builtin'] or (user['uid'] == ADMIN_UID)
    user['twofactor_auth_configured'] = bool(ctx['user_2fa_mapping'][user['id']])

    # Union of the roles granted through the primary and auxiliary groups
    user_roles = set()
    for g in user['groups'] + [user['group']['id']]:
        if not (entry := ctx['roles_mapping'].get(g)):
            continue

        user_roles |= set(entry)

    # Only SMB users get a SID (server SID + RID derived from the DB id)
    if user['smb']:
        sid = f'{ctx["server_sid"]}-{db_id_to_rid(IDType.USER, user["id"])}'
    else:
        sid = None

    user.update({
        'local': True,
        'id_type_both': False,
        'sid': sid,
        'roles': list(user_roles),
        'api_keys': ctx['user_api_keys'][user['username']]
    })
    return user
@private
def user_compress(self, user):
    """Strip computed/read-only fields so `user` matches the datastore schema."""
    computed_fields = (
        'api_keys',
        'local',
        'id_type_both',
        'sid',
        'immutable',
        'home_create',
        'roles',
        'twofactor_auth_configured',
    )
    for field in computed_fields:
        user.pop(field, None)
    return user
async def query(self, filters, options):
"""
Query users with `query-filters` and `query-options`.
If users provided by Active Directory or LDAP are not desired, then
"local", "=", True should be added to filters.
"""
ds_users = []
options = options or {}
options['extend'] = self._config.datastore_extend
options['extend_context'] = self._config.datastore_extend_context
options['prefix'] = self._config.datastore_prefix
datastore_options = options.copy()
datastore_options.pop('count', None)
datastore_options.pop('get', None)
datastore_options.pop('limit', None)
datastore_options.pop('offset', None)
datastore_options.pop('select', None)
if filters_include_ds_accounts(filters):
ds = await self.middleware.call('directoryservices.status')
if ds['type'] is not None and ds['status'] == DSStatus.HEALTHY.name:
ds_users = await self.middleware.call(
'directoryservices.cache.query', 'USER', filters, options.copy()
)
match DSType(ds['type']):
case DSType.AD:
# For AD users, we will not have 2FA attribute normalized so let's do that
ad_users_2fa_mapping = await self.middleware.call('auth.twofactor.get_ad_users')
for index, user in enumerate(filter(
lambda u: not u['local'] and 'twofactor_auth_configured' not in u, ds_users)
):
ds_users[index]['twofactor_auth_configured'] = bool(ad_users_2fa_mapping.get(user['sid']))
case _:
# FIXME - map twofactor_auth_configured hint for LDAP users
pass
result = await self.middleware.call(
'datastore.query', self._config.datastore, [], datastore_options
)
return await self.middleware.run_in_thread(
filter_list, result + ds_users, filters, options
)
@private
def validate_homedir_mountinfo(self, verrors, schema, home_path):
    """Verify `home_path` lives on a writable ZFS filesystem; record errors otherwise."""
    statfs = self.middleware.call_sync('filesystem.statfs', home_path.as_posix())
    if 'RO' in statfs['flags']:
        verrors.add(f'{schema}.home', 'Path has the ZFS readonly property set.')
    elif statfs['fstype'] != 'zfs':
        verrors.add(f'{schema}.home', 'Path is not on a ZFS filesystem')
    else:
        return True
    return False
@private
def validate_homedir_path(self, verrors, schema, data, users):
    """
    Validate a prospective home directory path, adding failures to `verrors`.

    `users` is the existing user list (used to detect homedir collisions).
    Returns True only when the path already exists and passed every check —
    callers use this to decide whether permissions may be applied to it.
    """
    p = Path(data['home'])

    if not p.is_absolute():
        verrors.add(f'{schema}.home', '"Home Directory" must be an absolute path.')
        return False

    if p.is_file():
        verrors.add(f'{schema}.home', '"Home Directory" cannot be a file.')
        return False

    if ':' in data['home']:
        verrors.add(f'{schema}.home', '"Home Directory" cannot contain colons (:).')
        return False

    # The placeholder defaults (e.g. /nonexistent) need no further validation
    if data['home'] in DEFAULT_HOME_PATHS:
        return False

    if not p.exists():
        if data.get('home_create', False):
            verrors.add(
                f'{schema}.home',
                f'{data["home"]}: path specified to use for home directory creation does not '
                'exist. TrueNAS uses the provided path as the parent directory of the '
                'newly-created home directory.'
            )
        else:
            verrors.add(
                f'{schema}.home',
                f'{data["home"]}: path specified to use as home directory does not exist.'
            )

        if not p.parent.exists():
            verrors.add(
                f'{schema}.home',
                f'{p.parent}: parent path of specified home directory does not exist.'
            )

        if not verrors:
            self.validate_homedir_mountinfo(verrors, schema, p.parent)
    elif self.validate_homedir_mountinfo(verrors, schema, p):
        # Path exists on a writable ZFS filesystem; it must also be mutable
        if self.middleware.call_sync('filesystem.is_immutable', data['home']):
            verrors.add(
                f'{schema}.home',
                f'{data["home"]}: home directory path is immutable.'
            )

    # A home directory may only be assigned to a single user
    in_use = filter_list(users, [('home', '=', data['home'])])
    if in_use:
        verrors.add(
            f'{schema}.home',
            f'{data["home"]}: homedir already used by {in_use[0]["username"]}.',
            errno.EEXIST
        )

    if not data['home'].startswith('/mnt'):
        verrors.add(
            f'{schema}.home',
            '"Home Directory" must begin with /mnt or set to '
            f'{DEFAULT_HOME_PATH}.'
        )
    elif data['home'] in ('/mnt', '/mnt/'):
        verrors.add(
            f'{schema}.home',
            '"Home Directory" cannot be at root of "/mnt"'
        )

    if verrors:
        # if we're already going to error out, skip more expensive tests
        return False

    if not any(
        data['home'] == i['path'] or data['home'].startswith(i['path'] + '/')
        for i in self.middleware.call_sync('pool.query')
    ):
        verrors.add(
            f'{schema}.home',
            f'The path for the home directory "({data["home"]})" '
            'must include a volume or dataset.'
        )
    elif self.middleware.call_sync('pool.dataset.path_in_locked_datasets', data['home']):
        verrors.add(
            f'{schema}.home',
            'Path component for "Home Directory" is currently encrypted and locked'
        )
    elif len(p.resolve().parents) == 2 and not data.get('home_create'):
        # e.g. /mnt/tank — the pool mountpoint itself is not a valid homedir
        verrors.add(
            f'{schema}.home',
            f'The specified path is a ZFS pool mountpoint "({data["home"]})".'
        )

    return p.exists() and not verrors
@private
def setup_homedir(self, path, username, mode, uid, gid, create=False):
    """
    Create (when `create` is True) and/or set ownership and `mode` on the
    user's home directory. Returns the final home directory path. When
    `create` is True, `path` is treated as the parent directory and
    `<path>/<username>` is created beneath it.
    """
    homedir_created = False

    if create:
        target = os.path.join(path, username)
        try:
            # We do not raise exception on chmod error here because the
            # target path may have RESTRICTED aclmode. Correct permissions
            # get set in below `filesystem.setperm` call which strips ACL
            # if present to strictly enforce `mode`.
            self.middleware.call_sync('filesystem.mkdir', {
                'path': target,
                'options': {'mode': mode, 'raise_chmod_error': False}
            })
        except CallError as e:
            if e.errno == errno.EEXIST and not os.path.isdir(target):
                raise CallError(
                    'Path for home directory already '
                    'exists and is not a directory',
                    errno.EEXIST
                )
        except OSError as oe:
            raise CallError(
                'Failed to create the home directory '
                f'({target}) for user: {oe}'
            )
        else:
            homedir_created = True
    else:
        target = path

    try:
        setperm_job = self.middleware.call_sync('filesystem.setperm', {
            'path': target,
            'mode': mode,
            'uid': uid,
            'gid': gid,
            'options': {'stripacl': True}
        })
        setperm_job.wait_sync(raise_error=True)
    except Exception:
        # Roll back a directory we created; never remove a pre-existing one
        if homedir_created:
            shutil.rmtree(target)
        raise

    return target
@api_method(UserCreateArgs, UserCreateResult, audit='Create user', audit_extended=lambda data: data['username'])
def do_create(self, data):
    """
    Create a new user.
    """
    verrors = ValidationErrors()

    # Exactly one of `group` (existing primary group) or `group_create` is required
    if (
        not data.get('group') and not data.get('group_create')
    ) or (
        data.get('group') is not None and data.get('group_create')
    ):
        verrors.add(
            'user_create.group',
            'Enter either a group name or create a new group to '
            'continue.',
            errno.EINVAL
        )

    group_ids = []
    if data.get('group'):
        group_ids.append(data['group'])
    if data.get('groups'):
        group_ids.extend(data['groups'])

    self.middleware.call_sync('user.common_validation', verrors, data, 'user_create', group_ids)

    if data.get('sshpubkey') and not data['home'].startswith('/mnt'):
        verrors.add(
            'user_create.sshpubkey',
            'The home directory is not writable. Leave this field blank.'
        )

    verrors.check()

    create = data.pop('group_create')

    group_created = False

    if create:
        # Reuse an existing local group with the user's name, else create one
        group = self.middleware.call_sync('group.query', [
            ('group', '=', data['username']),
            ('local', '=', True)
        ])
        if group:
            group = group[0]
        else:
            group = self.middleware.call_sync('group.create_internal', {
                'name': data['username'],
                'smb': False,
                'sudo_commands': [],
                'sudo_commands_nopasswd': [],
            }, False)
            group = self.middleware.call_sync('group.query', [
                ('id', '=', group), ('local', '=', True)
            ])[0]
            group_created = True

        data['group'] = group['id']
    else:
        group = self.middleware.call_sync('group.query', [('id', '=', data['group'])])
        if not group:
            raise CallError(f'Group {data["group"]} not found')
        group = group[0]

    if data['smb']:
        # SMB users are implicitly members of builtin_users
        data['groups'].append((self.middleware.call_sync(
            'group.query', [('group', '=', 'builtin_users'), ('local', '=', True)], {'get': True},
        ))['id'])

    if data.get('uid') is None:
        data['uid'] = self.middleware.call_sync('user.get_next_uid')

    # NOTE(review): `new_homedir` is never set True anywhere below, so the
    # rollback `shutil.rmtree` branch in the except clause appears dead —
    # confirm whether a created homedir should be removed on insert failure.
    new_homedir = False
    home_mode = data.pop('home_mode')
    if data['home'] and data['home'] not in DEFAULT_HOME_PATHS:
        try:
            data['home'] = self.setup_homedir(
                data['home'],
                data['username'],
                home_mode,
                data['uid'],
                group['gid'],
                data['home_create']
            )
        except Exception:
            # Homedir setup failed, we should remove any auto-generated group
            if group_created:
                self.middleware.call_sync('group.delete', data['group'])

            raise

    pk = None  # Make sure pk exists to rollback in case of an error
    data = self.user_compress(data)
    try:
        self.__set_password(data)
        sshpubkey = data.pop('sshpubkey', None)  # datastore does not have sshpubkey

        pk = self.middleware.call_sync('datastore.insert', 'account.bsdusers', data, {'prefix': 'bsdusr_'})
        # Every local user gets a (possibly empty) 2FA secret row
        self.middleware.call_sync(
            'datastore.insert', 'account.twofactor_user_auth', {
                'secret': None,
                'user': pk,
            }
        )
    except Exception:
        if pk is not None:
            self.middleware.call_sync('datastore.delete', 'account.bsdusers', pk)
        if new_homedir:
            # Be as atomic as possible when creating the user if
            # commands failed to execute cleanly.
            shutil.rmtree(data['home'])
        raise

    self.middleware.call_sync('service.reload', 'ssh')
    self.middleware.call_sync('service.reload', 'user')

    if data['smb']:
        self.middleware.call_sync('smb.update_passdb_user', data | {'id': pk})

    # Populate the new home directory from the skeleton; files prefixed
    # "dot" become dotfiles (e.g. dotbashrc -> .bashrc)
    if os.path.isdir(SKEL_PATH) and os.path.exists(data['home']) and data['home'] not in DEFAULT_HOME_PATHS:
        for f in os.listdir(SKEL_PATH):
            if f.startswith('dot'):
                dest_file = os.path.join(data['home'], f[3:])
            else:
                dest_file = os.path.join(data['home'], f)
            if not os.path.exists(dest_file):
                shutil.copyfile(os.path.join(SKEL_PATH, f), dest_file)
                chown_job = self.middleware.call_sync('filesystem.chown', {
                    'path': dest_file,
                    'uid': data['uid'],
                    'gid': group['gid'],
                })
                chown_job.wait_sync()

        data['sshpubkey'] = sshpubkey
        try:
            self.update_sshpubkey(data['home'], data, group['group'])
        except PermissionError as e:
            self.logger.warn('Failed to update authorized keys', exc_info=True)
            raise CallError(f'Failed to update authorized keys: {e}')

    return pk
@api_method(UserUpdateArgs, UserUpdateResult, audit='Update user', audit_callback=True)
@pass_app()
def do_update(self, app, audit_callback, pk, data):
    """
    Update attributes of an existing user.
    """
    if pk > BASE_SYNTHETIC_DATASTORE_ID:
        # datastore ids for directory services are created by adding the
        # posix ID to a base value so that we can use getpwuid / getgrgid to
        # convert back to a username / group name
        try:
            username = self.middleware.call_sync(
                'user.get_user_obj', {'uid': pk - BASE_SYNTHETIC_DATASTORE_ID}
            )['pw_name']
        except KeyError:
            username = 'UNKNOWN'

        audit_callback(username)
        raise CallError(
            'Users provided by a directory service must be modified through the identity provider '
            '(LDAP server or domain controller).', errno.EPERM
        )

    user = self.middleware.call_sync('user.get_instance', pk)
    audit_callback(user['username'])

    # 2FA renewal enforcement below only applies when a user edits themselves
    if app and app.authenticated_credentials.is_user_session:
        same_user_logged_in = user['username'] == (self.middleware.call_sync('auth.me', app=app))['pw_name']
    else:
        same_user_logged_in = False

    verrors = ValidationErrors()

    if data.get('password_disabled'):
        try:
            self.middleware.call_sync('privilege.before_user_password_disable', user)
        except CallError as e:
            verrors.add('user_update.password_disabled', e.errmsg)

    if 'group' in data:
        group = self.middleware.call_sync('datastore.query', 'account.bsdgroups', [
            ('id', '=', data['group'])
        ])
        if not group:
            verrors.add('user_update.group', f'Group {data["group"]} not found', errno.ENOENT)
        group = group[0]
    else:
        group = user['group']
    # NOTE(review): the two branches above yield dicts with different key
    # shapes (raw datastore row with bsdgrp_ prefix vs extended entry);
    # later uses like group['bsdgrp_group'] depend on this — verify.
    user['group'] = group['id']

    if same_user_logged_in and (
        self.middleware.call_sync('auth.twofactor.config')
    )['enabled'] and not user['twofactor_auth_configured'] and not data.get('renew_twofactor_secret'):
        verrors.add(
            'user_update.renew_twofactor_secret',
            'Two-factor authentication is enabled globally but not configured for this user.'
        )

    group_ids = [group['id']]
    if data.get('groups'):
        group_ids.extend(data['groups'])
    else:
        group_ids.extend(user['groups'])

    self.middleware.call_sync('user.common_validation', verrors, data, 'user_update', group_ids, user)

    # Remember the existing homedir mode so it can be preserved by default
    try:
        st = os.stat(user.get("home", DEFAULT_HOME_PATH)).st_mode
        old_mode = f'{stat.S_IMODE(st):03o}'
    except FileNotFoundError:
        old_mode = None

    home = data.get('home') or user['home']
    had_home = user['home'] not in DEFAULT_HOME_PATHS
    has_home = home not in DEFAULT_HOME_PATHS
    # root user and admin users are an exception to the rule
    if data.get('sshpubkey'):
        if not (
            user['uid'] in [0, ADMIN_UID] or
            self.middleware.call_sync('filesystem.is_dataset_path', home)
        ):
            verrors.add('user_update.sshpubkey', 'Home directory is not writable, leave this blank"')

    # Do not allow attributes to be changed for builtin user
    if user['immutable']:
        if 'home_mode' in data:
            verrors.add('user_update.home_mode', 'This attribute cannot be changed')

        for i in ('group', 'home', 'username', 'smb'):
            if i in data and data[i] != user[i]:
                verrors.add(f'user_update.{i}', 'This attribute cannot be changed')

    if not user['smb'] and data.get('smb') and not data.get('password'):
        # Changing from non-smb user to smb user requires re-entering password.
        verrors.add('user_update.smb',
                    'Password must be changed in order to enable SMB authentication')

    verrors.check()

    # Decide whether the passdb (SMB) entry must be rewritten or removed
    must_change_pdb_entry = False
    for k in ('username', 'password', 'locked'):
        new_val = data.get(k)
        old_val = user.get(k)
        if new_val is not None and old_val != new_val:
            if k == 'username':
                # A renamed user gets a fresh passdb entry under the new name
                try:
                    self.middleware.call_sync("smb.remove_passdb_user", old_val, user['sid'])
                except Exception:
                    self.logger.debug("Failed to remove passdb entry for user [%s]",
                                      old_val, exc_info=True)

            must_change_pdb_entry = True

    if user['smb'] is True and data.get('smb') is False:
        try:
            must_change_pdb_entry = False
            self.middleware.call_sync("smb.remove_passdb_user", user['username'], user['sid'])
        except Exception:
            self.logger.debug("Failed to remove passdb entry for user [%s]",
                              user['username'], exc_info=True)

    if user['smb'] is False and data.get('smb') is True:
        must_change_pdb_entry = True

    # Copy the home directory if it changed
    home_copy = False
    home_old = None
    if has_home and 'home' in data:
        if data.get('home_create', False):
            data['home'] = os.path.join(data['home'], data.get('username') or user['username'])

        if had_home and user['home'] != data['home']:
            home_copy = True
            home_old = user['home']

    # After this point user dict has values from data
    user.update(data)

    mode_to_set = user.get('home_mode')
    if not mode_to_set:
        mode_to_set = '700' if old_mode is None else old_mode

    # squelch any potential problems when this occurs
    if has_home:
        self.middleware.call_sync('user.recreate_homedir_if_not_exists', user, group, mode_to_set)

    home_mode = user.pop('home_mode', None)
    if user['immutable']:
        home_mode = None

    try:
        update_sshpubkey_args = [
            home_old if home_copy else user['home'], user, group['bsdgrp_group'],
        ]
        self.update_sshpubkey(*update_sshpubkey_args)
    except PermissionError as e:
        self.logger.warn('Failed to update authorized keys', exc_info=True)
        raise CallError(f'Failed to update authorized keys: {e}')
    else:
        if user['uid'] == 0:
            # Keep root's authorized_keys in sync on an HA standby node
            if self.middleware.call_sync('failover.licensed'):
                try:
                    self.middleware.call_sync(
                        'failover.call_remote', 'user.update_sshpubkey', update_sshpubkey_args
                    )
                except Exception:
                    self.logger.error('Failed to sync root ssh pubkey to standby node', exc_info=True)

    if home_copy:
        """
        Background copy of user home directory to new path as the user in question.
        """
        self.middleware.call_sync(
            'user.do_home_copy', home_old, user['home'], user['username'], home_mode, user['uid']
        )
    elif has_home and home_mode is not None:
        """
        A non-recursive call to set permissions should return almost immediately.
        """
        perm_job = self.middleware.call_sync('filesystem.setperm', {
            'path': user['home'],
            'mode': home_mode,
            'options': {'stripacl': True},
        })
        perm_job.wait_sync()

    user.pop('sshpubkey', None)
    self.__set_password(user)

    user = self.user_compress(user)
    self.middleware.call_sync('datastore.update', 'account.bsdusers', pk, user, {'prefix': 'bsdusr_'})

    self.middleware.call_sync('service.reload', 'ssh')
    self.middleware.call_sync('service.reload', 'user')
    if user['smb'] and must_change_pdb_entry:
        self.middleware.call_sync('smb.update_passdb_user', user)

    return pk
@private
def recreate_homedir_if_not_exists(self, user, group, mode):
    """Recreate the user's home directory with correct ownership if it vanished."""
    # sigh, nothing is stopping someone from removing the homedir
    # from the CLI so recreate the original directory in this case
    if not os.path.isdir(user['home']):
        if os.path.exists(user['home']):
            raise CallError(f'{user["home"]!r} already exists and is not a directory')

        self.logger.debug('Homedir %r for %r does not exist so recreating it', user['home'], user['username'])
        try:
            os.makedirs(user['home'])
        except Exception:
            raise CallError(f'Failed recreating "{user["home"]}"')
        else:
            # Apply ownership and `mode`, stripping any inherited ACL
            self.middleware.call_sync('filesystem.setperm', {
                'path': user['home'],
                'uid': user['uid'],
                'gid': group['bsdgrp_gid'],
                'mode': mode,
                'options': {'stripacl': True},
            }).wait_sync(raise_error=True)
@api_method(UserDeleteArgs, UserDeleteResult, audit='Delete user', audit_callback=True)
def do_delete(self, audit_callback, pk, options):
    """
    Delete user `id`.

    The `delete_group` option deletes the user primary group if it is not being used by
    any other user.
    """
    if pk > BASE_SYNTHETIC_DATASTORE_ID:
        # datastore ids for directory services are created by adding the
        # posix ID to a base value so that we can use getpwuid / getgrgid to
        # convert back to a username / group name
        try:
            username = self.middleware.call_sync(
                'user.get_user_obj', {'uid': pk - BASE_SYNTHETIC_DATASTORE_ID}
            )['pw_name']
        except KeyError:
            username = 'UNKNOWN'

        audit_callback(username)
        raise CallError(
            'Users provided by a directory service must be deleted from the identity provider '
            '(LDAP server or domain controller).', errno.EPERM
        )

    user = self.middleware.call_sync('user.get_instance', pk)
    audit_callback(user['username'])

    if user['builtin']:
        raise CallError('Cannot delete a built-in user', errno.EINVAL)

    if user['immutable']:
        raise CallError('Cannot delete an immutable user', errno.EINVAL)

    # e.g. refuse to delete the last user of a privileged group
    self.middleware.call_sync('privilege.before_user_delete', user)

    if options['delete_group'] and not user['group']['bsdgrp_builtin']:
        # Only delete the primary group when no other member or user references it
        count = self.middleware.call_sync(
            'datastore.query', 'account.bsdgroupmembership',
            [('group', '=', user['group']['id'])], {'prefix': 'bsdgrpmember_', 'count': True}
        )
        count2 = self.middleware.call_sync(
            'datastore.query', 'account.bsdusers',
            [('group', '=', user['group']['id']), ('id', '!=', pk)], {'prefix': 'bsdusr_', 'count': True}
        )
        if count == 0 and count2 == 0:
            try:
                self.middleware.call_sync('group.delete', user['group']['id'])
            except Exception:
                self.logger.warn(f'Failed to delete primary group of {user["username"]}', exc_info=True)

    if user['home'] and user['home'] not in DEFAULT_HOME_PATHS:
        # Best-effort removal of SSH keys; the homedir itself is preserved
        try:
            shutil.rmtree(os.path.join(user['home'], '.ssh'))
        except Exception:
            pass

    if user['smb']:
        self.middleware.call_sync('smb.remove_passdb_user', user['username'], user['sid'])

    # TODO: add a hook in CIFS service
    cifs = self.middleware.call_sync('datastore.query', 'services.cifs', [], {'prefix': 'cifs_srv_'})
    if cifs:
        cifs = cifs[0]
        if cifs['guest'] == user['username']:
            self.middleware.call_sync(
                'datastore.update', 'services.cifs', cifs['id'], {'guest': 'nobody'}, {'prefix': 'cifs_srv_'}
            )

    # Clean up any stored web UI preferences for this uid
    if attributes := self.middleware.call_sync('datastore.query', 'account.bsdusers_webui_attribute',
                                               [['uid', '=', user['uid']]]):
        self.middleware.call_sync('datastore.delete', 'account.bsdusers_webui_attribute', attributes[0]['id'])

    self.middleware.call_sync('datastore.delete', 'account.bsdusers', pk)
    self.middleware.call_sync('service.reload', 'ssh')
    self.middleware.call_sync('service.reload', 'user')
    # Drop the cached UID->SID mapping; a stale entry is harmless if absent
    try:
        self.middleware.call_sync('idmap.gencache.del_idmap_cache_entry', {
            'entry_type': 'UID2SID',
            'entry': user['uid']
        })
    except MatchNotFound:
        pass

    return pk
@api_method(UserShellChoicesArgs, UserShellChoicesResult)
def shell_choices(self, group_ids):
    """
    Return the shells that may be assigned to a user via `user.create` /
    `user.update`, as a mapping of absolute path -> display name.

    `group_ids` is a list of local group IDs for the user; membership in a
    group that grants privileges additionally unlocks the TrueNAS CLI shells.
    """
    rows = self.middleware.call_sync(
        "datastore.query",
        "account.bsdgroups",
        [("id", "in", group_ids)],
        {"prefix": "bsdgrp_"},
    )
    gids = {row["gid"] for row in rows}

    choices = {
        '/usr/sbin/nologin': 'nologin',
    }
    # The CLI shells are only offered when at least one of the groups
    # grants a privilege.
    if self.middleware.call_sync('privilege.privileges_for_groups', 'local_groups', gids):
        choices['/usr/bin/cli'] = 'TrueNAS CLI'  # installed via midcli
        choices['/usr/bin/cli_console'] = 'TrueNAS Console'  # installed via midcli

    # on scale /etc/shells has duplicate entries like (/bin/sh, /usr/bin/sh)
    # (/bin/bash, /usr/bin/bash) etc. The entries that point to the same
    # basename are the same binary; the /usr/bin/ path is the "newer" place
    # to put binaries, so only those entries are kept.
    with open('/etc/shells') as shells_file:
        for line in shells_file:
            if not line.startswith('/usr/bin'):
                continue
            path = line.strip()
            choices[path] = os.path.basename(path)

    return choices
@api_method(UserGetUserObjArgs, UserGetUserObjResult, roles=['ACCOUNT_READ'])
def get_user_obj(self, data):
    """
    Returns dictionary containing information from struct passwd for the user specified by either
    the username or uid. Bypasses user cache.

    NOTE: results will not include nested groups for Active Directory users.

    Raises:
        KeyError: no user matches the given username / uid.
        ValueError: the NSS source reported for the user is unknown.
    """
    verrors = ValidationErrors()
    # Exactly one of `username` / `uid` must be supplied.
    if not data['username'] and data['uid'] is None:
        verrors.add('get_user_obj.username', 'Either "username" or "uid" must be specified.')
    if data['username'] and data['uid'] is not None:
        verrors.add('get_user_obj.username', '"username" and "uid" may not be simultaneously specified')
    verrors.check()

    # NOTE: per request from UI team we are overriding default library
    # KeyError message with a clearer one
    #
    # Many callers to user.get_user_obj may be catching KeyError and so
    # changing exception type is something that should be approached
    # carefully.
    if data['username']:
        try:
            user_obj = pwd.getpwnam(data['username'], as_dict=True)
        except KeyError:
            raise KeyError(f'{data["username"]}: user with this name does not exist') from None
    else:
        try:
            user_obj = pwd.getpwuid(data['uid'], as_dict=True)
        except KeyError:
            raise KeyError(f'{data["uid"]}: user with this id does not exist') from None

    # Map the NSS module that provided the entry onto the API-level source name.
    match user_obj['source']:
        case NssModule.FILES.name:
            user_obj['source'] = 'LOCAL'
        case NssModule.WINBIND.name:
            user_obj['source'] = 'ACTIVEDIRECTORY'
        case NssModule.SSS.name:
            user_obj['source'] = 'LDAP'
        case _:
            self.logger.error('%s: unknown ID source.', user_obj['source'])
            raise ValueError(f'{user_obj["source"]}: unknown ID source. Please file a bug report.')

    user_obj['local'] = user_obj['source'] == 'LOCAL'

    if data['get_groups']:
        user_obj['grouplist'] = os.getgrouplist(user_obj['pw_name'], user_obj['pw_gid'])
    else:
        user_obj['grouplist'] = None

    if data['sid_info']:
        sid = None
        match user_obj['source']:
            case 'LOCAL':
                # Local users have their SID stored in the TrueNAS config DB.
                idmap_ctx = None
                db_entry = self.middleware.call_sync('user.query', [
                    ['username', '=', user_obj['pw_name']],
                    ['local', '=', True]
                ], {'select': ['sid']})
                if not db_entry:
                    self.logger.error(
                        '%s: local user exists on server but does not exist in the '
                        'the user account table.', user_obj['pw_name']
                    )
                else:
                    sid = db_entry[0]['sid']
            case 'ACTIVEDIRECTORY':
                # winbind provides idmapping for AD users
                try:
                    idmap_ctx = idmap_winbind.WBClient()
                except wbclient.WBCError as e:
                    # Only log unexpected failures; "winbind not available"
                    # is reported elsewhere.
                    if e.error_code != wbclient.WBC_ERR_WINBIND_NOT_AVAILABLE:
                        self.logger.error('Failed to retrieve SID for uid: %d',
                                          user_obj['pw_uid'], exc_info=True)
                    idmap_ctx = None
            case 'LDAP':
                # SSSD provides ID mapping for IPA domains
                idmap_ctx = idmap_sss.SSSClient()
            case _:
                self.logger.error('%s: unknown ID source.', user_obj['source'])
                raise ValueError(f'{user_obj["source"]}: unknown ID source. Please file a bug report.')

        if idmap_ctx is not None:
            try:
                sid = idmap_ctx.uidgid_to_idmap_entry({
                    'id_type': 'USER',
                    'id': user_obj['pw_uid']
                })['sid']
            except MatchNotFound:
                # This is a more odd situation. Most likely case is that the user account exists
                # in IPA but doesn't have a SID assigned to it. All AD users have SIDs.
                sid = None

        user_obj['sid'] = sid
    else:
        user_obj['sid'] = None

    return user_obj
@api_method(UserGetNextUidArgs, UserGetNextUidResult, roles=['ACCOUNT_READ'])
async def get_next_uid(self):
    """
    Get the next available/free uid.
    """
    # New users start at uid 3000 to avoid potential conflicts (NAS-117892).
    highest = 2999
    local_users = await self.middleware.call(
        'datastore.query', 'account.bsdusers',
        [('builtin', '=', False)], {'order_by': ['uid'], 'prefix': 'bsdusr_'}
    )
    for user in local_users:
        uid = user['uid']
        # A gap of more than one between consecutive uids means the value
        # right after `highest` is free -- reuse it.
        if uid - highest > 1:
            break
        if uid > highest:
            highest = uid
    return highest + 1
@no_auth_required
@api_method(UserHasLocalAdministratorSetUpArgs, UserHasLocalAdministratorSetUpResult)
async def has_local_administrator_set_up(self):
    """
    Return whether a local administrator with a valid password exists.

    This is used when the system is installed without a password and must be set on
    first use/login.
    """
    admins = await self.middleware.call('privilege.local_administrators')
    return len(admins) > 0
@no_auth_required
@api_method(UserSetupLocalAdministratorArgs, UserSetupLocalAdministratorResult, audit='Set up local administrator')
@pass_app()
async def setup_local_administrator(self, app, username, password, options):
    """
    Set up local administrator (this method does not require authentication if local administrator is not already
    set up).

    Raises:
        CallError(EEXIST): a local administrator already exists, or the
            requested username / uid / group name / gid is already in use.
    """
    if await self.middleware.call('user.has_local_administrator_set_up'):
        raise CallError('Local administrator is already set up', errno.EEXIST)

    if username == 'truenas_admin':
        # first check based on NSS to catch collisions with AD / LDAP users.
        # The CallError raised inside each `try` is not a KeyError and so
        # propagates; `except KeyError: pass` only swallows the lookup miss,
        # which is the desired ("name is free") outcome.
        try:
            pwd_obj = await self.middleware.call('user.get_user_obj', {'uid': ADMIN_UID})
            raise CallError(
                f'A {pwd_obj["source"].lower()} user with uid={ADMIN_UID} already exists, '
                'setting up local administrator is not possible',
                errno.EEXIST,
            )
        except KeyError:
            pass

        try:
            pwd_obj = await self.middleware.call('user.get_user_obj', {'username': username})
            raise CallError(f'{username!r} {pwd_obj["source"].lower()} user already exists, '
                            'setting up local administrator is not possible',
                            errno.EEXIST)
        except KeyError:
            pass

        try:
            grp_obj = await self.middleware.call('group.get_group_obj', {'gid': ADMIN_GID})
            raise CallError(
                f'A {grp_obj["source"].lower()} group with gid={ADMIN_GID} already exists, '
                'setting up local administrator is not possible',
                errno.EEXIST,
            )
        except KeyError:
            pass

        try:
            grp_obj = await self.middleware.call('group.get_group_obj', {'groupname': username})
            raise CallError(f'{username!r} {grp_obj["source"].lower()} group already exists, '
                            'setting up local administrator is not possible',
                            errno.EEXIST)
        except KeyError:
            pass

        # double-check our database in case we have for some reason failed to write to passwd
        local_users = await self.middleware.call('user.query', [['local', '=', True]])
        local_groups = await self.middleware.call('group.query', [['local', '=', True]])

        if filter_list(local_users, [['uid', '=', ADMIN_UID]]):
            raise CallError(
                f'A user with uid={ADMIN_UID} already exists, setting up local administrator is not possible',
                errno.EEXIST,
            )

        if filter_list(local_users, [['username', '=', username]]):
            raise CallError(f'{username!r} user already exists, setting up local administrator is not possible',
                            errno.EEXIST)

        if filter_list(local_groups, [['gid', '=', ADMIN_GID]]):
            raise CallError(
                f'A group with gid={ADMIN_GID} already exists, setting up local administrator is not possible',
                errno.EEXIST,
            )

        if filter_list(local_groups, [['group', '=', username]]):
            raise CallError(f'{username!r} group already exists, setting up local administrator is not possible',
                            errno.EEXIST)

    # Account creation and password hashing are delegated to a helper script;
    # the password is passed on stdin, not on the command line.
    await run('truenas-set-authentication-method.py', check=True, encoding='utf-8', errors='ignore',
              input=json.dumps({'username': username, 'password': password}))
    await self.middleware.call('failover.datastore.force_send')
    await self.middleware.call('etc.generate', 'user')
@private
@job(lock=lambda args: f'copy_home_to_{args[1]}')
def do_home_copy(self, job, home_old, home_new, username, new_mode, uid):
    """
    Copy the contents of `home_old` into `home_new` as a middleware job,
    fixing ownership and mode on the destination first.
    """
    # NOTE(review): membership test against DEFAULT_HOME_PATH -- if that
    # constant is a single string this is a *substring* check; elsewhere in
    # this file the plural DEFAULT_HOME_PATHS is used for exact membership.
    # Confirm which constant / semantics is intended.
    if home_old in DEFAULT_HOME_PATH:
        return

    # We need to set permission and strip ACL first before copying files
    if new_mode is not None:
        perm_job = self.middleware.call_sync('filesystem.setperm', {
            'uid': uid,
            'path': home_new,
            'mode': new_mode,
            'options': {'stripacl': True},
        })
    else:
        # No explicit mode requested: carry the old home's mode over,
        # rendered as a 3-digit octal string.
        current_mode = stat.S_IMODE(self.middleware.call_sync('filesystem.stat', home_old)['mode'])
        perm_job = self.middleware.call_sync('filesystem.setperm', {
            'uid': uid,
            'path': home_new,
            'mode': f'{current_mode:03o}',
            'options': {'stripacl': True},
        })

    perm_job.wait_sync()
    return asdict(copytree(home_old, home_new, CopyTreeConfig(exist_ok=True, job=job)))
@private
async def common_validation(self, verrors, data, schema, group_ids, old=None):
    """
    Shared validation for user.create / user.update.

    Problems are accumulated into `verrors`; this method does not raise.
    `old` is the existing entry when updating (None on create); `combined`
    below represents the effective post-update state.
    """
    exclude_filter = [('id', '!=', old['id'])] if old else []
    combined = data if not old else old | data

    users = await self.middleware.call(
        'datastore.query',
        'account.bsdusers',
        exclude_filter,
        {'prefix': 'bsdusr_'}
    )

    if data.get('uid') is not None:
        # Checked via NSS so collisions with directory-service users are caught too.
        try:
            existing_user = await self.middleware.call(
                'user.get_user_obj',
                {'uid': data['uid']},
            )
        except KeyError:
            pass
        else:
            verrors.add(
                f'{schema}.uid',
                f'Uid {data["uid"]} is already used (user {existing_user["pw_name"]} has it)',
                errno.EEXIST,
            )

    if 'username' in data:
        pw_checkname(verrors, f'{schema}.username', data['username'])

        if filter_list(users, [('username', '=', data['username'])]):
            verrors.add(
                f'{schema}.username',
                f'The username "{data["username"]}" already exists.',
                errno.EEXIST
            )
        if data.get('smb'):
            # SMB usernames are case-insensitive: 'C=' is the
            # case-insensitive comparison filter operator.
            if filter_list(users, [['username', 'C=', data['username']], ['smb', '=', True]]):
                verrors.add(
                    f'{schema}.smb',
                    f'Username "{data["username"]}" conflicts with existing SMB user. Note that SMB '
                    f'usernames are case-insensitive.',
                    errno.EEXIST,
                )

    if combined['smb'] and not await self.middleware.call('smb.is_configured'):
        if (await self.middleware.call('systemdataset.sysdataset_path')) is None:
            verrors.add(
                f'{schema}.smb',
                'System dataset is not mounted at expected path. This may indicate '
                'an underlying issue with the pool hosting the system dataset. '
                'SMB users may not be configured until this configuration issue is addressed.'
            )
        else:
            verrors.add(
                f'{schema}.smb',
                'SMB users may not be configured while SMB service backend is unitialized.'
            )

    if combined['smb'] and combined['password_disabled']:
        verrors.add(
            f'{schema}.password_disabled', 'Password authentication may not be disabled for SMB users.'
        )

    password = data.get('password')
    if not old and not password and not data.get('password_disabled'):
        verrors.add(f'{schema}.password', 'Password is required')
    elif data.get('password_disabled') and password:
        verrors.add(
            f'{schema}.password_disabled',
            'Leave "Password" blank when "Disable password login" is checked.'
        )

    if 'home' in data:
        # Only perform the pool-placement check when the homedir path
        # itself validated successfully.
        if await self.middleware.run_in_thread(self.validate_homedir_path, verrors, schema, data, users):
            await check_path_resides_within_volume(verrors, self.middleware, schema, data['home'])

    if 'home_mode' in data:
        try:
            o = int(data['home_mode'], 8)
            # Mode must fit within 0o777 and keep owner read + execute bits.
            assert o & 0o777 == o
            if o & (stat.S_IRUSR) == 0:
                verrors.add(
                    f'{schema}.home_mode',
                    'Home directory must be readable by User.'
                )
            if o & (stat.S_IXUSR) == 0:
                verrors.add(
                    f'{schema}.home_mode',
                    'Home directory must be executable by User.'
                )
        except (AssertionError, ValueError, TypeError):
            verrors.add(
                f'{schema}.home_mode',
                'Please provide a valid value for home_mode attribute'
            )

    if 'groups' in data:
        groups = data.get('groups') or []
        if groups and len(groups) > 64:
            verrors.add(
                f'{schema}.groups',
                'A user cannot belong to more than 64 auxiliary groups.'
            )

    if 'full_name' in data and ':' in data['full_name']:
        # ':' is the passwd(5) field separator and would corrupt the entry.
        verrors.add(
            f'{schema}.full_name',
            'The ":" character is not allowed in a "Full Name".'
        )

    if 'full_name' in data and '\n' in data['full_name']:
        verrors.add(
            f'{schema}.full_name',
            'The "\\n" character is not allowed in a "Full Name".'
        )

    if 'shell' in data and data['shell'] not in await self.middleware.call('user.shell_choices', group_ids):
        verrors.add(
            f'{schema}.shell', 'Please select a valid shell.'
        )

    if 'sudo_commands' in data:
        verrors.add_child(
            f'{schema}.sudo_commands',
            await self.middleware.run_in_thread(validate_sudo_commands, data['sudo_commands']),
        )
    if 'sudo_commands_nopasswd' in data:
        verrors.add_child(
            f'{schema}.sudo_commands_nopasswd',
            await self.middleware.run_in_thread(validate_sudo_commands, data['sudo_commands_nopasswd']),
        )
def __set_password(self, data):
    """
    Replace the plain-text `password` key in `data` (if present) with
    `unixhash` / `smbhash` values suitable for storage in the accounts
    table. `data` is mutated in place and also returned.

    Fix: previously this returned None when no `password` key was present,
    which would clobber the caller's dict in the common
    `data = self.__set_password(data)` pattern; `data` is now returned
    unchanged in that case (backward compatible for callers that ignore
    the return value, since the dict is mutated in place anyway).
    """
    if 'password' not in data:
        return data
    password = data.pop('password')
    if password:
        data['unixhash'] = crypted_password(password)
        data['smbhash'] = nt_password(password)
    else:
        # Empty password: '*' in both hash fields disables password auth.
        data['unixhash'] = '*'
        data['smbhash'] = '*'
    return data
@private
def update_sshpubkey(self, homedir, user, group):
    """
    Write (or remove) `user['sshpubkey']` to `<homedir>/.ssh/authorized_keys`,
    creating the .ssh directory with safe permissions when necessary.
    No-op when the key is unchanged or the home directory does not exist.
    """
    if 'sshpubkey' not in user:
        return
    if not os.path.isdir(homedir):
        return

    sshpath = f'{homedir}/.ssh'
    keysfile = f'{sshpath}/authorized_keys'
    gid = -1

    pubkey = user.get('sshpubkey') or ''
    pubkey = pubkey.strip()
    if pubkey == '':
        # An empty key means "remove"; a missing authorized_keys is fine.
        try:
            os.unlink(keysfile)
        except OSError:
            pass
        return

    # Read the currently-installed key (if any) so unchanged keys are a no-op.
    oldpubkey = ''
    try:
        with open(keysfile, 'r') as f:
            oldpubkey = f.read().strip()
    except Exception:
        pass

    if pubkey == oldpubkey:
        return

    if not os.path.isdir(sshpath):
        # Since this is security sensitive, we allow raising exception here
        # if mode fails to be set to 0o700
        self.middleware.call_sync('filesystem.mkdir', {'path': sshpath, 'options': {'mode': '700'}})

    if not os.path.isdir(sshpath):
        raise CallError(f'{sshpath} is not a directory')

    # Make extra sure to enforce correct mode on .ssh directory.
    # stripping the ACL will allow subsequent chmod calls to succeed even if
    # dataset aclmode is restricted.
    try:
        gid = self.middleware.call_sync('group.get_group_obj', {'groupname': group})['gr_gid']
    except Exception:
        # leaving gid at -1 avoids altering the GID value.
        self.logger.debug("Failed to convert %s to gid", group, exc_info=True)

    self.middleware.call_sync('filesystem.setperm', {
        'path': sshpath,
        'mode': str(700),  # yields the octal-style string '700' expected by filesystem.setperm
        'uid': user['uid'],
        'gid': gid,
        'options': {'recursive': True, 'stripacl': True}
    }).wait_sync(raise_error=True)

    with open(keysfile, 'w') as f:
        # Tighten mode/ownership on the open fd before writing the key.
        os.fchmod(f.fileno(), 0o600)
        os.fchown(f.fileno(), user['uid'], gid)
        f.write(f'{pubkey}\n')
@no_authz_required
@api_method(UserSetPasswordArgs, UserSetPasswordResult,
            audit='Set account password', audit_extended=lambda data: data['username'])
@pass_app(require=True)
async def set_password(self, app, data):
    """
    Set the password of the specified `username` to the `new_password`
    specified in payload.

    ValidationErrors will be raised in the following situations:
    * username does not exist
    * account is not local to the NAS (Active Directory, LDAP, etc)
    * account has password authentication disabled
    * account is locked

    NOTE: when authenticated session has less than FULL_ADMIN role,
    password changes will be rejected if the payload does not match the
    currently-authenticated user.

    API keys granting access to this endpoint will be able to reset
    the password of any user.
    """
    verrors = ValidationErrors()
    is_full_admin = credential_has_full_admin(app.authenticated_credentials)
    authenticated_user = None

    # Non-user sessions (e.g. token / API-key credentials) carry no username.
    if app.authenticated_credentials.is_user_session:
        authenticated_user = app.authenticated_credentials.user['username']

    username = data['username']
    password = data['new_password']

    # Non-admins may only change their own password.
    if not is_full_admin and authenticated_user != username:
        raise CallError(
            f'{username}: currently authenticated credential may not reset '
            'password for this user.',
            errno.EPERM
        )

    entry = await self.middleware.call(
        'user.query',
        [['username', '=', username]],
        {'extra': {'additional_information': ['DS']}}
    )
    if not entry:
        # This only happens if authenticated user has FULL_ADMIN privileges
        # and so we're not concerned about letting admin know that username is
        # bad.
        verrors.add(
            'user.set_password.username',
            f'{username}: user does not exist.'
        )
    else:
        entry = entry[0]
        if not entry['local']:
            # We don't allow resetting passwords on remote directory service.
            verrors.add(
                'user.set_password.username',
                f'{username}: user is not local to the TrueNAS server.'
            )

    if not is_full_admin:
        # Non-admins must prove knowledge of the current password via PAM.
        if data['old_password'] is None:
            verrors.add(
                'user.set_password.old_password',
                'FULL_ADMIN role is required in order to bypass check for current password.'
            )
        else:
            pam_resp = await self.middleware.call(
                'auth.libpam_authenticate', username, data['old_password']
            )
            if pam_resp['code'] != pam.PAM_SUCCESS:
                verrors.add(
                    'user.set_password.old_password',
                    f'{username}: failed to validate password.'
                )

    # First check: existence / locality / old-password validation.
    verrors.check()

    if entry['password_disabled']:
        verrors.add(
            'user.set_password.username',
            f'{username}: password authentication disabled for user'
        )

    if entry['locked']:
        verrors.add(
            'user.set_password.username',
            f'{username}: user account is locked.'
        )

    # Second check: account-state validation (entry is known valid here).
    verrors.check()

    # Hash the new password and persist it directly to the accounts table.
    entry = self.__set_password(entry | {'password': password})
    await self.middleware.call('datastore.update', 'account.bsdusers', entry['id'], {
        'bsdusr_unixhash': entry['unixhash'],
        'bsdusr_smbhash': entry['smbhash'],
    })
    await self.middleware.call('etc.generate', 'shadow')

    if entry['smb']:
        await self.middleware.call('smb.update_passdb_user', entry)
class GroupModel(sa.Model):
    # SQLAlchemy model backing the `account.bsdgroups` datastore table.
    __tablename__ = 'account_bsdgroups'

    id = sa.Column(sa.Integer(), primary_key=True)
    bsdgrp_gid = sa.Column(sa.Integer())
    bsdgrp_group = sa.Column(sa.String(120), unique=True)
    bsdgrp_builtin = sa.Column(sa.Boolean(), default=False)
    bsdgrp_sudo_commands = sa.Column(sa.JSON(list))
    bsdgrp_sudo_commands_nopasswd = sa.Column(sa.JSON(list))
    bsdgrp_smb = sa.Column(sa.Boolean(), default=True)
    # Many-to-many auxiliary membership via the membership table; `overlaps`
    # declares the intentional overlap with the user-side relationship.
    bsdgrp_users = relationship('UserModel', secondary=lambda: GroupMembershipModel.__table__, overlaps='bsdusr_groups')
class GroupService(CRUDService):
    # CRUD service for local group management, backed by the
    # account.bsdgroups datastore table.

    class Config:
        datastore = 'account.bsdgroups'
        datastore_prefix = 'bsdgrp_'
        datastore_extend = 'group.group_extend'
        datastore_extend_context = 'group.group_extend_context'
        cli_namespace = 'account.group'
        role_prefix = 'ACCOUNT'
        entry = GroupEntry
@private
async def group_extend_context(self, rows, extra):
    """
    Build the shared context consumed by group.group_extend: the privilege
    table, a map of group id -> ids of users having that group as their
    primary group, and the local SMB server SID.
    """
    by_primary_group = defaultdict(set)
    for user in await self.middleware.call('datastore.query', 'account.bsdusers'):
        by_primary_group[user['bsdusr_group']['id']].add(user['id'])

    return {
        "privileges": await self.middleware.call('datastore.query', 'account.privilege'),
        "primary_memberships": by_primary_group,
        "server_sid": await self.middleware.call('smb.local_server_sid'),
    }
@private
async def group_extend(self, group, ctx):
    """
    Normalize a raw datastore group row into the API-level group entry:
    adds `name`, the full `users` list, the SID, and privilege-derived
    roles. `ctx` is the dict built by group_extend_context.
    """
    group['name'] = group['group']
    # Merge explicit memberships with users that have this group as their
    # primary group (tracked in the user table, not the membership table).
    group['users'] = list({u['id'] for u in group['users']} | ctx['primary_memberships'][group['id']])

    privilege_mappings = privileges_group_mapping(ctx['privileges'], [group['gid']], 'local_groups')
    if privilege_mappings['allowlist']:
        privilege_mappings['roles'].append('HAS_ALLOW_LIST')
    # A wildcard allowlist entry is equivalent to full administrative access.
    if {'method': '*', 'resource': '*'} in privilege_mappings['allowlist']:
        privilege_mappings['roles'].append('FULL_ADMIN')

    match group['group']:
        case 'builtin_administrators':
            # Well-known domain RIDs for the builtin admin / guest groups.
            sid = f'{ctx["server_sid"]}-{DomainRid.ADMINS}'
        case 'builtin_guests':
            sid = f'{ctx["server_sid"]}-{DomainRid.GUESTS}'
        case _:
            if group['smb']:
                sid = f'{ctx["server_sid"]}-{db_id_to_rid(IDType.GROUP, group["id"])}'
            else:
                # Groups without SMB mapping have no SID.
                sid = None

    group.update({
        'local': True,
        'id_type_both': False,
        'sid': sid,
        'roles': privilege_mappings['roles']
    })
    return group
@private
async def group_compress(self, group):
    """
    Strip the extended/computed keys from a group entry so that what
    remains matches the account.bsdgroups datastore schema.
    """
    for key in ('name', 'local', 'id_type_both', 'sid', 'roles'):
        group.pop(key, None)
    return group
@filterable
async def query(self, filters, options):
    """
    Query groups with `query-filters` and `query-options`.

    Local groups come from the datastore; when the filters may match
    directory-service accounts and a directory service is healthy, cached
    DS groups are merged into the result before final filtering.
    """
    ds_groups = []
    options = options or {}
    options['extend'] = self._config.datastore_extend
    options['extend_context'] = self._config.datastore_extend_context
    options['prefix'] = self._config.datastore_prefix

    # The datastore query must return the full, unsliced set: pagination,
    # counting and selection are applied only after merging with the DS
    # results in filter_list below.
    datastore_options = options.copy()
    datastore_options.pop('count', None)
    datastore_options.pop('get', None)
    datastore_options.pop('limit', None)
    datastore_options.pop('offset', None)
    datastore_options.pop('select', None)

    if filters_include_ds_accounts(filters):
        ds = await self.middleware.call('directoryservices.status')
        if ds['type'] is not None and ds['status'] == DSStatus.HEALTHY.name:
            ds_groups = await self.middleware.call('directoryservices.cache.query', 'GROUP', filters, options)

    result = await self.middleware.call(
        'datastore.query', self._config.datastore, [], datastore_options
    )

    return await self.middleware.run_in_thread(
        filter_list, result + ds_groups, filters, options
    )
@api_method(GroupCreateArgs, GroupCreateResult, audit='Create group', audit_extended=lambda data: data['name'])
async def do_create(self, data):
    """
    Create a new group.
    """
    # Thin wrapper: validation, gid allocation and groupmap registration
    # live in create_internal so internal callers can reuse them.
    return await self.create_internal(data)
@private
async def create_internal(self, data, reload_users=True):
    # Implementation backing group.create. `reload_users` may be disabled
    # by internal callers that batch multiple account changes and reload once.
    verrors = ValidationErrors()
    await self.__common_validation(verrors, data, 'group_create')
    verrors.check()

    if data.get('gid') is None:
        data['gid'] = await self.get_next_gid()

    group = data.copy()
    group['group'] = group.pop('name')  # datastore column is `group`; API field is `name`

    group = await self.group_compress(group)
    pk = await self.middleware.call('datastore.insert', 'account.bsdgroups', group, {'prefix': 'bsdgrp_'})

    if reload_users:
        await self.middleware.call('service.reload', 'user')

    if data['smb']:
        # Register the new group in the SMB group mapping database.
        await self.middleware.call('smb.add_groupmap', group | {'id': pk})

    return pk
@api_method(GroupUpdateArgs, GroupUpdateResult, audit='Update group', audit_callback=True)
async def do_update(self, audit_callback, pk, data):
    """
    Update attributes of an existing group.
    """
    if pk > BASE_SYNTHETIC_DATASTORE_ID:
        # datastore ids for directory services are created by adding the
        # posix ID to a base value so that we can use getpwuid / getgrgid to
        # convert back to a username / group name
        try:
            groupname = (await self.middleware.call(
                'group.get_group_obj', {'gid': pk - BASE_SYNTHETIC_DATASTORE_ID}
            ))['gr_name']
        except KeyError:
            groupname = 'UNKNOWN'

        audit_callback(groupname)
        raise CallError(
            'Groups provided by a directory service must be modified through the identity provider '
            '(LDAP server or domain controller).', errno.EPERM
        )

    group = await self.get_instance(pk)
    audit_callback(group['name'])

    if data.get('gid') == group['gid']:
        data.pop('gid')  # Only check for duplicate GID if we are updating it

    verrors = ValidationErrors()
    await self.__common_validation(verrors, data, 'group_update', pk=pk)
    verrors.check()

    # Track the SMB-flag transition so the groupmap can be kept in sync below.
    old_smb = group['smb']
    group.update(data)
    new_smb = group['smb']

    if 'name' in data and data['name'] != group['group']:
        group['group'] = group.pop('name')
        if new_smb:
            # group renamed. We can simply add over top since group_mapping.tdb is keyed
            # by SID value
            await self.middleware.call('smb.add_groupmap', group)
    else:
        group.pop('name', None)
        if new_smb and not old_smb:
            await self.middleware.call('smb.add_groupmap', group)
        elif old_smb and not new_smb:
            await self.middleware.call('smb.del_groupmap', group['id'])

    if 'users' in group:
        # Users whose *primary* group is this one are tracked through the
        # user table, not the membership table -- drop them from the list.
        primary_users = {
            u['id']
            for u in await self.middleware.call(
                'datastore.query',
                'account.bsdusers',
                [('bsdusr_group', '=', pk)],
            )
        }
        group['users'] = [u for u in group['users'] if u not in primary_users]

    group = await self.group_compress(group)
    await self.middleware.call('datastore.update', 'account.bsdgroups', pk, group, {'prefix': 'bsdgrp_'})

    await self.middleware.call('service.reload', 'user')

    return pk
@api_method(GroupDeleteArgs, GroupDeleteResult, audit='Delete group', audit_callback=True)
async def do_delete(self, audit_callback, pk, options):
    """
    Delete group `id`.

    The `delete_users` option deletes all users that have this group as their primary group.
    """
    if pk > BASE_SYNTHETIC_DATASTORE_ID:
        # datastore ids for directory services are created by adding the
        # posix ID to a base value so that we can use getpwuid / getgrgid to
        # convert back to a username / group name
        try:
            groupname = (await self.middleware.call(
                'group.get_group_obj', {'gid': pk - BASE_SYNTHETIC_DATASTORE_ID}
            ))['gr_name']
        except KeyError:
            groupname = 'UNKNOWN'

        audit_callback(groupname)
        raise CallError(
            'Groups provided by a directory service must be deleted from the identity provider '
            '(LDAP server or domain controller).', errno.EPERM
        )

    group = await self.get_instance(pk)
    audit_callback(group['name'] + (' and all users that have this group as their primary group'
                                    if options['delete_users'] else ''))

    if group['builtin']:
        raise CallError('A built-in group cannot be deleted.', errno.EACCES)

    await self.middleware.call('privilege.before_group_delete', group)

    # Users keeping this group as their primary group are either deleted
    # outright or reassigned to the `nogroup` fallback group.
    nogroup = await self.middleware.call('datastore.query', 'account.bsdgroups', [('group', '=', 'nogroup')],
                                         {'prefix': 'bsdgrp_', 'get': True})
    for i in await self.middleware.call('datastore.query', 'account.bsdusers', [('group', '=', group['id'])],
                                        {'prefix': 'bsdusr_'}):
        if options['delete_users']:
            await self.middleware.call('datastore.delete', 'account.bsdusers', i['id'])
        else:
            await self.middleware.call('datastore.update', 'account.bsdusers', i['id'], {'group': nogroup['id']},
                                       {'prefix': 'bsdusr_'})

    await self.middleware.call('datastore.delete', 'account.bsdgroups', pk)

    if group['smb']:
        await self.middleware.call('smb.del_groupmap', group['id'])

    await self.middleware.call('service.reload', 'user')
    try:
        # Drop any stale winbind idmap cache entry for the removed gid.
        await self.middleware.call('idmap.gencache.del_idmap_cache_entry', {
            'entry_type': 'GID2SID',
            'entry': group['gid']
        })
    except MatchNotFound:
        pass

    return pk
@api_method(GroupGetNextGidArgs, GroupGetNextGidResult, roles=['ACCOUNT_READ'])
async def get_next_gid(self):
    """
    Get the next available/free gid.
    """
    groups = await self.middleware.call('datastore.query', 'account.bsdgroups')
    privilege_gids = await self.middleware.call('privilege.used_local_gids')

    taken = {entry['bsdgrp_gid'] for entry in groups}
    taken.update(privilege_gids.keys())

    # Start allocation at gid 3000 to avoid potential conflicts
    # (NAS-117892) and walk forward to the first unused value.
    candidate = 3000
    while candidate in taken:
        candidate += 1
    return candidate
@api_method(GroupGetGroupObjArgs, GroupGetGroupObjResult, roles=['ACCOUNT_READ'])
def get_group_obj(self, data):
"""
Returns dictionary containing information from struct grp for the group specified by either
the `groupname` or `gid`.
If `sid_info` is specified then addition SMB / domain information is returned for the
group.
"""
verrors = ValidationErrors()
if not data['groupname'] and data['gid'] is None:
verrors.add('get_group_obj.groupname', 'Either "groupname" or "gid" must be specified')
if data['groupname'] and data['gid'] is not None:
verrors.add('get_group_obj.groupname', '"groupname" and "gid" may not be simultaneously specified')
verrors.check()
# NOTE: per request from UI team we are overriding default library
# KeyError message with a clearer one
#
# Many callers to group.get_group_obj may be catching KeyError and so
# changing exception type is something that should be approached
# carefully.
if data['groupname']:
try:
grp_obj = grp.getgrnam(data['groupname'], as_dict=True)
except KeyError:
raise KeyError(f'{data["groupname"]}: group with this name does not exist') from None
else:
try:
grp_obj = grp.getgrgid(data['gid'], as_dict=True)
except KeyError:
raise KeyError(f'{data["gid"]}: group with this id does not exist') from None
grp_obj['local'] = grp_obj['source'] == NssModule.FILES.name
match grp_obj['source']:
case NssModule.FILES.name:
grp_obj['source'] = 'LOCAL'
case NssModule.WINBIND.name:
grp_obj['source'] = 'ACTIVEDIRECTORY'
case NssModule.SSS.name:
grp_obj['source'] = 'LDAP'
case _:
self.logger.error('%s: unknown ID source.', grp_obj['source'])
raise ValueError(f'{grp_obj["source"]}: unknown ID source. Please file a bug report.')
grp_obj['local'] = grp_obj['source'] == 'LOCAL'
if data['sid_info']:
sid = None
match grp_obj['source']:
case 'LOCAL':
idmap_ctx = None
db_entry = self.middleware.call_sync('group.query', [
['group', '=', grp_obj['gr_name']],
['local', '=', True]
], {'select': ['sid']})
if not db_entry:
self.logger.error(
'%s: local group exists on server but does not exist in the '
'the group account table.', grp_obj['gr_name']
)
else:
sid = db_entry[0]['sid']
case 'ACTIVEDIRECTORY':
# winbind provides idmapping for AD groups
try:
idmap_ctx = idmap_winbind.WBClient()
except wbclient.WBCError as e:
# Library error from libwbclient.
# Don't bother logging if winbind isn't running since
# we have plenty of other places that are logging that
# error condition
if e.error_code != wbclient.WBC_ERR_WINBIND_NOT_AVAILABLE:
self.logger.error('Failed to retrieve SID for gid: %d',
grp_obj['gr_gid'], exc_info=True)
idmap_ctx = None
case 'LDAP':
# SSSD provides ID mapping for IPA domains
idmap_ctx = idmap_sss.SSSClient()
case _:
self.logger.error('%s: unknown ID source.', grp_obj['source'])
raise ValueError(f'{grp_obj["source"]}: unknown ID source. Please file a bug report.')
if idmap_ctx is not None:
try:
sid = idmap_ctx.uidgid_to_idmap_entry({
'id_type': 'GROUP',
'id': grp_obj['gr_gid']
})['sid']
except MatchNotFound:
# This can happen if IPA and group doesn't have SID assigned
sid = None
grp_obj['sid'] = sid
else:
grp_obj['sid'] = None
return grp_obj
async def __common_validation(self, verrors, data, schema, pk=None):
    """
    Shared validation for group.create / group.update.

    Problems are accumulated into `verrors`; this method does not raise.
    `pk` (update only) excludes the group being edited from duplicate checks.
    """
    exclude_filter = [('id', '!=', pk)] if pk else []

    if data.get('smb') and not await self.middleware.call('smb.is_configured'):
        verrors.add(
            f'{schema}.smb', 'SMB groups may not be configured while SMB service backend is unitialized.'
        )

    if 'name' in data:
        if data.get('smb'):
            # SMB-mapped names must not collide with well-known builtin
            # groups or existing groupmap entries (both case-insensitive).
            if data['name'].upper() in [x.name for x in SMBBuiltin]:
                verrors.add(
                    f'{schema}.name',
                    f'Group name "{data["name"]}" conflicts with existing SMB Builtin entry. '
                    f'SMB group mapping is not permitted for this group.',
                    errno.EEXIST,
                )

            smb_groups = await self.middleware.call('datastore.query',
                                                    'account.bsdgroups',
                                                    [('smb', '=', True)] + exclude_filter,
                                                    {'prefix': 'bsdgrp_'})

            # 'C=' is the case-insensitive comparison filter operator.
            if filter_list(smb_groups, [['group', 'C=', data['name']]]):
                verrors.add(
                    f'{schema}.name',
                    f'Group name "{data["name"]}" conflicts with existing groupmap entry. '
                    f'SMB group mapping is not permitted for this group. Note that SMB '
                    f'group names are case-insensitive.',
                    errno.EEXIST,
                )

        existing = await self.middleware.call(
            'datastore.query', 'account.bsdgroups',
            [('group', '=', data['name'])] + exclude_filter, {'prefix': 'bsdgrp_'}
        )
        if existing:
            verrors.add(
                f'{schema}.name',
                f'A Group with the name "{data["name"]}" already exists.',
                errno.EEXIST,
            )

        pw_checkname(verrors, f'{schema}.name', data['name'])

    if data.get('gid') is not None:
        # Checked via NSS so directory-service groups are also caught.
        try:
            existing = await self.middleware.call(
                'group.get_group_obj', {'gid': data['gid']},
            )
        except KeyError:
            pass
        else:
            verrors.add(
                f'{schema}.gid',
                f'Gid {data["gid"]} is already used (group {existing["gr_name"]} has it)',
                errno.EEXIST,
            )

        if privilege := (await self.middleware.call('privilege.used_local_gids')).get(data['gid']):
            verrors.add(
                f'{schema}.gid',
                f'A privilege {privilege["name"]!r} already uses this group ID.',
                errno.EINVAL,
            )

    if 'users' in data:
        existing = {
            i['id']
            for i in await self.middleware.call(
                'datastore.query',
                'account.bsdusers',
                [('id', 'in', data['users'])],
            )
        }
        notfound = set(data['users']) - existing
        if notfound:
            verrors.add(
                f'{schema}.users',
                f'Following users do not exist: {", ".join(map(str, notfound))}',
            )

        # Users whose primary group is this one must remain in the list.
        primary_users = await self.middleware.call(
            'datastore.query',
            'account.bsdusers',
            [('bsdusr_group', '=', pk)],
        )
        notfound = []
        for user in primary_users:
            if user['id'] not in data['users']:
                notfound.append(user['bsdusr_username'])
        if notfound:
            verrors.add(
                f'{schema}.users',
                f'This group is primary for the following users: {", ".join(map(str, notfound))}. '
                'You can\'t remove them.',
            )

    if 'sudo_commands' in data:
        verrors.add_child(
            f'{schema}.sudo_commands',
            await self.middleware.run_in_thread(validate_sudo_commands, data['sudo_commands']),
        )
    if 'sudo_commands_nopasswd' in data:
        verrors.add_child(
            f'{schema}.sudo_commands_nopasswd',
            await self.middleware.run_in_thread(validate_sudo_commands, data['sudo_commands_nopasswd']),
        )
async def setup(middleware):
if await middleware.call('keyvalue.get', 'run_migration', False):
await middleware.call('user.sync_builtin')
| 80,364 | Python | .py | 1,716 | 33.389277 | 120 | 0.54507 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,484 | auth.py | truenas_middleware/src/middlewared/middlewared/plugins/auth.py | import asyncio
import random
from datetime import timedelta
import errno
import pam
import time
from middlewared.api import api_method
from middlewared.api.base.server.ws_handler.rpc import RpcWebSocketAppEvent
from middlewared.api.current import (
AuthLegacyPasswordLoginArgs, AuthLegacyApiKeyLoginArgs, AuthLegacyTokenLoginArgs,
AuthLegacyTwoFactorArgs, AuthLegacyResult,
AuthLoginExArgs, AuthLoginExContinueArgs, AuthLoginExResult,
AuthMeArgs, AuthMeResult,
AuthMechChoicesArgs, AuthMechChoicesResult,
)
from middlewared.auth import (UserSessionManagerCredentials, UnixSocketSessionManagerCredentials,
ApiKeySessionManagerCredentials, LoginPasswordSessionManagerCredentials,
LoginTwofactorSessionManagerCredentials, AuthenticationContext,
TrueNasNodeSessionManagerCredentials, TokenSessionManagerCredentials,
dump_credentials)
from middlewared.plugins.account_.constants import MIDDLEWARE_PAM_SERVICE, MIDDLEWARE_PAM_API_KEY_SERVICE
from middlewared.schema import accepts, Any, Bool, Datetime, Dict, Int, Password, returns, Str
from middlewared.service import (
Service, filterable, filterable_returns, filter_list, no_auth_required, no_authz_required,
pass_app, private, cli_private, CallError,
)
from middlewared.service_exception import MatchNotFound
import middlewared.sqlalchemy as sa
from middlewared.utils.auth import (
aal_auth_mechanism_check, AuthMech, AuthResp, AuthenticatorAssuranceLevel, AA_LEVEL1,
AA_LEVEL2, AA_LEVEL3, CURRENT_AAL, MAX_OTP_ATTEMPTS,
)
from middlewared.utils.crypto import generate_token
from middlewared.utils.time_utils import utc_now
PAM_SERVICES = {MIDDLEWARE_PAM_SERVICE, MIDDLEWARE_PAM_API_KEY_SERVICE}
class TokenManager:
    """In-memory registry of short-lived authentication tokens."""

    def __init__(self):
        # token string -> Token
        self.tokens = {}

    def create(self, ttl, attributes, match_origin, parent_credentials, session_id):
        """Mint a new token anchored to ``parent_credentials`` and register it."""
        credentials = parent_credentials
        # Collapse token-of-token chains: anchor the new token to the original
        # (root) credentials when the parent is itself token-based.
        if isinstance(credentials, TokenSessionManagerCredentials):
            root = credentials.token.root_credentials()
            if root:
                credentials = root

        token_str = generate_token(48, url_safe=True)
        entry = Token(self, token_str, ttl, attributes, match_origin, credentials, session_id)
        self.tokens[token_str] = entry
        return entry

    def get(self, token, origin):
        """Look up ``token``; evict when expired, reject origin mismatches."""
        entry = self.tokens.get(token)
        if entry is None:
            return None

        if not entry.is_valid():
            # Expired - drop it eagerly so it cannot be resurrected.
            self.tokens.pop(entry.token)
            return None

        if entry.match_origin:
            # `or` short-circuits, so match() only runs for same-typed origins.
            same_kind = isinstance(origin, type(entry.match_origin))
            if not same_kind or not entry.match_origin.match(origin):
                return None

        return entry

    def destroy(self, token):
        """Forget ``token`` (a Token object); unknown tokens are ignored."""
        self.tokens.pop(token.token, None)

    def destroy_by_session_id(self, session_id):
        """Drop every token that was created by session ``session_id``."""
        surviving = {}
        for key, entry in self.tokens.items():
            if session_id not in entry.session_ids:
                surviving[key] = entry
        self.tokens = surviving
class Token:
    """A single bearer token together with its expiry bookkeeping."""

    def __init__(self, manager, token, ttl, attributes, match_origin, parent_credentials, session_id):
        self.manager = manager
        self.token = token
        self.ttl = ttl
        self.attributes = attributes
        self.match_origin = match_origin
        self.parent_credentials = parent_credentials
        self.session_ids = {session_id}
        # Inactivity clock; refreshed on every use via notify_used().
        self.last_used_at = time.monotonic()

    def is_valid(self):
        """True while the token was used within the last ``ttl`` seconds."""
        deadline = self.last_used_at + self.ttl
        return time.monotonic() < deadline

    def notify_used(self):
        """Reset the inactivity clock."""
        self.last_used_at = time.monotonic()

    def root_credentials(self):
        """Walk token->parent chains to the first non-token credentials (or None)."""
        credentials = self.parent_credentials
        while isinstance(credentials, TokenSessionManagerCredentials):
            credentials = credentials.token.parent_credentials
        # Either a concrete credentials object or None when the chain is empty.
        return credentials
class SessionManager:
    """Registry of authenticated sessions keyed by the app's session id."""

    def __init__(self):
        # session_id -> Session
        self.sessions = {}

        # Set externally (AuthService.__init__) so we can emit events and
        # audit messages on login/logout.
        self.middleware = None

    async def login(self, app, credentials):
        # Bind `credentials` to `app`. If the app is already authenticated
        # this only swaps the credentials in place; otherwise a new Session
        # is created, activity callbacks are registered and (for non-internal
        # sessions) an event plus a success audit record are emitted.
        if app.authenticated:
            self.sessions[app.session_id].credentials = credentials
            app.authenticated_credentials = credentials
            return

        session = Session(self, credentials, app)
        self.sessions[app.session_id] = session

        app.authenticated = True
        app.authenticated_credentials = credentials

        # MESSAGE refreshes credential validity; CLOSE tears the session down.
        app.register_callback(RpcWebSocketAppEvent.MESSAGE, self._app_on_message)
        app.register_callback(RpcWebSocketAppEvent.CLOSE, self._app_on_close)

        if not is_internal_session(session):
            self.middleware.send_event("auth.sessions", "ADDED", fields=dict(id=app.session_id, **session.dump()))
            await self.middleware.log_audit_message(app, "AUTHENTICATION", {
                "credentials": dump_credentials(credentials),
                "error": None,
            }, True)

    async def logout(self, app):
        # Tear down the app's session; safe to call when no session exists.
        session = self.sessions.pop(app.session_id, None)

        if session is not None:
            session.credentials.logout()

            if not is_internal_session(session):
                self.middleware.send_event("auth.sessions", "REMOVED", fields=dict(id=app.session_id))

        app.authenticated = False

    async def _app_on_message(self, app, message):
        # Called on every inbound message: expired credentials force a
        # logout, valid ones get their keep-alive timer refreshed.
        session = self.sessions.get(app.session_id)
        if session is None:
            app.authenticated = False
            return

        if not session.credentials.is_valid():
            await self.logout(app)
            return

        session.credentials.notify_used()

    async def _app_on_close(self, app):
        # Connection closed: drop the session.
        await self.logout(app)
class Session:
    """Pairs an application connection with its authenticated credentials."""

    def __init__(self, manager, credentials, app):
        self.manager = manager
        self.credentials = credentials
        self.app = app
        # Monotonic timestamp; translated to wall-clock time in dump().
        self.created_at = time.monotonic()

    def dump(self):
        """Serialize for `auth.sessions` query results and events."""
        age = time.monotonic() - self.created_at
        summary = {
            "origin": str(self.app.origin),
            **dump_credentials(self.credentials),
            "created_at": utc_now() - timedelta(seconds=age),
        }
        return summary
def is_internal_session(session) -> bool:
    """True for sessions that should be hidden from events / audit noise:
    root-owned unix-socket connections and internal node-to-node sessions."""
    try:
        origin = session.app.origin
        if origin.is_unix_family and origin.uid == 0:
            return True
    except AttributeError:
        # session.app.origin can be NoneType
        pass

    return isinstance(session.app.authenticated_credentials, TrueNasNodeSessionManagerCredentials)
class UserWebUIAttributeModel(sa.Model):
    # Persists per-user web UI attributes (written by auth.set_attribute,
    # read back by auth.me). One row per unix account.
    __tablename__ = 'account_bsdusers_webui_attribute'

    id = sa.Column(sa.Integer(), primary_key=True)
    # Unix uid of the account the attributes belong to; unique => one row each.
    uid = sa.Column(sa.Integer(), unique=True)
    # Free-form JSON dictionary of key/value pairs.
    attributes = sa.Column(sa.JSON())
class AuthService(Service):
class Config:
cli_namespace = "auth"
session_manager = SessionManager()
token_manager = TokenManager()
def __init__(self, *args, **kwargs):
super(AuthService, self).__init__(*args, **kwargs)
self.session_manager.middleware = self.middleware
@filterable(roles=['AUTH_SESSIONS_READ'])
@filterable_returns(Dict(
'session',
Str('id'),
Bool('current'),
Bool('internal'),
Str('origin'),
Str('credentials'),
Dict('credentials_data', additional_attrs=True),
Datetime('created_at'),
))
@pass_app()
def sessions(self, app, filters, options):
"""
Returns list of active auth sessions.
Example of return value:
[
{
"id": "NyhB1J5vjPjIV82yZ6caU12HLA1boDJcZNWuVQM4hQWuiyUWMGZTz2ElDp7Yk87d",
"origin": "192.168.0.3:40392",
"credentials": "LOGIN_PASSWORD",
"credentials_data": {"username": "root"},
"current": True,
"internal": False,
"created_at": {"$date": 1545842426070}
}
]
`credentials` can be `UNIX_SOCKET`, `ROOT_TCP_SOCKET`, `LOGIN_PASSWORD`, `API_KEY` or `TOKEN`,
depending on what authentication method was used.
For `UNIX_SOCKET` and `LOGIN_PASSWORD` logged-in `username` field will be provided in `credentials_data`.
For `API_KEY` corresponding `api_key` will be provided in `credentials_data`.
For `TOKEN` its `parent` credential will be provided in `credentials_data`.
If you want to exclude all internal connections from the list, call this method with following arguments:
[
[
["internal", "=", True]
]
]
"""
return filter_list(
[
dict(
id=session_id,
current=app.session_id == session_id,
internal=is_internal_session(session),
**session.dump()
)
for session_id, session in sorted(self.session_manager.sessions.items(),
key=lambda t: t[1].created_at)
],
filters,
options,
)
@accepts(Str('id'), roles=['AUTH_SESSIONS_WRITE'])
@returns(Bool(description='Is `true` if session was terminated successfully'))
async def terminate_session(self, id_):
"""
Terminates session `id`.
"""
session = self.session_manager.sessions.get(id_)
if session is None:
return False
self.token_manager.destroy_by_session_id(id_)
await session.app.ws.close()
@accepts(roles=['AUTH_SESSIONS_WRITE'])
@returns()
@pass_app()
async def terminate_other_sessions(self, app):
"""
Terminates all other sessions (except the current one).
"""
errors = []
for session_id, session in list(self.session_manager.sessions.items()):
if session_id == app.session_id:
continue
if is_internal_session(session):
continue
try:
await self.terminate_session(session_id)
except Exception as e:
errors.append(str(e))
if errors:
raise CallError("\n".join(["Unable to terminate all sessions:"] + errors))
@no_auth_required
@accepts(
Int('ttl', default=600, null=True),
Dict('attrs', additional_attrs=True),
Bool('match_origin', default=False),
)
@returns(Str('token'))
@pass_app(rest=True)
def generate_token(self, app, ttl, attrs, match_origin):
"""
Generate a token to be used for authentication.
`ttl` stands for Time To Live, in seconds. The token will be invalidated if the connection
has been inactive for a time greater than this.
`attrs` is a general purpose object/dictionary to hold information about the token.
`match_origin` will only allow using this token from the same IP address or with the same user UID.
"""
if not app.authenticated:
raise CallError('Not authenticated', errno.EACCES)
if ttl is None:
ttl = 600
token = self.token_manager.create(
ttl,
attrs,
app.origin if match_origin else None,
app.authenticated_credentials,
app.session_id,
)
return token.token
@private
def get_token(self, token_id):
try:
return {
'attributes': self.token_manager.tokens[token_id].attributes,
}
except KeyError:
return None
@private
def get_token_for_action(self, token_id, origin, method, resource):
if (token := self.token_manager.get(token_id, origin)) is None:
return None
if token.attributes:
return None
if not token.parent_credentials.authorize(method, resource):
return None
return TokenSessionManagerCredentials(self.token_manager, token)
@private
def get_token_for_shell_application(self, token_id, origin):
if (token := self.token_manager.get(token_id, origin)) is None:
return None
if token.attributes:
return None
root_credentials = token.root_credentials()
if not isinstance(root_credentials, UserSessionManagerCredentials):
return None
if not root_credentials.user['privilege']['web_shell']:
return None
return {
'username': root_credentials.user['username'],
}
@no_auth_required
@api_method(AuthLegacyTwoFactorArgs, AuthLegacyResult)
async def two_factor_auth(self, username, password):
"""
Returns true if two-factor authorization is required for authorizing user's login.
"""
user_authenticated = await self.middleware.call('auth.authenticate_plain', username, password)
return user_authenticated and (
await self.middleware.call('auth.twofactor.config')
)['enabled'] and '2FA' in user_authenticated['account_attributes']
@cli_private
@no_auth_required
@api_method(AuthLegacyPasswordLoginArgs, AuthLegacyResult)
@pass_app()
async def login(self, app, username, password, otp_token):
"""
Authenticate session using username and password.
`otp_token` must be specified if two factor authentication is enabled.
"""
resp = await self.login_ex(app, {
'mechanism': AuthMech.PASSWORD_PLAIN,
'username': username,
'password': password,
'login_options': {'user_info': False},
})
match resp['response_type']:
case AuthResp.SUCCESS:
return True
case AuthResp.OTP_REQUIRED:
if otp_token is None:
return False
otp_resp = await self.login_ex(app, {
'mechanism': AuthMech.OTP_TOKEN.name,
'otp_token': otp_token
})
return otp_resp['response_type'] == AuthResp.SUCCESS
case _:
return False
@private
async def set_authenticator_assurance_level(self, level: str):
"""
This method is for CI tests. Currently we only support AA_LEVEL_1.
See NIST SP 800-63B Section 4:
https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-63b.pdf
"""
self.logger.warning('Setting AAL to %s', level)
match level:
case 'LEVEL_1':
level = AA_LEVEL1
case 'LEVEL_2':
level = AA_LEVEL2
case 'LEVEL_3':
level = AA_LEVEL3
case _:
raise CallError(f'{level}: unknown authenticator assurance level')
CURRENT_AAL.level = level
@private
async def check_auth_mechanism(
self,
app,
mechanism: AuthMech,
auth_ctx: AuthenticationContext,
level: AuthenticatorAssuranceLevel
) -> None:
# The current session may be in the middle of a challenge-response conversation
# and so we need to validate that what we received from client was expected
# next message.
if auth_ctx.next_mech and mechanism is not auth_ctx.next_mech:
expected = auth_ctx.auth_data['user']['username']
self.logger.debug('%s: received auth mechanism for user %s while expecting next auth mechanism: %s',
mechanism, expected, auth_ctx.next_mech)
expected = auth_ctx.auth_data['user']['username']
if auth_ctx.next_mech is AuthMech.OTP_TOKEN:
errmsg = (
'Abandoning login attempt after being presented wtih '
'requirement for second factor for authentication.'
)
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'LOGIN_TWOFACTOR',
'credentials_data': {
'username': expected,
},
},
'error': errmsg
}, False)
# Discard in-progress auth attempt
auth_ctx.next_mech = None
auth_ctx.auth_data = None
# OTP tokens are only permitted when prompted
if auth_ctx.next_mech is None and mechanism == AuthMech.OTP_TOKEN.name:
raise CallError(f'{mechanism}: no authentication in progress', errno.EINVAL)
# Verify that auth mechanism is permitted under authenticator assurance level
if not aal_auth_mechanism_check(mechanism, level):
# Per NIST SP 800-63B only permitted authenticator types may be used
raise CallError(
f'{mechanism}: mechanism is not supported at current authenticator level.',
errno.EOPNOTSUPP
)
@no_auth_required
@api_method(AuthMechChoicesArgs, AuthMechChoicesResult)
async def mechanism_choices(self) -> list:
""" Get list of available authentication mechanisms available for auth.login_ex """
aal = CURRENT_AAL.level
return [mech.name for mech in aal.mechanisms]
@cli_private
@no_auth_required
@api_method(AuthLoginExContinueArgs, AuthLoginExResult)
@pass_app()
async def login_ex_continue(self, app, data):
"""
Continue in-progress authentication attempt. This endpoint should be
called to continue an auth.login_ex attempt that returned OTP_REQUIRED.
This is a convenience wrapper around auth.login_ex for API consumers.
params:
mechanism: the mechanism by which to continue authentication.
Currently the only supported mechanism here is OTP_TOKEN.
OTP_TOKEN
otp_token: one-time password token. This is only permitted if
a previous auth.login_ex call responded with "OTP_REQUIRED".
returns:
JSON object containing the following keys:
`response_type` - will be one of the following:
SUCCESS - continued auth was required
OTP_REQUIRED - otp token was rejected. API consumer may call this
endpoint again with correct OTP token.
AUTH_ERR - invalid OTP token submitted too many times.
"""
return await self.login_ex(app, data)
@cli_private
@no_auth_required
@api_method(AuthLoginExArgs, AuthLoginExResult)
@pass_app()
async def login_ex(self, app, data):
"""
Authenticate using one of a variety of mechanisms
NOTE: mechanisms with a _PLAIN suffix indicate that they involve
passing plain-text passwords or password-equivalent strings and
should not be used on untrusted / insecure transport. Available
mechanisms will be expanded in future releases.
params:
This takes a single argument consistning of a JSON object with the
following keys:
mechanism: the mechanism by which to authenticate to the backend
the exact parameters to use vary by mechanism and are described
below
PASSWORD_PLAIN
username: username with which to authenticate
password: password with which to authenticate
login_options: dictionary with additional authentication options
API_KEY_PLAIN
username: username with which to authenticate
api_key: API key string
login_options: dictionary with additional authentication options
AUTH_TOKEN_PLAIN
token: authentication token string
login_options: dictionary with additional authentication options
OTP_TOKEN
otp_token: one-time password token. This is only permitted if
a previous auth.login_ex call responded with "OTP_REQUIRED".
login_options
user_info: boolean - include auth.me output in successful responses.
raises:
CallError: a middleware CallError may be raised in the following
circumstances.
* An multistep challenge-response authentication mechanism is being
used and the specified `mechanism` does not match the expected
next step for authentication. In this case the errno will be set
to EBUSY.
* OTP_TOKEN mechanism was passed without an explicit request from
a previous authentication step. In this case the errno will be set
to EINVAL.
* Current authenticator assurance level prohibits the use of the
specified authentication mechanism. In this case the errno will be
set to EOPNOTSUPP.
returns:
JSON object containing the following keys:
response_type: string indicating the results of the current authentication
mechanism. This is used to inform client of nature of authentication
error or whether further action will be required in order to complete
authentication.
<additional keys per response_type>
Notes about response types:
SUCCESS:
additional key:
user_info: includes auth.me output for the resulting authenticated
credentials.
OTP_REQUIRED
additional key:
username: normalized username of user who must provide an OTP token.
AUTH_ERR
Generic authentication error corresponds to PAM_AUTH_ERR and PAM_USER_UNKOWN
from libpam. This may be returned if the account does not exist or if the
credential is incorrect.
EXPIRED
The specified credential is expired and not suitable for authentication.
"""
mechanism = AuthMech[data['mechanism']]
auth_ctx = app.authentication_context
login_fn = self.session_manager.login
response = {'response_type': AuthResp.AUTH_ERR}
await self.check_auth_mechanism(app, mechanism, auth_ctx, CURRENT_AAL.level)
match mechanism:
case AuthMech.PASSWORD_PLAIN:
# Both of these mechanisms are de-factor username + password
# combinations and pass through libpam.
resp = await self.get_login_user(
app,
data['username'],
data['password'],
mechanism
)
if resp['otp_required']:
# A one-time password is required for this user account and so
# we should request it from API client.
auth_ctx.next_mech = AuthMech.OTP_TOKEN
auth_ctx.auth_data = {'cnt': 0, 'user': resp['user_data']}
return {
'response_type': AuthResp.OTP_REQUIRED,
'username': resp['user_data']['username']
}
elif CURRENT_AAL.level.otp_mandatory:
if resp['pam_response'] == 'SUCCESS':
# Insert a failure delay so that we don't leak information about
# the PAM response
await asyncio.sleep(random.uniform(1, 2))
raise CallError(
'Two-factor authentication is requried at the current authenticator level.',
errno.EOPNOTSUPP
)
match resp['pam_response']['code']:
case pam.PAM_SUCCESS:
cred = LoginPasswordSessionManagerCredentials(resp['user_data'], CURRENT_AAL.level)
await login_fn(app, cred)
case pam.PAM_AUTH_ERR:
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'LOGIN_PASSWORD',
'credentials_data': {'username': data['username']},
},
'error': 'Bad username or password'
}, False)
case _:
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'LOGIN_PASSWORD',
'credentials_data': {'username': data['username']},
},
'error': resp['pam_response']['reason']
}, False)
case AuthMech.API_KEY_PLAIN:
# API key that we receive over wire is concatenation of the
# datastore `id` of the particular key with the key itself,
# delimited by a dash. <id>-<key>.
resp = await self.get_login_user(
app,
data['username'],
data['api_key'],
mechanism
)
if resp['pam_response']['code'] == pam.PAM_AUTHINFO_UNAVAIL:
# This is a special error code that means we need to
# etc.generate because we somehow got garbage in the file.
# It should not happen, but we must try to recover.
self.logger.warning('API key backend has errors that require regenerating its file.')
await self.middleware.call('etc.generate', 'pam_middleware')
# We've exhausted steps we can take, so we'll take the
# response to second request as authoritative
resp = await self.get_login_user(
app,
data['username'],
data['api_key'],
mechanism
)
# Retrieve the API key here so that we can upgrade the underlying
# hash type and iterations if needed (since we have plain-text).
# We also need the key info so that we can generate a useful
# audit entry in case of failure.
try:
key_id = int(data['api_key'].split('-')[0])
key = await self.middleware.call(
'api_key.query', [['id', '=', key_id]],
{'get': True, 'select': ['id', 'name', 'keyhash', 'expired']}
)
thehash = key.pop('keyhash')
except Exception:
key = None
if resp['pam_response']['code'] == pam.PAM_CRED_EXPIRED:
# Give more precise reason for login failure for audit trails
# because we need to differentiate between key and account
# being expired.
resp['pam_response']['reason'] = 'Api key is expired.'
if resp['pam_response']['code'] == pam.PAM_SUCCESS:
if thehash.startswith('$pbkdf2-sha256'):
# Legacy API key with insufficient iterations. Since we
# know that the plain-text we have here is correct, we can
# use it to update the hash in backend.
await self.middleware.call('api_key.update_hash', data['api_key'])
cred = ApiKeySessionManagerCredentials(resp['user_data'], key, CURRENT_AAL.level)
await login_fn(app, cred)
else:
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'API_KEY',
'credentials_data': {
'username': data['username'],
'api_key': key,
}
},
'error': resp['pam_response']['reason'],
}, False)
case AuthMech.OTP_TOKEN:
# We've received a one-time password token based in response to our
# response to an earlier authentication attempt. This means our auth
# context has user information. We don't re-request username from the
# client as this would open possibility of user trivially bypassing
# 2FA.
otp_ok = await self.middleware.call(
'user.verify_twofactor_token',
auth_ctx.auth_data['user']['username'],
data['otp_token'],
)
resp = {
'pam_response': {
'code': pam.PAM_SUCCESS if otp_ok else pam.PAM_AUTH_ERR,
'reason': None
}
}
# get reference to auth data
auth_data = auth_ctx.auth_data
# reset the auth_ctx state
auth_ctx.next_mech = None
auth_ctx.auth_data = None
if otp_ok:
# Per feedback to NEP-053 it was decided to only request second
# factor for password-based logins (not user-linked API keys).
# Hence we don't have to worry about whether this is based on
# an API key.
cred = LoginTwofactorSessionManagerCredentials(auth_data['user'], CURRENT_AAL.level)
await login_fn(app, cred)
else:
# Add a sleep like pam_delay() would add for pam_oath
await asyncio.sleep(random.uniform(1, 2))
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'LOGIN_TWOFACTOR',
'credentials_data': {
'username': auth_data['user']['username'],
},
},
'error': 'One-time token validation failed.'
}, False)
# Give the user a few attempts to recover a fat-fingered OTP cred
if auth_data['cnt'] < MAX_OTP_ATTEMPTS:
auth_data['cnt'] += 1
auth_ctx.auth_data = auth_data
auth_ctx.next_mech = AuthMech.OTP_TOKEN
return {
'response_type': AuthResp.OTP_REQUIRED,
'username': auth_data['user']['username']
}
case AuthMech.TOKEN_PLAIN:
# We've received a authentication token that _should_ have been
# generated by `auth.generate_token`. For consistency with other
# authentication methods a failure delay has been added, but this
# may be removed more safely than for other authentication methods
# since the tokens are short-lived.
token_str = data['token']
token = self.token_manager.get(token_str, app.origin)
if token is None:
await asyncio.sleep(random.uniform(1, 2))
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'TOKEN',
'credentials_data': {
'token': token_str,
}
},
'error': 'Invalid token',
}, False)
return response
if token.attributes:
await asyncio.sleep(random.uniform(1, 2))
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'TOKEN',
'credentials_data': {
'token': token.token,
}
},
'error': 'Bad token',
}, False)
return response
cred = TokenSessionManagerCredentials(self.token_manager, token)
await login_fn(app, cred)
resp = {
'pam_response': {
'code': pam.PAM_SUCCESS,
'reason': None
}
}
case _:
# This shouldn't happen so we'll log it and raise a call error
self.logger.error('%s: unexpected authentication mechanism', mechanism)
raise CallError(f'{mechanism}: unexpected authentication mechanism')
match resp['pam_response']['code']:
case pam.PAM_SUCCESS:
response['response_type'] = AuthResp.SUCCESS
if data['login_options']['user_info']:
response['user_info'] = await self.me(app)
else:
response['user_info'] = None
case pam.PAM_AUTH_ERR | pam.PAM_USER_UNKNOWN:
# We have to squash AUTH_ERR and USER_UNKNOWN into a generic response
# to prevent unauthenticated remote clients from guessing valid usernames.
response['response_type'] = AuthResp.AUTH_ERR
case pam.PAM_ACCT_EXPIRED | pam.PAM_NEW_AUTHTOK_REQD | pam.PAM_CRED_EXPIRED:
response['response_type'] = AuthResp.EXPIRED.name
case _:
# This is unexpected and so we should generate a debug message
# so that we can better handle in the future.
self.logger.debug(
'%s: unexpected response code [%d] to authentication request',
mechanism, resp['pam_response']['code']
)
response['response_type'] = AuthResp.AUTH_ERR
return response
@private
@pass_app()
async def get_login_user(self, app, username, password, mechanism):
"""
This is a private endpoint that performs the actual validation of username/password
combination and returns user information and whether additional OTP is required.
"""
otp_required = False
resp = await self.middleware.call(
'auth.authenticate_plain',
username, password,
mechanism == AuthMech.API_KEY_PLAIN,
app=app
)
if mechanism == AuthMech.PASSWORD_PLAIN and resp['pam_response']['code'] == pam.PAM_SUCCESS:
twofactor_auth = await self.middleware.call('auth.twofactor.config')
if twofactor_auth['enabled'] and '2FA' in resp['user_data']['account_attributes']:
otp_required = True
return resp | {'otp_required': otp_required}
@cli_private
@no_auth_required
@api_method(AuthLegacyApiKeyLoginArgs, AuthLegacyResult)
@pass_app()
async def login_with_api_key(self, app, api_key):
"""
Authenticate session using API Key.
"""
try:
key_id = int(api_key.split('-')[0])
key_entry = await self.middleware.call('api_key.query', [['id', '=', key_id]])
except Exception:
key_entry = None
if not key_entry:
await asyncio.sleep(random.uniform(1, 2))
await self.middleware.log_audit_message(app, 'AUTHENTICATION', {
'credentials': {
'credentials': 'API_KEY',
'credentials_data': {
'username': None,
'api_key': api_key,
}
},
'error': 'Invalid API key'
}, False)
return False
resp = await self.login_ex(app, {
'mechanism': AuthMech.API_KEY_PLAIN,
'username': key_entry[0]['username'],
'api_key': api_key,
'login_options': {'user_info': False},
})
return resp['response_type'] == AuthResp.SUCCESS
@cli_private
@no_auth_required
@api_method(AuthLegacyTokenLoginArgs, AuthLegacyResult)
@pass_app()
async def login_with_token(self, app, token_str):
"""
Authenticate session using token generated with `auth.generate_token`.
"""
resp = await self.login_ex(app, {
'mechanism': AuthMech.TOKEN_PLAIN,
'token': token_str,
'login_options': {'user_info': False},
})
return resp['response_type'] == AuthResp.SUCCESS
@cli_private
@accepts()
@returns(Bool('successful_logout'))
@pass_app()
async def logout(self, app):
"""
Deauthenticates an app and if a token exists, removes that from the
session.
"""
await self.session_manager.logout(app)
return True
@no_authz_required
@api_method(AuthMeArgs, AuthMeResult)
@pass_app()
async def me(self, app):
"""
Returns currently logged-in user.
"""
user = await self._me(app)
if attr := await self._attributes(user):
attributes = attr['attributes']
else:
attributes = {}
try:
twofactor_config = await self.middleware.call('user.twofactor_config', user['pw_name'])
except Exception:
self.logger.error('%s: failed to look up 2fa details', exc_info=True)
twofactor_config = None
return {**user, 'attributes': attributes, 'two_factor_config': twofactor_config}
@no_authz_required
@accepts(
Str('key'),
Any('value'),
)
@returns()
@pass_app()
async def set_attribute(self, app, key, value):
"""
Set current user's `attributes` dictionary `key` to `value`.
e.g. Setting key="foo" value="var" will result in {"attributes": {"foo": "bar"}}
"""
user = await self._me(app)
async with self._attributes_lock:
if attrs := await self._attributes(user):
await self.middleware.call('datastore.update', 'account.bsdusers_webui_attribute', attrs['id'],
{'attributes': {**attrs['attributes'], key: value}})
else:
await self.middleware.call('datastore.insert', 'account.bsdusers_webui_attribute', {
'uid': user['pw_uid'],
'attributes': {key: value},
})
_attributes_lock = asyncio.Lock()
async def _me(self, app):
credentials = app.authenticated_credentials
if isinstance(credentials, TokenSessionManagerCredentials):
if root_credentials := credentials.token.root_credentials():
credentials = root_credentials
if not isinstance(credentials, UserSessionManagerCredentials):
raise CallError(f'You are logged in using {credentials.class_name()}')
username = credentials.user['username']
return {
**(await self.middleware.call('user.get_user_obj', {'username': username})),
'privilege': credentials.user['privilege'],
'account_attributes': credentials.user['account_attributes']
}
async def _attributes(self, user):
try:
return await self.middleware.call('datastore.query', 'account.bsdusers_webui_attribute',
[['uid', '=', user['pw_uid']]], {'get': True})
except MatchNotFound:
return None
async def check_permission(middleware, app):
    """Auto-authenticate connections arriving over the local unix socket."""
    origin = app.origin
    if origin is None or not origin.is_unix_family:
        return

    if origin.uid == 0:
        # Peer is root: grant the builtin root credentials outright.
        user = await middleware.call('auth.authenticate_root')
    else:
        try:
            row = await middleware.call(
                'datastore.query',
                'account.bsdusers',
                [['uid', '=', origin.uid]],
                {'get': True, 'prefix': 'bsdusr_', 'select': ['id', 'uid', 'username']},
            )
            user_info = row | {'local': True}
            query = {'username': user_info.pop('username')}
        except MatchNotFound:
            # Not a local account; fall back to a lookup by uid.
            query = {'uid': origin.uid}
            user_info = {'id': None, 'uid': None, 'local': False}

        user = await middleware.call('auth.authenticate_user', query, user_info, False)
        if user is None:
            return

    await AuthService.session_manager.login(app, UnixSocketSessionManagerCredentials(user))
def setup(middleware):
    # Announce the auth.sessions event stream and hook new connections so
    # local unix-socket peers are authenticated as they attach.
    middleware.event_register('auth.sessions', 'Notification of new and removed sessions.')
    middleware.register_hook('core.on_connect', check_permission)
| 41,486 | Python | .py | 894 | 32.940716 | 114 | 0.570973 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,485 | cache.py | truenas_middleware/src/middlewared/middlewared/plugins/cache.py | from middlewared.schema import Any, Str, Int, accepts
from middlewared.service import Service, private
from collections import namedtuple
import time
class CacheService(Service):
    """Simple in-process key/value cache with optional per-key expiry."""

    class Config:
        private = True

    def __init__(self, *args, **kwargs):
        super(CacheService, self).__init__(*args, **kwargs)
        self.__cache = {}
        # Each entry is (value, timeout) where timeout is an absolute
        # monotonic deadline, or 0 for "never expires".
        self.kv_tuple = namedtuple('Cache', ['value', 'timeout'])

    @accepts(Str('key'))
    def has_key(self, key):
        """
        Check if given `key` is in cache.
        """
        return key in self.__cache

    @accepts(Str('key'))
    def get(self, key):
        """
        Get `key` from cache.

        Raises:
            KeyError: not found in the cache
        """
        entry = self.__cache[key]
        if entry.timeout > 0:
            # May evict the entry and raise KeyError when it has expired.
            self.get_timeout(key)
        return self.__cache[key].value

    @accepts(Str('key'), Any('value'), Int('timeout', default=0))
    def put(self, key, value, timeout):
        """
        Put `key` of `value` in the cache.
        """
        # Convert the relative timeout to an absolute monotonic deadline;
        # 0 is kept as-is and means "no expiry".
        deadline = time.monotonic() + timeout if timeout != 0 else timeout
        self.__cache[key] = self.kv_tuple(value=value, timeout=deadline)

    @accepts(Str('key'))
    def pop(self, key):
        """
        Removes and returns `key` from cache.
        """
        entry = self.__cache.pop(key, None)
        return entry.value if entry is not None else None

    @private
    def get_timeout(self, key):
        """
        Check if 'key' has expired
        """
        _value, deadline = self.__cache[key]
        if time.monotonic() >= deadline:
            # Bust the cache
            del self.__cache[key]
            raise KeyError(f'{key} has expired')

    @private
    def get_or_put(self, key, timeout, method):
        # Return the cached value, computing and storing it on a miss/expiry.
        try:
            return self.get(key)
        except KeyError:
            value = method()
            self.put(key, value, timeout)
            return value
| 1,990 | Python | .py | 64 | 22.59375 | 65 | 0.548269 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,486 | activedirectory.py | truenas_middleware/src/middlewared/middlewared/plugins/activedirectory.py | import errno
import json
import ipaddress
import os
import contextlib
from middlewared.plugins.smb import SMBCmd
from middlewared.plugins.kerberos import krb5ccache
from middlewared.schema import (
accepts, Bool, Dict, Int, IPAddr, LDAP_DN, List, NetbiosName, Ref, returns, Str
)
from middlewared.service import job, private, ConfigService, ValidationError, ValidationErrors
from middlewared.service_exception import CallError, MatchNotFound
import middlewared.sqlalchemy as sa
from middlewared.utils import run
from middlewared.utils.directoryservices.constants import DomainJoinResponse, DSStatus, DSType
from middlewared.utils.directoryservices.krb5_error import KRB5ErrCode, KRB5Error
from middlewared.validators import Range
class ActiveDirectoryModel(sa.Model):
    # Single-row table holding Active Directory service configuration;
    # the 'ad_' column prefix is stripped by the datastore layer.
    __tablename__ = 'directoryservice_activedirectory'

    id = sa.Column(sa.Integer(), primary_key=True)
    ad_domainname = sa.Column(sa.String(120))
    ad_bindname = sa.Column(sa.String(120))
    ad_verbose_logging = sa.Column(sa.Boolean())
    ad_allow_trusted_doms = sa.Column(sa.Boolean())
    ad_use_default_domain = sa.Column(sa.Boolean())
    ad_allow_dns_updates = sa.Column(sa.Boolean())
    ad_disable_freenas_cache = sa.Column(sa.Boolean())
    ad_restrict_pam = sa.Column(sa.Boolean())
    ad_site = sa.Column(sa.String(120), nullable=True)
    ad_timeout = sa.Column(sa.Integer())
    ad_dns_timeout = sa.Column(sa.Integer())
    ad_nss_info = sa.Column(sa.String(120), nullable=True)
    ad_enable = sa.Column(sa.Boolean())
    # Deleting the referenced realm row nulls this out (SET NULL) rather
    # than cascading a delete of the AD configuration row.
    ad_kerberos_realm_id = sa.Column(sa.ForeignKey('directoryservice_kerberosrealm.id', ondelete='SET NULL'),
                                     index=True, nullable=True)
    ad_kerberos_principal = sa.Column(sa.String(255))
    ad_createcomputer = sa.Column(sa.String(255))
class ActiveDirectoryService(ConfigService):
    """
    Configuration service for joining and managing membership in an
    Active Directory domain. The accepted / returned schema is defined
    by ENTRY; field semantics are documented in the do_update docstring.
    """

    class Config:
        service = "activedirectory"
        datastore = 'directoryservice.activedirectory'
        datastore_extend = "activedirectory.ad_extend"
        datastore_prefix = "ad_"
        cli_namespace = "directory_service.activedirectory"
        role_prefix = "DIRECTORY_SERVICE"

    # Shared schema for config() / update() payloads.
    ENTRY = Dict(
        'activedirectory_update',
        Str('domainname', required=True),
        Str('bindname'),
        Str('bindpw', private=True),
        Bool('verbose_logging'),
        Bool('use_default_domain'),
        Bool('allow_trusted_doms'),
        Bool('allow_dns_updates'),
        Bool('disable_freenas_cache'),
        Bool('restrict_pam', default=False),
        Str('site', null=True),
        Int('kerberos_realm', null=True),
        Str('kerberos_principal', null=True),
        Int('timeout', default=60),
        Int('dns_timeout', default=10, validators=[Range(min_=5, max_=40)]),
        Str('nss_info', null=True, enum=['TEMPLATE', 'SFU', 'SFU20', 'RFC2307']),
        Str('createcomputer'),
        NetbiosName('netbiosname'),
        NetbiosName('netbiosname_b'),
        List('netbiosalias', items=[NetbiosName('alias')]),
        Bool('enable'),
        register=True
    )
@private
async def ad_extend(self, ad):
smb = await self.middleware.call('smb.config')
ad.update({
'netbiosname': smb['netbiosname_local'],
'netbiosalias': smb['netbiosalias']
})
if ad.get('nss_info'):
ad['nss_info'] = ad['nss_info'].upper()
else:
ad['nss_info'] = 'TEMPLATE'
if ad.get('kerberos_realm') and type(ad['kerberos_realm']) is dict:
ad['kerberos_realm'] = ad['kerberos_realm']['id']
return ad
@private
async def ad_compress(self, ad):
"""
Convert kerberos realm to id. Force domain to upper-case. Remove
foreign entries.
kinit will fail if domain name is lower-case.
"""
for key in ['netbiosname', 'netbiosname_b', 'netbiosalias', 'bindpw']:
if key in ad:
ad.pop(key)
if ad.get('nss_info'):
ad['nss_info'] = ad['nss_info'].upper()
return ad
    @accepts()
    @returns(Ref('nss_info_ad'))
    async def nss_info_choices(self):
        """
        Returns list of available LDAP schema (`nss_info`) choices for
        Active Directory.
        """
        return await self.middleware.call('directoryservices.nss_info_choices', 'ACTIVEDIRECTORY')
@private
async def update_netbios_data(self, old, new):
must_update = False
for key in ['netbiosname', 'netbiosalias']:
if key in new and old[key] != new[key]:
if old['enable']:
raise ValidationError(
f'activedirectory.{key}',
'NetBIOS names may not be changed while service is enabled.'
)
must_update = True
break
if not must_update:
return
await self.middleware.call('smb.update', {
'netbiosname': new['netbiosname'],
'netbiosalias': new['netbiosalias']
})
    @private
    async def common_validate(self, new, old, verrors):
        """
        Validation shared by do_update. Appends problems to `verrors`;
        the caller is responsible for calling verrors.check().
        """
        if new['enable']:
            try:
                # Best-effort check that our NetBIOS name is not already
                # registered in AD DNS by a different computer.
                if not (await self.middleware.call(
                    'activedirectory.netbiosname_is_ours',
                    new['netbiosname'], new['domainname'], new['dns_timeout'])
                ):
                    verrors.add(
                        'activedirectory_update.netbiosname',
                        f'NetBIOS name [{new["netbiosname"]}] appears to be in use by another computer in Active Directory DNS. '
                        'Further investigation and DNS corrections will be required prior to using the aforementioned name to '
                        'join Active Directory.'
                    )
            except CallError:
                # Lookup failure here is non-fatal; join-time checks catch real problems.
                pass

        if new['kerberos_realm'] and new['kerberos_realm'] != old['kerberos_realm']:
            realm = await self.middleware.call('kerberos.realm.query', [("id", "=", new['kerberos_realm'])])
            if not realm:
                verrors.add(
                    'activedirectory_update.kerberos_realm',
                    'Invalid Kerberos realm id. Realm does not exist.'
                )

        if not new["enable"]:
            # Remaining validation only applies when the service is being enabled.
            return

        if not await self.middleware.call('pool.query', [], {'count': True}):
            verrors.add(
                "activedirectory_update.enable",
                "Active Directory service may not be enabled before data pool is created."
            )

        ldap_enabled = (await self.middleware.call('ldap.config'))['enable']
        if ldap_enabled:
            verrors.add(
                "activedirectory_update.enable",
                "Active Directory service may not be enabled while LDAP service is enabled."
            )

        if new["enable"] and old["enable"] and new["kerberos_realm"] != old["kerberos_realm"]:
            verrors.add(
                "activedirectory_update.kerberos_realm",
                "Kerberos realm may not be altered while the AD service is enabled. "
                "This is to avoid introducing possible configuration errors that may result "
                "in a production outage."
            )

        if not new.get("bindpw") and not new["kerberos_principal"]:
            verrors.add(
                "activedirectory_update.bindname",
                "Bind credentials or kerberos keytab are required to join an AD domain."
            )

        if new.get("bindpw") and new["kerberos_principal"]:
            verrors.add(
                "activedirectory_update.kerberos_principal",
                "Simultaneous keytab and password authentication are not permitted."
            )

        if not new["domainname"]:
            verrors.add(
                "activedirectory_update.domainname",
                "AD domain name is required."
            )

        if new['allow_dns_updates']:
            ha_mode = await self.middleware.call('smb.get_smb_ha_mode')
            if ha_mode == 'UNIFIED':
                # On HA systems DNS validation is only meaningful on the active node.
                if await self.middleware.call('failover.status') != 'MASTER':
                    return

            smb = await self.middleware.call('smb.config')
            addresses = await self.middleware.call(
                'activedirectory.get_ipaddresses', new, smb, ha_mode
            )
            if not addresses:
                verrors.add(
                    'activedirectory_update.allow_dns_updates',
                    'No server IP addresses passed DNS validation. '
                    'This may indicate an improperly configured reverse zone. '
                    'Review middleware log files for details regarding errors encountered.',
                )

            # Reject address classes that must never be registered in AD DNS.
            for a in addresses:
                addr = ipaddress.ip_address(a)
                if addr.is_reserved:
                    verrors.add(
                        'activedirectory_update.allow_dns_updates',
                        f'{addr}: automatic DNS update would result in registering a reserved '
                        'IP address. Users may disable automatic DNS updates and manually '
                        'configure DNS A and AAAA records as needed for their domain.'
                    )

                if addr.is_loopback:
                    verrors.add(
                        'activedirectory_update.allow_dns_updates',
                        f'{addr}: automatic DNS update would result in registering a loopback '
                        'address. Users may disable automatic DNS updates and manually '
                        'configure DNS A and AAAA records as needed for their domain.'
                    )

                if addr.is_link_local:
                    verrors.add(
                        'activedirectory_update.allow_dns_updates',
                        f'{addr}: automatic DNS update would result in registering a link-local '
                        'address. Users may disable automatic DNS updates and manually '
                        'configure DNS A and AAAA records as needed for their domain.'
                    )

                if addr.is_multicast:
                    verrors.add(
                        'activedirectory_update.allow_dns_updates',
                        f'{addr}: automatic DNS update would result in registering a multicast '
                        'address. Users may disable automatic DNS updates and manually '
                        'configure DNS A and AAAA records as needed for their domain.'
                    )
    @accepts(Ref('activedirectory_update'), audit='Active directory configuration update')
    @returns(Ref('activedirectory_update'))
    @job(lock="AD_start_stop")
    async def do_update(self, job, data):
        """
        Update active directory configuration.

        `domainname` full DNS domain name of the Active Directory domain.

        `bindname` username used to perform the intial domain join.

        `bindpw` password used to perform the initial domain join. User-
        provided credentials are used to obtain a kerberos ticket, which
        is used to perform the actual domain join.

        `verbose_logging` increase logging during the domain join process.

        `use_default_domain` controls whether domain users and groups have
        the pre-windows 2000 domain name prepended to the user account. When
        enabled, the user appears as "administrator" rather than
        "EXAMPLE\\administrator"

        `allow_trusted_doms` enable support for trusted domains. If this
        parameter is enabled, then separate idmap backends _must_ be configured
        for each trusted domain, and the idmap cache should be cleared.

        `allow_dns_updates` during the domain join process, automatically
        generate DNS entries in the AD domain for the NAS. If this is disabled,
        then a domain administrator must manually add appropriate DNS entries
        for the NAS. This parameter is recommended for TrueNAS HA servers.

        `disable_freenas_cache` disables active caching of AD users and groups.
        When disabled, only users cached in winbind's internal cache are
        visible in GUI dropdowns. Disabling active caching is recommended
        in environments with a large amount of users.

        `site` AD site of which the NAS is a member. This parameter is auto-
        detected during the domain join process. If no AD site is configured
        for the subnet in which the NAS is configured, then this parameter
        appears as 'Default-First-Site-Name'. Auto-detection is only performed
        during the initial domain join.

        `kerberos_realm` in which the server is located. This parameter is
        automatically populated during the initial domain join. If the NAS has
        an AD site configured and that site has multiple kerberos servers, then
        the kerberos realm is automatically updated with a site-specific
        configuration to use those servers. Auto-detection is only performed
        during initial domain join.

        `kerberos_principal` kerberos principal to use for AD-related
        operations outside of Samba. After intial domain join, this field is
        updated with the kerberos principal associated with the AD machine
        account for the NAS.

        `nss_info` controls how Winbind retrieves Name Service Information to
        construct a user's home directory and login shell. This parameter
        is only effective if the Active Directory Domain Controller supports
        the Microsoft Services for Unix (SFU) LDAP schema.

        `timeout` timeout value for winbind-related operations. This value may
        need to be increased in environments with high latencies for
        communications with domain controllers or a large number of domain
        controllers. Lowering the value may cause status checks to fail.

        `dns_timeout` timeout value for DNS queries during the initial domain
        join. This value is also set as the NETWORK_TIMEOUT in the ldap config
        file.

        `createcomputer` Active Directory Organizational Unit in which new
        computer accounts are created.

        The OU string is read from top to bottom without RDNs. Slashes ("/")
        are used as delimiters, like `Computers/Servers/NAS`. The backslash
        ("\\") is used to escape characters but not as a separator. Backslashes
        are interpreted at multiple levels and might require doubling or even
        quadrupling to take effect.

        When this field is blank, new computer accounts are created in the
        Active Directory default OU.

        The Active Directory service is started after a configuration
        update if the service was initially disabled, and the updated
        configuration sets `enable` to `True`. The Active Directory
        service is stopped if `enable` is changed to `False`. If the
        configuration is updated, but the initial `enable` state is `True`, and
        remains unchanged, then the samba server is only restarted.

        During the domain join, a kerberos keytab for the newly-created AD
        machine account is generated. It is used for all future
        LDAP / AD interaction and the user-provided credentials are removed.
        """
        verrors = ValidationErrors()
        old = await self.config()
        new = old.copy()
        new.update(data)
        # kinit fails if the domain / realm name is lower-case.
        new['domainname'] = new['domainname'].upper()
        try:
            await self.update_netbios_data(old, new)
        except Exception as e:
            raise ValidationError('activedirectory_update.netbiosname', str(e))

        await self.common_validate(new, old, verrors)
        verrors.check()

        if new['enable']:
            if new['allow_trusted_doms'] and not await self.middleware.call('idmap.may_enable_trusted_domains'):
                raise ValidationError(
                    'activedirectory.allow_trusted_doms',
                    'Configuration for trusted domains requires that the idmap backend '
                    'be configured to handle these domains. There are two possible strategies to '
                    'achieve this. The first strategy is to use the AUTORID backend for the domain '
                    'to which TrueNAS is joined. The second strategy is to separately configure idmap '
                    'ranges for every domain that has a trust relationship with the domain to which '
                    'TrueNAS is joined and which has accounts that will be used on the TrueNAS server. '
                    'NOTE: the topic of how to properly map Windows SIDs to Unix IDs is complex and '
                    'may require consultation with administrators of other Unix servers in the '
                    'Active Directory domain to properly coordinate a comprehensive ID mapping strategy.'
                )

            if await self.middleware.call('failover.licensed'):
                if await self.middleware.call('systemdataset.is_boot_pool'):
                    raise ValidationError(
                        'activedirectory.enable',
                        'Active Directory may not be enabled while '
                        'system dataset is on the boot pool'
                    )

        if new['enable'] and old['enable']:
            # Only a small whitelist of parameters may be changed while joined.
            permitted_keys = [
                'verbose_logging',
                'use_default_domain',
                'allow_trusted_doms',
                'disable_freenas_cache',
                'restrict_pam',
                'timeout',
                'dns_timeout'
            ]
            for entry in old.keys():
                if entry not in new or entry in permitted_keys:
                    continue

                if new[entry] != old[entry]:
                    raise ValidationError(
                        f'activedirectory.{entry}',
                        'Parameter may not be changed while the Active Directory service is enabled.'
                    )
        elif new['enable'] and not old['enable']:
            """
            Currently run two health checks prior to validating domain.
            1) Attempt to kinit with user-provided credentials. This is used to
            verify that the credentials are correct.

            2) Check for an overly large time offset. System kerberos libraries
            may not report the time offset as an error during kinit, but the large
            time offset will prevent libads from using the ticket for the domain
            join.
            """
            try:
                domain_info = await self.domain_info(new['domainname'])
            except CallError as e:
                raise ValidationError('activedirectory.domainname', e.errmsg)

            if abs(domain_info['Server time offset']) > 180:
                raise ValidationError(
                    'activedirectory.domainname',
                    'Time offset from Active Directory domain exceeds maximum '
                    'permitted value. This may indicate an NTP misconfiguration.'
                )

            try:
                await self.middleware.call(
                    'activedirectory.check_nameservers',
                    new['domainname'],
                    new['site'],
                    new['dns_timeout']
                )
            except CallError as e:
                raise ValidationError(
                    'activedirectory.domainname',
                    e.errmsg
                )

            try:
                await self.validate_credentials(new, domain_info['KDC server'])
            except KRB5Error as e:
                # initially assume the validation error will be
                # about the actual password used
                if new['kerberos_principal']:
                    key = 'activedirectory.kerberos_principal'
                else:
                    key = 'activedirectory.bindpw'

                # Map the kerberos library error to an actionable message.
                match e.krb5_code:
                    case KRB5ErrCode.KRB5_LIBOS_CANTREADPWD:
                        if key == 'activedirectory.kerberos_principal':
                            msg = 'Kerberos keytab is no longer valid.'
                        else:
                            msg = f'Active Directory account password for user {new["bindname"]} is expired.'
                    case KRB5ErrCode.KRB5KDC_ERR_CLIENT_REVOKED:
                        msg = 'Active Directory account is locked.'
                    case KRB5ErrCode.KRB5_CC_NOTFOUND:
                        if key == 'activedirectory.kerberos_principal':
                            # When we kinit we try to regenerate keytab if the principal
                            # isn't present in it. If we hit this point it means that user
                            # has been tweaking the system-managed keytab in interesting ways.
                            choices = await self.middleware.call(
                                'kerberos.keytab.kerberos_principal_choices'
                            )
                            msg = (
                                'System keytab lacks an entry for the specified kerberos principal. '
                                f'Please select a valid kerberos principal from available choices: {", ".join(choices)}'
                            )
                        else:
                            # This error shouldn't occur if we're trying to get ticket
                            # with username + password combination
                            msg = str(e)
                    case KRB5ErrCode.KRB5KDC_ERR_POLICY:
                        msg = (
                            'Active Directory security policy rejected request to obtain kerberos ticket. '
                            'This may occur if the bind account has been configured to deny interactive '
                            'logons or require two-factor authentication. Depending on organizational '
                            'security policies, one may be required to pre-generate a kerberos keytab '
                            'and upload to TrueNAS server for use during join process.'
                        )
                    case KRB5ErrCode.KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN:
                        # We're dealing with a missing account
                        if key == "activedirectory.bindpw":
                            key = "activedirectory.bindname"

                        msg = (
                            'Client\'s credentials were not found on remote domain controller. The most '
                            'common reasons for the domain controller to return this response is due to a '
                            'typo in the service account name or the service or the computer account being '
                            'deleted from Active Directory.'
                        )
                    case KRB5ErrCode.KRB5KRB_AP_ERR_SKEW:
                        # Domain permitted clock skew may be more restrictive than our basic
                        # check of no greater than 3 minutes.
                        key = 'activedirectory.domainname'
                        msg = (
                            'The time offset between the TrueNAS server and the active directory domain '
                            'controller exceeds the maximum value permitted by the Active Directory '
                            'configuration. This may occur if NTP is improperly configured on the '
                            'TrueNAS server or if the hardware clock on the TrueNAS server is configured '
                            'for a local timezone instead of UTC.'
                        )
                    case KRB5ErrCode.KRB5KDC_ERR_PREAUTH_FAILED:
                        if new['kerberos_principal']:
                            msg = (
                                'Kerberos principal credentials are no longer valid. Rejoining active directory '
                                'may be required.'
                            )
                        else:
                            msg = 'Preauthentication failed. This typically indicates an incorrect bind password.'
                    case _:
                        # Catchall for more kerberos errors. We can expand if needed.
                        msg = str(e)

                raise ValidationError(key, msg)
            except CallError as e:
                # This may be an encapsulated GSSAPI library error
                if e.errno == errno.EINVAL:
                    # special errno set if GSSAPI BadName exception raised
                    if new['kerberos_principal']:
                        raise ValidationError('activedirectory.kerberos_principal', 'Not a valid principal name')
                    else:
                        raise ValidationError('activedirectory.bindname', 'Not a valid username')

                # No meaningful way to convert into a ValidationError, simply re-raise
                raise e from None
        elif not new['enable'] and new.get('bindpw'):
            raise ValidationError(
                'activedirectory.bindpw',
                'The Active Directory bind password is only used when enabling the active '
                'directory service for the first time and is not stored persistently. Therefore it '
                'is only valid when enabling the service.'
            )

        config = await self.ad_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore, new['id'], config, {'prefix': 'ad_'})
        await self.middleware.call('etc.generate', 'smb')

        if not old['enable'] and new['enable']:
            # Adopt the AD domain as the system DNS domain if still unset/default.
            ngc = await self.middleware.call('network.configuration.config')
            if not ngc['domain'] or ngc['domain'] == 'local':
                try:
                    await self.middleware.call(
                        'network.configuration.update',
                        {'domain': new['domainname']}
                    )
                except CallError:
                    self.logger.warning(
                        'Failed to update domain name in network configuration '
                        'to match active directory value of %s', new['domainname'], exc_info=True
                    )

            if not await self.middleware.call(
                'kerberos.check_ticket',
                {'ccache': krb5ccache.SYSTEM.name},
                False
            ):
                await self.middleware.call('kerberos.start')

            try:
                await self.__start(job)
            except Exception as e:
                # Roll back the enable flag so a failed join does not leave
                # the service half-configured on next boot.
                self.logger.error('Failed to start active directory service. Disabling.')
                await self.middleware.call(
                    'directoryservices.health.set_state',
                    DSType.AD.value, DSStatus.DISABLED.name
                )
                await self.middleware.call(
                    'datastore.update', self._config.datastore, new['id'],
                    {'enable': False}, {'prefix': 'ad_'}
                )
                raise e

        elif not new['enable'] and old['enable']:
            await self.__stop(job, new)

        elif new['enable'] and old['enable']:
            await self.middleware.call('service.restart', 'idmap')

        return await self.config()
@private
async def remove_privileges(self, domain_name):
"""
Remove any auto-granted domain privileges
"""
existing_privileges = await self.middleware.call(
'privilege.query',
[["name", "=", domain_name]]
)
if not existing_privileges:
return
await self.middleware.call('privilege.delete', existing_privileges[0]['id'])
    async def __start(self, job):
        """
        Start AD service. In 'UNIFIED' HA configuration, only start AD service
        on active storage controller.
        """
        await self.middleware.call('directoryservices.health.set_state', DSType.AD.value, DSStatus.JOINING.name)
        ad = await self.config()
        join_resp = await job.wrap(await self.middleware.call(
            'directoryservices.connection.join_domain', DSType.AD.value, ad['domainname']
        ))
        await self.middleware.call('directoryservices.health.set_state', DSType.AD.value, DSStatus.HEALTHY.name)

        # Populate the directory services (users/groups) cache via background job.
        cache_job_id = await self.middleware.call('directoryservices.connection.activate')
        await job.wrap(await self.middleware.call('core.job_wait', cache_job_id))

        if DomainJoinResponse(join_resp) is DomainJoinResponse.PERFORMED_JOIN:
            # Fresh join (not a rejoin of an existing account): set up NTP
            # against the DC and grant default domain privileges.
            await self.set_ntp_servers()
            await self.middleware.call('directoryservices.connection.grant_privileges', DSType.AD.value, ad['domainname'])

        await self.middleware.call('directoryservices.restart_dependent_services')
    async def __stop(self, job, config):
        """
        Disable AD and tear down dependent state in order: persist the
        disabled flag, stop kerberos, reconfigure SMB / PAM / NSS, mark the
        directory service disabled, and clear caches.
        """
        job.set_progress(0, 'Preparing to stop Active Directory service')
        await self.middleware.call(
            'datastore.update', self._config.datastore,
            config['id'], {'ad_enable': False}
        )
        await self.middleware.call('etc.generate', 'hostname')

        job.set_progress(10, 'Stopping kerberos service')
        await self.middleware.call('kerberos.stop')

        job.set_progress(20, 'Reconfiguring SMB.')
        await self.middleware.call('service.stop', 'cifs')
        await self.middleware.call('service.restart', 'idmap')

        job.set_progress(40, 'Reconfiguring pam and nss.')
        await self.middleware.call('etc.generate', 'pam')
        await self.middleware.call('etc.generate', 'nss')
        await self.middleware.call('directoryservices.health.set_state', DSType.AD.value, DSStatus.DISABLED.name)

        job.set_progress(60, 'clearing caches.')
        await self.middleware.call('directoryservices.cache.abort_refresh')
        await self.middleware.call('service.start', 'cifs')
        job.set_progress(100, 'Active Directory stop completed.')
@private
async def validate_credentials(self, ad=None, kdc=None):
"""
Kinit with user-provided credentials is sufficient to determine
whether the credentials are good. A testbind here is unnecessary.
"""
if await self.middleware.call(
'kerberos.check_ticket',
{'ccache': krb5ccache.SYSTEM.name},
False
):
# Short-circuit credential validation if we have a valid tgt
return
ad = ad or await self.config()
payload = {
'dstype': DSType.AD.value,
'conf': {
'bindname': ad.get('bindname', ''),
'bindpw': ad.get('bindpw', ''),
'domainname': ad['domainname'],
'kerberos_principal': ad['kerberos_principal'],
}
}
cred = await self.middleware.call('kerberos.get_cred', payload)
await self.middleware.call('kerberos.do_kinit', {
'krb5_cred': cred,
'kinit-options': {'kdc_override': {'domain': ad['domainname'], 'kdc': kdc}},
})
return
    @accepts(Str('domain', default=''), roles=['DIRECTORY_SERVICE_READ'])
    @returns(Dict(
        IPAddr('LDAP server'),
        Str('LDAP server name'),
        Str('Realm'),
        LDAP_DN('Bind Path'),
        Int('LDAP port'),
        Int('Server time'),
        IPAddr('KDC server'),
        Int('Server time offset'),
        Int('Last machine account password change')
    ))
    async def domain_info(self, domain):
        """
        Returns the following information about the currently joined domain:

        `LDAP server` IP address of current LDAP server to which TrueNAS is connected.

        `LDAP server name` DNS name of LDAP server to which TrueNAS is connected

        `Realm` Kerberos realm

        `LDAP port`

        `Server time` timestamp.

        `KDC server` Kerberos KDC to which TrueNAS is connected

        `Server time offset` current time offset from DC.

        `Last machine account password change`. timestamp
        """
        if domain:
            # Query an explicit DC for the named domain rather than the joined one.
            cmd = [SMBCmd.NET.value, '-S', domain, '--json', '--option', f'realm={domain}', 'ads', 'info']
        else:
            cmd = [SMBCmd.NET.value, '--json', 'ads', 'info']

        netads = await self.cache_flush_retry(cmd)
        if netads.returncode != 0:
            err_msg = netads.stderr.decode().strip()
            # Exact string emitted by samba's net utility on discovery failure.
            if err_msg == "Didn't find the ldap server!":
                raise CallError(
                    'Failed to discover Active Directory Domain Controller '
                    'for domain. This may indicate a DNS misconfiguration.',
                    errno.ENOENT
                )

            raise CallError(netads.stderr.decode())

        return json.loads(netads.stdout.decode())
@private
async def set_ntp_servers(self):
"""
Appropriate time sources are a requirement for an AD environment. By default kerberos authentication
fails if there is more than a 5 minute time difference between the AD domain and the member server.
"""
ntp_servers = await self.middleware.call('system.ntpserver.query')
ntp_pool = 'debian.pool.ntp.org'
default_ntp_servers = list(filter(lambda x: ntp_pool in x['address'], ntp_servers))
if len(ntp_servers) != 3 or len(default_ntp_servers) != 3:
return
try:
dc_info = await self.lookup_dc()
except CallError:
self.logger.warning("Failed to automatically set time source.", exc_info=True)
return
if not dc_info['Flags']['Is running time services']:
return
dc_name = dc_info["Information for Domain Controller"]
try:
await self.middleware.call('system.ntpserver.create', {'address': dc_name, 'prefer': True})
except Exception:
self.logger.warning('Failed to configure NTP for the Active Directory domain. Additional '
'manual configuration may be required to ensure consistent time offset, '
'which is required for a stable domain join.', exc_info=True)
return
@private
async def cache_flush_retry(self, cmd, retry=True):
rv = await run(cmd, check=False)
if rv.returncode != 0 and retry:
await self.middleware.call('idmap.gencache.flush')
return await self.cache_flush_retry(cmd, False)
return rv
@private
async def lookup_dc(self, domain=None):
if domain is None:
domain = (await self.config())['domainname']
lookup = await self.cache_flush_retry([SMBCmd.NET.value, '--json', '-S', domain, '--realm', domain, 'ads', 'lookup'])
if lookup.returncode != 0:
raise CallError("Failed to look up Domain Controller information: "
f"{lookup.stderr.decode().strip()}")
out = json.loads(lookup.stdout.decode())
return out
@accepts(Ref('kerberos_username_password'), roles=['DIRECTORY_SERVICE_WRITE'], audit='Active directory leave')
@returns()
@job(lock="AD_start_stop")
async def leave(self, job, data):
"""
Leave Active Directory domain. This will remove computer
object from AD and clear relevant configuration data from
the NAS.
This requires credentials for appropriately-privileged user.
Credentials are used to obtain a kerberos ticket, which is
used to perform the actual removal from the domain.
"""
ad = await self.config()
if not ad['domainname']:
raise CallError('Active Directory domain name present in configuration.')
ad['bindname'] = data.get("username", "")
ad['bindpw'] = data.get("password", "")
ad['kerberos_principal'] = ''
payload = {
'dstype': DSType.AD.value,
'conf': {
'bindname': data.get('username', ''),
'bindpw': data.get('password', ''),
'domainname': ad['domainname'],
'kerberos_principal': '',
}
}
try:
await self.remove_privileges(ad['domainname'])
except Exception:
self.logger.warning('Failed to remove Domain Admins privileges', exc_info=True)
job.set_progress(5, 'Obtaining kerberos ticket for privileged user.')
cred = await self.middleware.call('kerberos.get_cred', payload)
await self.middleware.call('kerberos.do_kinit', {'krb5_cred': cred})
job.set_progress(10, 'Leaving Active Directory domain.')
await job.wrap(await self.middleware.call('directoryservices.connection.leave_domain', DSType.AD.value, ad['domainname']))
job.set_progress(15, 'Removing DNS entries')
await self.middleware.call('activedirectory.unregister_dns', ad)
job.set_progress(20, 'Removing kerberos keytab and realm.')
krb_princ = await self.middleware.call(
'kerberos.keytab.query',
[('name', '=', 'AD_MACHINE_ACCOUNT')]
)
if krb_princ:
await self.middleware.call(
'datastore.delete', 'directoryservice.kerberoskeytab', krb_princ[0]['id']
)
if ad['kerberos_realm']:
try:
await self.middleware.call(
'datastore.delete', 'directoryservice.kerberosrealm', ad['kerberos_realm']
)
except MatchNotFound:
pass
try:
await self.middleware.call("directoryservices.secrets.backup")
except Exception:
self.logger.debug("Failed to remove stale secrets entries.", exc_info=True)
job.set_progress(30, 'Clearing local Active Directory settings.')
payload = {
'enable': False,
'site': None,
'bindname': '',
'kerberos_realm': None,
'kerberos_principal': '',
'domainname': '',
}
await self.middleware.call(
'datastore.update', self._config.datastore,
ad['id'], payload, {'prefix': 'ad_'}
)
await self.middleware.call('directoryservices.health.set_state', DSType.AD.value, DSStatus.DISABLED.name)
job.set_progress(40, 'Flushing caches.')
try:
await self.middleware.call('idmap.gencache.flush')
except Exception:
self.logger.warning("Failed to flush cache after leaving Active Directory.", exc_info=True)
with contextlib.suppress(FileNotFoundError):
os.unlink('/etc/krb5.keytab')
job.set_progress(50, 'Clearing kerberos configuration and ticket.')
await self.middleware.call('kerberos.stop')
job.set_progress(60, 'Regenerating configuration.')
await self.middleware.call('etc.generate', 'pam')
await self.middleware.call('etc.generate', 'nss')
await self.middleware.call('etc.generate', 'smb')
job.set_progress(60, 'Restarting services.')
await self.middleware.call('service.restart', 'cifs')
await self.middleware.call('service.restart', 'idmap')
job.set_progress(100, 'Successfully left activedirectory domain.')
return
| 39,346 | Python | .py | 760 | 38.123684 | 130 | 0.59127 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,487 | snapshot.py | truenas_middleware/src/middlewared/middlewared/plugins/snapshot.py | from datetime import datetime, time, timedelta
import os
from middlewared.common.attachment import FSAttachmentDelegate
from middlewared.schema import accepts, returns, Bool, Cron, Dataset, Dict, Int, List, Patch, Str
from middlewared.service import CallError, CRUDService, item_method, private, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.cron import croniter_for_schedule
from middlewared.utils.path import is_child
from middlewared.validators import ReplicationSnapshotNamingSchema
class PeriodicSnapshotTaskModel(sa.Model):
    # Table backing pool.snapshottask entries; the 'task_' column prefix is
    # stripped by the datastore layer.
    __tablename__ = 'storage_task'

    id = sa.Column(sa.Integer(), primary_key=True)
    task_dataset = sa.Column(sa.String(150))
    task_recursive = sa.Column(sa.Boolean(), default=False)
    # Snapshot retention expressed as value + unit (e.g. 2 WEEK).
    task_lifetime_value = sa.Column(sa.Integer(), default=2)
    task_lifetime_unit = sa.Column(sa.String(120), default='WEEK')
    # Daily window within which the cron schedule is allowed to fire.
    task_begin = sa.Column(sa.Time(), default=time(hour=9))
    task_end = sa.Column(sa.Time(), default=time(hour=18))
    task_enabled = sa.Column(sa.Boolean(), default=True)
    task_exclude = sa.Column(sa.JSON(list))
    task_naming_schema = sa.Column(sa.String(150), default='auto-%Y-%m-%d_%H-%M')
    # Cron schedule fields (crontab syntax).
    task_minute = sa.Column(sa.String(100), default="00")
    task_hour = sa.Column(sa.String(100), default="*")
    task_daymonth = sa.Column(sa.String(100), default="*")
    task_month = sa.Column(sa.String(100), default='*')
    task_dayweek = sa.Column(sa.String(100), default="*")
    task_allow_empty = sa.Column(sa.Boolean(), default=True)
class PeriodicSnapshotTaskService(CRUDService):
    """CRUD service for periodic snapshot tasks (`pool.snapshottask.*`)."""

    class Config:
        datastore = 'storage.task'
        datastore_prefix = 'task_'
        datastore_extend = 'pool.snapshottask.extend'
        datastore_extend_context = 'pool.snapshottask.extend_context'
        namespace = 'pool.snapshottask'
        cli_namespace = 'task.snapshot'

    @private
    async def extend_context(self, rows, extra):
        # Fetched once per query; `extend` reuses this context for every row
        # instead of issuing two middleware calls per task.
        return {
            'state': await self.middleware.call('zettarepl.get_state'),
            'vmware': await self.middleware.call('vmware.query'),
        }

    @private
    async def extend(self, data, context):
        Cron.convert_db_format_to_schedule(data, begin_end=True)
        # `vmware_sync` flags tasks whose dataset (or, for recursive tasks, any
        # descendant dataset) has a VMware snapshot integration configured.
        data['vmware_sync'] = any(
            (
                vmware['filesystem'] == data['dataset'] or
                (data['recursive'] and is_child(vmware['filesystem'], data['dataset']))
            )
            for vmware in context['vmware']
        )
        # Expose zettarepl's per-task runtime state; fall back to PENDING when
        # zettarepl has not reported on this task yet.
        if 'error' in context['state']:
            data['state'] = context['state']['error']
        else:
            data['state'] = context['state']['tasks'].get(f'periodic_snapshot_task_{data["id"]}', {
                'state': 'PENDING',
            })
        return data

    @accepts(
        Dict(
            'periodic_snapshot_create',
            Dataset('dataset', required=True),
            Bool('recursive', required=True),
            List('exclude', items=[Dataset('item')]),
            Int('lifetime_value', required=True),
            Str('lifetime_unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'], required=True),
            Str('naming_schema', required=True, validators=[ReplicationSnapshotNamingSchema()]),
            Cron(
                'schedule',
                defaults={
                    'minute': '00',
                    'begin': '00:00',
                    'end': '23:59',
                },
                required=True,
                begin_end=True
            ),
            Bool('allow_empty', default=True),
            Bool('enabled', default=True),
            register=True
        ),
        audit='Snapshot task create:',
        audit_extended=lambda data: data['dataset']
    )
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        or zvols from the snapshot.

        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        If multiple periodic tasks create snapshots at the same time (for example hourly and daily at 00:00) the snapshot
        will be kept until the last of these tasks reaches its expiry time.

        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """
        verrors = ValidationErrors()
        verrors.add_child('periodic_snapshot_create', await self._validate(data))
        verrors.check()

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert',
            self._config.datastore,
            data,
            {'prefix': self._config.datastore_prefix}
        )

        # Let zettarepl pick up the new task
        await self.middleware.call('zettarepl.update_tasks')

        return await self.get_instance(data['id'])

    @accepts(
        Int('id', required=True),
        Patch(
            'periodic_snapshot_create',
            'periodic_snapshot_update',
            ('add', {'name': 'fixate_removal_date', 'type': 'bool'}),
            ('attr', {'update': True})
        ),
        audit='Snapshot task update:',
        audit_callback=True,
    )
    async def do_update(self, audit_callback, id_, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """
        # `fixate_removal_date` is an update-only option, not a persisted field
        fixate_removal_date = data.pop('fixate_removal_date', False)
        old = await self.get_instance(id_)
        audit_callback(old['dataset'])
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child('periodic_snapshot_update', await self._validate(new))

        if not new['enabled']:
            # Refuse to disable a task that feeds an enabled replication task:
            # doing so would silently break that replication.
            for replication_task in await self.middleware.call('replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id_
                       for periodic_snapshot_task in replication_task['periodic_snapshot_tasks']):
                    verrors.add(
                        'periodic_snapshot_update.enabled',
                        (f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                         f'task {replication_task["id"]!r}')
                    )
                    break

        verrors.check()

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        # Computed fields added by `extend` must not be written back
        for key in ('vmware_sync', 'state'):
            new.pop(key, None)

        will_change_retention_for = None
        if fixate_removal_date:
            will_change_retention_for = await self.middleware.call(
                'pool.snapshottask.update_will_change_retention_for', id_, data,
            )

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            id_,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        if will_change_retention_for:
            # NOTE(review): appears to pin removal dates (under the old settings)
            # for snapshots whose retention would otherwise change — confirm
            # against `pool.snapshottask.fixate_removal_date`.
            await self.middleware.call('pool.snapshottask.fixate_removal_date', will_change_retention_for, old)

        await self.middleware.call('zettarepl.update_tasks')

        return await self.get_instance(id_)

    @accepts(
        Int('id'),
        Dict(
            'options',
            Bool('fixate_removal_date', default=False),
        ),
        audit='Snapshot task delete:',
        audit_callback=True,
    )
    async def do_delete(self, audit_callback, id_, options):
        """
        Delete a Periodic Snapshot Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.delete",
                "params": [
                    1
                ]
            }
        """
        if (task := await self.query([['id','=', id_]])):
            audit_callback(task[0]['dataset'])
        else:
            audit_callback(f'Task id {id_} not found')

        # Refuse to delete the last snapshot task feeding an enabled PUSH
        # replication task that has no alternate naming schemas to rely on.
        for replication_task in await self.middleware.call('replication.query', [
            ['direction', '=', 'PUSH'],
            ['also_include_naming_schema', '=', []],
            ['enabled', '=', True],
        ]):
            if len(replication_task['periodic_snapshot_tasks']) == 1:
                if replication_task['periodic_snapshot_tasks'][0]['id'] == id_:
                    raise CallError(
                        f'You are deleting the last periodic snapshot task bound to enabled replication task '
                        f'{replication_task["name"]!r} which will break it. Please, disable that replication task '
                        f'first.',
                    )

        if options['fixate_removal_date']:
            will_change_retention_for = await self.middleware.call(
                'pool.snapshottask.delete_will_change_retention_for', id_
            )

            if will_change_retention_for:
                task = await self.get_instance(id_)
                await self.middleware.call('pool.snapshottask.fixate_removal_date', will_change_retention_for, task)

        response = await self.middleware.call(
            'datastore.delete',
            self._config.datastore,
            id_
        )

        await self.middleware.call('zettarepl.update_tasks')

        return response

    @accepts()
    @returns(Int())
    def max_count(self):
        """
        Returns a maximum amount of snapshots (per-dataset) the system can sustain.
        """
        # There is a limit to how many snapshots Windows will present to users through File Explorer. If we respond
        # with too many, then File Explorer will show no snapshots available.
        return 512

    @accepts()
    @returns(Int())
    def max_total_count(self):
        """
        Returns a maximum amount of snapshots (total) the system can sustain.
        """
        # Having too many snapshots results in various performance complications (mainly, when listing them).
        # This is a random round number that is large enough and does not cause issues in most use cases.
        return 10000

    @item_method
    @accepts(Int("id"))
    async def run(self, id_):
        """
        Execute a Periodic Snapshot Task of `id`.
        """
        task = await self.get_instance(id_)
        if not task["enabled"]:
            raise CallError("Task is not enabled")
        await self.middleware.call("zettarepl.run_periodic_snapshot_task", task["id"])

    async def _validate(self, data):
        # Returns a (possibly empty) ValidationErrors collection instead of
        # raising, so callers can attach it under their own schema name.
        verrors = ValidationErrors()

        if data['dataset'] not in (await self.middleware.call('pool.filesystem_choices')):
            verrors.add(
                'dataset',
                'Dataset not found'
            )

        if not data['recursive'] and data['exclude']:
            verrors.add(
                'exclude',
                'Excluding datasets is not necessary for non-recursive periodic snapshot tasks'
            )

        for i, v in enumerate(data['exclude']):
            if not v.startswith(f'{data["dataset"]}/'):
                verrors.add(
                    f'exclude.{i}',
                    'Excluded dataset should be a child or other descendant of the selected dataset'
                )

        return verrors
class PeriodicSnapshotTaskFSAttachmentDelegate(FSAttachmentDelegate):
    # Lets dataset-level operations discover snapshot tasks attached to a path
    # and delete or toggle them as part of that operation.
    name = 'snapshottask'
    title = 'Snapshot Task'
    resource_name = 'dataset'

    async def query(self, path, enabled, options=None):
        # Return tasks with the given enabled state whose dataset lives under `path`.
        results = []
        for task in await self.middleware.call('pool.snapshottask.query', [['enabled', '=', enabled]]):
            if await self.middleware.call('filesystem.is_child', os.path.join('/mnt', task['dataset']), path):
                results.append(task)

        return results

    async def delete(self, attachments):
        for attachment in attachments:
            await self.middleware.call('datastore.delete', 'storage.task', attachment['id'])

        await self.middleware.call('zettarepl.update_tasks')

    async def toggle(self, attachments, enabled):
        for attachment in attachments:
            await self.middleware.call('datastore.update', 'storage.task', attachment['id'], {'task_enabled': enabled})

        await self.middleware.call('zettarepl.update_tasks')
async def on_zettarepl_state_changed(middleware, id_, fields):
    """Forward zettarepl state updates for periodic snapshot tasks as
    `pool.snapshottask.query` CHANGED events; other state ids are ignored."""
    if not id_.startswith('periodic_snapshot_task_'):
        return
    # zettarepl state ids look like `periodic_snapshot_task_<task id>`
    task_id = int(id_.split('_')[-1])
    middleware.send_event(
        'pool.snapshottask.query', 'CHANGED', id=task_id, fields={'state': fields},
    )
async def setup(middleware):
    # Register the attachment delegate so dataset operations can find snapshot
    # tasks on the affected paths, and forward zettarepl state changes to
    # `pool.snapshottask.query` subscribers.
    await middleware.call('pool.dataset.register_attachment_delegate',
                          PeriodicSnapshotTaskFSAttachmentDelegate(middleware))
    middleware.register_hook('zettarepl.state_change', on_zettarepl_state_changed)
| 15,323 | Python | .py | 341 | 32.680352 | 121 | 0.562659 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,488 | boot.py | truenas_middleware/src/middlewared/middlewared/plugins/boot.py | import asyncio
import os
from contextlib import asynccontextmanager
from middlewared.schema import accepts, Bool, Dict, Int, List, Str, returns, Patch
from middlewared.service import CallError, Service, job, private
from middlewared.utils import run
from middlewared.utils.disks import valid_zfs_partition_uuids
from middlewared.validators import Range
# Job lock serializing disk attach/replace operations on the boot pool.
BOOT_ATTACH_REPLACE_LOCK = 'boot_attach_replace'
# Resolved at startup by `setup()`; BOOT_POOL_DISKS doubles as a process-wide cache.
BOOT_POOL_NAME = BOOT_POOL_DISKS = None
# Legacy (freenas-boot) and current (boot-pool) boot pool names.
BOOT_POOL_NAME_VALID = ['freenas-boot', 'boot-pool']
class BootService(Service):
    """Management of the boot pool and its member disks (`boot.*`)."""

    class Config:
        cli_namespace = 'system.boot'

    @private
    async def pool_name(self):
        # Boot pool name as detected by `setup()` ('boot-pool' or legacy 'freenas-boot').
        return BOOT_POOL_NAME

    @accepts(roles=['READONLY_ADMIN'])
    @returns(Patch(
        'pool_entry', 'get_state',
        ('rm', {'name': 'id'}),
        ('rm', {'name': 'guid'}),
    ))
    async def get_state(self):
        """
        Returns the current state of the boot pool, including all vdevs, properties and datasets.
        """
        # WebUI expects same data as `pool.pool_extend`
        return await self.middleware.call('pool.pool_normalize_info', BOOT_POOL_NAME)

    @private
    async def get_disks_cache(self):
        """If boot pool disk cache hasn't been set (or cleared),
        then set it.

        NOTE: we cache this information since it doesn't change
        very often and we have a ton of callers (especially on HA)
        that need to determine this information. By caching this,
        it reduces the amount of times we have to use our ProcessPool
        """
        global BOOT_POOL_DISKS
        if BOOT_POOL_DISKS is None:
            # Using an immutable object is very important since this is
            # a globally cached value
            BOOT_POOL_DISKS = tuple(await self.middleware.call('zfs.pool.get_disks', BOOT_POOL_NAME))
        return list(BOOT_POOL_DISKS)

    @private
    async def clear_disks_cache(self):
        """Clear the boot pool disk cache"""
        global BOOT_POOL_DISKS
        BOOT_POOL_DISKS = None

    @accepts(roles=['READONLY_ADMIN'])
    @returns(List('disks', items=[Str('disk')]))
    async def get_disks(self):
        """
        Returns disks of the boot pool.
        """
        return await self.get_disks_cache()

    @private
    async def get_boot_type(self):
        """
        Get the boot type of the boot pool.

        Returns:
            "BIOS", "EFI", None
        """
        # https://wiki.debian.org/UEFI
        return 'EFI' if os.path.exists('/sys/firmware/efi') else 'BIOS'

    @accepts(
        Str('dev'),
        Dict(
            'options',
            Bool('expand', default=False),
        ),
    )
    @returns()
    @job(lock=BOOT_ATTACH_REPLACE_LOCK)
    async def attach(self, job, dev, options):
        """
        Attach a disk to the boot pool, turning a stripe into a mirror.

        `expand` option will determine whether the new disk partition will be
        the maximum available or the same size as the current disk.
        """
        await self.check_update_ashift_property()
        disks = list(await self.get_disks())
        if len(disks) > 1:
            raise CallError('3-way mirror not supported')

        format_opts = {}
        if not options['expand']:
            # Lets try to find out the size of the current ZFS or FreeBSD-ZFS (upgraded TrueNAS CORE installation)
            # partition so the new partition is not bigger, preventing size mismatch if one of them fail later on.
            zfs_part = await self.middleware.call(
                'disk.get_partition_with_uuids',
                disks[0],
                list(valid_zfs_partition_uuids()),
            )
            if zfs_part:
                format_opts['size'] = zfs_part['size']

        # Match the partition schema of the existing boot disk (see `legacy_schema`)
        format_opts['legacy_schema'] = await self.legacy_schema(disks[0])
        await self.middleware.call('boot.format', dev, format_opts)

        pool = await self.middleware.call('zfs.pool.query', [['name', '=', BOOT_POOL_NAME]], {'get': True})

        zfs_dev_part = await self.middleware.call('disk.get_partition', dev)
        extend_pool_job = await self.middleware.call(
            'zfs.pool.extend', BOOT_POOL_NAME, None, [{
                'target': pool['groups']['data'][0]['guid'],
                'type': 'DISK',
                'path': f'/dev/{zfs_dev_part["name"]}'
            }]
        )

        await self.middleware.call('boot.install_loader', dev)

        await job.wrap(extend_pool_job)

        # If the user is upgrading his disks, let's set expand to True to make sure that we
        # register the new disks capacity which increase the size of the pool
        await self.middleware.call('zfs.pool.online', BOOT_POOL_NAME, zfs_dev_part['name'], True)

        await self.update_initramfs()

    @accepts(Str('dev'))
    @returns()
    async def detach(self, dev):
        """
        Detach given `dev` from boot pool.
        """
        await self.check_update_ashift_property()
        await self.middleware.call('zfs.pool.detach', BOOT_POOL_NAME, dev, {'clear_label': True})
        await self.update_initramfs()

    @accepts(Str('label'), Str('dev'))
    @returns()
    @job(lock=BOOT_ATTACH_REPLACE_LOCK)
    async def replace(self, job, label, dev):
        """
        Replace device `label` on boot pool with `dev`.
        """
        format_opts = {}
        await self.check_update_ashift_property()
        disks = list(await self.get_disks())
        format_opts['legacy_schema'] = await self.legacy_schema(disks[0])

        job.set_progress(0, f'Formatting {dev}')
        await self.middleware.call('boot.format', dev, format_opts)
        job.set_progress(0, f'Replacing {label} with {dev}')
        zfs_dev_part = await self.middleware.call('disk.get_partition', dev)
        await self.middleware.call('zfs.pool.replace', BOOT_POOL_NAME, label, zfs_dev_part['name'])
        # We need to wait for pool resilver after replacing a device, otherwise grub might
        # fail with `unknown filesystem` error
        while True:
            state = await self.get_state()
            if (
                state['scan'] and
                state['scan']['function'] == 'RESILVER' and
                state['scan']['state'] == 'SCANNING'
            ):
                left = int(state['scan']['total_secs_left']) if state['scan']['total_secs_left'] else 'unknown'
                job.set_progress(int(state['scan']['percentage']), f'Resilvering boot pool, {left} seconds left')
                await asyncio.sleep(5)
            else:
                break

        job.set_progress(100, 'Installing boot loader')
        await self.middleware.call('boot.install_loader', dev)
        await self.update_initramfs()

    @accepts()
    @returns()
    @job(lock='boot_scrub')
    async def scrub(self, job):
        """
        Scrub on boot pool.
        """
        subjob = await self.middleware.call('pool.scrub.scrub', BOOT_POOL_NAME)
        return await job.wrap(subjob)

    @accepts(
        Int('interval', validators=[Range(min_=1)])
    )
    @returns(Int('interval'))
    async def set_scrub_interval(self, interval):
        """
        Set Automatic Scrub Interval value in days.
        """
        await self.middleware.call(
            'datastore.update',
            'system.advanced',
            (await self.middleware.call('system.advanced.config'))['id'],
            {'adv_boot_scrub': interval},
        )
        return interval

    @asynccontextmanager
    async def __toggle_rootfs_readwrite(self):
        # Temporarily flip the root dataset to read-write if it is mounted
        # read-only, always restoring the read-only property afterwards.
        mnt = await self.middleware.call('filesystem.mount_info', [['mountpoint', '=', '/']], {'get': True})

        if 'RO' in mnt['super_opts']:
            try:
                await self.middleware.call('zfs.dataset.update', mnt['mount_source'], {
                    "properties": {"readonly": {"parsed": False}}
                })
                yield
            finally:
                await self.middleware.call('zfs.dataset.update', mnt['mount_source'], {
                    "properties": {"readonly": {"parsed": True}}
                })
        else:
            yield

    @accepts(Dict(
        'options',
        Str('database', default=None, null=True),
        Bool('force', default=False),
    ))
    @private
    async def update_initramfs(self, options):
        """
        Returns true if initramfs was updated and false otherwise.
        """
        async with self.__toggle_rootfs_readwrite():
            args = ['/']
            if options['database']:
                args.extend(['-d', options['database']])
            if options['force']:
                args.extend(['-f'])

            cp = await run(
                '/usr/local/bin/truenas-initrd.py', *args,
                encoding='utf8', errors='ignore', check=False
            )
            # Exit code 1 signals "initramfs was rewritten"; anything higher is a failure.
            if cp.returncode > 1:
                raise CallError(f'Failed to update initramfs: {cp.stderr}')

            return cp.returncode == 1

    @private
    async def expand(self):
        # Best-effort: grow each boot-pool data partition to (nearly) full disk size.
        await self.check_update_ashift_property()
        boot_pool = await self.middleware.call('boot.pool_name')
        for device in await self.middleware.call('zfs.pool.get_devices', boot_pool):
            try:
                await self.expand_device(device)
            except CallError as e:
                self.middleware.logger.error('Error trying to expand boot pool partition %r: %r', device, e)
            except Exception:
                self.middleware.logger.error('Error trying to expand boot pool partition %r', device, exc_info=True)

    @private
    async def expand_device(self, device):
        # Only handles the expected layout: `device` is the 3rd and last
        # partition, directly adjacent to the 2nd.
        disk = await self.middleware.call('disk.get_disk_from_partition', device)

        partitions = await self.middleware.call('disk.list_partitions', disk)
        if len(partitions) != 3:
            raise CallError(f'Expected 3 partitions, found {len(partitions)}')

        if partitions[-1]['name'] != device:
            raise CallError(f'{device} is not the last partition')

        if partitions[-1]['partition_number'] != 3:
            raise CallError(f'{device} is not 3rd partition')

        if partitions[-1]['start_sector'] != partitions[-2]['end_sector'] + 1:
            raise CallError(f'{device} does not immediately follow the 2nd partition')

        disk_size = await self.middleware.call('disk.get_dev_size', disk)
        if partitions[-1]['end'] > disk_size / 1.1:
            # Already within ~10% of the disk end; nothing worth reclaiming.
            return

        self.middleware.logger.info('Resizing boot pool partition %r from %r (disk_size = %r)',
                                    device, partitions[-1]['end'], disk_size)
        # Delete and recreate partition 3 at maximum size, re-read the partition
        # table, then let ZFS claim the new space.
        await run('sgdisk', '-d', '3', f'/dev/{disk}', encoding='utf-8', errors='ignore')
        await run('sgdisk', '-N', '3', f'/dev/{disk}', encoding='utf-8', errors='ignore')
        await run('partprobe', encoding='utf-8', errors='ignore')
        await run('zpool', 'online', '-e', 'boot-pool', device, encoding='utf-8', errors='ignore')

    @private
    async def legacy_schema(self, disk):
        # Detect legacy (e.g. upgraded TrueNAS CORE) partition layouts so a new
        # boot disk can be formatted to match. Returns 'BIOS_ONLY', 'EFI_ONLY'
        # or None (current schema).
        partitions = await self.middleware.call('disk.list_partitions', disk)
        swap_types = [
            '516e7cb5-6ecf-11d6-8ff8-00022d09712b',  # used by freebsd
            '0657fd6d-a4ab-43c4-84e5-0933c84b4f4f',  # used by linux
        ]
        partitions_without_swap = [p for p in partitions if p['partition_type'] not in swap_types]
        if (
            await self.middleware.call('boot.get_boot_type') == 'EFI' and
            len(partitions_without_swap) == 2 and
            partitions[0]['size'] == 524288  # 512 KiB first partition
        ):
            return 'BIOS_ONLY'
        elif (
            len(partitions_without_swap) == 2 and
            partitions[0]['size'] == 272629760  # 260 MiB first partition
        ):
            return 'EFI_ONLY'

    @private
    async def check_update_ashift_property(self):
        # Pin ashift=12 (2^12 = 4 KiB) on the boot pool if it is still at its
        # ZFS default, before attaching/replacing devices.
        properties = {}
        if (
            zfs_pool := await self.middleware.call('zfs.pool.query', [('name', '=', BOOT_POOL_NAME)])
        ) and zfs_pool[0]['properties']['ashift']['source'] == 'DEFAULT':
            properties['ashift'] = {'value': '12'}

        if properties:
            await self.middleware.call('zfs.pool.update', BOOT_POOL_NAME, {'properties': properties})
async def on_config_upload(middleware, path):
    # Regenerate the initramfs against the just-uploaded configuration database.
    await middleware.call('boot.update_initramfs', {'database': path})
async def setup(middleware):
    # Detect the boot pool name, warm the boot disk cache and make sure the
    # pool is restricted to the 'grub2' compatibility feature set.
    global BOOT_POOL_NAME
    global BOOT_POOL_DISKS

    try:
        # `zpool list -H` prints tab-separated columns; build {name: compatibility}.
        pools = dict([line.split('\t') for line in (
            await run('zpool', 'list', '-H', '-o', 'name,compatibility', encoding='utf8')
        ).stdout.strip().splitlines()])
    except Exception:
        # this isn't fatal, but we need to log something so we can review and fix as needed
        middleware.logger.warning('Unexpected failure parsing compatibility feature', exc_info=True)
        return

    for i in BOOT_POOL_NAME_VALID:
        if i in pools:
            BOOT_POOL_NAME = i
            BOOT_POOL_DISKS = tuple(await middleware.call('zfs.pool.get_disks', BOOT_POOL_NAME))
            compatibility = pools[i]
            if compatibility != 'grub2':
                middleware.logger.info(f'Boot pool {BOOT_POOL_NAME!r} has {compatibility=!r}, setting it to grub2')
                try:
                    await run('zpool', 'set', 'compatibility=grub2', BOOT_POOL_NAME)
                except Exception as e:
                    middleware.logger.error(f'Error setting boot pool compatibility: {e!r}')
            break
    else:
        middleware.logger.error('Failed to detect boot pool name.')

    middleware.register_hook('config.on_upload', on_config_upload, sync=True)
| 13,595 | Python | .py | 304 | 34.848684 | 116 | 0.596675 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,489 | keychain.py | truenas_middleware/src/middlewared/middlewared/plugins/keychain.py | import asyncio
import base64
import enum
import errno
import os
import re
import ssl
import subprocess
import tempfile
import urllib.parse
from truenas_api_client import Client, ClientException
from middlewared.api import api_method
from middlewared.api.current import (
KeychainCredentialEntry,
KeychainCredentialCreateArgs, KeychainCredentialCreateResult,
KeychainCredentialUpdateArgs, KeychainCredentialUpdateResult,
KeychainCredentialDeleteArgs, KeychainCredentialDeleteResult,
KeychainCredentialUsedByArgs, KeychainCredentialUsedByResult,
KeychainCredentialGetOfTypeArgs, KeychainCredentialGetOfTypeResult,
KeychainCredentialGenerateSSHKeyPairArgs, KeychainCredentialGenerateSSHKeyPairResult,
KeychainCredentialRemoteSSHHostKeyScanArgs, KeychainCredentialRemoteSSHHostKeyScanResult,
KeychainCredentialRemoteSSHSemiautomaticSetupArgs, KeychainCredentialRemoteSSHSemiautomaticSetupResult,
KeychainCredentialSSHPairArgs, KeychainCredentialSSHPairResult,
)
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.schema import Int, Str, ValidationErrors
from middlewared.service import CRUDService, private
import middlewared.sqlalchemy as sa
from middlewared.utils import run
from middlewared.validators import validate_schema
class KeychainCredentialType:
    # Abstract description of a keychain credential type. Subclasses define the
    # public `name`, human-readable `title`, the schema of the `attributes`
    # payload and the delegates that locate objects using the credential.
    name = NotImplemented
    title = NotImplemented

    credentials_schema = NotImplemented

    used_by_delegates = []

    def validate_and_pre_save_impl(self, middleware, verrors, schema_name, attributes):
        # Synchronous (potentially blocking) part of validation; subclasses override.
        pass

    async def validate_and_pre_save(self, middleware, verrors, schema_name, attributes):
        """If blocking I/O must be called in here, then put the logic in the *_impl method
        and call it using asyncio.to_thread"""
        pass
class KeychainCredentialUsedByDelegate:
    # Interface for finding and releasing objects that reference a credential.
    unbind_method = NotImplemented

    def __init__(self, middleware):
        self.middleware = middleware

    async def query(self, id_):
        # Return the rows of the covered entity that use credential `id_`.
        raise NotImplementedError

    async def get_title(self, row):
        # Human-readable description of `row` for `keychaincredential.used_by`.
        raise NotImplementedError

    async def unbind(self, row):
        # Break the association between `row` and the credential.
        raise NotImplementedError
class KeychainCredentialUsedByDelegateUnbindMethod(enum.Enum):
    # How a dependent object is released on cascade delete: removed outright
    # or merely disabled.
    DELETE = "delete"
    DISABLE = "disable"
class OtherKeychainCredentialKeychainCredentialUsedByDelegate(KeychainCredentialUsedByDelegate):
    # Base for delegates where the dependent object is itself another keychain
    # credential (e.g. SSH credentials referencing an SSH key pair).
    unbind_method = KeychainCredentialUsedByDelegateUnbindMethod.DELETE

    type = NotImplemented  # credential type of the dependent credentials

    async def query(self, id_):
        result = []
        for row in await self.middleware.call("keychaincredential.query", [["type", "=", self.type]]):
            if await self._is_related(row, id_):
                result.append(row)

        return result

    async def get_title(self, row):
        return f"{TYPES[self.type].title} {row['name']}"

    async def unbind(self, row):
        # Dependent credentials are deleted, cascading to their own dependents.
        await self.middleware.call("keychaincredential.delete", row["id"], {"cascade": True})

    async def _is_related(self, row, id_):
        # True if credential `row` references credential `id_`.
        raise NotImplementedError
class SSHCredentialsSSHKeyPairUsedByDelegate(OtherKeychainCredentialKeychainCredentialUsedByDelegate):
    """Finds SSH_CREDENTIALS entries whose `private_key` attribute references a key pair."""

    type = "SSH_CREDENTIALS"

    async def _is_related(self, row, id_):
        # SSH credentials reference the key pair by its keychain credential id.
        return id_ == row["attributes"]["private_key"]
class SFTPCloudSyncCredentialsSSHKeyPairUsedByDelegate(KeychainCredentialUsedByDelegate):
    """Finds SFTP cloud sync credentials that reference a key pair; on unbind
    the reference is simply removed and the credentials remain usable."""

    unbind_method = KeychainCredentialUsedByDelegateUnbindMethod.DISABLE

    async def query(self, id_):
        sftp_credentials = await self.middleware.call(
            "cloudsync.credentials.query", [["provider", "=", "SFTP"]]
        )
        return [
            credentials
            for credentials in sftp_credentials
            if credentials["attributes"].get("private_key") == id_
        ]

    async def get_title(self, row):
        return f"Cloud credentials {row['name']}"

    async def unbind(self, row):
        attributes = row["attributes"]
        attributes.pop("private_key")
        await self.middleware.call("datastore.update", "system.cloudcredentials", row["id"], {
            "attributes": attributes
        })
class SSHKeyPair(KeychainCredentialType):
    # Credential type holding an SSH key pair; validation shells out to
    # `ssh-keygen` and may derive the public key from the private one.
    name = "SSH_KEY_PAIR"
    title = "SSH Key Pair"

    credentials_schema = [
        Str("private_key", null=True, default=None, max_length=None),
        Str("public_key", null=True, default=None, max_length=None),
    ]
    used_by_delegates = [
        SSHCredentialsSSHKeyPairUsedByDelegate,
        SFTPCloudSyncCredentialsSSHKeyPairUsedByDelegate,
    ]

    def validate_and_pre_save_impl(self, middleware, verrors, schema_name, attributes):
        opts = {"capture_output": True, "check": False, "encoding": "utf8"}
        if attributes["private_key"]:
            # TODO: It would be best if we use crypto plugin for this but as of right now we don't have support
            # for openssh keys -
            # https://stackoverflow.com/questions/59029092/how-to-load-openssh-private-key-using-cryptography-python-module
            # so we keep on using ssh-keygen for now until that is properly supported in cryptography module.
            attributes["private_key"] = (attributes["private_key"].strip()) + "\n"
            with tempfile.NamedTemporaryFile("w+") as f:
                # 0o600: don't leave key material readable by others
                os.fchmod(f.file.fileno(), 0o600)

                f.write(attributes["private_key"])
                f.flush()

                # `ssh-keygen -y` prints the public key derived from a private key
                proc = subprocess.run(["ssh-keygen", "-y", "-f", f.name], **opts)
                if proc.returncode == 0:
                    public_key = proc.stdout
                else:
                    if proc.stderr.startswith("Enter passphrase:"):
                        error = "Encrypted private keys are not allowed"
                    else:
                        error = proc.stderr

                    verrors.add(f"{schema_name}.private_key", error)
                    return

            if attributes["public_key"]:
                # A supplied public key must match the derived one (comparison
                # ignores the trailing comment field).
                if self._normalize_public_key(attributes["public_key"]) != self._normalize_public_key(public_key):
                    verrors.add(f"{schema_name}.public_key", "Private key and public key do not match")
            else:
                attributes["public_key"] = public_key

        if not attributes["public_key"]:
            verrors.add(f"{schema_name}.public_key", "You must specify at least public key")
            return

        with tempfile.NamedTemporaryFile("w+") as f:
            os.fchmod(f.file.fileno(), 0o600)

            f.write(attributes["public_key"])
            f.flush()

            # `ssh-keygen -l` fails for malformed public keys
            proc = subprocess.run(["ssh-keygen", "-l", "-f", f.name], **opts)
            if proc.returncode != 0:
                verrors.add(f"{schema_name}.public_key", "Invalid public key")
                return

    async def validate_and_pre_save(self, middleware, verrors, schema_name, attributes):
        # Blocking ssh-keygen/tempfile work runs in a thread (see base class note).
        return await asyncio.to_thread(
            self.validate_and_pre_save_impl, middleware, verrors, schema_name, attributes
        )

    def _normalize_public_key(self, public_key):
        # Keep only the key type and base64 body, dropping the optional comment.
        return " ".join(public_key.split()[:2]).strip()
class ReplicationTaskSSHCredentialsUsedByDelegate(KeychainCredentialUsedByDelegate):
    # Replication tasks using the SSH credentials: they are disabled (not
    # deleted) when the credential goes away.
    unbind_method = KeychainCredentialUsedByDelegateUnbindMethod.DISABLE

    async def query(self, id_):
        return await self.middleware.call("replication.query", [["ssh_credentials.id", "=", id_]])

    async def get_title(self, row):
        return f"Replication task {row['name']}"

    async def unbind(self, row):
        # Disable the task and clear the credential reference, then let
        # zettarepl pick up the change.
        await self.middleware.call("datastore.update", "storage.replication", row["id"], {
            "repl_enabled": False,
            "repl_ssh_credentials": None,
        })
        await self.middleware.call("zettarepl.update_tasks")
class RsyncTaskSSHCredentialsUsedByDelegate(KeychainCredentialUsedByDelegate):
    # Rsync tasks using the SSH credentials: disabled (not deleted) on unbind.
    unbind_method = KeychainCredentialUsedByDelegateUnbindMethod.DISABLE

    async def query(self, id_):
        return await self.middleware.call("rsynctask.query", [["ssh_credentials.id", "=", id_]])

    async def get_title(self, row):
        return f"Rsync task for {row['path']!r}"

    async def unbind(self, row):
        # Disable the task via the public API first, then clear the credential
        # reference directly in the datastore.
        await self.middleware.call("rsynctask.update", row["id"], {"enabled": False})
        await self.middleware.call("datastore.update", "tasks.rsync", row["id"], {
            "rsync_ssh_credentials": None,
        })
class SSHCredentials(KeychainCredentialType):
    # Credential type describing an SSH connection to a remote host.
    name = "SSH_CREDENTIALS"
    title = "SSH credentials"

    credentials_schema = [
        Str("host", required=True),
        Int("port", default=22),
        Str("username", default="root"),
        # Keychain credential id of an SSH_KEY_PAIR
        Int("private_key", required=True),
        Str("remote_host_key", required=True),
        Int("connect_timeout", default=10),
    ]
    used_by_delegates = [
        ReplicationTaskSSHCredentialsUsedByDelegate,
        RsyncTaskSSHCredentialsUsedByDelegate,
    ]
# Registry of credential type handlers keyed by type name
# ("SSH_KEY_PAIR", "SSH_CREDENTIALS"); values are singleton instances.
TYPES = {
    type_.name: type_()
    for type_ in [SSHKeyPair, SSHCredentials]
}
def process_ssh_keyscan_output(output):
    """Strip `ssh-keyscan` output down to bare host keys.

    Drops empty lines and `# `-prefixed comment lines, removes the leading
    hostname field from each remaining line and joins the results with newlines.
    """
    keys = []
    for line in output.split("\n"):
        if not line or line.startswith("# "):
            continue
        # drop the first (hostname) field, keep key type + key data
        keys.append(" ".join(line.split()[1:]))
    return "\n".join(keys)
class KeychainCredentialModel(sa.Model):
    # ORM model backing `keychaincredential`.
    __tablename__ = 'system_keychaincredential'

    id = sa.Column(sa.Integer(), primary_key=True)
    name = sa.Column(sa.String(255))
    type = sa.Column(sa.String(255))
    # Credential secrets are stored encrypted at rest
    attributes = sa.Column(sa.JSON(encrypted=True))
class KeychainCredentialService(CRUDService):
    """CRUD service for keychain credentials (`keychaincredential.*`)."""

    class Config:
        datastore = "system.keychaincredential"
        cli_namespace = "system.keychain_credential"
        role_prefix = "KEYCHAIN_CREDENTIAL"
        entry = KeychainCredentialEntry
    @api_method(KeychainCredentialCreateArgs, KeychainCredentialCreateResult)
    async def do_create(self, data):
        """
        Create a Keychain Credential

        Create a Keychain Credential of any type.

        Every Keychain Credential has a `name` which is used to distinguish it from others.
        The following `type`s are supported:

        * `SSH_KEY_PAIR`
          Which `attributes` are:
          * `private_key`
          * `public_key` (which can be omitted and thus automatically derived from private key)
          At least one attribute is required.

        * `SSH_CREDENTIALS`
          Which `attributes` are:
          * `host`
          * `port` (default 22)
          * `username` (default root)
          * `private_key` (Keychain Credential ID)
          * `remote_host_key` (you can use `keychaincredential.remote_ssh_host_key_scan` do discover it)
          * `connect_timeout` (default 10)

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.create",
                "params": [{
                    "name": "Work SSH connection",
                    "type": "SSH_CREDENTIALS",
                    "attributes": {
                        "host": "work.freenas.org",
                        "private_key": 12,
                        "remote_host_key": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMn1VjdSMatGnxbOsrneKyai+dh6d4Hm"
                    }
                }]
            }
        """
        # Type-specific validation may normalize `attributes` in place
        # (e.g. deriving a public key from a private key).
        await self._validate("keychain_credential_create", data)

        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
        )
        return data
    @api_method(KeychainCredentialUpdateArgs, KeychainCredentialUpdateResult)
    async def do_update(self, id_, data):
        """
        Update a Keychain Credential with specific `id`

        Please note that you can't change `type`

        Also you must specify full `attributes` value

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.update",
                "params": [
                    13,
                    {
                        "name": "Work SSH connection",
                        "attributes": {
                            "host": "work.ixsystems.com",
                            "private_key": 12,
                            "remote_host_key": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMn1VjdSMatGnxbOsrneKyai+dh6d4Hm"
                        }
                    }
                ]
            }
        """
        old = await self.get_instance(id_)

        new = old.copy()
        new.update(data)

        await self._validate("keychain_credentials_update", new, id_)

        await self.middleware.call(
            "datastore.update",
            self._config.datastore,
            id_,
            new,
        )

        if new["type"] in ["SSH_KEY_PAIR", "SSH_CREDENTIALS"]:
            # Replication/rsync tasks may be using this credential
            await self.middleware.call("zettarepl.update_tasks")

        return new
    @api_method(KeychainCredentialDeleteArgs, KeychainCredentialDeleteResult)
    async def do_delete(self, id_, options):
        """
        Delete Keychain Credential with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "keychaincredential.delete",
                "params": [
                    13
                ]
            }
        """
        instance = await self.get_instance(id_)
        # Without `cascade`, refuse to delete a credential that is still in use;
        # with it, unbind every user first (delete or disable, per delegate).
        for delegate in TYPES[instance["type"]].used_by_delegates:
            delegate = delegate(self.middleware)
            for row in await delegate.query(instance["id"]):
                if not options["cascade"]:
                    raise CallError("This credential is used and no cascade option is specified")

                await delegate.unbind(row)

        await self.middleware.call(
            "datastore.delete",
            self._config.datastore,
            id_,
        )
@api_method(KeychainCredentialUsedByArgs, KeychainCredentialUsedByResult)
async def used_by(self, id_):
"""
Returns list of objects that use this credential.
"""
instance = await self.get_instance(id_)
result = []
for delegate in TYPES[instance["type"]].used_by_delegates:
delegate = delegate(self.middleware)
for row in await delegate.query(instance["id"]):
result.append({
"title": await delegate.get_title(row),
"unbind_method": delegate.unbind_method.value,
})
if isinstance(delegate, OtherKeychainCredentialKeychainCredentialUsedByDelegate):
result.extend(await self.middleware.call("keychaincredential.used_by", row["id"]))
return result
async def _validate(self, schema_name, data, id_=None):
verrors = ValidationErrors()
await self._ensure_unique(verrors, schema_name, "name", data["name"], id_)
if data["type"] not in TYPES:
verrors.add(f"{schema_name}.type", "Invalid type")
raise verrors
else:
type_ = TYPES[data["type"]]
attributes_verrors = validate_schema(type_.credentials_schema, data["attributes"])
verrors.add_child(f"{schema_name}.attributes", attributes_verrors)
verrors.check()
await type_.validate_and_pre_save(self.middleware, verrors, f"{schema_name}.attributes", data["attributes"])
verrors.check()
@api_method(KeychainCredentialGetOfTypeArgs, KeychainCredentialGetOfTypeResult, private=True)
async def get_of_type(self, id_, type_):
try:
credential = await self.middleware.call("keychaincredential.query", [["id", "=", id_]], {"get": True})
except MatchNotFound:
raise CallError("Credential does not exist", errno.ENOENT)
else:
if credential["type"] != type_:
raise CallError(f"Credential is not of type {type_}", errno.EINVAL)
if not credential["attributes"]:
raise CallError(f"Decrypting credential {credential['name']} failed", errno.EFAULT)
return credential
@api_method(KeychainCredentialGenerateSSHKeyPairArgs, KeychainCredentialGenerateSSHKeyPairResult,
roles=["KEYCHAIN_CREDENTIAL_WRITE"])
def generate_ssh_key_pair(self):
"""
Generate a public/private key pair
Generate a public/private key pair (useful for `SSH_KEY_PAIR` type)
.. examples(websocket)::
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "keychaincredential.generate_ssh_key_pair",
"params": []
}
"""
with tempfile.TemporaryDirectory() as tmpdirname:
key = os.path.join(tmpdirname, "key")
subprocess.check_call(["ssh-keygen", "-t", "rsa", "-f", key, "-N", "", "-q"])
with open(key) as f:
private_key = f.read()
with open(f"{key}.pub") as f:
public_key = f.read()
return {
"private_key": private_key,
"public_key": public_key,
}
    @api_method(KeychainCredentialRemoteSSHHostKeyScanArgs, KeychainCredentialRemoteSSHHostKeyScanResult,
                roles=["KEYCHAIN_CREDENTIAL_WRITE"])
    async def remote_ssh_host_key_scan(self, data):
        """
        Discover a remote host key

        Discover a remote host key (useful for `SSH_CREDENTIALS`)

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "keychaincredential.remote_ssh_host_key_scan",
            "params": [{
                "host": "work.freenas.org"
            }]
          }
        """
        proc = await run(["ssh-keyscan", "-p", str(data["port"]), "-T", str(data["connect_timeout"]), data["host"]],
                         check=False, encoding="utf8")
        if proc.returncode == 0:
            if proc.stdout:
                try:
                    return process_ssh_keyscan_output(proc.stdout)
                except Exception:
                    # Unparseable keyscan output; surface raw output instead of a traceback
                    raise CallError(f"ssh-keyscan failed: {proc.stdout + proc.stderr}") from None
            elif proc.stderr:
                # ssh-keyscan exited 0 but produced only diagnostics on stderr
                raise CallError(f"ssh-keyscan failed: {proc.stderr}")
            else:
                # No output at all: the host did not respond within `connect_timeout`
                raise CallError("SSH timeout")
        else:
            raise CallError(f"ssh-keyscan failed: {proc.stdout + proc.stderr}")
    @api_method(KeychainCredentialRemoteSSHSemiautomaticSetupArgs, KeychainCredentialRemoteSSHSemiautomaticSetupResult,
                roles=["KEYCHAIN_CREDENTIAL_WRITE"])
    def remote_ssh_semiautomatic_setup(self, data):
        """
        Perform semi-automatic SSH connection setup with other FreeNAS machine

        Perform semi-automatic SSH connection setup with other FreeNAS machine. It creates a `SSH_CREDENTIALS`
        credential with specified `name` that can be used to connect to FreeNAS machine with specified `url` and
        temporary auth `token`. Other FreeNAS machine adds `private_key` to allowed `username`'s private keys. Other
        `SSH_CREDENTIALS` attributes such as `connect_timeout` can be specified as well.

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "keychaincredential.remote_ssh_semiautomatic_setup",
            "params": [{
                "name": "Work SSH connection",
                "url": "https://work.freenas.org",
                "token": "8c8d5fd1-f749-4429-b379-9c186db4f834",
                "private_key": 12
            }]
          }
        """
        replication_key = self.middleware.call_sync("keychaincredential.get_ssh_key_pair_with_private_key",
                                                    data["private_key"])

        # Rewrite http(s):// to ws(s):// and connect to the remote middleware websocket endpoint
        try:
            client = Client(os.path.join(re.sub("^http", "ws", data["url"]), "websocket"),
                            verify_ssl=data["verify_ssl"])
        except ssl.SSLCertVerificationError as e:
            raise CallError(str(e), CallError.ESSLCERTVERIFICATIONERROR)
        except Exception as e:
            raise CallError(f"Unable to connect to remote system: {e}")

        with client as c:
            # Authenticate with either a one-time token or username/password (plus optional OTP)
            if data.get("token"):
                if not c.call("auth.login_with_token", data["token"]):
                    raise CallError("Invalid token")
            elif data.get("password"):
                args = [data["admin_username"], data["password"]]
                if data.get("otp_token"):
                    args.append(data["otp_token"])
                if not c.call("auth.login", *args):
                    raise CallError("Invalid username or password")
            else:
                raise CallError("You should specify either remote system password or temporary authentication token")

            # Ask the remote system to install our public key and return its SSH port/host key
            try:
                response = c.call("keychaincredential.ssh_pair", {
                    "remote_hostname": "any-host",
                    "username": data["username"],
                    "public_key": replication_key["attributes"]["public_key"],
                })
            except ClientException as e:
                raise CallError(
                    f"Semi-automatic SSH connection setup failed: {e}\n\n"
                    f"Please make sure that home directory for {data['username']} user on the remote system exists and "
                    "is writeable."
                )
            except Exception as e:
                raise CallError(f"Semi-automatic SSH connection setup failed: {e!r}")

            user = c.call("user.query", [["username", "=", data["username"]], ['local', '=', True]], {"get": True})
            user_update = {}
            # The remote user needs an interactive shell to run commands over SSH
            if user["shell"] == "/usr/sbin/nologin":
                user_update["shell"] = "/usr/bin/bash"
            if data["sudo"]:
                # "ALL" already grants everything; otherwise allow passwordless sudo for zfs only
                if "ALL" not in user["sudo_commands_nopasswd"]:
                    zfs_binary = "/usr/sbin/zfs"
                    if zfs_binary not in user["sudo_commands_nopasswd"]:
                        user_update["sudo_commands_nopasswd"] = user["sudo_commands_nopasswd"] + [zfs_binary]
            try:
                c.call("user.update", user["id"], user_update)
            except Exception as e:
                raise CallError(f"Error updating remote user attributes: {e}")

        # Persist the local SSH_CREDENTIALS record pointing at the remote system
        return self.middleware.call_sync("keychaincredential.create", {
            "name": data["name"],
            "type": "SSH_CREDENTIALS",
            "attributes": {
                "host": urllib.parse.urlparse(data["url"]).hostname,
                "port": response["port"],
                "username": data["username"],
                "private_key": replication_key["id"],
                "remote_host_key": process_ssh_keyscan_output(response["host_key"]),
                "connect_timeout": data["connect_timeout"],
            }
        })
    @api_method(KeychainCredentialSSHPairArgs, KeychainCredentialSSHPairResult, private=True)
    def ssh_pair(self, data):
        """
        Receives public key, storing it to accept SSH connection and return
        pertinent SSH data of this machine.

        Called by the remote side during `remote_ssh_semiautomatic_setup`:
        appends `data["public_key"]` to the given local user's authorized_keys
        and returns this system's SSH port and host keys.
        """
        service = self.middleware.call_sync("service.query", [("service", "=", "ssh")], {"get": True})
        ssh = self.middleware.call_sync("ssh.config")
        try:
            user = self.middleware.call_sync("user.query", [("username", "=", data["username"]), ("local", "=", True)], {"get": True})
        except MatchNotFound:
            raise CallError(f"User {data['username']} does not exist")

        if user["home"].startswith("/nonexistent") or not os.path.exists(user["home"]):
            raise CallError(f"Home directory {user['home']} does not exist", errno.ENOENT)

        # Make sure SSH is enabled
        if not service["enable"]:
            self.middleware.call_sync("service.update", "ssh", {"enable": True})

        if service["state"] != "RUNNING":
            self.middleware.call_sync("service.start", "ssh")

            # This might be the first time of the service being enabled
            # which will then result in new host keys we need to grab
            ssh = self.middleware.call_sync("ssh.config")

        # If .ssh dir does not exist, create it
        dotsshdir = os.path.join(user["home"], ".ssh")
        os.makedirs(dotsshdir, exist_ok=True)
        os.chown(dotsshdir, user["uid"], user["group"]["bsdgrp_gid"])

        # Write public key in user authorized_keys for SSH
        # (append-only and idempotent: skipped if the key is already present)
        with open(f"{dotsshdir}/authorized_keys", "a+") as f:
            f.seek(0)
            if data["public_key"] not in f.read():
                f.write("\n" + data["public_key"] + "\n")

        # One "<hostname> <key>" line per host key type, newline-terminated
        ssh_hostkey = "{0} {1}\n{0} {2}\n{0} {3}\n".format(
            data["remote_hostname"],
            base64.b64decode(ssh["host_rsa_key_pub"].encode()).decode(),
            base64.b64decode(ssh["host_ecdsa_key_pub"].encode()).decode(),
            base64.b64decode(ssh["host_ed25519_key_pub"].encode()).decode(),
        )

        return {
            "port": ssh["tcpport"],
            "host_key": ssh_hostkey,
        }
@private
async def get_ssh_key_pair_with_private_key(self, id_):
try:
credential = await self.middleware.call("keychaincredential.query", [["id", "=", id_]], {"get": True})
except MatchNotFound:
return None
if credential["type"] != "SSH_KEY_PAIR":
return None
if not credential["attributes"]["private_key"]:
return None
return credential
from datetime import datetime, time
import os
import re
from middlewared.auth import fake_app
from middlewared.common.attachment import FSAttachmentDelegate
from middlewared.schema import accepts, Bool, Cron, Dataset, Dict, Int, List, Patch, returns, Str
from middlewared.service import item_method, job, pass_app, private, CallError, CRUDService, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils.path import is_child
from middlewared.validators import Port, Range, ReplicationSnapshotNamingSchema, Unique
class ReplicationModel(sa.Model):
    """
    Database model backing replication tasks (table `storage_replication`).

    Column names carry the `repl_` prefix that `ReplicationService` strips via
    its `datastore_prefix`; the fields mirror the `replication.create` schema.
    """
    __tablename__ = 'storage_replication'

    id = sa.Column(sa.Integer(), primary_key=True)
    repl_target_dataset = sa.Column(sa.String(120))
    repl_recursive = sa.Column(sa.Boolean(), default=False)
    repl_compression = sa.Column(sa.String(120), nullable=True, default="LZ4")
    repl_speed_limit = sa.Column(sa.Integer(), nullable=True, default=None)
    repl_schedule_begin = sa.Column(sa.Time(), nullable=True, default=time(hour=0))
    repl_schedule_end = sa.Column(sa.Time(), nullable=True, default=time(hour=23, minute=45))
    repl_enabled = sa.Column(sa.Boolean(), default=True)
    repl_direction = sa.Column(sa.String(120), default="PUSH")
    repl_transport = sa.Column(sa.String(120), default="SSH")
    repl_ssh_credentials_id = sa.Column(sa.ForeignKey('system_keychaincredential.id'), index=True, nullable=True)
    repl_sudo = sa.Column(sa.Boolean())
    # SSH+NETCAT transport settings
    repl_netcat_active_side = sa.Column(sa.String(120), nullable=True, default=None)
    repl_netcat_active_side_port_min = sa.Column(sa.Integer(), nullable=True)
    repl_netcat_active_side_port_max = sa.Column(sa.Integer(), nullable=True)
    repl_source_datasets = sa.Column(sa.JSON(list))
    repl_exclude = sa.Column(sa.JSON(list))
    repl_naming_schema = sa.Column(sa.JSON(list))
    repl_name_regex = sa.Column(sa.String(120), nullable=True)
    repl_auto = sa.Column(sa.Boolean(), default=True)
    # Cron-style run schedule (converted to/from dict form by `Cron` helpers)
    repl_schedule_minute = sa.Column(sa.String(100), nullable=True, default="00")
    repl_schedule_hour = sa.Column(sa.String(100), nullable=True, default="*")
    repl_schedule_daymonth = sa.Column(sa.String(100), nullable=True, default="*")
    repl_schedule_month = sa.Column(sa.String(100), nullable=True, default='*')
    repl_schedule_dayweek = sa.Column(sa.String(100), nullable=True, default="*")
    repl_only_matching_schedule = sa.Column(sa.Boolean())
    repl_readonly = sa.Column(sa.String(120))
    repl_allow_from_scratch = sa.Column(sa.Boolean())
    repl_hold_pending_snapshots = sa.Column(sa.Boolean())
    # Snapshot retention on the target side
    repl_retention_policy = sa.Column(sa.String(120), default="NONE")
    repl_lifetime_unit = sa.Column(sa.String(120), nullable=True, default='WEEK')
    repl_lifetime_value = sa.Column(sa.Integer(), nullable=True, default=2)
    repl_lifetimes = sa.Column(sa.JSON(list))
    # ZFS send stream flags
    repl_large_block = sa.Column(sa.Boolean(), default=True)
    repl_embed = sa.Column(sa.Boolean(), default=False)
    repl_compressed = sa.Column(sa.Boolean(), default=True)
    repl_retries = sa.Column(sa.Integer(), default=5)
    # Cron-style restriction window applied on top of bound snapshot tasks
    repl_restrict_schedule_minute = sa.Column(sa.String(100), nullable=True, default="00")
    repl_restrict_schedule_hour = sa.Column(sa.String(100), nullable=True, default="*")
    repl_restrict_schedule_daymonth = sa.Column(sa.String(100), nullable=True, default="*")
    repl_restrict_schedule_month = sa.Column(sa.String(100), nullable=True, default='*')
    repl_restrict_schedule_dayweek = sa.Column(sa.String(100), nullable=True, default="*")
    repl_restrict_schedule_begin = sa.Column(sa.Time(), nullable=True, default=time(hour=0))
    repl_restrict_schedule_end = sa.Column(sa.Time(), nullable=True, default=time(hour=23, minute=45))
    repl_netcat_active_side_listen_address = sa.Column(sa.String(120), nullable=True, default=None)
    repl_netcat_passive_side_connect_address = sa.Column(sa.String(120), nullable=True, default=None)
    repl_logging_level = sa.Column(sa.String(120), nullable=True, default=None)
    repl_name = sa.Column(sa.String(120))
    repl_state = sa.Column(sa.Text(), default="{}")
    repl_properties = sa.Column(sa.Boolean(), default=True)
    repl_properties_exclude = sa.Column(sa.JSON(list))
    repl_properties_override = sa.Column(sa.JSON())
    repl_replicate = sa.Column(sa.Boolean())
    # Target-side dataset encryption settings
    repl_encryption = sa.Column(sa.Boolean())
    repl_encryption_inherit = sa.Column(sa.Boolean(), nullable=True)
    repl_encryption_key = sa.Column(sa.EncryptedText(), nullable=True)
    repl_encryption_key_format = sa.Column(sa.String(120), nullable=True)
    repl_encryption_key_location = sa.Column(sa.Text(), nullable=True)

    repl_periodic_snapshot_tasks = sa.relationship('PeriodicSnapshotTaskModel',
                                                   secondary=lambda: ReplicationPeriodicSnapshotTaskModel.__table__)
class ReplicationPeriodicSnapshotTaskModel(sa.Model):
    """
    Join table for the many-to-many relationship between replication tasks
    and the periodic snapshot tasks bound to them.
    """
    __tablename__ = 'storage_replication_repl_periodic_snapshot_tasks'

    id = sa.Column(sa.Integer(), primary_key=True)
    replication_id = sa.Column(sa.ForeignKey('storage_replication.id', ondelete='CASCADE'), index=True)
    task_id = sa.Column(sa.ForeignKey('storage_task.id', ondelete='CASCADE'), index=True)
class ReplicationService(CRUDService):
    class Config:
        # CRUDService wiring: rows live in the `storage.replication` datastore
        # under the `repl_` column prefix and are post-processed by
        # `replication.extend` using context from `replication.extend_context`.
        datastore = "storage.replication"
        datastore_prefix = "repl_"
        datastore_extend = "replication.extend"
        datastore_extend_context = "replication.extend_context"
        cli_namespace = "task.replication"
        role_prefix = "REPLICATION_TASK"
@private
async def extend_context(self, rows, extra):
if extra.get("check_dataset_encryption_keys", False) and any(row["direction"] == "PUSH" for row in rows):
dataset_mapping = await self.middleware.call("pool.dataset.dataset_encryption_root_mapping")
else:
dataset_mapping = {}
return {
"state": await self.middleware.call("zettarepl.get_state"),
"dataset_encryption_root_mapping": dataset_mapping,
"check_dataset_encryption_keys": extra.get("check_dataset_encryption_keys", False),
}
    @private
    async def extend(self, data, context):
        """
        Convert a raw datastore row into its API representation (inverse of `compress`).
        """
        # Joined periodic snapshot task columns come prefixed with "task_"; strip it
        data["periodic_snapshot_tasks"] = [
            {k.replace("task_", ""): v for k, v in task.items()}
            for task in data["periodic_snapshot_tasks"]
        ]
        for task in data["periodic_snapshot_tasks"]:
            Cron.convert_db_format_to_schedule(task, begin_end=True)

        # The DB stores one naming-schema list; the API splits it by direction:
        # PUSH exposes it as `also_include_naming_schema`, PULL as `naming_schema`.
        if data["direction"] == "PUSH":
            data["also_include_naming_schema"] = data["naming_schema"]
            data["naming_schema"] = []
        if data["direction"] == "PULL":
            data["also_include_naming_schema"] = []

        Cron.convert_db_format_to_schedule(data, "schedule", key_prefix="schedule_", begin_end=True)
        Cron.convert_db_format_to_schedule(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)

        # Runtime state comes from zettarepl, not from the DB row
        if "error" in context["state"]:
            data["state"] = context["state"]["error"]
        else:
            data["state"] = context["state"]["tasks"].get(f"replication_task_{data['id']}", {
                "state": "PENDING",
            })
        data["job"] = data["state"].pop("job", None)

        if context["check_dataset_encryption_keys"]:
            if context["dataset_encryption_root_mapping"] and data["direction"] == "PUSH":
                data["has_encrypted_dataset_keys"] = bool(
                    await self.middleware.call(
                        "pool.dataset.export_keys_for_replication_internal", data,
                        context["dataset_encryption_root_mapping"], True,
                    )
                )
            else:
                data["has_encrypted_dataset_keys"] = False

        return data
@private
async def compress(self, data):
if data["direction"] == "PUSH":
data["naming_schema"] = data["also_include_naming_schema"]
del data["also_include_naming_schema"]
Cron.convert_schedule_to_db_format(data, "schedule", key_prefix="schedule_", begin_end=True)
Cron.convert_schedule_to_db_format(data, "restrict_schedule", key_prefix="restrict_schedule_", begin_end=True)
del data["periodic_snapshot_tasks"]
return data
    @accepts(
        Dict(
            "replication_create",
            Str("name", required=True),
            Str("direction", enum=["PUSH", "PULL"], required=True),
            Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
            Int("ssh_credentials", null=True, default=None),
            Str("netcat_active_side", enum=["LOCAL", "REMOTE"], null=True, default=None),
            Str("netcat_active_side_listen_address", null=True, default=None),
            Int("netcat_active_side_port_min", null=True, default=None, validators=[Port()]),
            Int("netcat_active_side_port_max", null=True, default=None, validators=[Port()]),
            Str("netcat_passive_side_connect_address", null=True, default=None),
            Bool("sudo", default=False),
            List("source_datasets", items=[Dataset("dataset")], empty=False),
            Dataset("target_dataset", required=True),
            Bool("recursive", required=True),
            List("exclude", items=[Dataset("dataset")]),
            Bool("properties", default=True),
            List("properties_exclude", items=[Str("property", empty=False)]),
            Dict("properties_override", additional_attrs=True),
            Bool("replicate", default=False),
            Bool("encryption", default=False),
            Bool("encryption_inherit", null=True, default=None),
            Str("encryption_key", null=True, default=None),
            Str("encryption_key_format", enum=["HEX", "PASSPHRASE"], null=True, default=None),
            Str("encryption_key_location", null=True, default=None),
            List("periodic_snapshot_tasks", items=[Int("periodic_snapshot_task")],
                 validators=[Unique()]),
            List("naming_schema", items=[
                Str("naming_schema", validators=[ReplicationSnapshotNamingSchema()])]),
            List("also_include_naming_schema", items=[
                Str("naming_schema", validators=[ReplicationSnapshotNamingSchema()])]),
            Str("name_regex", null=True, default=None, empty=False),
            Bool("auto", required=True),
            Cron(
                "schedule",
                defaults={"minute": "00"},
                begin_end=True,
                null=True,
                default=None
            ),
            Cron(
                "restrict_schedule",
                defaults={"minute": "00"},
                begin_end=True,
                null=True,
                default=None
            ),
            Bool("only_matching_schedule", default=False),
            Bool("allow_from_scratch", default=False),
            Str("readonly", enum=["SET", "REQUIRE", "IGNORE"], default="SET"),
            Bool("hold_pending_snapshots", default=False),
            Str("retention_policy", enum=["SOURCE", "CUSTOM", "NONE"], required=True),
            Int("lifetime_value", null=True, default=None, validators=[Range(min_=1)]),
            Str("lifetime_unit", null=True, default=None, enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"]),
            List("lifetimes", items=[
                Dict(
                    "lifetime",
                    Cron("schedule"),
                    Int("lifetime_value", validators=[Range(min_=1)], required=True),
                    Str("lifetime_unit", enum=["HOUR", "DAY", "WEEK", "MONTH", "YEAR"], required=True),
                    strict=True,
                ),
            ]),
            Str("compression", enum=["LZ4", "PIGZ", "PLZIP"], null=True, default=None),
            Int("speed_limit", null=True, default=None, validators=[Range(min_=1)]),
            Bool("large_block", default=True),
            Bool("embed", default=False),
            Bool("compressed", default=True),
            Int("retries", default=5, validators=[Range(min_=1)]),
            Str("logging_level", enum=["DEBUG", "INFO", "WARNING", "ERROR"], null=True, default=None),
            Bool("enabled", default=True),
            register=True,
            strict=True,
        ),
        audit="Replication task create:",
        audit_extended=lambda data: data["name"]
    )
    @pass_app(require=True)
    async def do_create(self, app, data):
        """
        Create a Replication Task

        Create a Replication Task that will push or pull ZFS snapshots to or from remote host.

        * `name` specifies a name for replication task
        * `direction` specifies whether task will `PUSH` or `PULL` snapshots
        * `transport` is a method of snapshots transfer:
          * `SSH` transfers snapshots via SSH connection. This method is supported everywhere but does not achieve
            great performance
            `ssh_credentials` is a required field for this transport (Keychain Credential ID of type `SSH_CREDENTIALS`)
          * `SSH+NETCAT` uses unencrypted connection for data transfer. This can only be used in trusted networks
            and requires a port (specified by range from `netcat_active_side_port_min` to `netcat_active_side_port_max`)
            to be open on `netcat_active_side`
            `ssh_credentials` is also required for control connection
          * `LOCAL` replicates to or from localhost
        * `sudo` flag controls whether `SSH` and `SSH+NETCAT` transports should use sudo (which is expected to be
          passwordless) to run `zfs` command on the remote machine.
        * `source_datasets` is a non-empty list of datasets to replicate snapshots from
        * `target_dataset` is a dataset to put snapshots into. It must exist on target side
        * `recursive` and `exclude` have the same meaning as for Periodic Snapshot Task
        * `properties` control whether we should send dataset properties along with snapshots
        * `periodic_snapshot_tasks` is a list of periodic snapshot task IDs that are sources of snapshots for this
          replication task. Only push replication tasks can be bound to periodic snapshot tasks.
        * `naming_schema` is a list of naming schemas for pull replication
        * `also_include_naming_schema` is a list of naming schemas for push replication
        * `name_regex` will replicate all snapshots which names match specified regular expression
        * `auto` allows replication to run automatically on schedule or after bound periodic snapshot task
        * `schedule` is a schedule to run replication task. Only `auto` replication tasks without bound periodic
          snapshot tasks can have a schedule
        * `restrict_schedule` restricts when replication task with bound periodic snapshot tasks runs. For example,
          you can have periodic snapshot tasks that run every 15 minutes, but only run replication task every hour.
        * Enabling `only_matching_schedule` will only replicate snapshots that match `schedule` or
          `restrict_schedule`
        * `allow_from_scratch` will destroy all snapshots on target side and replicate everything from scratch if none
          of the snapshots on target side matches source snapshots
        * `readonly` controls destination datasets readonly property:
          * `SET` will set all destination datasets to readonly=on after finishing the replication
          * `REQUIRE` will require all existing destination datasets to have readonly=on property
          * `IGNORE` will avoid this kind of behavior
        * `hold_pending_snapshots` will prevent source snapshots from being deleted by retention of replication fails
          for some reason
        * `retention_policy` specifies how to delete old snapshots on target side:
          * `SOURCE` deletes snapshots that are absent on source side
          * `CUSTOM` deletes snapshots that are older than `lifetime_value` and `lifetime_unit`
          * `NONE` does not delete any snapshots
        * `compression` compresses SSH stream. Available only for SSH transport
        * `speed_limit` limits speed of SSH stream. Available only for SSH transport
        * `large_block`, `embed` and `compressed` are various ZFS stream flag documented in `man zfs send`
        * `retries` specifies number of retries before considering replication failed

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "replication.create",
            "params": [{
                "name": "Work Backup",
                "direction": "PUSH",
                "transport": "SSH",
                "ssh_credentials": 12,
                "source_datasets": ["data/work"],
                "target_dataset": "repl/work",
                "recursive": true,
                "periodic_snapshot_tasks": [5],
                "auto": true,
                "restrict_schedule": {
                    "minute": "0",
                    "hour": "*/2",
                    "dom": "*",
                    "month": "*",
                    "dow": "1,2,3,4,5",
                    "begin": "09:00",
                    "end": "18:00"
                },
                "only_matching_schedule": true,
                "retention_policy": "CUSTOM",
                "lifetime_value": 1,
                "lifetime_unit": "WEEK"
            }]
          }
        """
        verrors = ValidationErrors()
        verrors.add_child("replication_create", await self._validate(app, data))
        verrors.check()

        # `compress` removes `periodic_snapshot_tasks` from `data`, so capture
        # the bound task ids first; the m2m rows are inserted separately below.
        periodic_snapshot_tasks = data["periodic_snapshot_tasks"]
        await self.compress(data)
        id_ = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix}
        )
        await self._set_periodic_snapshot_tasks(id_, periodic_snapshot_tasks)

        await self.middleware.call("zettarepl.update_tasks")

        return await self.get_instance(id_)
    @accepts(Int("id"), Patch(
        "replication_create",
        "replication_update",
        ("attr", {"update": True}),
    ), audit="Replication task update:", audit_callback=True)
    @pass_app(require=True)
    async def do_update(self, app, audit_callback, id_, data):
        """
        Update a Replication Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "replication.update",
            "params": [
                7,
                {
                    "name": "Work Backup",
                    "direction": "PUSH",
                    "transport": "SSH",
                    "ssh_credentials": 12,
                    "source_datasets": ["data/work"],
                    "target_dataset": "repl/work",
                    "recursive": true,
                    "periodic_snapshot_tasks": [5],
                    "auto": true,
                    "restrict_schedule": {
                        "minute": "0",
                        "hour": "*/2",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    },
                    "only_matching_schedule": true,
                    "retention_policy": "CUSTOM",
                    "lifetime_value": 1,
                    "lifetime_unit": "WEEK"
                }
            ]
          }
        """
        old = await self.get_instance(id_)
        audit_callback(old["name"])

        new = old.copy()
        # `extend` inflates these to full objects; flatten back to the ids the
        # schema and datastore expect before merging in the update payload
        if new["ssh_credentials"]:
            new["ssh_credentials"] = new["ssh_credentials"]["id"]
        new["periodic_snapshot_tasks"] = [task["id"] for task in new["periodic_snapshot_tasks"]]
        new.update(data)

        verrors = ValidationErrors()
        verrors.add_child("replication_update", await self._validate(app, new, id_))
        verrors.check()

        periodic_snapshot_tasks = new["periodic_snapshot_tasks"]
        await self.compress(new)

        # Computed at read time by `extend`; must not be written back
        new.pop("state", None)
        new.pop("job", None)

        await self.middleware.call(
            "datastore.update",
            self._config.datastore,
            id_,
            new,
            {'prefix': self._config.datastore_prefix}
        )

        await self._set_periodic_snapshot_tasks(id_, periodic_snapshot_tasks)

        await self.middleware.call("zettarepl.update_tasks")

        return await self.get_instance(id_)
@accepts(
Int("id"),
audit="Replication task delete:",
audit_callback=True
)
async def do_delete(self, audit_callback, id_):
"""
Delete a Replication Task with specific `id`
.. examples(websocket)::
:::javascript
{
"id": "6841f242-840a-11e6-a437-00e04d680384",
"msg": "method",
"method": "replication.delete",
"params": [
1
]
}
"""
task_name = (await self.get_instance(id_))["name"]
audit_callback(task_name)
response = await self.middleware.call(
"datastore.delete",
self._config.datastore,
id_
)
await self.middleware.call("zettarepl.update_tasks")
return response
@item_method
@accepts(
Int("id"),
Bool("really_run", default=True, hidden=True),
roles=["REPLICATION_TASK_WRITE"],
)
@job(logs=True)
async def run(self, job, id_, really_run):
"""
Run Replication Task of `id`.
"""
if really_run:
task = await self.get_instance(id_)
if not task["enabled"]:
raise CallError("Task is not enabled")
if task["state"]["state"] == "RUNNING":
raise CallError("Task is already running")
if task["state"]["state"] == "HOLD":
raise CallError("Task is on hold")
await self.middleware.call("zettarepl.run_replication_task", id_, really_run, job)
@accepts(
Patch(
"replication_create",
"replication_run_onetime",
("rm", {"name": "name"}),
("rm", {"name": "auto"}),
("rm", {"name": "schedule"}),
("rm", {"name": "only_matching_schedule"}),
("rm", {"name": "enabled"}),
("add", Bool("exclude_mountpoint_property", default=True)),
("add", Bool("only_from_scratch", default=False)),
),
)
@job(logs=True)
async def run_onetime(self, job, data):
"""
Run replication task without creating it.
If `only_from_scratch` is `true` then replication will fail if target dataset already exists.
"""
data["name"] = f"Temporary replication task for job {job.id}"
data["schedule"] = None
data["only_matching_schedule"] = False
data["auto"] = False
data["enabled"] = True
verrors = ValidationErrors()
verrors.add_child("replication_run_onetime", await self._validate(fake_app(), data))
verrors.check()
if data.get("ssh_credentials") is not None:
data["ssh_credentials"] = await self.middleware.call(
"keychaincredential.get_of_type", data["ssh_credentials"], "SSH_CREDENTIALS",
)
await self.middleware.call("zettarepl.run_onetime_replication_task", job, data)
async def _validate(self, app, data, id_=None):
verrors = ValidationErrors()
await self._ensure_unique(verrors, "", "name", data["name"], id_)
# Direction
snapshot_tasks = []
if data["direction"] == "PUSH":
e, snapshot_tasks = await self._query_periodic_snapshot_tasks(data["periodic_snapshot_tasks"])
verrors.add_child("periodic_snapshot_tasks", e)
if data["naming_schema"]:
verrors.add("naming_schema", "This field has no sense for push replication")
if not snapshot_tasks and not data["also_include_naming_schema"] and not data["name_regex"]:
verrors.add(
"periodic_snapshot_tasks", "You must at least either bind a periodic snapshot task or provide "
"\"Also Include Naming Schema\" or \"Name Regex\" for push replication "
"task"
)
if data["schedule"] is None and data["auto"] and not data["periodic_snapshot_tasks"]:
verrors.add("auto", "Push replication that runs automatically must be either "
"bound to a periodic snapshot task or have a schedule")
if data["direction"] == "PULL":
if data["schedule"] is None and data["auto"]:
verrors.add("auto", "Pull replication that runs automatically must have a schedule")
if data["periodic_snapshot_tasks"]:
verrors.add("periodic_snapshot_tasks", "Pull replication can't be bound to a periodic snapshot task")
if not data["naming_schema"] and not data["name_regex"]:
verrors.add("naming_schema", "Naming schema or Name regex are required for pull replication")
if data["also_include_naming_schema"]:
verrors.add("also_include_naming_schema", "This field has no sense for pull replication")
if data["hold_pending_snapshots"]:
verrors.add("hold_pending_snapshots", "Pull replication tasks can't hold pending snapshots because "
"they don't do source retention")
if app.authenticated_credentials.has_role("REPLICATION_TASK_WRITE"):
if not app.authenticated_credentials.has_role("REPLICATION_TASK_WRITE_PULL"):
verrors.add("direction", "You don't have permissions to use PULL replication")
# Transport
if data["transport"] == "SSH+NETCAT":
if data["netcat_active_side"] is None:
verrors.add("netcat_active_side", "You must choose active side for SSH+netcat replication")
if data["netcat_active_side_port_min"] is not None and data["netcat_active_side_port_max"] is not None:
if data["netcat_active_side_port_min"] > data["netcat_active_side_port_max"]:
verrors.add("netcat_active_side_port_max",
"Please specify value greater or equal than netcat_active_side_port_min")
if data["compression"] is not None:
verrors.add("compression", "Compression is not supported for SSH+netcat replication")
if data["speed_limit"] is not None:
verrors.add("speed_limit", "Speed limit is not supported for SSH+netcat replication")
else:
if data["netcat_active_side"] is not None:
verrors.add("netcat_active_side", "This field only has sense for SSH+netcat replication")
for k in ["netcat_active_side_listen_address", "netcat_active_side_port_min", "netcat_active_side_port_max",
"netcat_passive_side_connect_address"]:
if data[k] is not None:
verrors.add(k, "This field only has sense for SSH+netcat replication")
if data["transport"] == "LOCAL":
if data["ssh_credentials"] is not None:
verrors.add("ssh_credentials", "Remote credentials have no sense for local replication")
if data["compression"] is not None:
verrors.add("compression", "Compression has no sense for local replication")
if data["speed_limit"] is not None:
verrors.add("speed_limit", "Speed limit has no sense for local replication")
else:
if data["ssh_credentials"] is None:
verrors.add("ssh_credentials", "SSH Credentials are required for non-local replication")
else:
try:
await self.middleware.call("keychaincredential.get_of_type", data["ssh_credentials"],
"SSH_CREDENTIALS")
except CallError as e:
verrors.add("ssh_credentials", str(e))
# Common for all directions and transports
for i, source_dataset in enumerate(data["source_datasets"]):
for snapshot_task in snapshot_tasks:
if is_child(source_dataset, snapshot_task["dataset"]):
if data["recursive"]:
for exclude in snapshot_task["exclude"]:
if is_child(exclude, source_dataset) and exclude not in data["exclude"]:
verrors.add("exclude", f"You should exclude {exclude!r} as bound periodic snapshot "
f"task dataset {snapshot_task['dataset']!r} does")
else:
if source_dataset in snapshot_task["exclude"]:
verrors.add(f"source_datasets.{i}", f"Dataset {source_dataset!r} is excluded by bound "
f"periodic snapshot task for dataset "
f"{snapshot_task['dataset']!r}")
if not data["recursive"] and data["exclude"]:
verrors.add("exclude", "Excluding child datasets is only supported for recursive replication")
for i, v in enumerate(data["exclude"]):
if not any(v.startswith(ds + "/") for ds in data["source_datasets"]):
verrors.add(f"exclude.{i}", "This dataset is not a child of any of source datasets")
if data["replicate"]:
if not data["recursive"]:
verrors.add("recursive", "This option is required for full filesystem replication")
if data["exclude"]:
verrors.add("exclude", "This option is not supported for full filesystem replication")
if not data["properties"]:
verrors.add("properties", "This option is required for full filesystem replication")
for i, source_dataset in enumerate(data["source_datasets"]):
for j, another_source_dataset in enumerate(data["source_datasets"]):
if j != i:
if is_child(source_dataset, another_source_dataset):
verrors.add(
f"source_datasets.{i}",
"Replication task that replicates the entire filesystem can't replicate both "
f"{another_source_dataset!r} and its child {source_dataset!r}"
)
for i, periodic_snapshot_task in enumerate(snapshot_tasks):
if (
not any(is_child(source_dataset, periodic_snapshot_task["dataset"])
for source_dataset in data["source_datasets"]) or
not periodic_snapshot_task["recursive"]
):
verrors.add(
f"periodic_snapshot_tasks.{i}",
"Replication tasks that replicate the entire filesystem can only use periodic snapshot tasks "
"that take recursive snapshots of the dataset being replicated (or its ancestor)"
)
if data["encryption"]:
if not data["encryption_inherit"]:
for k in ["encryption_key", "encryption_key_format", "encryption_key_location"]:
if data[k] is None:
verrors.add(k, "This property is required when remote dataset encryption is enabled")
if data["schedule"]:
if not data["auto"]:
verrors.add("schedule", "You can't have schedule for replication that does not run automatically")
else:
if data["only_matching_schedule"]:
verrors.add("only_matching_schedule", "You can't have only-matching-schedule without schedule")
if data["name_regex"]:
try:
re.compile(f"({data['name_regex']})$")
except Exception as e:
verrors.add("name_regex", f"Invalid regex: {e}")
if data["naming_schema"] or data["also_include_naming_schema"]:
verrors.add("name_regex", "Naming regex can't be used with Naming schema")
if data["retention_policy"] not in ["SOURCE", "NONE"]:
verrors.add(
"retention_policy",
"Only `Same as Source` and `None` retention policies can be used with Naming regex",
)
if data["retention_policy"] == "CUSTOM":
if data["lifetime_value"] is None:
verrors.add("lifetime_value", "This field is required for custom retention policy")
if data["lifetime_unit"] is None:
verrors.add("lifetime_value", "This field is required for custom retention policy")
else:
if data["lifetime_value"] is not None:
verrors.add("lifetime_value", "This field has no sense for specified retention policy")
if data["lifetime_unit"] is not None:
verrors.add("lifetime_unit", "This field has no sense for specified retention policy")
if data["lifetimes"]:
verrors.add("lifetimes", "This field has no sense for specified retention policy")
if data["enabled"]:
for i, snapshot_task in enumerate(snapshot_tasks):
if not snapshot_task["enabled"]:
verrors.add(
f"periodic_snapshot_tasks.{i}",
"You can't bind disabled periodic snapshot task to enabled replication task"
)
return verrors
async def _set_periodic_snapshot_tasks(self, replication_task_id, periodic_snapshot_tasks_ids):
await self.middleware.call("datastore.delete", "storage.replication_repl_periodic_snapshot_tasks",
[["replication_id", "=", replication_task_id]])
for periodic_snapshot_task_id in periodic_snapshot_tasks_ids:
await self.middleware.call(
"datastore.insert", "storage.replication_repl_periodic_snapshot_tasks",
{
"replication_id": replication_task_id,
"task_id": periodic_snapshot_task_id,
},
)
async def _query_periodic_snapshot_tasks(self, ids):
verrors = ValidationErrors()
query_result = await self.middleware.call("pool.snapshottask.query", [["id", "in", ids]])
snapshot_tasks = []
for i, task_id in enumerate(ids):
for task in query_result:
if task["id"] == task_id:
snapshot_tasks.append(task)
break
else:
verrors.add(str(i), "This snapshot task does not exist")
return verrors, snapshot_tasks
    @accepts(Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
             Int("ssh_credentials", null=True, default=None),
             roles=["REPLICATION_TASK_WRITE"])
    @returns(List("datasets", items=[Str("dataset")]))
    async def list_datasets(self, transport, ssh_credentials):
        """
        List datasets on remote side.

        Accepts `transport` and SSH credentials ID (required for non-local
        transports; ignored for `LOCAL`).

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "replication.list_datasets",
            "params": [
                "SSH",
                7
            ]
          }
        """
        # Thin proxy: zettarepl owns the transport/SSH machinery.
        return await self.middleware.call("zettarepl.list_datasets", transport, ssh_credentials)
    @accepts(Str("dataset", required=True),
             Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
             Int("ssh_credentials", null=True, default=None),
             roles=["REPLICATION_TASK_WRITE"])
    async def create_dataset(self, dataset, transport, ssh_credentials):
        """
        Creates dataset on remote side.

        Accepts `dataset` name, `transport` and SSH credentials ID (required
        for non-local transports; ignored for `LOCAL`).

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "replication.create_dataset",
            "params": [
                "repl/work",
                "SSH",
                7
            ]
          }
        """
        # Thin proxy: zettarepl owns the transport/SSH machinery.
        return await self.middleware.call("zettarepl.create_dataset", dataset, transport, ssh_credentials)
@accepts(roles=["REPLICATION_TASK_WRITE"])
@returns(List("naming_schemas", items=[Str("naming_schema")]))
async def list_naming_schemas(self):
"""
List all naming schemas used in periodic snapshot and replication tasks.
"""
naming_schemas = []
for snapshottask in await self.middleware.call("pool.snapshottask.query"):
naming_schemas.append(snapshottask["naming_schema"])
for replication in await self.middleware.call("replication.query"):
naming_schemas.extend(replication["naming_schema"])
naming_schemas.extend(replication["also_include_naming_schema"])
return sorted(set(naming_schemas))
    @accepts(
        Dict(
            "count_eligible_manual_snapshots",
            List("datasets", empty=False, items=[
                Dataset("dataset")
            ]),
            List("naming_schema", items=[
                Str("naming_schema", validators=[ReplicationSnapshotNamingSchema()])
            ]),
            Str("name_regex", null=True, default=None, empty=False),
            Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL"], required=True),
            Int("ssh_credentials", null=True, default=None),
        ),
        # Backwards compatibility: the old positional call signature
        # (datasets, naming_schema, transport[, ssh_credentials]) is adapted
        # into the single-dict payload above.
        deprecated=[
            (
                lambda args: len(args) in [3, 4],
                lambda datasets, naming_schema, transport, ssh_credentials=None: [{
                    "datasets": datasets,
                    "naming_schema": naming_schema,
                    "transport": transport,
                    "ssh_credentials": ssh_credentials,
                }],
            ),
        ],
        roles=["REPLICATION_TASK_WRITE"],
    )
    @returns(Dict(
        Int("total"),
        Int("eligible"),
    ))
    async def count_eligible_manual_snapshots(self, data):
        """
        Count how many existing snapshots of `dataset` match `naming_schema`
        (or `name_regex`). Returns total and eligible counts.

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "replication.count_eligible_manual_snapshots",
            "params": [{
                "dataset": "repl/work",
                "naming_schema": ["auto-%Y-%m-%d_%H-%M"],
                "transport": "SSH",
                "ssh_credentials": 4,
            }]
          }
        """
        # Thin proxy: zettarepl performs the actual snapshot enumeration.
        return await self.middleware.call("zettarepl.count_eligible_manual_snapshots", data)
    @accepts(
        Str("direction", enum=["PUSH", "PULL"], required=True),
        List("source_datasets", items=[Dataset("dataset")], required=True, empty=False),
        Dataset("target_dataset", required=True),
        Str("transport", enum=["SSH", "SSH+NETCAT", "LOCAL", "LEGACY"], required=True),
        Int("ssh_credentials", null=True, default=None),
        roles=["REPLICATION_TASK_WRITE"],
    )
    @returns(Dict(
        additional_attrs=True,
        example={
            "backup/work": ["auto-2019-10-15_13-00", "auto-2019-10-15_09-00"],
            "backup/games": ["auto-2019-10-15_13-00"],
        },
    ))
    async def target_unmatched_snapshots(self, direction, source_datasets, target_dataset, transport, ssh_credentials):
        """
        Check if target has any snapshots that do not exist on source.
        Returns these snapshots grouped by dataset.

        .. examples(websocket)::

          :::javascript
          {
            "id": "6841f242-840a-11e6-a437-00e04d680384",
            "msg": "method",
            "method": "replication.target_unmatched_snapshots",
            "params": [
                "PUSH",
                ["repl/work", "repl/games"],
                "backup",
                "SSH",
                4
            ]
          }
        """
        # Thin proxy: zettarepl compares the snapshot lists on both sides.
        return await self.middleware.call("zettarepl.target_unmatched_snapshots", direction, source_datasets,
                                          target_dataset, transport, ssh_credentials)
@private
def new_snapshot_name(self, naming_schema):
return datetime.now().strftime(naming_schema)
# Legacy pair support
@private
@accepts(Dict(
"replication-pair-data",
Str("hostname", required=True),
Str("public-key", required=True),
Str("user", null=True),
))
async def pair(self, data):
result = await self.middleware.call("keychaincredential.ssh_pair", {
"remote_hostname": data["hostname"],
"username": data["user"] or "root",
"public_key": data["public-key"],
})
return {
"ssh_port": result["port"],
"ssh_hostkey": result["host_key"],
}
class ReplicationFSAttachmentDelegate(FSAttachmentDelegate):
    """Exposes replication tasks as dataset attachments to the pool plugin."""

    name = 'replication'
    title = 'Replication'

    async def query(self, path, enabled, options=None):
        # Return tasks whose source (push/local) or target (pull/local)
        # datasets live under `path`.
        results = []
        for replication in await self.middleware.call('replication.query', [['enabled', '=', enabled]]):
            if replication['transport'] == 'LOCAL' or replication['direction'] == 'PUSH':
                if await self.middleware.call('filesystem.is_child', [
                    os.path.join('/mnt', source_dataset) for source_dataset in replication['source_datasets']
                ], path):
                    results.append(replication)

            # NOTE(review): a LOCAL task whose source and target both match
            # `path` is appended twice — confirm callers tolerate duplicates.
            if replication['transport'] == 'LOCAL' or replication['direction'] == 'PULL':
                if await self.middleware.call('filesystem.is_child', os.path.join('/mnt', replication['target_dataset']), path):
                    results.append(replication)

        return results

    async def delete(self, attachments):
        # Remove rows directly from the datastore, then resync zettarepl once.
        for attachment in attachments:
            await self.middleware.call('datastore.delete', 'storage.replication', attachment['id'])

        await self.middleware.call('zettarepl.update_tasks')

    async def toggle(self, attachments, enabled):
        # Flip the enabled flag on each task, then resync zettarepl once.
        for attachment in attachments:
            await self.middleware.call('datastore.update', 'storage.replication', attachment['id'],
                                       {'repl_enabled': enabled})

        await self.middleware.call('zettarepl.update_tasks')
async def on_zettarepl_state_changed(middleware, id_, fields):
    """Forward zettarepl state updates for replication tasks as `replication.query` events."""
    prefix = 'replication_task_'
    if not id_.startswith(prefix):
        # State changes for other zettarepl entities are not ours to report.
        return
    task_id = int(id_.split('_')[-1])
    middleware.send_event('replication.query', 'CHANGED', id=task_id, fields={'state': fields})
async def setup(middleware):
    # Register replication tasks as dataset attachments with the pool plugin.
    await middleware.call('pool.dataset.register_attachment_delegate', ReplicationFSAttachmentDelegate(middleware))
    # Declare the plugin's outbound network activity under the name 'replication'.
    await middleware.call('network.general.register_activity', 'replication', 'Replication')
    # Relay zettarepl task state changes to API event subscribers.
    middleware.register_hook('zettarepl.state_change', on_zettarepl_state_changed)
| 44,536 | Python | .py | 834 | 40.231415 | 128 | 0.583433 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,491 | api_key.py | truenas_middleware/src/middlewared/middlewared/plugins/api_key.py | import pam
import errno
from typing import Literal, TYPE_CHECKING
from datetime import datetime, UTC
from middlewared.api import api_method
from middlewared.api.current import (
ApiKeyEntry, ApiKeyCreateArgs, ApiKeyCreateResult, ApiKeyUpdateArgs, ApiKeyUpdateResult,
ApiKeyDeleteArgs, ApiKeyDeleteResult, ApiKeyMyKeysArgs, ApiKeyMyKeysResult,
)
from middlewared.service import CRUDService, pass_app, periodic, private, ValidationErrors
from middlewared.service_exception import CallError
import middlewared.sqlalchemy as sa
from middlewared.utils import filter_list
from middlewared.utils.auth import LEGACY_API_KEY_USERNAME
from middlewared.utils.crypto import generate_pbkdf2_512, generate_string
from middlewared.utils.privilege import credential_has_full_admin
from middlewared.utils.sid import sid_is_valid
from middlewared.utils.time_utils import utc_now
if TYPE_CHECKING:
from middlewared.main import Middleware
class APIKeyModel(sa.Model):
    """Datastore row for a user-linked API key."""
    __tablename__ = "account_api_key"

    id = sa.Column(sa.Integer(), primary_key=True)
    name = sa.Column(sa.String(200))  # user-chosen label, unique (enforced in _validate)
    user_identifier = sa.Column(sa.String(200))  # local DB id (digits), SID, or legacy marker
    key = sa.Column(sa.Text())  # PBKDF2 hash of the secret — never the plaintext key
    created_at = sa.Column(sa.DateTime())
    expiry = sa.Column(sa.Integer())  # unix timestamp; 0 = never expires, -1 = revoked
class ApiKeyService(CRUDService):
    """CRUD service for user-linked API keys."""

    class Config:
        namespace = "api_key"
        datastore = "account.api_key"
        # Raw rows are expanded per-item by item_extend using the batch
        # context built by item_extend_ctx.
        datastore_extend = "api_key.item_extend"
        datastore_extend_context = "api_key.item_extend_ctx"
        cli_namespace = "auth.api_key"
        role_prefix = 'API_KEY'
        entry = ApiKeyEntry
@private
async def item_extend_ctx(self, rows, extra):
# user.query performs somewhat expensive datastore extend that we perhaps
# don't care about (for example 2FA status)
users = await self.middleware.call(
'datastore.query', 'account.bsdusers',
[], {'prefix': 'bsdusr_'}
)
by_id = {x['id']: x['username'] for x in users}
# We want to convert legacy keys into the appropriate local
# administrator account
if (admin_user := filter_list(users, [['uid', '=', 950]])):
root_name = admin_user[0]['username']
else:
root_name = 'root'
return {
'by_id': by_id,
'by_sid': {},
'now': utc_now(naive=False),
'root_name': root_name
}
    @private
    async def item_extend(self, item, ctx):
        """
        Convert a raw datastore row into the extended API representation.

        * modify `user_identifier` (change type if digit, add INVALID prefix before garbage)
        * remove `expiry`
        * add `username` - `user_identifer` is used for lookup
        * add `local`
        * add `expires_at` - derived from `expiry`
        * add `revoked` - derived from `expiry` (-1 means forcibly revoked)
        """
        user_identifier = item['user_identifier']
        expiry = item.pop('expiry')
        thehash = item.pop('key')
        # Defaults for the synthetic fields; refined below.
        item.update({
            'username': None,
            'keyhash': thehash,
            'local': True,
            'expires_at': None,
            'revoked': False
        })
        if user_identifier.isdigit():
            # If we can't resolve the ID then the account was probably deleted
            # and we didn't quite get to clean up yet.
            item['user_identifier'] = int(user_identifier)
            item['username'] = ctx['by_id'].get(item['user_identifier'])
        elif user_identifier == LEGACY_API_KEY_USERNAME:
            # This may be magic string designating a migrated API key
            item['username'] = ctx['root_name']
        elif sid_is_valid(user_identifier):
            # NOTE(review): `local` stays True for SID-based keys here —
            # confirm directory-service keys are meant to report local=True.
            if (username := ctx['by_sid'].get(user_identifier)) is None:
                resp = await self.middleware.call('idmap.convert_sids', [user_identifier])
                if entry := resp['mapped'].get(user_identifier):
                    username = entry['name']
                # Feed SID we looked up back into our extend context
                # Because there may be multiple keys for same SID value
                ctx['by_sid'][user_identifier] = username
            if username:
                item['username'] = username
        else:
            # Something wildly invalid got written, but we can't
            # write a log message here (queried too frequently).
            item['username'] = None
            item['local'] = True
        if item['username'] is None:
            # prevent keys we can't resolve from being written
            item['revoked'] = True
        match expiry:
            case -1:
                # key has been forcibly revoked
                item['revoked'] = True
            case 0 | None:
                # zero value indicates never expires
                pass
            case _:
                item['expires_at'] = datetime.fromtimestamp(expiry, UTC)
        return item
@private
def compress(self, data: dict) -> dict:
out = data.copy()
if 'expires_at' in out:
if (expires_at := out.pop('expires_at')) is None:
out['expiry'] = 0
else:
out['expiry'] = int(expires_at.timestamp())
if out.get('revoked'):
out['expiry'] = -1
thehash = out.pop('keyhash')
if thehash:
out['key'] = thehash
for key in [
'username',
'revoked',
'keyhash',
'local',
]:
out.pop(key, None)
return out
    @api_method(
        ApiKeyCreateArgs,
        ApiKeyCreateResult,
        audit='Create API key',
        audit_extended=lambda data: data['name'],
        roles=['READONLY_ADMIN', 'API_KEY_WRITE']
    )
    @pass_app(rest=True)
    def do_create(self, app, data: dict) -> dict:
        """
        Creates API Key.

        `name` is a user-readable name for key.

        The plaintext key ("<id>-<secret>") appears only in the return value
        of this call; the datastore keeps just its PBKDF2 hash.
        """
        # First catch any privilege errors to avoid leaking potentially sensitive information
        self.api_key_privilege_check(app, data['username'], 'api_key.create')

        verrors = ValidationErrors()
        self._validate("api_key_create", data, verrors)
        user = self.middleware.call_sync('user.query', [
            ['username', '=', data['username']]
        ])
        if not user:
            verrors.add('api_key_create', 'User does not exist.')

        if user and not user[0]['roles']:
            verrors.add('api_key_create', 'User lacks privilege role membership.')

        # Raises if any error was recorded, so `user` is non-empty below.
        verrors.check()

        if user[0]['local']:
            user_identifier = str(user[0]['id'])
        elif user[0]['sid']:
            user_identifier = user[0]['sid']
        else:
            # DS, but no SID available, fall back
            # to our synthesized DB ID (which is derived
            # from the UID of user)
            user_identifier = str(user[0]['id'])

        key = generate_string(string_size=64)
        data['keyhash'] = generate_pbkdf2_512(key)
        data['created_at'] = utc_now()
        data['user_identifier'] = user_identifier
        data['id'] = self.middleware.call_sync(
            'datastore.insert',
            self._config.datastore,
            self.compress(data)
        )
        data.update({
            'username': user[0]['username'],
            'local': user[0]['local'],
            'revoked': False,
        })
        # Regenerate the pam config so the new key is usable immediately.
        self.middleware.call_sync('etc.generate', 'pam_middleware')
        return dict(data, key=f"{data['id']}-{key}")
    @api_method(
        ApiKeyUpdateArgs,
        ApiKeyUpdateResult,
        audit='Update API key',
        audit_callback=True,
        roles=['READONLY_ADMIN', 'API_KEY_WRITE']
    )
    @pass_app(rest=True)
    def do_update(self, app, audit_callback: callable, id_: int, data: dict) -> dict:
        """
        Update API Key `id`.

        Specify `reset: true` to reset this API Key (generate a new secret).
        The plaintext key is included in the return value only after a reset.
        """
        reset = data.pop("reset", False)
        old = self.middleware.call_sync('api_key.query', [['id', '=', id_]], {'get': True})
        audit_callback(old['name'])
        new = old.copy()
        new.update(data)
        # Check privileges against the key's (possibly updated) owner.
        self.api_key_privilege_check(app, new['username'], 'api_key.update')

        verrors = ValidationErrors()
        self._validate("api_key_update", new, verrors, id_)
        verrors.check()

        key = None
        if reset:
            # Fresh secret; resetting also clears any revocation.
            key = generate_string(string_size=64)
            new['keyhash'] = generate_pbkdf2_512(key)
            new['revoked'] = False

        self.middleware.call_sync(
            'datastore.update',
            self._config.datastore,
            id_,
            self.compress(new),
        )
        if not key:
            # NOTE(review): non-reset updates return here, skipping the
            # pam_middleware regeneration and status check below — confirm
            # changes to e.g. `expires_at` don't need them.
            return new

        self.middleware.call_sync('etc.generate', 'pam_middleware')
        self.middleware.call_sync('api_key.check_status')
        return dict(new, key=f"{new['id']}-{key}")
    @api_method(
        ApiKeyDeleteArgs,
        ApiKeyDeleteResult,
        audit='Delete API key',
        audit_callback=True,
        roles=['READONLY_ADMIN', 'API_KEY_WRITE']
    )
    @pass_app(rest=True)
    async def do_delete(self, app, audit_callback: callable, id_: int) -> Literal[True]:
        """
        Delete API Key `id`.
        """
        api_key = await self.get_instance(id_)
        audit_callback(api_key['name'])
        # Non-admins may only delete their own keys.
        self.api_key_privilege_check(app, api_key['username'], 'api_key.delete')
        response = await self.middleware.call(
            "datastore.delete",
            self._config.datastore,
            id_
        )
        # Regenerate pam config and refresh revocation alerts after removal.
        await self.middleware.call('etc.generate', 'pam_middleware')
        await self.check_status()
        return response
    @private
    def _validate(self, schema_name: str, data: dict, verrors: ValidationErrors, id_: int | None = None):
        """Validate name uniqueness and expiration for a create/update payload."""
        # Exclude the key being updated (id_) from the uniqueness check.
        if self.middleware.call_sync('datastore.query', self._config.datastore, [
            ['name', '=', data['name']], ['id', '!=', id_]
        ]):
            verrors.add(schema_name, "name must be unique")

        if (expiration := data.get('expires_at')) is not None:
            if utc_now(naive=False) > expiration:
                verrors.add(schema_name, 'Expiration date is in the past')
@private
def api_key_privilege_check(self, app, username: str, method_name: str) -> None:
if not app or not app.authenticated_credentials.is_user_session:
# internal session
return
if credential_has_full_admin(app.authenticated_credentials):
return
if app.authenticated_credentials.has_role('API_KEY_WRITE'):
return
auth_user = app.authenticated_credentials.user['username']
if auth_user != username:
raise CallError(
f'{auth_user}: authenticated user lacks privileges to create or '
'modify API keys of other users.', errno.EACCES
)
@private
def update_hash(self, old_key: str):
"""We have some legacy keys that have hashes generated with
insufficient iterations. This method refreshes the hash we're storing
with higher iterations and different algorithm"""
id_, key = old_key.split('-', 1)
newhash = generate_pbkdf2_512(key)
self.middleware.call_sync(
"datastore.update",
self._config.datastore,
int(id_),
{'key': newhash}
)
self.middleware.call_sync('etc.generate', 'pam_middleware')
    @private
    async def authenticate(self, key: str) -> tuple | None:
        """
        Wrapper around auth.authenticate for REST API.

        `key` is the plaintext "<id>-<secret>" string. Returns None when the
        key is malformed or PAM authentication fails; otherwise a tuple of
        (user data, {'id', 'name'} of the matched key).

        NOTE(review): the previous `dict | None` annotation was wrong — the
        success path returns a tuple. Also, `get_instance` may raise for an
        unknown id rather than returning None; confirm callers handle that.
        """
        try:
            # Keys are formatted "<id>-<secret>".
            key_id = int(key.split('-', 1)[0])
        except ValueError:
            return None
        entry = await self.get_instance(key_id)
        resp = await self.middleware.call('auth.authenticate_plain',
                                          entry['username'],
                                          key,
                                          True)
        if resp['pam_response']['code'] != pam.PAM_SUCCESS:
            return None
        return (resp['user_data'], {
            'id': entry['id'],
            'name': entry['name'],
        })
@api_method(ApiKeyMyKeysArgs, ApiKeyMyKeysResult, roles=['READONLY_ADMIN', 'API_KEY_READ'])
@pass_app(require=True)
async def my_keys(self, app) -> list:
""" Get the existing API keys for the currently-authenticated user """
if not app.authenticated_credentials.is_user_session:
raise CallError('Not a user session')
username = app.authenticated_credentials.user['username']
return await self.query([['username', '=', username]])
@private
@periodic(3600, run_on_start=False)
async def check_status(self) -> None:
keys = await self.query()
revoked_keys = set([key['name'] for key in filter_list(keys, [['revoked', '=', True]])])
for key_name in revoked_keys:
await self.middleware.call('alert.oneshot_create', 'ApiKeyRevoked', {'key_name': key_name})
# delete any fixed key alerts
await self.middleware.call('alert.oneshot_delete', 'ApiKeyRevoked', revoked_keys)
| 13,132 | Python | .py | 319 | 31.122257 | 103 | 0.584314 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,492 | usage.py | truenas_middleware/src/middlewared/middlewared/plugins/usage.py | import asyncio
import json
import os
import random
import subprocess
from collections import defaultdict
import aiohttp
from middlewared.service import Service
from middlewared.utils.mount import getmntinfo
from middlewared.utils.time_utils import utc_now
USAGE_URL = 'https://usage.truenas.com/submit'  # endpoint usage statistics are POSTed to
class UsageService(Service):
    """Gathers usage statistics and periodically submits them to USAGE_URL."""

    # Submission attempts per run before giving up until the next schedule.
    FAILED_RETRIES = 3

    class Config:
        private = True
async def start(self):
retries = self.FAILED_RETRIES
while retries:
if (
not await self.middleware.call('failover.is_single_master_node') or not await self.middleware.call(
'network.general.can_perform_activity', 'usage'
)
):
break
if (await self.middleware.call('system.general.config'))['usage_collection']:
restrict_usage = []
else:
restrict_usage = ['gather_total_capacity', 'gather_system_version']
try:
await self.middleware.call(
'usage.submit_stats', await self.middleware.call('usage.gather', restrict_usage)
)
except Exception as e:
# We still want to schedule the next call
self.logger.error(e)
retries -= 1
if retries:
self.logger.debug('Retrying gathering stats after 30 minutes')
await asyncio.sleep(1800)
else:
break
event_loop = asyncio.get_event_loop()
now = utc_now()
scheduled = (
now.replace(hour=23, minute=59, second=59) - now
).total_seconds() + random.uniform(1, 86400)
event_loop.call_later(
scheduled,
lambda: self.middleware.create_task(self.middleware.call('usage.start'))
)
self.logger.debug(f'Scheduled next run in {round(scheduled)} seconds')
return True
    async def submit_stats(self, data):
        # POST the gathered stats as JSON. sort_keys yields a stable payload
        # for identical inputs; raise_for_status surfaces HTTP errors to the
        # caller (start() counts them as failed attempts). An http_proxy
        # environment variable is honored when set.
        async with aiohttp.ClientSession(raise_for_status=True) as session:
            await session.post(
                USAGE_URL,
                data=json.dumps(data, sort_keys=True),
                headers={'Content-type': 'application/json'},
                proxy=os.environ.get('http_proxy'),
            )
    def get_gather_context(self):
        """
        Build the shared context dict that every gather_* collector receives.

        Walks all ZFS datasets once, classifying each as a pool root, zvol, or
        filesystem, and accumulates totals (capacity, sizes, counts) so the
        collectors don't repeat the expensive queries.
        """
        opts = {'extra': {
            'properties': [
                'type', 'name', 'available',
                'used', 'usedbydataset', 'usedbysnapshots',
                'usedbychildren', 'usedbyrefreservation',
            ],
            'snapshots_count': True
        }}
        context = {
            'network': self.middleware.call_sync('interface.query'),
            'root_datasets': {},
            'total_capacity': 0,
            'datasets_total_size': 0,
            'datasets_total_size_recursive': 0,
            'zvols_total_size': 0,
            'zvols': [],
            'datasets': {},
            'total_snapshots': 0,
            'total_datasets': 0,
            'total_zvols': 0,
            'services': [],
            'mntinfo': getmntinfo(),
        }
        for i in self.middleware.call_sync('datastore.query', 'services.services', [], {'prefix': 'srv_'}):
            context['services'].append({'name': i['service'], 'enabled': i['enable']})

        for ds in self.middleware.call_sync('zfs.dataset.query', [], opts):
            context['total_snapshots'] += ds['snapshot_count']
            if '/' not in ds['id']:
                # No '/' in the name means this is a pool root dataset.
                context['root_datasets'][ds['id']] = ds
                context['total_datasets'] += 1
                context['datasets_total_size'] += ds['properties']['used']['parsed']
                # Capacity of a pool is its root's used + available space.
                context['total_capacity'] += (
                    ds['properties']['used']['parsed'] + ds['properties']['available']['parsed']
                )
            elif ds['type'] == 'VOLUME':
                context['zvols'].append(ds)
                context['total_zvols'] += 1
                context['zvols_total_size'] += ds['properties']['used']['parsed']
            elif ds['type'] == 'FILESYSTEM':
                context['total_datasets'] += 1
                context['datasets_total_size_recursive'] += ds['properties']['used']['parsed']
                context['datasets'][ds['id']] = ds

        return context
def gather(self, restrict_usage=None):
context = self.get_gather_context()
restrict_usage = restrict_usage or []
usage_stats = {}
for func in filter(
lambda f: (
f.startswith('gather_') and callable(getattr(self, f)) and (not restrict_usage or f in restrict_usage)
),
dir(self)
):
try:
stats = self.middleware.call_sync(f'usage.{func}', context)
except Exception as e:
self.logger.error('Failed to gather stats from %r: %s', func, e, exc_info=True)
else:
usage_stats.update(stats)
return usage_stats
def gather_total_capacity(self, context):
return {'total_capacity': context['total_capacity']}
    def gather_backup_data(self, context):
        """
        Report how much dataset data is covered by push-style backups
        (cloud sync, rsync, ZFS replication) and how much is not backed up.

        `total_size` counts each dataset at most once even when several task
        types back it up, so it never exceeds the recursive dataset total.
        """
        backed = {'cloudsync': 0, 'rsynctask': 0, 'zfs_replication': 0, 'total_size': 0}
        filters = [['enabled', '=', True], ['direction', '=', 'PUSH'], ['locked', '=', False]]
        tasks_found = {'cloudsync': set(), 'rsynctask': set()}
        for namespace in ('cloudsync', 'rsynctask'):
            opposite_namespace = 'rsynctask' if namespace == 'cloudsync' else 'cloudsync'
            for task in self.middleware.call_sync(f'{namespace}.query', filters):
                try:
                    # Map the task's path to its backing dataset.
                    task_ds = self.middleware.call_sync('zfs.dataset.path_to_dataset', task['path'], context['mntinfo'])
                except Exception:
                    self.logger.error('Failed mapping path %r to dataset', task['path'], exc_info=True)
                else:
                    if (task_ds and task_ds in context['datasets']) and (task_ds not in tasks_found[namespace]):
                        # dataset for the task was found, and exists and hasn't already been calculated
                        size = context['datasets'][task_ds]['properties']['used']['parsed']
                        backed[namespace] += size
                        if task_ds not in tasks_found[opposite_namespace]:
                            # a "task" (cloudsync, rsync, replication) can be backing up the same dataset
                            # so we don't want to add to the total backed up size because it will report
                            # an inflated number. Instead we only add to the total backed up size when it's
                            # a dataset only being backed up by a singular cloud/rsync/replication task
                            backed['total_size'] += size

                        tasks_found[namespace].add(task_ds)

        repls_found = set()
        filters = [['enabled', '=', True], ['transport', '!=', 'LOCAL'], ['direction', '=', 'PUSH']]
        for task in self.middleware.call_sync('replication.query', filters):
            for source in filter(lambda s: s in context['datasets'] and s not in repls_found, task['source_datasets']):
                size = context['datasets'][source]['properties']['used']['parsed']
                backed['zfs_replication'] += size
                repls_found.add(source)
                if source not in tasks_found['cloudsync'] and source not in tasks_found['rsynctask']:
                    # a "task" (cloudsync, rsync, replication) can be backing up the same dataset
                    # so we don't want to add to the total backed up size because it will report
                    # an inflated number. Instead we only add to the total backed up size when it's
                    # a dataset only being backed up by a singular cloud/rsync/replication task
                    backed['total_size'] += size

        return {
            'data_backup_stats': backed,
            'data_without_backup_size': context['datasets_total_size_recursive'] - backed['total_size']
        }
async def gather_applications(self, context):
# We want to retrieve following information
# 1) No of installed apps
# 2) catalog items with versions installed
# 3) List of docker images
output = {
'apps': 0,
# train -> item -> versions
'catalog_items': defaultdict(lambda: defaultdict(lambda: defaultdict(int))),
'docker_images': set(),
}
apps = await self.middleware.call('app.query')
output['apps'] = len(apps)
for app in apps:
app_metadata = app['metadata']
output['catalog_items'][app_metadata['train']][app_metadata['name']][app['version']] += 1
for image in await self.middleware.call('app.image.query'):
output['docker_images'].update(image['repo_tags'])
output['docker_images'] = list(output['docker_images'])
return output
def gather_filesystem_usage(self, context):
return {
'datasets': {'total_size': context['datasets_total_size']},
'zvols': {'total_size': context['zvols_total_size']},
}
    async def gather_ha_stats(self, context):
        """Report whether this system is licensed for High Availability (failover)."""
        return {
            'ha_licensed': await self.middleware.call('failover.licensed'),
        }
    async def gather_directory_service_stats(self, context):
        """Report the current directory services status as returned by the
        directoryservices plugin (shape defined there, passed through as-is)."""
        status = await self.middleware.call('directoryservices.status')
        return {'directory_services': status}
    async def gather_cloud_services(self, context):
        """List the distinct credential providers used by enabled cloudsync tasks."""
        return {
            'cloud_services': list({
                # de-duplicate via set comprehension; only enabled tasks counted
                t['credentials']['provider']
                for t in await self.middleware.call(
                    'cloudsync.query', [['enabled', '=', True]], {'select': ['enabled', 'credentials']}
                )
            })
        }
async def gather_hardware(self, context):
network = context['network']
cpu = await self.middleware.call('system.cpu_info')
return {
'hardware': {
'cpus': cpu['core_count'],
'cpu_model': cpu['cpu_model'],
'memory': (await self.middleware.call('system.mem_info'))['physmem_size'],
'nics': len(network),
'disks': [
{k: disk[k]} for disk in await self.middleware.call('disk.query') for k in ['model']
]
}
}
async def gather_network(self, context):
info = {'network': {'bridges': [], 'lags': [], 'phys': [], 'vlans': []}}
for i in context['network']:
if i['type'] == 'BRIDGE':
info['network']['bridges'].append({'members': i['bridge_members'], 'mtu': i['mtu']})
elif i['type'] == 'LINK_AGGREGATION':
info['network']['lags'].append({'members': i['lag_ports'], 'mtu': i['mtu'], 'type': i['lag_protocol']})
elif i['type'] == 'PHYSICAL':
info['network']['phys'].append({
'name': i['name'], 'mtu': i['mtu'], 'dhcp': i['ipv4_dhcp'], 'slaac': i['ipv6_auto']
})
elif i['type'] == 'VLAN':
info['network']['vlans'].append({
'mtu': i['mtu'], 'name': i['name'], 'tag': i['vlan_tag'], 'pcp': i['vlan_pcp']
})
return info
    async def gather_system_version(self, context):
        """Report the platform string (product type) and the full version."""
        return {
            'platform': f'TrueNAS-{await self.middleware.call("system.product_type")}',
            'version': await self.middleware.call('system.version')
        }
    async def gather_system(self, context):
        """Report system identity plus local-user/snapshot/zvol/dataset counts."""
        return {
            # stable identifier for this host, as produced by system.host_id
            'system_hash': await self.middleware.call('system.host_id'),
            # bump when the shape of the reported payload changes
            'usage_version': 1,
            'system': [{
                'users': await self.middleware.call('user.query', [['local', '=', True]], {'count': True}),
                'snapshots': context['total_snapshots'],
                'zvols': context['total_zvols'],
                'datasets': context['total_datasets'],
            }]
        }
    async def gather_pools(self, context):
        """Summarize every non-OFFLINE zpool (capacity, vdev layout, space
        accounting) and the summed raw capacity of all pools."""
        total_raw_capacity = 0  # zpool list -p -o size summed together of all zpools
        pool_list = []
        for p in filter(lambda x: x['status'] != 'OFFLINE', await self.middleware.call('pool.query')):
            total_raw_capacity += p['size']
            disks = vdevs = 0
            _type = 'UNKNOWN'
            if (pd := context['root_datasets'].get(p['name'])) is None:
                # pool exists but its root dataset was not collected; skip it
                self.logger.error('%r is missing, skipping collection', p['name'])
                continue
            else:
                pd = pd['properties']
            for d in p['topology']['data']:
                if not d.get('path'):
                    # grouped vdev (no direct device path); members are children
                    vdevs += 1
                    _type = d['type']
                    disks += len(d['children'])
                else:
                    # a bare disk at the top level, i.e. a stripe member
                    disks += 1
                    _type = 'STRIPE'
            pool_list.append({
                # used + available of the root dataset approximates pool capacity
                'capacity': pd['used']['parsed'] + pd['available']['parsed'],
                'disks': disks,
                'l2arc': bool(p['topology']['cache']),
                'type': _type.lower(),
                'usedbydataset': pd['usedbydataset']['parsed'],
                'usedbysnapshots': pd['usedbysnapshots']['parsed'],
                'usedbychildren': pd['usedbychildren']['parsed'],
                'usedbyrefreservation': pd['usedbyrefreservation']['parsed'],
                # for a pure stripe there are no grouped vdevs: each disk is one
                'vdevs': vdevs if vdevs else disks,
                'zil': bool(p['topology']['log'])
            })
        return {'pools': pool_list, 'total_raw_capacity': total_raw_capacity}
async def gather_services(self, context):
return {'services': context['services']}
    async def gather_nfs(self, context_unused):
        """Report enabled NFS protocols, kerberos state and connected client count."""
        num_clients = await self.middleware.call('nfs.client_count')
        nfs_config = await self.middleware.call('nfs.config')
        return {
            'NFS': {
                'enabled_protocols': nfs_config['protocols'],
                'kerberos': nfs_config['v4_krb_enabled'],
                'num_clients': num_clients,
            }
        }
    async def gather_ftp(self, context_unused):
        """Report current FTP connection count and the configured ceiling."""
        ftp_config = await self.middleware.call('ftp.config')
        num_conn = await self.middleware.call('ftp.connection_count')
        return {
            'FTP': {
                # upper bound: max clients times per-IP connection limit
                'connections_allowed': ftp_config['clients'] * ftp_config['ipconnections'],
                'num_connections': num_conn
            }
        }
    async def gather_sharing(self, context):
        """Summarize the configuration of all iSCSI, NFS and SMB shares."""
        sharing_list = []
        for service in {'iscsi', 'nfs', 'smb'}:
            service_upper = service.upper()
            # iSCSI "shares" live under iscsi.targetextent, not sharing.iscsi
            namespace = f'sharing.{service}' if service != 'iscsi' else 'iscsi.targetextent'
            for s in await self.middleware.call(f'{namespace}.query'):
                if service == 'smb':
                    sharing_list.append({
                        'type': service_upper,
                        'home': s['home'],
                        'timemachine': s['timemachine'],
                        'browsable': s['browsable'],
                        'recyclebin': s['recyclebin'],
                        'shadowcopy': s['shadowcopy'],
                        'guestok': s['guestok'],
                        'abe': s['abe'],
                        'acl': s['acl'],
                        'fsrvp': s['fsrvp'],
                        'streams': s['streams'],
                    })
                elif service == 'nfs':
                    sharing_list.append({'type': service_upper, 'readonly': s['ro']})
                elif service == 'iscsi':
                    # each target/extent association is reported with details
                    # from both the target and the extent
                    tar = await self.middleware.call('iscsi.target.query', [('id', '=', s['target'])], {'get': True})
                    ext = await self.middleware.call(
                        'iscsi.extent.query', [('id', '=', s['extent'])], {
                            'get': True,
                            'extra': {'retrieve_locked_info': False},
                        }
                    )
                    sharing_list.append({
                        'type': service_upper,
                        'mode': tar['mode'],
                        'groups': tar['groups'],
                        'iscsi_type': ext['type'],
                        'filesize': ext['filesize'],
                        'blocksize': ext['blocksize'],
                        'pblocksize': ext['pblocksize'],
                        'avail_threshold': ext['avail_threshold'],
                        'insecure_tpc': ext['insecure_tpc'],
                        'xen': ext['xen'],
                        'rpm': ext['rpm'],
                        'readonly': ext['ro'],
                        'legacy': ext['vendor'] == 'FreeBSD',
                        'vendor': ext['vendor'],
                    })
        return {'shares': sharing_list}
async def gather_vms(self, context):
vms = []
for v in await self.middleware.call('vm.query'):
nics = disks = 0
display_list = []
for d in v['devices']:
dtype = d['dtype']
if dtype == 'NIC':
nics += 1
elif dtype == 'DISK':
disks += 1
elif dtype == 'DISPLAY':
attrs = d['attributes']
display_list.append({
'wait': attrs.get('wait'),
'resolution': attrs.get('resolution'),
'web': attrs.get('web')
})
vms.append({
'bootloader': v['bootloader'],
'memory': v['memory'],
'vcpus': v['vcpus'],
'autostart': v['autostart'],
'time': v['time'],
'nics': nics,
'disks': disks,
'display_devices': len(display_list),
'display_devices_configs': display_list
})
return {'vms': vms}
def gather_nspawn_containers(self, context):
nspawn_containers = list()
try:
cmd = subprocess.run(['machinectl', 'list', '-o', 'json'], capture_output=True)
if cmd.returncode == 0:
nspawn_containers = json.loads(cmd.stdout.decode())
except Exception:
return {'nspawn_containers': 0}
return {
'nspawn_containers': len([
i for i in nspawn_containers if i.get('service') == 'systemd-nspawn'
])
}
async def setup(middleware):
    """Register the usage-reporting network activity and schedule the first
    collection at a random moment between now and the end of the day, so
    report submission times are spread across installations."""
    now = utc_now()
    await middleware.call('network.general.register_activity', 'usage', 'Anonymous usage statistics')
    delay = random.uniform(1, (
        now.replace(hour=23, minute=59, second=59) - now
    ).total_seconds())
    # call_later() takes a *relative* delay, which is what we compute above;
    # the previous call_at() expects an absolute loop-time value and only
    # behaved as intended when the loop clock happened to start near zero.
    asyncio.get_running_loop().call_later(
        delay,
        lambda: middleware.create_task(middleware.call('usage.start'))
    )
| 19,156 | Python | .py | 405 | 33.088889 | 120 | 0.512331 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,493 | ftp.py | truenas_middleware/src/middlewared/middlewared/plugins/ftp.py | from middlewared.async_validators import check_path_resides_within_volume, resolve_hostname, validate_port
from middlewared.schema import accepts, Bool, Dict, Dir, Int, Patch, Str
from middlewared.validators import Exact, Match, Or, Range
from middlewared.service import private, SystemServiceService, ValidationErrors
import middlewared.sqlalchemy as sa
class FTPModel(sa.Model):
    """Database model backing the FTP (proftpd) service configuration."""
    __tablename__ = 'services_ftp'

    id = sa.Column(sa.Integer(), primary_key=True)
    ftp_port = sa.Column(sa.Integer(), default=21)
    ftp_clients = sa.Column(sa.Integer(), default=5)
    ftp_ipconnections = sa.Column(sa.Integer(), default=2)
    ftp_loginattempt = sa.Column(sa.Integer(), default=1)
    ftp_timeout = sa.Column(sa.Integer(), default=600)
    ftp_timeout_notransfer = sa.Column(sa.Integer(), default=300)
    ftp_onlyanonymous = sa.Column(sa.Boolean(), default=False)
    # NOTE(review): default=False on a nullable string column looks like it
    # should be None — confirm before changing; existing rows may rely on it
    ftp_anonpath = sa.Column(sa.String(255), nullable=True, default=False)
    ftp_onlylocal = sa.Column(sa.Boolean(), default=False)
    ftp_banner = sa.Column(sa.Text())
    # octal permission masks stored as 3-character strings
    ftp_filemask = sa.Column(sa.String(3), default="077")
    ftp_dirmask = sa.Column(sa.String(3), default="022")
    ftp_fxp = sa.Column(sa.Boolean(), default=False)
    ftp_resume = sa.Column(sa.Boolean(), default=False)
    ftp_defaultroot = sa.Column(sa.Boolean(), default=True)
    ftp_ident = sa.Column(sa.Boolean(), default=False)
    ftp_reversedns = sa.Column(sa.Boolean(), default=False)
    ftp_masqaddress = sa.Column(sa.String(120))
    # 0 for both passive port bounds means "let proftpd choose"
    ftp_passiveportsmin = sa.Column(sa.Integer(), default=0)
    ftp_passiveportsmax = sa.Column(sa.Integer(), default=0)
    # bandwidth limits in KB/s; 0 means unlimited
    ftp_localuserbw = sa.Column(sa.Integer(), default=0)
    ftp_localuserdlbw = sa.Column(sa.Integer(), default=0)
    ftp_anonuserbw = sa.Column(sa.Integer(), default=0)
    ftp_anonuserdlbw = sa.Column(sa.Integer(), default=0)
    ftp_tls = sa.Column(sa.Boolean(), default=False)
    ftp_tls_policy = sa.Column(sa.String(120), default="on")
    ftp_tls_opt_allow_client_renegotiations = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_allow_dot_login = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_allow_per_user = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_common_name_required = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_enable_diags = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_export_cert_data = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_no_empty_fragments = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_no_session_reuse_required = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_stdenvvars = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_dns_name_required = sa.Column(sa.Boolean(), default=False)
    ftp_tls_opt_ip_address_required = sa.Column(sa.Boolean(), default=False)
    ftp_ssltls_certificate_id = sa.Column(sa.ForeignKey('system_certificate.id'), index=True, nullable=True)
    # free-form extra proftpd(8) directives
    ftp_options = sa.Column(sa.Text())
class FTPService(SystemServiceService):
    """Config-style service managing the FTP (proftpd) daemon settings."""

    class Config:
        service = "ftp"
        datastore = "services.ftp"
        datastore_prefix = "ftp_"
        datastore_extend = "ftp.ftp_extend"
        cli_namespace = "service.ftp"
        role_prefix = "SHARING_FTP"

    ENTRY = Dict(
        'ftp_entry',
        Int('id', required=True),
        Int('port', validators=[Range(min_=1, max_=65535)], required=True),
        Int('clients', validators=[Range(min_=1, max_=10000)], required=True),
        Int('ipconnections', validators=[Range(min_=0, max_=1000)], required=True),
        Int('loginattempt', validators=[Range(min_=0, max_=1000)], required=True),
        Int('timeout', validators=[Range(min_=0, max_=10000)], required=True),
        Int('timeout_notransfer', validators=[Range(min_=0, max_=10000)]),
        Bool('onlyanonymous', required=True),
        Dir('anonpath', null=True, required=True),
        Bool('onlylocal', required=True),
        Str('banner', max_length=None, required=True),
        Str('filemask', validators=[Match(r"^[0-7]{3}$")], required=True),
        Str('dirmask', validators=[Match(r"^[0-7]{3}$")], required=True),
        Bool('fxp', required=True),
        Bool('resume', required=True),
        Bool('defaultroot', required=True),
        Bool('ident', required=True),
        Bool('reversedns', required=True),
        Str('masqaddress', required=True),
        Int('passiveportsmin', validators=[Or(Exact(0), Range(min_=1024, max_=65535))], required=True),
        Int('passiveportsmax', validators=[Or(Exact(0), Range(min_=1024, max_=65535))], required=True),
        Int('localuserbw', validators=[Range(min_=0)], required=True),
        Int('localuserdlbw', validators=[Range(min_=0)], required=True),
        Int('anonuserbw', validators=[Range(min_=0)], required=True),
        Int('anonuserdlbw', validators=[Range(min_=0)], required=True),
        Bool('tls', required=True),
        Str('tls_policy', enum=[
            'on', 'off', 'data', '!data', 'auth', 'ctrl', 'ctrl+data', 'ctrl+!data', 'auth+data', 'auth+!data'
        ], required=True),
        Bool('tls_opt_allow_client_renegotiations', required=True),
        Bool('tls_opt_allow_dot_login', required=True),
        Bool('tls_opt_allow_per_user', required=True),
        Bool('tls_opt_common_name_required', required=True),
        Bool('tls_opt_enable_diags', required=True),
        Bool('tls_opt_export_cert_data', required=True),
        Bool('tls_opt_no_empty_fragments', required=True),
        Bool('tls_opt_no_session_reuse_required', required=True),
        Bool('tls_opt_stdenvvars', required=True),
        Bool('tls_opt_dns_name_required', required=True),
        Bool('tls_opt_ip_address_required', required=True),
        Int('ssltls_certificate', null=True, required=True),
        Str('options', max_length=None, required=True),
    )

    @private
    async def ftp_extend(self, data):
        """Flatten the joined certificate row to its id for API consumers."""
        if data['ssltls_certificate']:
            data['ssltls_certificate'] = data['ssltls_certificate']['id']
        return data

    @accepts(
        Patch(
            'ftp_entry', 'ftp_update',
            ('rm', {'name': 'id'}),
            ('attr', {'update': True}),
        ),
        audit='Update FTP configuration',
    )
    async def do_update(self, data):
        """
        Update ftp service configuration.

        `clients` is an integer value which sets the maximum number of simultaneous clients allowed. It defaults to 5.

        `ipconnections` is an integer value which shows the maximum number of connections per IP address, where 0
        equals to unlimited. It defaults to 2.

        `timeout` is the maximum number of seconds that proftpd will allow clients to stay connected without receiving
        any data on either the control or data connection.

        `timeout_notransfer` is the maximum number of seconds a client is allowed to spend connected, after
        authentication, without issuing a command which results in creating an active or passive data connection
        (i.e. sending/receiving a file, or receiving a directory listing).

        `onlyanonymous` allows anonymous FTP logins with access to the directory specified by `anonpath`.

        `banner` is a message displayed to local login users after they successfully authenticate. It is not displayed
        to anonymous login users.

        `filemask` sets the default permissions for newly created files which by default are 077.

        `dirmask` sets the default permissions for newly created directories which by default are 022.

        `resume` if set allows FTP clients to resume interrupted transfers.

        `fxp` if set to true indicates that File eXchange Protocol is enabled. Generally it is discouraged as it
        makes the server vulnerable to FTP bounce attacks.

        `defaultroot` when set ensures that for local users, home directory access is only granted if the user
        is a member of group wheel.

        `ident` is a boolean value which when set to true indicates that IDENT authentication is required. If identd
        is not running on the client, this can result in timeouts.

        `masqaddress` is the public IP address or hostname which is set if FTP clients cannot connect through a
        NAT device.

        `localuserbw` is a positive integer value which indicates maximum upload bandwidth in KB/s for local user.
        Default of zero indicates unlimited upload bandwidth ( from the FTP server configuration ).

        `localuserdlbw` is a positive integer value which indicates maximum download bandwidth in KB/s for local user.
        Default of zero indicates unlimited download bandwidth ( from the FTP server configuration ).

        `anonuserbw` is a positive integer value which indicates maximum upload bandwidth in KB/s for anonymous user.
        Default of zero indicates unlimited upload bandwidth ( from the FTP server configuration ).

        `anonuserdlbw` is a positive integer value which indicates maximum download bandwidth in KB/s for anonymous
        user. Default of zero indicates unlimited download bandwidth ( from the FTP server configuration ).

        `tls` is a boolean value which when set indicates that encrypted connections are enabled. This requires a
        certificate to be configured first with the certificate service and the id of certificate is passed on in
        `ssltls_certificate`.

        `tls_policy` defines whether the control channel, data channel, both channels, or neither channel of an FTP
        session must occur over SSL/TLS.

        `tls_opt_enable_diags` is a boolean value when set, logs verbosely. This is helpful when troubleshooting a
        connection.

        `options` is a string used to add proftpd(8) parameters not covered by ftp service.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        # passive port bounds must be specified together (both zero = default)
        if not ((new["passiveportsmin"] == 0) == (new["passiveportsmax"] == 0)):
            # use the schema-prefixed attribute path consistently with all
            # other validation errors in this method
            verrors.add("ftp_update.passiveportsmin", "passiveportsmin and passiveportsmax should be both zero or non-zero")
        if not ((new["passiveportsmin"] == 0 and new["passiveportsmax"] == 0) or
                (new["passiveportsmax"] > new["passiveportsmin"])):
            verrors.add("ftp_update.passiveportsmax", "When specified, should be greater than passiveportsmin")

        if new["onlyanonymous"]:
            if not new["anonpath"]:
                verrors.add("ftp_update.anonpath", "This field is required for anonymous login")
        else:
            # Anonymous is disabled, clear the anonpath
            if new["anonpath"] is not None:
                new["anonpath"] = None

        if new["anonpath"] is not None:
            await check_path_resides_within_volume(verrors, self.middleware, "ftp_update.anonpath", new["anonpath"])

        if new["tls"]:
            if not new["ssltls_certificate"]:
                verrors.add(
                    "ftp_update.ssltls_certificate",
                    "Please provide a valid certificate id when TLS is enabled"
                )
            else:
                verrors.extend((await self.middleware.call(
                    "certificate.cert_services_validation", new["ssltls_certificate"],
                    "ftp_update.ssltls_certificate", False
                )))

        if new["masqaddress"]:
            await resolve_hostname(self.middleware, verrors, "ftp_update.masqaddress", new["masqaddress"])

        verrors.extend(await validate_port(self.middleware, "ftp_update.port", new["port"], "ftp"))

        verrors.check()

        await self._update_service(old, new)

        # the ssl service must be running for proftpd TLS to work
        if not old['tls'] and new['tls']:
            await self.middleware.call('service.start', 'ssl')

        return new
async def pool_post_import(middleware, pool):
    """
    We don't set up anonymous FTP if pool is not imported yet.
    """
    if pool is None:
        # Best-effort config regeneration: failures are logged, not raised.
        # (The previous ``finally: return`` also silently swallowed
        # BaseExceptions such as asyncio.CancelledError, which must propagate.)
        try:
            await middleware.call("etc.generate", "ftp")
        except Exception:
            middleware.logger.debug("Failed to generate ftp configuration file.", exc_info=True)
        return

    await middleware.call("service.reload", "ftp")
async def setup(middleware):
    # regenerate/reload the FTP configuration whenever a pool gets imported
    middleware.register_hook("pool.post_import", pool_post_import, sync=True)
| 12,298 | Python | .py | 206 | 51.033981 | 119 | 0.671814 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,494 | enclosure.py | truenas_middleware/src/middlewared/middlewared/plugins/enclosure.py | import asyncio
import errno
import logging
import re
import pathlib
from collections import OrderedDict
from libsg3.ses import EnclosureDevice
from middlewared.schema import Dict, Int, Str, accepts
from middlewared.service import CallError, CRUDService, filterable, private
from middlewared.service_exception import MatchNotFound
import middlewared.sqlalchemy as sa
from middlewared.utils import filter_list
from middlewared.plugins.enclosure_.r30_drive_identify import set_slot_status as r30_set_slot_status
from middlewared.plugins.enclosure_.fseries_drive_identify import set_slot_status as fseries_set_slot_status
from middlewared.plugins.enclosure_.sysfs_disks import toggle_enclosure_slot_identifier
logger = logging.getLogger(__name__)
# SES control payloads (hex token strings) for slot LED actions
ENCLOSURE_ACTIONS = {
    'clear': '0x80 0x00 0x00 0x00',
    'identify': '0x80 0x00 0x02 0x00',
    'fault': '0x80 0x00 0x00 0x20',
}
# human-readable SES element status, indexed by the 4-bit status code (0-15)
STATUS_DESC = [
    "Unsupported",
    "OK",
    "Critical",
    "Noncritical",
    "Unrecoverable",
    "Not installed",
    "Unknown",
    "Not available",
    "No access allowed",
    "reserved [9]",
    "reserved [10]",
    "reserved [11]",
    "reserved [12]",
    "reserved [13]",
    "reserved [14]",
    "reserved [15]",
]
# regexes matching SES enclosure name strings of the various iX platforms
M_SERIES_REGEX = re.compile(r"(ECStream|iX) 4024S([ps])")
R_SERIES_REGEX = re.compile(r"(ECStream|iX) (FS1|FS2|DSS212S[ps])")
R20_REGEX = re.compile(r"(iX (TrueNAS R20|2012S)p|SMC SC826-P)")
R50_REGEX = re.compile(r"iX eDrawer4048S([12])")
X_SERIES_REGEX = re.compile(r"CELESTIC (P3215-O|P3217-B)")
ES24_REGEX = re.compile(r"(ECStream|iX) 4024J")
ES24F_REGEX = re.compile(r"(ECStream|iX) 2024J([ps])")
MINI_REGEX = re.compile(r"(TRUE|FREE)NAS-MINI")
# dmidecode product names of the R20 family (see Enclosures blacklist logic)
R20_VARIANT = ["TRUENAS-R20", "TRUENAS-R20A", "TRUENAS-R20B"]
class EnclosureLabelModel(sa.Model):
    """User-assigned display labels for enclosures, keyed by enclosure id."""
    __tablename__ = 'truenas_enclosurelabel'

    id = sa.Column(sa.Integer(), primary_key=True)
    encid = sa.Column(sa.String(200), unique=True)
    label = sa.Column(sa.String(200))
class EnclosureService(CRUDService):
    """Query SES enclosures on iX hardware and control slot identify/fault
    LEDs; also keeps disk rows in sync with their enclosure slot."""

    class Config:
        cli_namespace = 'storage.enclosure'

    @filterable
    def query(self, filters, options):
        """Return all enclosures (SES plus mapped NVMe) with their elements.

        Returns an empty list on non-iX hardware.
        """
        enclosures = []
        if not self.middleware.call_sync('truenas.is_ix_hardware'):
            # this feature is only available on hardware that ix sells
            return enclosures

        for enc in self.__get_enclosures():
            enclosure = {
                "id": enc.encid,
                "bsg": enc.devname,
                "name": enc.name,
                "model": enc.model,
                "controller": enc.controller,
                "elements": [],
            }
            for name, elems in enc.iter_by_name().items():
                header = None
                elements = []
                has_slot_status = False
                for elem in elems:
                    header = list(elem.get_columns().keys())
                    element = {
                        "slot": elem.slot,
                        "data": dict(zip(elem.get_columns().keys(), elem.get_values())),
                        "name": elem.name,
                        "descriptor": elem.descriptor,
                        "status": elem.status,
                        "value": elem.value,
                        "value_raw": 0x0,
                    }
                    if isinstance(elem.value_raw, int):
                        element["value_raw"] = hex(elem.value_raw)
                    if hasattr(elem, "device_slot_set"):
                        # this element can toggle its identify/fault LEDs
                        has_slot_status = True
                        element["fault"] = elem.fault
                        element["identify"] = elem.identify
                    elements.append(element)
                if header is not None and elements:
                    enclosure["elements"].append({
                        "name": name,
                        "descriptor": enc.descriptors.get(name, ""),
                        "header": header,
                        "elements": elements,
                        "has_slot_status": has_slot_status
                    })
            # Ensure R50's first expander is first in the list independent of cabling
            if "eDrawer4048S1" in enclosure['name']:
                enclosures.insert(0, enclosure)
            else:
                enclosures.append(enclosure)

        enclosures.extend(self.middleware.call_sync("enclosure.map_nvme"))
        enclosures = self.middleware.call_sync("enclosure.map_enclosures", enclosures)

        for number, enclosure in enumerate(enclosures):
            enclosure["number"] = number

        # apply user-assigned labels, falling back to the enclosure name
        labels = {
            label["encid"]: label["label"]
            for label in self.middleware.call_sync("datastore.query", "truenas.enclosurelabel")
        }
        for enclosure in enclosures:
            enclosure["label"] = labels.get(enclosure["id"]) or enclosure["name"]

        # controller (head unit) enclosures first, then by id
        enclosures = sorted(enclosures, key=lambda enclosure: (0 if enclosure["controller"] else 1, enclosure["id"]))

        return filter_list(enclosures, filters=filters or [], options=options or {})

    @accepts(
        Str("id"),
        Dict(
            "enclosure_update",
            Str("label"),
            update=True,
        ),
    )
    async def do_update(self, id_, data):
        """Update user-visible enclosure attributes (currently only `label`)."""
        if "label" in data:
            # delete-then-insert keeps `encid` unique without needing an upsert
            await self.middleware.call("datastore.delete", "truenas.enclosurelabel", [["encid", "=", id_]])
            await self.middleware.call("datastore.insert", "truenas.enclosurelabel", {
                "encid": id_,
                "label": data["label"]
            })

        return await self.get_instance(id_)

    def _get_slot(self, slot_filter, enclosure_query=None, enclosure_info=None):
        """Return (enclosure, slot element) for the first "Array Device Slot"
        element matching `slot_filter`; raise MatchNotFound otherwise."""
        if enclosure_info is None:
            enclosure_info = self.middleware.call_sync("enclosure.query", enclosure_query or [])

        for enclosure in enclosure_info:
            try:
                elements = next(filter(lambda element: element["name"] == "Array Device Slot",
                                       enclosure["elements"]))["elements"]
                slot = next(filter(slot_filter, elements))
                return enclosure, slot
            except StopIteration:
                pass

        raise MatchNotFound()

    def _get_slot_for_disk(self, disk, enclosure_info=None):
        """Return (enclosure, slot element) housing the disk device `disk`."""
        return self._get_slot(lambda element: element["data"]["Device"] == disk, enclosure_info=enclosure_info)

    def _get_ses_slot(self, enclosure, element):
        """Resolve a queried slot element back to the raw SES slot object.

        Mapped enclosures record the physical location under `original`.
        Raises MatchNotFound when the SES enclosure or slot no longer exists.
        """
        if "original" in element:
            enclosure_id = element["original"]["enclosure_id"]
            slot = element["original"]["slot"]
        else:
            enclosure_id = enclosure["id"]
            slot = element["slot"]

        ses_enclosures = self.__get_enclosures()
        ses_enclosure = ses_enclosures.get_by_encid(enclosure_id)
        if ses_enclosure is None:
            raise MatchNotFound()
        ses_slot = ses_enclosure.get_by_slot(slot)
        if ses_slot is None:
            raise MatchNotFound()
        return ses_slot

    def _get_ses_slot_for_disk(self, disk):
        # This can also return SES slot for disk that is not present in the system
        try:
            enclosure, element = self._get_slot_for_disk(disk)
        except MatchNotFound:
            # fall back to the slot recorded on the (possibly expired) disk row
            disk = self.middleware.call_sync(
                "disk.query",
                [["devname", "=", disk]],
                {"get": True, "extra": {"include_expired": True}, "order_by": ["expiretime"]},
            )
            if disk["enclosure"]:
                enclosure, element = self._get_slot(lambda element: element["slot"] == disk["enclosure"]["slot"],
                                                    [["number", "=", disk["enclosure"]["number"]]])
            else:
                raise MatchNotFound()

        return self._get_ses_slot(enclosure, element)

    def _get_orig_enclosure_and_disk(self, enclosure_id, slot, info):
        """Map a (possibly remapped) slot to its physical (bsg device, slot);
        implicitly returns None when the slot is not found."""
        for i in filter(lambda x: x.get('name') == 'Array Device Slot', info['elements']):
            for j in filter(lambda x: x['slot'] == slot, i['elements']):
                if enclosure_id == 'mapped_enclosure_0':
                    # we've mapped the drive slots in a convenient way for the administrator
                    # to easily be able to identify drive slot 1 (when in reality, it's probably
                    # physically cabled to slot 5 (or whatever))
                    return j['original']['enclosure_bsg'], j['original']['slot']
                else:
                    # a platform that doesn't require mapping the drives so we can just return
                    # the slot passed to us
                    return info['bsg'], slot

    @accepts(Str("enclosure_id"), Int("slot"), Str("status", enum=["CLEAR", "FAULT", "IDENTIFY"]))
    def set_slot_status(self, enclosure_id, slot, status):
        """Set (or clear) the identify/fault LED of a drive slot.

        NVMe pseudo-enclosures (R30/F-series) and the H-series sysfs path are
        special-cased; everything else goes through SES control pages.
        """
        if enclosure_id == 'r30_nvme_enclosure':
            r30_set_slot_status(slot, status)
            return
        elif enclosure_id in ('f60_nvme_enclosure', 'f100_nvme_enclosure', 'f130_nvme_enclosure'):
            fseries_set_slot_status(slot, status)
            return

        try:
            info = self.middleware.call_sync('enclosure.query', [['id', '=', enclosure_id]])[0]
        except IndexError:
            raise CallError(f'Enclosure with id: {enclosure_id!r} not found', errno.ENOENT)

        if info['model'] == 'H Series':
            # translate UI slot numbers to the sysfs directory names used by
            # the H-series HBA enclosure device
            sysfs_to_ui = {
                1: '8', 2: '9', 3: '10', 4: '11',
                5: '12', 6: '13', 7: '14', 8: '15',
                9: '0', 10: '1', 11: '2', 12: '3',
            }
            if slot not in sysfs_to_ui:
                raise CallError(f'Slot: {slot!r} not found', errno.ENOENT)

            addr = info['bsg'].removeprefix('bsg/')
            sysfs_path = f'/sys/class/enclosure/{addr}'
            mapped_slot = sysfs_to_ui[slot]
            try:
                toggle_enclosure_slot_identifier(sysfs_path, mapped_slot, status, True)
            except FileNotFoundError:
                raise CallError(f'Slot: {slot!r} not found', errno.ENOENT)
            return

        original = self._get_orig_enclosure_and_disk(enclosure_id, slot, info)
        if original is None:
            raise CallError(f'Slot: {slot!r} not found', errno.ENOENT)

        original_bsg, original_slot = original
        if status == 'CLEAR':
            # clearing must reset both LEDs
            actions = ('clear=ident', 'clear=fault')
        else:
            actions = (f'set={status[:5].lower()}',)

        enc = EnclosureDevice(f'/dev/{original_bsg}')
        try:
            for action in actions:
                # sg_ses element indices are 0-based, our slots are 1-based
                enc.set_control(str(original_slot - 1), action)
        except OSError:
            msg = f'Failed to {status} slot {slot!r} on enclosure {info["id"]!r}'
            self.logger.warning(msg, exc_info=True)
            raise CallError(msg)

    @private
    def sync_disk(self, id_, enclosure_info=None, retry=False):
        """
        :param id:
        :param enclosure_info:
        :param retry: retry once more in 60 seconds if no enclosure slot for disk is found
        """
        disk = self.middleware.call_sync(
            'disk.query',
            [['identifier', '=', id_]],
            {'get': True, "extra": {'include_expired': True}}
        )

        try:
            enclosure, element = self._get_slot_for_disk(disk["name"], enclosure_info)
        except MatchNotFound:
            if retry:
                # the enclosure data may simply not be ready yet; try again later
                async def delayed():
                    await asyncio.sleep(60)
                    await self.middleware.call('enclosure.sync_disk', id_, enclosure_info)

                self.middleware.run_coroutine(delayed(), wait=False)
                return

            disk_enclosure = None
        else:
            disk_enclosure = {
                "number": enclosure["number"],
                "slot": element["slot"],
            }

        if disk_enclosure != disk['enclosure']:
            self.middleware.call_sync('disk.update', id_, {'enclosure': disk_enclosure})

    def __get_enclosures(self):
        """Build the Enclosures collection from raw SES data + product name."""
        return Enclosures(
            self.middleware.call_sync("enclosure.get_ses_enclosures"),
            self.middleware.call_sync("system.dmidecode_info")["system-product-name"]
        )
class Enclosures(object):
    """Collection of `Enclosure` objects parsed from SES data, filtered by a
    per-platform blacklist of pseudo/irrelevant enclosure names."""

    def __init__(self, stat, product_name):
        """
        :param stat: mapping of enclosure number -> raw SES data tuple
        :param product_name: dmidecode system-product-name string
        """
        self.__enclosures = []
        # Only iX platforms expose real enclosures. Checked with
        # short-circuiting so a non-string `product_name` no longer raises
        # AttributeError on `.startswith` (the old `any((..., ...))` tuple
        # evaluated both members eagerly, defeating the isinstance guard).
        if not isinstance(product_name, str) or not product_name.startswith(("TRUENAS-", "FREENAS-")):
            return

        if product_name.startswith("TRUENAS-H"):
            blacklist = []
        else:
            blacklist = ["VirtualSES"]

        # AHCI SGPIO is only a real enclosure on MINIs and R20 variants
        if "-MINI-" not in product_name and product_name not in R20_VARIANT:
            blacklist.append("AHCI SGPIO Enclosure 2.00")

        for num, data in stat.items():
            enclosure = Enclosure(num, data, stat, product_name)
            if any(s in enclosure.encname for s in blacklist):
                continue
            self.__enclosures.append(enclosure)

    def __iter__(self):
        # iterate over a snapshot so callers may append during iteration
        yield from list(self.__enclosures)

    def append(self, enc):
        """Add an already-constructed Enclosure; reject anything else."""
        if not isinstance(enc, Enclosure):
            raise ValueError("Not an enclosure")
        self.__enclosures.append(enc)

    def find_device_slot(self, devname):
        """Return slot info for `devname` from the first enclosure knowing it;
        raises AssertionError when no enclosure contains the device."""
        for enc in self:
            find = enc.find_device_slot(devname)
            if find is not None:
                return find
        raise AssertionError(f"Enclosure slot not found for {devname}")

    def get_by_id(self, _id):
        """Return the enclosure with matching number, or None."""
        for e in self:
            if e.num == _id:
                return e

    def get_by_encid(self, _id):
        """Return the enclosure with matching logical identifier, or None."""
        for e in self:
            if e.encid == _id:
                return e
class Enclosure(object):
    def __init__(self, num, data, stat, product_name):
        """Parse a single SES enclosure.

        :param num: enclosure index within the SES status mapping
        :param data: tuple of (bsg device name, raw SES page data) — the
            second member is unpacked further by ``_parse``
        :param stat: full mapping of all enclosures (kept for cross-reference)
        :param product_name: dmidecode system-product-name string
        """
        self.num = num
        self.stat = stat
        self.product_name = product_name
        self.devname, data = data
        self.encname = ""
        self.encid = ""
        self.model = ""
        self.controller = False
        self.status = "OK"
        self.__elements = []
        self.__elementsbyname = {}
        self.descriptors = {}
        self._parse(data)
    def _parse(self, data):
        """Populate name/id/model and element objects from raw SES text.

        `data` is a tuple of (configuration page text, enclosure status page
        text) as produced by the SES query tooling.
        """
        cf, es = data

        # first line of the config page is the enclosure name
        self.encname = re.sub(r"\s+", " ", cf.splitlines()[0].strip())
        if m := re.search(r"\s+enclosure logical identifier \(hex\): ([0-9a-f]+)", cf):
            self.encid = m.group(1)

        self._set_model(cf)
        self.status = "OK"
        is_hseries = self.product_name and self.product_name.startswith('TRUENAS-H')
        self.map_disks_to_enclosure_slots(is_hseries)
        element_type = None
        element_number = None
        # walk the status page line by line; a status line applies to the most
        # recently seen "Element N descriptor" of the current element type
        for line in es.splitlines():
            if m := re.match(r"\s+Element type: (.+), subenclosure", line):
                element_type = m.group(1)
                if element_type != "Audible alarm":
                    # title-case multi-word type names (e.g. "Power supply")
                    element_type = " ".join([
                        word[0].upper() + word[1:]
                        for word in element_type.split()
                    ])
                if element_type == "Temperature Sensor":
                    element_type = "Temperature Sensors"
                element_number = None
            elif m := re.match(r"\s+Element ([0-9]+) descriptor:", line):
                element_number = int(m.group(1))
            elif m := re.match(r"\s+([0-9a-f ]{11})", line):
                # NOTE(review): all() uses truthiness, so element_number == 0
                # is skipped here — confirm whether element 0 is intentionally
                # excluded (SES "overall" element) before changing
                if all((element_type, element_number, element_type != 'Array Device Slot')):
                    element = self._enclosure_element(
                        element_number + 1,
                        element_type,
                        self._parse_raw_value(m.group(1)),
                        None,
                        "",
                        "",
                    )
                    if element is not None:
                        self.append(element)
                element_number = None
            else:
                element_number = None
def map_disks_to_enclosure_slots(self, is_hseries=False):
"""
The sysfs directory structure is dynamic based on the enclosure that
is attached.
Here are some examples of what we've seen on internal hardware:
/sys/class/enclosure/19:0:6:0/SLOT_001/
/sys/class/enclosure/13:0:0:0/Drive Slot #0_0000000000000000/
/sys/class/enclosure/13:0:0:0/Disk #00/
/sys/class/enclosure/13:0:0:0/Slot 00/
/sys/class/enclosure/13:0:0:0/slot00/
/sys/class/enclosure/13:0:0:0/0/
The safe assumption that we can make on whether or not the directory
represents a drive slot is looking for the file named "slot" underneath
each directory. (i.e. /sys/class/enclosure/13:0:0:0/Disk #00/slot)
If this file doesn't exist, it means 1 thing
1. this isn't a drive slot directory
Once we've determined that there is a file named "slot", we can read the
contents of that file to get the slot number associated to the disk device.
The "slot" file is always an integer so we don't need to convert to hexadecimal.
"""
ignore = tuple()
if is_hseries:
ignore = ('4', '5', '6', '7')
mapping = dict()
pci = self.devname.removeprefix('bsg/') # why do we set this as 'bsg/13:0:0:0'...?
for i in filter(lambda x: x.is_dir(), pathlib.Path(f'/sys/class/enclosure/{pci}').iterdir()):
if is_hseries and i.name in ignore:
# on hseries platform, the broadcom HBA enumerates sysfs
# with directory names as the slot number
# (i.e. /sys/class/enclosure/*/0, /sys/class/enclosure/*/1, etc)
# There are 16 ports on this card, but we only use 12
continue
try:
slot = int((i / 'slot').read_text().strip())
slot_status = (i / 'status').read_text().strip()
ident = (i / 'locate').read_text().strip()
fault = (i / 'fault').read_text().strip()
except (FileNotFoundError, ValueError):
# not a slot directory
continue
else:
try:
dev = next((i / 'device/block').iterdir(), '')
if dev:
dev = dev.name
mapping[slot] = (dev, slot_status, ident, fault)
except FileNotFoundError:
# no disk in this slot
mapping[slot] = ('', slot_status, ident, fault)
try:
if min(mapping) == 0:
# if the enclosure starts slots at 0 then we need
# to bump them by 1 to not cause confusion for
# end-user
mapping = {k + 1: v for k, v in mapping.items()}
except ValueError:
# means mapping is an empty dict (shouldn't happen)
return
disk_raw_values = dict()
if not is_hseries:
for k, v in EnclosureDevice(f'/dev/{self.devname}').status()['elements'].items():
if v['type'] == 23 and v['descriptor'] != '<empty>':
disk_raw_values[k] = v['status']
for slot in sorted(mapping):
disk, slot_status, ident, fault = mapping[slot]
if is_hseries:
info = self._enclosure_element(slot, 'Array Device Slot', slot_status, None, '', disk, ident, fault)
else:
info = self._enclosure_element(
slot,
'Array Device Slot',
self._parse_raw_value(disk_raw_values.get(slot, [5, 0, 0, 0])),
None,
'',
disk
)
self.append(info)
return mapping
def _set_model(self, data):
if M_SERIES_REGEX.match(self.encname):
self.model = "M Series"
self.controller = True
elif R_SERIES_REGEX.match(self.encname) or R20_REGEX.match(self.encname) or R50_REGEX.match(self.encname):
self.model = self.product_name.replace("TRUENAS-", "")
self.controller = True
elif self.encname == "AHCI SGPIO Enclosure 2.00":
if self.product_name in R20_VARIANT:
self.model = self.product_name.replace("TRUENAS-", "")
self.controller = True
elif MINI_REGEX.match(self.product_name):
# TrueNAS Mini's do not have their product name stripped
self.model = self.product_name
self.controller = True
self.controller = True
elif X_SERIES_REGEX.match(self.encname):
self.model = "X Series"
self.controller = True
elif self.encname.startswith("BROADCOM VirtualSES 03"):
self.model = "H Series"
self.controller = True
elif self.encname.startswith("QUANTA JB9 SIM"):
self.model = "E60"
elif self.encname.startswith("Storage 1729"):
self.model = "E24"
elif self.encname.startswith("ECStream 3U16+4R-4X6G.3"):
if "SD_9GV12P1J_12R6K4" in data:
self.model = "Z Series"
self.controller = True
else:
self.model = "E16"
elif self.encname.startswith("ECStream 3U16RJ-AC.r3"):
self.model = "E16"
elif self.encname.startswith("HGST H4102-J"):
self.model = "ES102"
elif self.encname.startswith((
"VikingES NDS-41022-BB",
"VikingES VDS-41022-BB",
)):
self.model = "ES102G2"
elif self.encname.startswith("CELESTIC R0904"):
self.model = "ES60"
elif self.encname.startswith("HGST H4060-J"):
self.model = "ES60G2"
elif ES24_REGEX.match(self.encname):
self.model = "ES24"
elif ES24F_REGEX.match(self.encname):
self.model = "ES24F"
elif self.encname.startswith("CELESTIC X2012"):
self.model = "ES12"
def _parse_raw_value(self, value):
if isinstance(value, str):
value = [int(i.replace("0x", ""), 16) for i in value.split(' ')]
newvalue = 0
for i, v in enumerate(value):
newvalue |= v << (2 * (3 - i)) * 4
return newvalue
def iter_by_name(self):
return OrderedDict(sorted(self.__elementsbyname.items()))
def append(self, element):
self.__elements.append(element)
if element.name not in self.__elementsbyname:
self.__elementsbyname[element.name] = [element]
else:
self.__elementsbyname[element.name].append(element)
element.enclosure = self
def _enclosure_element(self, slot, name, value, status, desc, dev, ident=None, fault=None):
if name == "Audible alarm":
return AlarmElm(slot=slot, value_raw=value, desc=desc)
elif name == "Communication Port":
return CommPort(slot=slot, value_raw=value, desc=desc)
elif name == "Current Sensor":
return CurrSensor(slot=slot, value_raw=value, desc=desc)
elif name == "Enclosure":
return EnclosureElm(slot=slot, value_raw=value, desc=desc)
elif name == "Voltage Sensor":
return VoltSensor(slot=slot, value_raw=value, desc=desc)
elif name == "Cooling":
return Cooling(slot=slot, value_raw=value, desc=desc)
elif name == "Temperature Sensors":
return TempSensor(slot=slot, value_raw=value, desc=desc)
elif name == "Power Supply":
return PowerSupply(slot=slot, value_raw=value, desc=desc)
elif name == "Array Device Slot":
# Echostream have actually only 16 physical disk slots
# See #24254
if self.encname.startswith('ECStream 3U16+4R-4X6G.3') and slot > 16:
return
if self.model.startswith('R50, ') and slot >= 25:
return
return ArrayDevSlot(slot=slot, value_raw=value, desc=desc, dev=dev, identify=ident, fault=fault)
elif name == "SAS Connector":
return SASConnector(slot=slot, value_raw=value, desc=desc)
elif name == "SAS Expander":
return SASExpander(slot=slot, value_raw=value, desc=desc)
else:
return Element(slot=slot, name=name, value_raw=value, desc=desc)
def __unicode__(self):
return self.name
def __repr__(self):
return f'<Enclosure: {self.name}>'
def __iter__(self):
for e in list(self.__elements):
yield e
@property
def name(self):
return self.encname
def find_device_slot(self, devname):
"""
Get the element that the device name points to
getencstat /dev/ses0 | grep da6
Element 0x7: Array Device Slot, status: OK (0x01 0x00 0x00 0x00),
descriptor: 'Slot 07', dev: 'da6,pass6'
What we are interested in is the 0x7
Returns:
A tuple of the form (Enclosure-slot-number, element)
Raises:
AssertionError: enclosure slot not found
"""
for e in self.__elementsbyname.get('Array Device Slot', []):
if e.devname == devname:
return e
def get_by_slot(self, slot):
for e in self:
if e.slot == slot:
return e
class Element(object):
    """Base class for a single SES enclosure element (sensor, slot, PSU, ...).

    Keyword args: ``value_raw`` (packed 32-bit status int, or a status string
    for sysfs-backed slots), ``slot``, ``desc``, optional ``name`` (generic
    elements only; subclasses define it as a class attribute), and optional
    ``identify``/``fault`` (sysfs 'locate'/'fault' file contents, only passed
    for sysfs-backed disk slots).
    """
    def __init__(self, **kwargs):
        if 'name' in kwargs:
            self.name = kwargs.pop('name')
        self.value_raw = kwargs.pop('value_raw')
        self.slot = kwargs.pop('slot')
        if isinstance(self.value_raw, int):
            # SES status code is the low nibble of the most significant byte.
            self.status_raw = (self.value_raw >> 24) & 0xf
        else:
            # sysfs-backed elements carry a status string instead of raw bytes
            self.status_raw = self.value_raw
        # BUGFIX: pop with a default instead of unconditionally; the original
        # kwargs.pop('identify') raised KeyError for every element type that
        # is constructed without these kwargs (alarms, sensors, ...), and an
        # unconditional assignment defeated the hasattr('_identify') checks
        # in ArrayDevSlot (None != '0' would read as "identify on").
        identify = kwargs.pop('identify', None)
        if identify is not None:
            self._identify = identify
        fault = kwargs.pop('fault', None)
        if fault is not None:
            self._fault = fault
        try:
            self.descriptor = kwargs.pop('desc')
        except Exception:
            self.descriptor = 'Unknown'
        self.enclosure = None  # back-reference filled in by Enclosure.append()
    def __repr__(self):
        return f'<Element: {self.name}>'
    def get_columns(self):
        """Return an ordered mapping of column label -> accessor callable."""
        return OrderedDict([
            ('Descriptor', lambda y: y.descriptor),
            ('Status', lambda y: y.status),
            ('Value', lambda y: y.value),
        ])
    def get_values(self):
        """Yield this element's column values in get_columns() order."""
        for value in list(self.get_columns().values()):
            yield value(self)
    @property
    def value(self):
        # Low 16 bits of the packed status word.
        return self.value_raw & 0xffff
    @property
    def status(self):
        """Human readable status; strings pass through, ints map via STATUS_DESC."""
        if isinstance(self.status_raw, str):
            return self.status_raw
        else:
            return STATUS_DESC[self.status_raw]
class AlarmElm(Element):
    """Audible alarm element; individual alarm bits decoded from value_raw."""
    name = "Audible alarm"
    @property
    def identify(self):
        # RQST IDENT bit of the control/status byte
        return (self.value_raw >> 16) & 0x80
    @property
    def fail(self):
        return (self.value_raw >> 16) & 0x40
    @property
    def rqmute(self):
        return self.value_raw & 0x80
    @property
    def muted(self):
        return self.value_raw & 0x40
    @property
    def remind(self):
        return self.value_raw & 0x10
    @property
    def info(self):
        return self.value_raw & 0x08
    @property
    def noncrit(self):
        return self.value_raw & 0x04
    @property
    def crit(self):
        return self.value_raw & 0x02
    @property
    def unrec(self):
        return self.value_raw & 0x01
    @property
    def value(self):
        """Comma-joined list of asserted alarm flags, or "None"."""
        flags = (
            (self.identify, "Identify on"),
            (self.fail, "Fail on"),
            (self.rqmute, "RQST mute"),
            (self.muted, "Muted"),
            (self.remind, "Remind"),
            (self.info, "INFO"),
            (self.noncrit, "NON-CRIT"),
            (self.crit, "CRIT"),
            (self.unrec, "UNRECOV"),
        )
        active = [label for bit, label in flags if bit]
        return ', '.join(active) if active else "None"
class CommPort(Element):
    """Communication port element."""
    name = "Communication Port"
    @property
    def identify(self):
        return (self.value_raw >> 16) & 0x80
    @property
    def fail(self):
        return (self.value_raw >> 16) & 0x40
    @property
    def disabled(self):
        return self.value_raw & 0x01
    @property
    def value(self):
        """Comma-joined list of asserted flags, or "None"."""
        active = [
            label for bit, label in (
                (self.identify, "Identify on"),
                (self.fail, "Fail on"),
                (self.disabled, "Disabled"),
            ) if bit
        ]
        return ', '.join(active) if active else "None"
class CurrSensor(Element):
    """Current sensor element; low 16 bits hold hundredths of an ampere."""
    name = "Current Sensor"
    @property
    def identify(self):
        return (self.value_raw >> 16) & 0x80
    @property
    def fail(self):
        return (self.value_raw >> 16) & 0x40
    @property
    def warnover(self):
        return (self.value_raw >> 16) & 0x8
    @property
    def critover(self):
        return (self.value_raw >> 16) & 0x2
    @property
    def value(self):
        """Reading in amperes followed by any asserted flags."""
        parts = [f"{(self.value_raw & 0xffff) / 100}A"]
        for bit, label in (
            (self.identify, "Identify on"),
            (self.fail, "Fail on"),
            (self.warnover, "Warn over"),
            (self.critover, "Crit over"),
        ):
            if bit:
                parts.append(label)
        return ', '.join(parts)
class EnclosureElm(Element):
    """Enclosure-level element (fail/warn indicators and power-cycle timers)."""
    name = "Enclosure"
    @property
    def identify(self):
        return (self.value_raw >> 16) & 0x80
    @property
    def pctime(self):
        # Requested power-cycle delay, minutes.
        return (self.value_raw >> 10) & 0x3f
    @property
    def potime(self):
        # Requested power-off duration, minutes.
        return (self.value_raw >> 2) & 0x3f
    @property
    def failind(self):
        return (self.value_raw >> 8) & 0x02
    @property
    def warnind(self):
        return (self.value_raw >> 8) & 0x01
    @property
    def value(self):
        """Comma-joined list of indicators/timers, or "None"."""
        parts = []
        if self.identify:
            parts.append("Identify on")
        if self.failind:
            parts.append("Fail on")
        if self.warnind:
            parts.append("Warn on")
        if self.pctime:
            parts.append(f"Power cycle {self.pctime} min, power off for {self.potime} min")
        return ', '.join(parts) if parts else "None"
class VoltSensor(Element):
    """Voltage sensor element; low 16 bits hold hundredths of a volt."""
    name = "Voltage Sensor"
    @property
    def identify(self):
        return (self.value_raw >> 16) & 0x80
    @property
    def fail(self):
        return (self.value_raw >> 16) & 0x40
    @property
    def warnover(self):
        return (self.value_raw >> 16) & 0x8
    @property
    def warnunder(self):
        return (self.value_raw >> 16) & 0x4
    @property
    def critover(self):
        return (self.value_raw >> 16) & 0x2
    @property
    def critunder(self):
        return (self.value_raw >> 16) & 0x1
    @property
    def value(self):
        """Reading in volts followed by any asserted flags."""
        parts = [f"{(self.value_raw & 0xffff) / 100}V"]
        for bit, label in (
            (self.identify, "Identify on"),
            (self.fail, "Fail on"),
            (self.warnover, "Warn over"),
            (self.warnunder, "Warn under"),
            (self.critover, "Crit over"),
            (self.critunder, "Crit under"),
        ):
            if bit:
                parts.append(label)
        return ', '.join(parts)
class Cooling(Element):
    """Fan element; bits 8-18 of value_raw hold the speed in tens of RPM."""
    name = "Cooling"
    @property
    def value(self):
        rpm = ((self.value_raw & 0x7ff00) >> 8) * 10
        return f"{rpm} RPM"
class TempSensor(Element):
    """Temperature sensor element."""
    name = "Temperature Sensor"
    @property
    def value(self):
        """Temperature as e.g. "35C", or None when no reading is present."""
        raw = (self.value_raw & 0xff00) >> 8
        if not raw:
            # raw value 0 (would imply -20 C) is reserved / no reading
            return None
        # 8 bits represent -19 C to +235 C, offset by 20
        return "%dC" % (raw - 20)
class PowerSupply(Element):
    """Power supply element; failure/warning bits decoded from value_raw."""
    name = "Power Supply"
    @property
    def identify(self):
        return (self.value_raw >> 16) & 0x80
    @property
    def overvoltage(self):
        return (self.value_raw >> 8) & 0x8
    @property
    def undervoltage(self):
        return (self.value_raw >> 8) & 0x4
    @property
    def overcurrent(self):
        return (self.value_raw >> 8) & 0x2
    @property
    def fail(self):
        return self.value_raw & 0x40
    @property
    def off(self):
        return self.value_raw & 0x10
    @property
    def tempfail(self):
        return self.value_raw & 0x8
    @property
    def tempwarn(self):
        return self.value_raw & 0x4
    @property
    def acfail(self):
        return self.value_raw & 0x2
    @property
    def dcfail(self):
        return self.value_raw & 0x1
    @property
    def value(self):
        """Comma-joined list of asserted PSU conditions, or "None"."""
        flags = (
            (self.identify, "Identify on"),
            (self.fail, "Fail on"),
            (self.overvoltage, "DC overvoltage"),
            (self.undervoltage, "DC undervoltage"),
            (self.overcurrent, "DC overcurrent"),
            (self.tempfail, "Overtemp fail"),
            (self.tempwarn, "Overtemp warn"),
            (self.acfail, "AC fail"),
            (self.dcfail, "DC fail"),
        )
        active = [label for bit, label in flags if bit]
        return ', '.join(active) if active else "None"
class ArrayDevSlot(Element):
    """Disk slot element; may carry a sysfs-sourced identify/fault state."""
    name = "Array Device Slot"
    def __init__(self, dev=None, **kwargs):
        super().__init__(**kwargs)
        # *dev* looks like 'sda' or 'da6,pass6'; drop the passthrough names.
        names = [part for part in dev.strip().split(',') if not part.startswith('pass')]
        self.devname = names[0] if names else ''
    def get_columns(self):
        columns = super().get_columns()
        columns['Device'] = lambda y: y.devname
        return columns
    def device_slot_set(self, status):
        """
        Actually issue the command to set ``status'' in a given `slot''
        of the enclosure number ``encnumb''
        Returns:
            True if the command succeeded, False otherwise
        """
        # Impossible to be used in an efficient way so it's a NO-OP
        return True
    @property
    def identify(self):
        # Prefer the sysfs 'locate' value when one was captured for this slot.
        if hasattr(self, '_identify'):
            return self._identify != '0'
        return bool((self.value_raw >> 8) & 0x2)
    @property
    def fault(self):
        # Prefer the sysfs 'fault' value when one was captured for this slot.
        if hasattr(self, '_fault'):
            return self._fault != '0'
        return bool(self.value_raw & 0x20)
    @property
    def value(self):
        """Comma-joined identify/fault state, or "None"."""
        active = [
            label for flag, label in (
                (self.identify, "Identify on"),
                (self.fault, "Fault on"),
            ) if flag
        ]
        return ', '.join(active) if active else "None"
class SASConnector(Element):
    """SAS connector element; decodes the connector type code."""
    name = "SAS Connector"
    # Known connector type codes, based on sysutils/sg3-utils source code.
    _CONN_TYPE_DESC = {
        0x0: "No information",
        0x1: "SAS 4x receptacle (SFF-8470) [max 4 phys]",
        0x2: "Mini SAS 4x receptacle (SFF-8088) [max 4 phys]",
        0x3: "QSFP+ receptacle (SFF-8436) [max 4 phys]",
        0x4: "Mini SAS 4x active receptacle (SFF-8088) [max 4 phys]",
        0x5: "Mini SAS HD 4x receptacle (SFF-8644) [max 4 phys]",
        0x6: "Mini SAS HD 8x receptacle (SFF-8644) [max 8 phys]",
        0x7: "Mini SAS HD 16x receptacle (SFF-8644) [max 16 phys]",
        0xf: "Vendor specific external connector",
        0x10: "SAS 4i plug (SFF-8484) [max 4 phys]",
        0x11: "Mini SAS 4i receptacle (SFF-8087) [max 4 phys]",
        0x12: "Mini SAS HD 4i receptacle (SFF-8643) [max 4 phys]",
        0x13: "Mini SAS HD 8i receptacle (SFF-8643) [max 8 phys]",
        0x20: "SAS Drive backplane receptacle (SFF-8482) [max 2 phys]",
        0x21: "SATA host plug [max 1 phy]",
        0x22: "SAS Drive plug (SFF-8482) [max 2 phys]",
        0x23: "SATA device plug [max 1 phy]",
        0x24: "Micro SAS receptacle [max 2 phys]",
        0x25: "Micro SATA device plug [max 1 phy]",
        0x26: "Micro SAS plug (SFF-8486) [max 2 phys]",
        0x27: "Micro SAS/SATA plug (SFF-8486) [max 2 phys]",
        0x2f: "SAS virtual connector [max 1 phy]",
        0x3f: "Vendor specific internal connector",
    }
    @property
    def type(self):
        """
        Determine the type of the connector
        Based on sysutils/sg3-utils source code
        """
        conn_type = (self.value_raw >> 16) & 0x7f
        known = self._CONN_TYPE_DESC.get(conn_type)
        if known is not None:
            return known
        # Unknown/reserved codes: report the range the code falls into.
        if conn_type < 0x10:
            return "unknown external connector type: 0x%x" % conn_type
        if conn_type < 0x20:
            return "unknown internal wide connector type: 0x%x" % conn_type
        if conn_type < 0x30:
            return (
                "unknown internal connector to end device, type: 0x%x" % (
                    conn_type,
                )
            )
        if conn_type < 0x3f:
            return "reserved for internal connector, type:0x%x" % conn_type
        if conn_type < 0x70:
            return "reserved connector type: 0x%x" % conn_type
        if conn_type < 0x80:
            return "vendor specific connector type: 0x%x" % conn_type
        # Unreachable given the 0x7f mask above, kept for completeness.
        return "unexpected connector type: 0x%x" % conn_type
    @property
    def fail(self):
        return bool(self.value_raw & 0x40)
    @property
    def value(self):
        parts = [self.type]
        if self.fail:
            parts.append("Fail on")
        return ', '.join(parts)
class SASExpander(Element):
    """SAS expander element."""
    name = "SAS Expander"
    @property
    def identify(self):
        return (self.value_raw >> 16) & 0x80
    @property
    def fail(self):
        return (self.value_raw >> 16) & 0x40
    @property
    def value(self):
        """Comma-joined list of asserted flags, or "None"."""
        active = [
            label for bit, label in (
                (self.identify, "Identify on"),
                (self.fail, "Fail on"),
            ) if bit
        ]
        return ', '.join(active) if active else "None"
| 39,345 | Python | .py | 1,001 | 28.475524 | 117 | 0.556401 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,495 | keyvalue.py | truenas_middleware/src/middlewared/middlewared/plugins/keyvalue.py | from truenas_api_client import json
from middlewared.schema import Any, Str, accepts, Dict
from middlewared.service import Service
import middlewared.sqlalchemy as sa
class KeyValueModel(sa.Model):
    """Datastore model backing the generic key/value store."""
    __tablename__ = 'system_keyvalue'
    # Unique lookup key; values are stored JSON-serialized in `value`.
    id = sa.Column(sa.Integer(), primary_key=True)
    key = sa.Column(sa.String(255), unique=True)
    value = sa.Column(sa.Text())
class KeyValueService(Service):
    """Private service: persistent JSON key/value store on system.keyvalue."""
    class Config:
        private = True
    @accepts(Str('key'))
    async def has_key(self, key):
        """Return whether `key` exists in the store."""
        try:
            await self.get(key)
            return True
        except KeyError:
            return False
    @accepts(Str('key'), Any('default', null=True, default=None))
    async def get(self, key, default):
        """Return the deserialized value stored under `key`.

        When the key is absent, returns `default` if it is not None,
        otherwise raises KeyError.
        """
        try:
            return json.loads(
                (await self.middleware.call(
                    "datastore.query", "system.keyvalue", [["key", "=", key]], {"get": True}))["value"])
        except IndexError:
            # datastore.query with {"get": True} raises IndexError when no row matches
            if default is not None:
                return default
            raise KeyError(key)
    @accepts(
        Str('key'),
        Any('value'),
        Dict('options', additional_attrs=True),
    )
    async def set(self, key, value, options):
        """Insert or update `key` with the JSON-serialized `value`; returns `value`."""
        try:
            row = await self.middleware.call("datastore.query", "system.keyvalue", [["key", "=", key]], {"get": True})
        except IndexError:
            # No existing row -> insert a new one.
            await self.middleware.call(
                "datastore.insert", "system.keyvalue", {"key": key, "value": json.dumps(value)}, options
            )
        else:
            # Row exists -> update it in place.
            await self.middleware.call(
                "datastore.update", "system.keyvalue", row["id"], {"value": json.dumps(value)}, options
            )
        return value
    @accepts(
        Str('key'),
        Dict('options', additional_attrs=True),
    )
    async def delete(self, key, options):
        """Remove `key` (and its value) from the store."""
        await self.middleware.call("datastore.delete", "system.keyvalue", [["key", "=", key]], options)
| 1,959 | Python | .py | 52 | 28.807692 | 118 | 0.586061 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,496 | ntp.py | truenas_middleware/src/middlewared/middlewared/plugins/ntp.py | import errno
import subprocess
import middlewared.sqlalchemy as sa
from middlewared.plugins.ntp_.enums import Mode, State
from middlewared.schema import Bool, Dict, Int, IPAddr, Patch, Str, accepts
from middlewared.service import CRUDService, ValidationErrors, filterable, private
from middlewared.service_exception import CallError
from middlewared.utils import filter_list
from middlewared.plugins.ntp_.client import NTPClient
class NTPModel(sa.Model):
    """Datastore model for a configured NTP server."""
    __tablename__ = 'system_ntpserver'
    # Field semantics mirror NTPServerService.do_create's documented options.
    id = sa.Column(sa.Integer(), primary_key=True)
    ntp_address = sa.Column(sa.String(120))
    ntp_burst = sa.Column(sa.Boolean(), default=False)
    ntp_iburst = sa.Column(sa.Boolean(), default=True)
    ntp_prefer = sa.Column(sa.Boolean(), default=False)
    ntp_minpoll = sa.Column(sa.Integer(), default=6)
    ntp_maxpoll = sa.Column(sa.Integer(), default=10)
class NTPPeer:
    """One NTP source as reported by ``chronyc sources``, normalized for output."""
    def __init__(self, initial_data):
        self._mode = Mode.from_str(initial_data['mode'])
        self._state = State.from_str(initial_data['state'])
        self._remote = initial_data['remote']
        # Reject entries whose remote is not a valid IP address.
        IPAddr().validate(self._remote)
        for field in (
            'stratum', 'poll_interval', 'reach', 'lastrx',
            'offset', 'offset_measured', 'jitter',
        ):
            setattr(self, f'_{field}', initial_data[field])
    @classmethod
    def from_chronyc_sources(
        cls, mode, state, remote, stratum, poll_interval, reach, lastrx, offset, offset_measured, jitter
    ):
        """Construct a NTPPeer object from one line of output from chronyc sources -c"""
        # From chronyc man page (https://chrony.tuxfamily.org/doc/4.3/chronyc.html)
        # -c This option enables printing of reports in a comma-separated values (CSV) format. Reverse DNS lookups
        # will be disabled, time will be printed as number of seconds since the epoch, and values in seconds will
        # not be converted to other units.
        return cls({
            'mode': mode,
            'state': state,
            'remote': remote,
            'stratum': int(stratum),
            'poll_interval': int(poll_interval),
            'reach': int(reach, 8),  # chronyc prints the reach register in octal
            'lastrx': int(lastrx),
            'offset': float(offset),
            'offset_measured': float(offset_measured),
            'jitter': float(jitter)
        })
    def asdict(self):
        """Return a plain-dict representation suitable for API output."""
        return {
            'mode': str(self._mode),
            'state': str(self._state),
            'remote': self._remote,
            'stratum': self._stratum,
            'poll_interval': self._poll_interval,
            'reach': self._reach,
            'lastrx': self._lastrx,
            'offset': self._offset,
            'offset_measured': self._offset_measured,
            'jitter': self._jitter,
            'active': self.is_active(),
        }
    def is_active(self):
        """Whether this peer's state counts as actively used for sync."""
        return self._state.is_active()
    def __str__(self):
        return f"{self._mode}: {self._state} [{self._remote}]"
    @property
    def remote(self):
        return self._remote
    @property
    def offset_in_secs(self):
        return self._offset
class NTPServerService(CRUDService):
    """CRUD API for the NTP servers the system synchronizes time against."""
    class Config:
        namespace = 'system.ntpserver'
        datastore = 'system.ntpserver'
        datastore_prefix = 'ntp_'
        cli_namespace = 'system.ntp_server'
    # Returned entries match the create schema minus 'force', plus 'id'.
    ENTRY = Patch(
        'ntp_create', 'ntp_entry',
        ('rm', {'name': 'force'}),
        ('add', Int('id')),
    )
    @accepts(Dict(
        'ntp_create',
        Str('address'),
        Bool('burst', default=False),
        Bool('iburst', default=True),
        Bool('prefer', default=False),
        Int('minpoll', default=6),
        Int('maxpoll', default=10),
        Bool('force'),
        register=True
    ))
    async def do_create(self, data):
        """
        Add an NTP Server.
        `address` specifies the hostname/IP address of the NTP server.
        `burst` when enabled makes sure that if server is reachable, sends a burst of eight packets instead of one.
        This is designed to improve timekeeping quality with the server command.
        `iburst` when enabled speeds up the initial synchronization, taking seconds rather than minutes.
        `prefer` marks the specified server as preferred. When all other things are equal, this host is chosen
        for synchronization acquisition with the server command. It is recommended that they be used for servers with
        time monitoring hardware.
        `minpoll` is minimum polling time in seconds. It must be a power of 2 and less than `maxpoll`.
        `maxpoll` is maximum polling time in seconds. It must be a power of 2 and greater than `minpoll`.
        `force` when enabled forces the addition of NTP server even if it is currently unreachable.
        """
        await self.clean(data, 'ntpserver_create')
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        # Restart so the daemon picks up the new server.
        await self.middleware.call('service.restart', 'ntpd')
        return await self.get_instance(data['id'])
    @accepts(
        Int('id'),
        Patch(
            'ntp_create',
            'ntp_update',
            ('attr', {'update': True})
        )
    )
    async def do_update(self, id_, data):
        """
        Update NTP server of `id`.
        """
        old = await self.get_instance(id_)
        new = old.copy()
        new.update(data)
        await self.clean(new, 'ntpserver_update')
        await self.middleware.call(
            'datastore.update', self._config.datastore, id_, new,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'ntpd')
        return await self.get_instance(id_)
    async def do_delete(self, id_):
        """
        Delete NTP server of `id`.
        """
        response = await self.middleware.call('datastore.delete', self._config.datastore, id_)
        await self.middleware.call('service.restart', 'ntpd')
        return response
    @staticmethod
    @private
    def test_ntp_server(addr):
        """Return True when `addr` answers an NTP request; False on any failure."""
        try:
            return bool(NTPClient(addr).make_request()['version'])
        except Exception:
            return False
    @private
    @filterable
    def peers(self, filters, options):
        """Return the current peers from `chronyc -c sources`, filterable.

        Raises:
            CallError: chronyc failed (ECONNREFUSED when chronyd isn't up).
        """
        peers = []
        # chronyd is only usable once the system is fully booted.
        if not self.middleware.call_sync('system.ready'):
            return peers
        resp = subprocess.run(['chronyc', '-c', 'sources'], capture_output=True)
        if resp.returncode != 0 or resp.stderr:
            errmsg = resp.stderr.decode().strip()
            raise CallError(
                errmsg,
                errno.ECONNREFUSED if "Connection refused" in errmsg else errno.EFAULT
            )
        for entry in resp.stdout.decode().splitlines():
            values = entry.split(',')
            # CSV output has exactly 10 fields per source; skip anything else.
            if len(values) != 10:
                self.logger.debug("Unexpected peer result: %s", entry)
                continue
            try:
                peer = NTPPeer.from_chronyc_sources(*values)
                # mode = Mode.from_str(values[0])
                # state = State.from_str(values[1])
            except NotImplementedError as e:
                self.logger.debug(f"Unexpected item {e}: {entry}")
                continue
            except ValidationErrors as e:
                self.logger.debug("Invalid remote address: %s", e)
                continue
            peers.append(peer.asdict())
        return filter_list(peers, filters, options)
    @private
    async def clean(self, data, schema_name):
        """Validate a create/update payload; strips 'force' from `data`."""
        verrors = ValidationErrors()
        maxpoll = data['maxpoll']
        minpoll = data['minpoll']
        # Unless forced, require the server to actually answer before saving.
        if not data.pop('force', False):
            if not await self.middleware.run_in_thread(self.test_ntp_server, data['address']):
                verrors.add(
                    f'{schema_name}.address',
                    'Server could not be reached. Check "Force" to continue regardless.'
                )
        if not maxpoll > minpoll:
            verrors.add(f'{schema_name}.maxpoll',
                        'Max Poll should be higher than Min Poll')
        verrors.check()
        return data
| 8,373 | Python | .py | 199 | 32.497487 | 117 | 0.605759 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,497 | dlm.py | truenas_middleware/src/middlewared/middlewared/plugins/dlm.py | import asyncio
from middlewared.service import Service, private
class DistributedLockManagerService(Service):
"""
Support the configuration of the kernel dlm in a multi-controller environment.
This will handle the following events:
- kernel udev online lockspace event (aka dlm.join_lockspace)
- kernel udev offline lockspace event (aka dlm.leave_lockspace)
- node join event (from another controller)
- node leave event (from another controller)
"""
class Config:
private = True
namespace = 'dlm'
resetting = False
    def __init__(self, *args, **kwargs):
        """Initialize with placeholder node state; dlm.setup_nodes fills it in."""
        super().__init__(*args, **kwargs)
        # The nodeID, peernodeID & nodes will be initialized by setup_nodes
        self.nodeID = 0  # this controller's dlm node id (0 = not yet determined)
        self.peernodeID = "Unknown"
        self.nodes = {}  # nodeid -> {'ip': ..., 'local': bool}
        self.fully_created = False  # set once dlm.create has configured comms
    @private
    async def setup_nodes(self):
        """
        Setup the self.nodes dict and the self.nodeID.
        It makes no guarantees that the remote node is currently accessible.
        """
        if await self.middleware.call('failover.licensed'):
            # We could determine local by fetching IPs, but failover.node is cheap
            self.node = await self.middleware.call('failover.node')
            # Fixed heartbeat-link addresses: node A is .1, node B is .2.
            self.nodes[1] = {'ip': '169.254.10.1', 'local': self.node == 'A'}
            self.nodes[2] = {'ip': '169.254.10.2', 'local': self.node == 'B'}
            for nodeid, node in self.nodes.items():
                if node['local']:
                    self.nodeID = nodeid
                else:
                    self.peernodeID = nodeid
    @private
    async def node_ready(self):
        """Return whether this node's dlm comms are configured and ready."""
        if not self.nodeID:
            # Not initialized yet; dlm.create performs node setup first.
            await self.middleware.call('dlm.create')
        return await self.middleware.call('dlm.kernel.comms_node_ready', self.nodeID)
    @private
    async def create(self):
        """Idempotently configure the kernel dlm comms (nodes, no lockspaces)."""
        if self.fully_created:
            return
        if not self.nodes:
            await self.middleware.call('dlm.setup_nodes')
        # For code robustness sake, ensure the dlm is loaded. Should not be necessary.
        await self.middleware.call('dlm.kernel.load_kernel_module')
        # Setup the kernel dlm static config (i.e. define nodes, but not lockspaces)
        for nodeid, node in self.nodes.items():
            if node['local']:
                await self.middleware.call('dlm.kernel.comms_add_node', nodeid, node['ip'], node['local'])
            elif await self.middleware.call('failover.remote_connected'):
                # Only register the peer when it is actually reachable.
                await self.middleware.call('dlm.kernel.comms_add_node', nodeid, node['ip'], node['local'])
        self.fully_created = True
    @private
    async def lockspace_member(self, dest_nodeid, lockspace_name):
        """Return (dest_nodeid, bool) indicating whether the node has the lockspace.

        Queries the local kernel when *dest_nodeid* is this node, otherwise
        proxies the call to the peer; unreachable peers report False.
        """
        await self.middleware.call('dlm.create')
        if dest_nodeid == self.nodeID:
            # Local operation
            self.logger.debug('[LOCAL] Checking whether lockspace %s exists on node %d', lockspace_name, dest_nodeid)
            if await self.middleware.call('dlm.kernel.lockspace_present', lockspace_name):
                return (dest_nodeid, True)
        elif await self.middleware.call('failover.remote_connected'):
            # Remote operation
            self.logger.debug('[REMOTE] Checking whether lockspace %s exists on node %d', lockspace_name, dest_nodeid)
            return await self.middleware.call(
                'failover.call_remote', 'dlm.lockspace_member', [dest_nodeid, lockspace_name], {'timeout': 5}
            )
        return (dest_nodeid, False)
    @private
    async def lockspace_members(self, lockspace_name):
        """Return the list of node ids currently holding *lockspace_name*.

        Per-node failures are logged and treated as non-membership.
        """
        await self.middleware.call('dlm.create')
        result = set()
        # Query every node concurrently; gather exceptions instead of raising.
        exceptions = await asyncio.gather(*[self.lockspace_member(nodeid, lockspace_name) for nodeid in self.nodes], return_exceptions=True)
        for exc in exceptions:
            if isinstance(exc, Exception):
                self.logger.warning(exc)
            else:
                (nodeid, member) = exc
                if nodeid and member:
                    result.add(nodeid)
        return list(result)
    @private
    async def stop_kernel_lockspace(self, dest_nodeid, lockspace_name):
        """Stop the kernel lockspace on *dest_nodeid* (locally or via the peer)."""
        if dest_nodeid == self.nodeID:
            # Local operation
            self.logger.debug('[LOCAL] Stopping kernel lockspace %s on node %d', lockspace_name, dest_nodeid)
            await self.middleware.call('dlm.kernel.lockspace_stop', lockspace_name)
        elif await self.middleware.call('failover.remote_connected'):
            # Remote operation
            self.logger.debug('[REMOTE] Stopping kernel lockspace %s on node %d', lockspace_name, dest_nodeid)
            await self.middleware.call(
                'failover.call_remote', 'dlm.stop_kernel_lockspace', [dest_nodeid, lockspace_name], {'timeout': 5}
            )
    @private
    async def start_kernel_lockspace(self, dest_nodeid, lockspace_name):
        """Start the kernel lockspace on *dest_nodeid* (locally or via the peer)."""
        if dest_nodeid == self.nodeID:
            # Local operation
            self.logger.debug('[LOCAL] Starting kernel lockspace %s on node %d', lockspace_name, dest_nodeid)
            # If already stopped, tell the kernel lockspace to start
            await self.middleware.call('dlm.kernel.lockspace_start', lockspace_name)
        elif await self.middleware.call('failover.remote_connected'):
            # Remote operation
            self.logger.debug('[REMOTE] Starting kernel lockspace %s on node %d', lockspace_name, dest_nodeid)
            await self.middleware.call(
                'failover.call_remote', 'dlm.start_kernel_lockspace', [dest_nodeid, lockspace_name], {'timeout': 5}
            )
    @private
    async def join_kernel_lockspace(self, dest_nodeid, lockspace_name, joining_nodeid, nodeIDs):
        """Add *joining_nodeid* to the lockspace membership on *dest_nodeid*.

        The lockspace must already be stopped on the target node; it is
        restarted after the membership change.
        """
        if dest_nodeid == self.nodeID:
            # Local operation
            self.logger.debug('[LOCAL] Joining kernel lockspace %s for node %s on node %s', lockspace_name, joining_nodeid, dest_nodeid)
            # Ensure kernel lockspace is stopped
            if not await self.middleware.call('dlm.kernel.lockspace_is_stopped', lockspace_name):
                self.logger.warning('Lockspace %s not stopped', lockspace_name)
                return
            # If joining set global id_
            if dest_nodeid == joining_nodeid:
                # We are the joiner: take the full membership list.
                await self.middleware.call('dlm.kernel.lockspace_set_global_id', lockspace_name)
                for nodeid in nodeIDs:
                    await self.middleware.call('dlm.kernel.lockspace_add_node', lockspace_name, nodeid)
            else:
                # Add the joining node
                await self.middleware.call('dlm.kernel.lockspace_add_node', lockspace_name, joining_nodeid)
            # Start kernel lockspace again.
            await self.middleware.call('dlm.kernel.lockspace_start', lockspace_name)
            # If joining set event_done 0
            if dest_nodeid == joining_nodeid:
                await self.middleware.call('dlm.kernel.set_sysfs_event_done', lockspace_name, 0)
        elif await self.middleware.call('failover.remote_connected'):
            # Remote operation
            self.logger.debug('[REMOTE] Joining kernel lockspace %s for node %s on node %s', lockspace_name, joining_nodeid, dest_nodeid)
            await self.middleware.call(
                'failover.call_remote', 'dlm.join_kernel_lockspace', [dest_nodeid, lockspace_name, joining_nodeid, nodeIDs], {'timeout': 5}
            )
    @private
    async def leave_kernel_lockspace(self, dest_nodeid, lockspace_name, leaving_nodeid):
        """Perform the kernel-side removal of node `leaving_nodeid` from `lockspace_name`
        on node `dest_nodeid` — locally if we are that node, otherwise relayed to the
        HA peer over failover.call_remote.
        """
        if dest_nodeid == self.nodeID:
            # Local operation
            self.logger.debug('[LOCAL] Node %s leaving kernel lockspace %s', leaving_nodeid, lockspace_name)
            # Are we the ones leaving?
            if dest_nodeid == leaving_nodeid:
                # Leave the lockspace entirely and acknowledge the kernel event (status 0).
                await self.middleware.call('dlm.kernel.lockspace_leave', lockspace_name)
                # Event done
                await self.middleware.call('dlm.kernel.set_sysfs_event_done', lockspace_name, 0)
                return
            # A different node is leaving: just drop it from our membership config.
            await self.middleware.call('dlm.kernel.lockspace_remove_node', lockspace_name, leaving_nodeid)
        elif await self.middleware.call('failover.remote_connected'):
            # Remote operation
            self.logger.debug('[REMOTE] Node %s leaving kernel lockspace %s on %s', leaving_nodeid, lockspace_name, dest_nodeid)
            await self.middleware.call(
                'failover.call_remote', 'dlm.leave_kernel_lockspace', [dest_nodeid, lockspace_name, leaving_nodeid], {'timeout': 5}
            )
    @private
    async def join_lockspace(self, lockspace_name):
        """Orchestrate this node joining `lockspace_name` across all member nodes.

        Sequence: stop the lockspace on all current members, then join on every
        node (including ourselves). On any failure the kernel udev event is
        completed with status 1 so the kernel-side operation does not hang.
        """
        self.logger.info('Joining lockspace %s', lockspace_name)
        await self.middleware.call('dlm.create')
        try:
            # Note that by virtue of this being a join_lockspace kernel lockspace stopped is already True (on this node)
            await self.middleware.call('dlm.kernel.lockspace_mark_stopped', lockspace_name)
            nodeIDs = set(await self.middleware.call('dlm.lockspace_members', lockspace_name))
            # Stop kernel lockspace (on all other nodes)
            await asyncio.gather(*[self.stop_kernel_lockspace(nodeid, lockspace_name) for nodeid in nodeIDs])
            nodeIDs.add(self.nodeID)
            # Join the kernel lockspace (on all nodes)
            await asyncio.gather(*[self.join_kernel_lockspace(nodeid, lockspace_name, self.nodeID, list(nodeIDs)) for nodeid in nodeIDs])
        except Exception:
            self.logger.error('Failed to join lockspace %s', lockspace_name, exc_info=True)
            # Report failure (non-zero status) to the kernel so the event completes.
            await self.middleware.call('dlm.kernel.set_sysfs_event_done', lockspace_name, 1)
    @private
    async def leave_lockspace(self, lockspace_name):
        """Orchestrate this node leaving `lockspace_name` across all member nodes.

        During a local_reset (`resetting` flag) no remote communication is
        attempted: we stop and leave locally and acknowledge the kernel event.
        Otherwise: stop the lockspace on all members, leave on all members,
        then restart it on the remaining ones.
        """
        self.logger.info('Leaving lockspace %s', lockspace_name)
        await self.middleware.call('dlm.create')
        if DistributedLockManagerService.resetting:
            await self.middleware.call('dlm.kernel.lockspace_stop', lockspace_name)
            await self.middleware.call('dlm.kernel.lockspace_leave', lockspace_name)
            await self.middleware.call('dlm.kernel.set_sysfs_event_done', lockspace_name, 0)
            return
        try:
            nodeIDs = set(await self.middleware.call('dlm.lockspace_members', lockspace_name))
            # Stop kernel lockspace (on all nodes)
            await asyncio.gather(*[self.stop_kernel_lockspace(nodeid, lockspace_name) for nodeid in nodeIDs])
            # Leave the kernel lockspace (on all nodes).
            await asyncio.gather(*[self.leave_kernel_lockspace(nodeid, lockspace_name, self.nodeID) for nodeid in nodeIDs])
            nodeIDs.remove(self.nodeID)
            # Start the kernel lockspace on remaining nodes
            await asyncio.gather(*[self.start_kernel_lockspace(nodeid, lockspace_name) for nodeid in nodeIDs])
        except Exception:
            self.logger.error('Failed to leave lockspace %s', lockspace_name, exc_info=True)
            # Best-effort recovery: restart locally and report failure status to the kernel.
            await self.middleware.call('dlm.kernel.lockspace_start', lockspace_name)
            await self.middleware.call('dlm.kernel.set_sysfs_event_done', lockspace_name, 1)
    @private
    async def add_node(self, nodeid):
        """
        Possible future enhancement.

        Handle addition of a node.
        """
        raise NotImplementedError("add_node not currently implemented")
        # NOTE(review): everything below the raise is unreachable dead code,
        # apparently retained as a sketch of the future implementation.
        # if await self.middleware.call('failover.remote_connected'):
        node = self.nodes.get(nodeid)
        if node:
            await self.middleware.call('dlm.kernel.comms_add_node', nodeid, node['ip'], node['local'])
    @private
    async def remove_node(self, nodeid):
        """
        Possible future enhancement.

        Handle a node failure.
        """
        raise NotImplementedError("remove_node not currently implemented")
        # NOTE(review): everything below the raise is unreachable dead code,
        # apparently retained as a sketch of the future implementation.
        node = self.nodes.get(nodeid)
        if node:
            # Remove the node from any lockspaces it is in
            for lockspace_name in await self.middleware.call('dlm.kernel.node_lockspaces', nodeid):
                # Anticipate the day when we have N nodes, but for now this equates to this node.
                nodeIDs = set(await self.middleware.call('dlm.lockspace_members', lockspace_name))
                nodeIDs.remove(nodeid)
                await asyncio.gather(*[self.stop_kernel_lockspace(node_id, lockspace_name) for node_id in nodeIDs])
                await asyncio.gather(*[self.leave_kernel_lockspace(node_id, lockspace_name, nodeid) for node_id in nodeIDs])
                await asyncio.gather(*[self.start_kernel_lockspace(node_id, lockspace_name) for node_id in nodeIDs])
            # await self.middleware.call('dlm.kernel.comms_remove_node', nodeid)
    @private
    async def remote_down(self):
        """
        Handle a node HA remote node going down.
        """
        # Currently informational only; no lockspace cleanup is performed here.
        self.logger.info('Remote node %s down', self.peernodeID)
    @private
    async def local_remove_peer(self, lockspace_name):
        """Remove the peer node from the specified lockspace without communicating with it."""
        # DLM requires the lockspace to be stopped before membership changes,
        # and restarted afterwards — the order of these three calls matters.
        await self.middleware.call('dlm.kernel.lockspace_stop', lockspace_name)
        await self.middleware.call('dlm.kernel.lockspace_remove_node', lockspace_name, self.peernodeID)
        await self.middleware.call('dlm.kernel.lockspace_start', lockspace_name)
    @private
    async def lockspaces(self):
        """Return a list of lockspaces to which we are currently joined."""
        # dlm.create is idempotent; it ensures the DLM configuration exists first.
        await self.middleware.call('dlm.create')
        return list(await self.middleware.call('dlm.kernel.node_lockspaces', self.nodeID))
    @private
    async def peer_lockspaces(self):
        """Return a list of lockspaces to which we are currently joined, and which also
        contain the PEER node"""
        # dlm.create is idempotent; it ensures the DLM configuration exists first.
        await self.middleware.call('dlm.create')
        return list(await self.middleware.call('dlm.kernel.node_lockspaces', self.peernodeID))
@private
async def eject_peer(self):
"""Locally remove the PEER node from all of the lockspaces to which we are both joined."""
await self.middleware.call('dlm.create')
lockspace_names = await self.middleware.call('dlm.peer_lockspaces')
if lockspace_names:
self.logger.info('Ejecting peer from %d lockspaces', len(lockspace_names))
await asyncio.gather(*[self.local_remove_peer(lockspace_name) for lockspace_name in lockspace_names])
    @private
    async def local_reset(self, disable_iscsi=True):
        """Locally remove the PEER node from all lockspaces and reset cluster_mode to
        zero, WITHOUT talking to the peer node.

        The `resetting` class flag short-circuits leave_lockspace() so that the
        cluster_mode teardown below does not trigger remote DLM traffic.
        """
        self.logger.info('local_reset starting: %r', disable_iscsi)
        # First turn off all access to targets from outside.
        if disable_iscsi:
            await self.middleware.call('iscsi.scst.disable')
        # Locally eject the peer.  Will prevent remote comms below.
        await self.eject_peer()
        # Wait for up to 10 seconds for things to settle
        retries = 10
        while retries and await self.middleware.call('dlm.peer_lockspaces'):
            await asyncio.sleep(1)
            retries -= 1
        if retries != 10:
            self.logger.info('Waited %d seconds for lockspace to settle', 10 - retries)
        # Finally turn off cluster mode locally on all extents
        try:
            DistributedLockManagerService.resetting = True
            await self.middleware.call('iscsi.scst.set_all_cluster_mode', 0)
        finally:
            DistributedLockManagerService.resetting = False
        self.logger.info('local_reset done')
@private
async def is_local_reset_complete(self):
if await self.middleware.call('dlm.peer_lockspaces'):
return False
return await self.middleware.call('iscsi.scst.check_cluster_modes_clear')
async def udev_dlm_hook(middleware, data):
    """
    This hook is called on udevd dlm type events.  Its purpose is to
    allow configuration of dlm lockspaces by handling 'online' and
    'offline' events.

    At the moment this should only be used in HA systems with ALUA enabled
    for iSCSI targets, but there are aspects that are generic and can
    be implemented even if this was not the configuration.

    `data` is the udev event environment; only dlm 'online'/'offline'
    events carrying a LOCKSPACE name are acted upon.
    """
    if data.get('SUBSYSTEM') != 'dlm' or data.get('ACTION') not in ['online', 'offline']:
        return

    lockspace = data.get('LOCKSPACE')
    if lockspace is None:
        # No exception is in flight here, so exc_info=True (as previously
        # passed) would only log a meaningless "NoneType: None" traceback.
        middleware.logger.error('Missing lockspace name')
        return

    if data['ACTION'] == 'online':
        await middleware.call('dlm.join_lockspace', lockspace)
    elif data['ACTION'] == 'offline':
        await middleware.call('dlm.leave_lockspace', lockspace)
def remote_down_event(middleware, *args, **kwargs):
    # Failover disconnect callback (synchronous signature required by the
    # failover event API); delegates to dlm.remote_down.
    middleware.call_sync('dlm.remote_down')
async def setup(middleware):
    """Plugin entry point: wire the udev dlm hook into middleware."""
    middleware.register_hook('udev.dlm', udev_dlm_hook)
    # Comment out placeholder call for possible future enhancement.
    # await middleware.call('failover.remote_on_connect', remote_status_event)
    # await middleware.call('failover.remote_on_disconnect', remote_down_event)
| 17,176 | Python | .py | 312 | 44.711538 | 140 | 0.653887 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,498 | service.py | truenas_middleware/src/middlewared/middlewared/plugins/service.py | import asyncio
import errno
import psutil
from middlewared.plugins.service_.services.all import all_services
from middlewared.plugins.service_.services.base import IdentifiableServiceInterface
from middlewared.plugins.service_.utils import app_has_write_privilege_for_service
from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str
from middlewared.service import filterable, CallError, CRUDService, pass_app, periodic, private
from middlewared.service_exception import MatchNotFound
import middlewared.sqlalchemy as sa
from middlewared.utils import filter_list, filter_getattrs
class ServiceModel(sa.Model):
    # ORM model backing the 'services_services' table: one row per managed
    # service with its boot-time enable flag (column prefix 'srv_').
    __tablename__ = 'services_services'

    id = sa.Column(sa.Integer(), primary_key=True)
    srv_service = sa.Column(sa.String(120))
    srv_enable = sa.Column(sa.Boolean(), default=False)
class ServiceService(CRUDService):
    class Config:
        # CRUDService wiring: CLI namespace, datastore column prefix, and the
        # extend hooks that merge live state into each row (see below).
        cli_namespace = "service"
        datastore_prefix = 'srv_'
        datastore_extend = 'service.service_extend'
        datastore_extend_context = 'service.service_extend_context'
        role_prefix = "SERVICE"

    # API schema of a single service row: DB fields (id/service/enable) plus
    # runtime fields (state/pids) added by the extend hooks.
    ENTRY = Dict(
        'service_entry',
        Int('id'),
        Str('service'),
        Bool('enable'),
        Str('state'),
        List('pids', items=[Int('pid')]),
    )
@private
async def service_extend_context(self, services, extra):
if not extra.get('include_state', True):
return {}
if not isinstance(services, list):
services = [services]
jobs = {
asyncio.ensure_future(
(await self.middleware.call('service.object', service['service'])).get_state()
): service
for service in services
}
if jobs:
done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)
def result(task):
"""
Method to handle results of the coroutines.
In case of error or timeout, provide UNKNOWN state.
"""
entry = jobs.get(task)
result = None
if task in done:
try:
result = task.result()
except Exception:
self.logger.warning('Task %r failed', exc_info=True)
if result is None:
return None
return {
'service': entry['service'],
'info': {
'state': 'RUNNING' if result.running else 'STOPPED',
'pids': result.pids
}
}
return {srv['service']: srv['info'] for srv in map(result, jobs) if srv is not None}
@private
async def service_extend(self, svc, ctx):
return svc | ctx.get(svc['service'], {'state': 'UNKNOWN', 'pids': []})
    @filterable
    async def query(self, filters, options):
        """
        Query all system services with `query-filters` and `query-options`.

        Supports the following extra options:
        `include_state` - performance optimization to avoid getting service state.
        defaults to True.
        """
        default_options = {
            'prefix': self._config.datastore_prefix,
            'extend': self._config.datastore_extend,
            'extend_context': self._config.datastore_extend_context
        }
        # Runtime fields (state/pids) only exist after extension, so filters on
        # them cannot be pushed down to the datastore: fetch all rows extended
        # and filter in memory instead.
        if set(filter_getattrs(filters)) & {'state', 'pids'}:
            services = await self.middleware.call('datastore.query', 'services.services', [], default_options)
            return filter_list(services, filters, options)
        return await self.middleware.call('datastore.query', 'services.services', filters, options | default_options)
    @accepts(
        Str('id_or_name'),
        Dict(
            'service-update',
            Bool('enable', default=False),
        ),
        roles=['SERVICE_WRITE', 'SHARING_NFS_WRITE', 'SHARING_SMB_WRITE', 'SHARING_ISCSI_WRITE', 'SHARING_FTP_WRITE'],
        audit='Update service configuration',
        audit_callback=True,
    )
    @returns(Int('service_primary_key'))
    @pass_app(rest=True)
    async def do_update(self, app, audit_callback, id_or_name, data):
        """
        Update service entry of `id_or_name`.

        Currently, it only accepts `enable` option which means whether the service should start on boot.
        """
        # Accept either the numeric primary key or the service's name.
        if not id_or_name.isdigit():
            filters = [['service', '=', id_or_name]]
        else:
            filters = [['id', '=', id_or_name]]

        if not (svc := await self.middleware.call('datastore.query', 'services.services', filters, {'prefix': 'srv_'})):
            raise CallError(f'Service {id_or_name} not found.', errno.ENOENT)

        svc = svc[0]
        # Record the resolved service name in the audit trail.
        audit_callback(svc['service'])
        if not app_has_write_privilege_for_service(app, svc['service']):
            raise CallError(f'{svc["service"]}: authenticated session lacks privilege to update service', errno.EPERM)

        rv = await self.middleware.call(
            'datastore.update', 'services.services', svc['id'], {'srv_enable': data['enable']}
        )
        # Regenerate rc configuration so the new enable flag takes effect at boot.
        await self.middleware.call('etc.generate', 'rc')
        return rv
    @accepts(
        Str('service'),
        Dict(
            'service-control',
            Bool('ha_propagate', default=True),
            Bool('silent', default=True),
            register=True,
        ),
        roles=['SERVICE_WRITE', 'SHARING_NFS_WRITE', 'SHARING_SMB_WRITE', 'SHARING_ISCSI_WRITE', 'SHARING_FTP_WRITE']
    )
    @returns(Bool('started_service', description='Will return `true` if service successfully started'))
    @pass_app(rest=True)
    async def start(self, app, service, options):
        """
        Start the service specified by `service`.

        If `silent` is `true` then in case of service startup failure, `false` will be returned. If `silent` is `false`
        then in case of service startup failure, an exception will be raised.
        """
        service_object = await self.middleware.call('service.object', service)

        if not app_has_write_privilege_for_service(app, service):
            raise CallError(f'{service}: authenticated session lacks privilege to start service', errno.EPERM)

        await self.middleware.call_hook('service.pre_action', service, 'start', options)

        # Regenerate the service's config files before validating/starting it.
        await self.middleware.call('service.generate_etc', service_object)

        try:
            await service_object.check_configuration()
        except CallError:
            if options['silent']:
                self.logger.warning('%s: service failed configuration check',
                                    service_object.name, exc_info=True)
                return False

            raise

        await service_object.before_start()
        await service_object.start()
        state = await service_object.get_state()
        if state.running:
            await service_object.after_start()
            # Broadcast the new state to event subscribers.
            await self.middleware.call('service.notify_running', service)
            if service_object.deprecated:
                await self.middleware.call(
                    'alert.oneshot_create',
                    'DeprecatedService',
                    {"service": service_object.name}
                )
            return True
        else:
            self.logger.error("Service %r not running after start", service)
            await self.middleware.call('service.notify_running', service)
            if options['silent']:
                return False
            else:
                raise CallError(await service_object.failure_logs() or 'Service not running after start')
    @accepts(Str('service'), roles=['SERVICE_READ'])
    @returns(Bool('service_started', description='Will return `true` if service is running'))
    async def started(self, service):
        """
        Test if service specified by `service` has been started.
        """
        service_object = await self.middleware.call('service.object', service)
        state = await service_object.get_state()
        # Side effect: keep the DeprecatedService one-shot alert in sync with
        # whether a deprecated service is actually running.
        if service_object.deprecated:
            if state.running:
                await self.middleware.call(
                    'alert.oneshot_create',
                    'DeprecatedService',
                    {"service": service_object.name}
                )
            else:
                await self.middleware.call('alert.oneshot_delete', 'DeprecatedService', service_object.name)

        return state.running
@accepts(Str('service'), roles=['SERVICE_READ'])
@returns(Bool('service_started_or_enabled',
description='Will return `true` if service is started or enabled to start automatically.'))
async def started_or_enabled(self, service):
"""
Test if service specified by `service` is started or enabled to start automatically.
"""
svc = await self.middleware.call('service.query', [['service', '=', service]], {'get': True})
return svc['state'] == 'RUNNING' or svc['enable']
@accepts(
Str('service'),
Ref('service-control'),
roles=['SERVICE_WRITE', 'SHARING_NFS_WRITE', 'SHARING_SMB_WRITE', 'SHARING_ISCSI_WRITE', 'SHARING_FTP_WRITE']
)
@returns(Bool('service_stopped', description='Will return `true` if service successfully stopped'))
@pass_app(rest=True)
async def stop(self, app, service, options):
"""
Stop the service specified by `service`.
"""
service_object = await self.middleware.call('service.object', service)
if not app_has_write_privilege_for_service(app, service):
raise CallError(f'{service}: authenticated session lacks privilege to stop service')
await self.middleware.call_hook('service.pre_action', service, 'stop', options)
try:
await service_object.before_stop()
except Exception:
self.logger.error("Failed before stop action for %r service", service)
await service_object.stop()
state = await service_object.get_state()
if not state.running:
await service_object.after_stop()
await self.middleware.call('service.notify_running', service)
if service_object.deprecated:
await self.middleware.call('alert.oneshot_delete', 'DeprecatedService', service_object.name)
return True
else:
self.logger.error("Service %r running after stop", service)
await self.middleware.call('service.notify_running', service)
if options['silent']:
return False
raise CallError(await service_object.failure_logs() or 'Service still running after stop')
    @accepts(
        Str('service'),
        Ref('service-control'),
        roles=['SERVICE_WRITE', 'SHARING_NFS_WRITE', 'SHARING_SMB_WRITE', 'SHARING_ISCSI_WRITE', 'SHARING_FTP_WRITE']
    )
    @returns(Bool('service_restarted'))
    @pass_app(rest=True)
    async def restart(self, app, service, options):
        """
        Restart the service specified by `service`.
        """
        service_object = await self.middleware.call('service.object', service)

        if not app_has_write_privilege_for_service(app, service):
            raise CallError(f'{service}: authenticated session lacks privilege to restart service', errno.EPERM)

        await self.middleware.call_hook('service.pre_action', service, 'restart', options)

        # Regenerate config files, then delegate to the shared implementation.
        await self.middleware.call('service.generate_etc', service_object)

        return await self._restart(service, service_object)
async def _restart(self, service, service_object):
if service_object.restartable:
await service_object.before_restart()
await service_object.restart()
await service_object.after_restart()
state = await service_object.get_state()
if not state.running:
await self.middleware.call('service.notify_running', service)
self.logger.error("Service %r not running after restart", service)
return False
else:
try:
await service_object.before_stop()
except Exception:
self.logger.error("Failed before stop action for %r service", service)
await service_object.stop()
state = await service_object.get_state()
if not state.running:
await service_object.after_stop()
else:
self.logger.error("Service %r running after restart-caused stop", service)
await service_object.before_start()
await service_object.start()
state = await service_object.get_state()
if not state.running:
await self.middleware.call('service.notify_running', service)
self.logger.error("Service %r not running after restart-caused start", service)
return False
await service_object.after_start()
await self.middleware.call('service.notify_running', service)
if service_object.deprecated:
await self.middleware.call('alert.oneshot_create', 'DeprecatedService', {"service": service_object.name})
return True
    @accepts(
        Str('service'),
        Ref('service-control'),
        roles=['SERVICE_WRITE', 'SHARING_NFS_WRITE', 'SHARING_SMB_WRITE', 'SHARING_ISCSI_WRITE', 'SHARING_FTP_WRITE']
    )
    @returns(Bool('service_reloaded'))
    @pass_app(rest=True)
    async def reload(self, app, service, options):
        """
        Reload the service specified by `service`.
        """
        service_object = await self.middleware.call('service.object', service)

        if not app_has_write_privilege_for_service(app, service):
            raise CallError(f'{service}: authenticated session lacks privilege to restart service', errno.EPERM)

        await self.middleware.call_hook('service.pre_action', service, 'reload', options)

        await self.middleware.call('service.generate_etc', service_object)

        if service_object.reloadable:
            await service_object.before_reload()
            await service_object.reload()
            await service_object.after_reload()

            state = await service_object.get_state()
            if state.running:
                return True
            else:
                self.logger.error("Service %r not running after reload", service)
                return False
        else:
            # Services without reload support fall back to a full restart.
            return await self._restart(service, service_object)
SERVICES = {}
@private
async def register_object(self, object_):
if object_.name in self.SERVICES:
raise CallError(f"Service object {object_.name} is already registered")
self.SERVICES[object_.name] = object_
@private
async def object(self, name):
try:
return self.SERVICES[name]
except KeyError:
raise MatchNotFound(name) from None
    @private
    async def generate_etc(self, object_):
        """Regenerate every etc config group the service backend declares."""
        for etc in object_.etc:
            await self.middleware.call("etc.generate", etc)
    @private
    async def notify_running(self, service):
        """Emit a service.query CHANGED event with the service's current row.

        Silently does nothing if the service has no database entry.
        """
        try:
            svc = await self.middleware.call('service.query', [('service', '=', service)], {'get': True})
        except MatchNotFound:
            return

        self.middleware.send_event('service.query', 'CHANGED', fields=svc)
@private
async def identify_process(self, procname):
for service_name, service in self.SERVICES.items():
if isinstance(service, IdentifiableServiceInterface):
if await service.identify(procname):
return service_name
    @private
    async def get_unit_state(self, service):
        """Return the systemd unit state reported by the service backend."""
        service_object = await self.middleware.call('service.object', service)
        return await service_object.get_unit_state()
    @private
    async def become_active(self, service):
        """During a HA failover event certain services may support this method being called
        when the node is becoming the new ACTIVE node"""
        service_object = await self.middleware.call('service.object', service)
        return await service_object.become_active()
    @private
    async def become_standby(self, service):
        """During a HA failover event certain services may support this method being called
        when the node is becoming the new STANDBY node"""
        service_object = await self.middleware.call('service.object', service)
        return await service_object.become_standby()
    @accepts(Int("pid"), Int("timeout", default=10))
    @returns(Bool(
        "process_terminated_nicely",
        description="`true` is process has been successfully terminated with `TERM` and `false` if we had to use `KILL`"
    ))
    def terminate_process(self, pid, timeout):
        """
        Terminate process by `pid`.

        First send `TERM` signal, then, if was not terminated in `timeout` seconds, send `KILL` signal.
        """
        try:
            process = psutil.Process(pid)
            process.terminate()
            # wait_procs returns (gone, alive) after waiting up to `timeout` seconds.
            gone, alive = psutil.wait_procs([process], timeout)
        except psutil.NoSuchProcess:
            raise CallError("Process does not exist")

        if not alive:
            return True

        try:
            alive[0].kill()
        except psutil.NoSuchProcess:
            # Process exited on its own between the wait and the kill.
            return True

        return False
@periodic(3600, run_on_start=False)
@private
async def check_deprecated_services(self):
"""
Simple call to service.started is sufficient to toggle alert
"""
for service_name, service in self.SERVICES.items():
if not service.deprecated:
continue
await self.started(service.name)
async def __event_service_ready(middleware, event_type, args):
    # On system.ready, kick off the deprecated-services alert sweep in the background.
    middleware.create_task(middleware.call('service.check_deprecated_services'))
async def setup(middleware):
    """Plugin entry point: register all service backends and the ready hook."""
    for klass in all_services:
        await middleware.call('service.register_object', klass(middleware))

    middleware.event_subscribe('system.ready', __event_service_ready)
| 18,118 | Python | .py | 392 | 35.915816 | 120 | 0.623349 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
24,499 | directoryservices.py | truenas_middleware/src/middlewared/middlewared/plugins/directoryservices.py | import enum
import struct
from base64 import b64decode
from middlewared.schema import accepts, Dict, List, OROperator, returns, Str
from middlewared.service import no_authz_required, Service, private, job
from middlewared.service_exception import CallError, MatchNotFound
from middlewared.utils.directoryservices.constants import (
DSStatus, DSType, NSS_Info
)
from middlewared.utils.directoryservices.health import DSHealthObj
DEPENDENT_SERVICES = ['smb', 'nfs', 'ssh']
class SSL(enum.Enum):
NOSSL = 'OFF'
USESSL = 'ON'
USESTARTTLS = 'START_TLS'
class SASL_Wrapping(enum.Enum):
PLAIN = 'PLAIN'
SIGN = 'SIGN'
SEAL = 'SEAL'
class DirectoryServices(Service):
class Config:
service = "directoryservices"
cli_namespace = "directory_service"
@no_authz_required
@accepts()
@returns(Dict(
'directoryservices_status',
Str('type', enum=[x.value for x in DSType], null=True),
Str('status', enum=[status.name for status in DSStatus], null=True),
Str('status_msg', null=True)
))
def status(self):
"""
Provide the type and status of the currently-enabled directory service
"""
if not DSHealthObj.initialized:
try:
self.middleware.call_sync('directoryservices.health.check')
except Exception:
pass
return DSHealthObj.dump()
@no_authz_required
@accepts()
@returns(Dict(
'directory_services_states',
Str(DSType.AD.value.lower(), enum=[status.name for status in DSStatus]),
Str(DSType.LDAP.value.lower(), enum=[status.name for status in DSStatus]),
))
def get_state(self):
"""
`DISABLED` Directory Service is disabled.
`FAULTED` Directory Service is enabled, but not HEALTHY. Review logs and generated alert
messages to debug the issue causing the service to be in a FAULTED state.
`LEAVING` Directory Service is in process of stopping.
`JOINING` Directory Service is in process of starting.
`HEALTHY` Directory Service is enabled, and last status check has passed.
"""
output = {'activedirectory': DSStatus.DISABLED.name, 'ldap': DSStatus.DISABLED.name}
status = self.status()
match status['type']:
case DSType.AD.value:
output[DSType.AD.value.lower()] = status['status']
case DSType.LDAP.value | DSType.IPA.value:
output[DSType.LDAP.value.lower()] = status['status']
return output
    @accepts()
    @job(lock="directoryservices_refresh_cache", lock_queue_size=1)
    async def cache_refresh(self, job):
        """
        This method refreshes the directory services cache for users and groups that is
        used as a backing for `user.query` and `group.query` methods. The first cache fill in
        an Active Directory domain may take a significant amount of time to complete and
        so it is performed as within a job. The most likely situation in which a user may
        desire to refresh the directory services cache is after new users or groups to a remote
        directory server with the intention to have said users or groups appear in the
        results of the aforementioned account-related methods.

        A cache refresh is not required in order to use newly-added users and groups for in
        permissions and ACL related methods. Likewise, a cache refresh will not resolve issues
        with users being unable to authenticate to shares.
        """
        # Wrap the internal refresh job so this job mirrors its progress/result.
        return await job.wrap(await self.middleware.call('directoryservices.cache.refresh_impl'))
@private
@returns(List(
'ldap_ssl_choices', items=[
Str('ldap_ssl_choice', enum=[x.value for x in list(SSL)], default=SSL.USESSL.value, register=True)
]
))
async def ssl_choices(self, dstype):
return [x.value for x in list(SSL)]
@private
@returns(List(
'sasl_wrapping_choices', items=[
Str('sasl_wrapping_choice', enum=[x.value for x in list(SASL_Wrapping)], register=True)
]
))
async def sasl_wrapping_choices(self, dstype):
return [x.value for x in list(SASL_Wrapping)]
@private
@returns(OROperator(
List('ad_nss_choices', items=[Str(
'nss_info_ad',
enum=[x.value[0] for x in NSS_Info if DSType.AD in x.value[1]],
default=NSS_Info.SFU.value[0],
register=True
)]),
List('ldap_nss_choices', items=[Str(
'nss_info_ldap',
enum=[x.value[0] for x in NSS_Info if DSType.LDAP in x.value[1]],
default=NSS_Info.RFC2307.value[0],
register=True)
]),
name='nss_info_choices'
))
async def nss_info_choices(self, dstype):
ds = DSType(dstype)
ret = []
for x in list(NSS_Info):
if ds in x.value[1]:
ret.append(x.value[0])
return ret
    @private
    async def get_last_password_change(self, domain=None):
        """
        Returns unix timestamp of last password change according to
        the secrets.tdb (our current running configuration), and what
        we have in our database.
        """
        smb_config = await self.middleware.call('smb.config')
        if domain is None:
            domain = smb_config['workgroup']

        try:
            passwd_ts = await self.middleware.call(
                'directoryservices.secrets.last_password_change', domain
            )
        except MatchNotFound:
            passwd_ts = None

        db_secrets = await self.middleware.call('directoryservices.secrets.get_db_secrets')
        # Machine account entries are keyed by "<NETBIOSNAME>$".
        server_secrets = db_secrets.get(f"{smb_config['netbiosname_local'].upper()}$")
        if server_secrets is None:
            return {"dbconfig": None, "secrets": passwd_ts}

        try:
            # Stored value is a base64-encoded little-endian uint32 timestamp.
            stored_ts_bytes = server_secrets[f'SECRETS/MACHINE_LAST_CHANGE_TIME/{domain.upper()}']
            stored_ts = struct.unpack("<L", b64decode(stored_ts_bytes))[0]
        except KeyError:
            stored_ts = None

        return {"dbconfig": stored_ts, "secrets": passwd_ts}
    @private
    @job()
    async def initialize(self, job, data=None):
        """Post-boot initialization: flush the idmap cache and run a health check
        if any directory service is configured."""
        # retrieve status to force initialization of status
        if (await self.middleware.call('directoryservices.status'))['type'] is None:
            return

        try:
            await self.middleware.call('idmap.gencache.flush')
        except Exception:
            # Best-effort: a stale cache is not fatal to initialization.
            self.logger.warning('Cache flush failed', exc_info=True)

        await self.middleware.call('directoryservices.health.check')
    @private
    def restart_dependent_services(self):
        """Restart every DEPENDENT_SERVICES member that is enabled or running."""
        for svc in self.middleware.call_sync('service.query', [['OR', [
            ['enable', '=', True],
            ['state', '=', 'RUNNING']
        ]], ['service', 'in', DEPENDENT_SERVICES]]):
            self.middleware.call_sync('service.restart', svc['service'])
    @private
    @job(lock='ds_init', lock_queue_size=1)
    def setup(self, job):
        """Bring the configured directory service fully online after boot/failover:
        wait for SMB configuration, recover health, regenerate nsswitch.conf,
        refresh the account cache, and restart dependent services."""
        config_in_progress = self.middleware.call_sync("core.get_jobs", [
            ["method", "=", "smb.configure"],
            ["state", "=", "RUNNING"]
        ])
        if config_in_progress:
            job.set_progress(0, "waiting for smb.configure to complete")
            wait_id = self.middleware.call_sync('core.job_wait', config_in_progress[0]['id'])
            wait_id.wait_sync()

        if not self.middleware.call_sync('smb.is_configured'):
            raise CallError('Skipping directory service setup due to SMB service being unconfigured')

        failover_status = self.middleware.call_sync('failover.status')
        # Only SINGLE (non-HA) or the MASTER node performs directory setup.
        if failover_status not in ('SINGLE', 'MASTER'):
            self.logger.debug('%s: skipping directory service setup due to failover status', failover_status)
            job.set_progress(100, f'{failover_status}: skipping directory service setup due to failover status')
            return

        # Recover is called here because it short-circuits if health check
        # shows we're healthy. If we can't recover due to things being irreparably
        # broken then this will raise an exception.
        self.middleware.call_sync('directoryservices.health.recover')
        if DSHealthObj.dstype is None:
            return

        # nsswitch.conf needs to be updated
        self.middleware.call_sync('etc.generate', 'nss')

        job.set_progress(10, 'Refreshing cache'),
        cache_refresh = self.middleware.call_sync('directoryservices.cache.refresh_impl')
        cache_refresh.wait_sync()

        job.set_progress(75, 'Restarting dependent services')
        self.restart_dependent_services()
        job.set_progress(100, 'Setup complete')
async def __init_directory_services(middleware, event_type, args):
    # system.ready handler: run the full directory service setup job.
    await middleware.call('directoryservices.setup')
async def setup(middleware):
    """Plugin entry point: hook directory service setup into system.ready and
    register the status change event."""
    middleware.event_subscribe('system.ready', __init_directory_services)
    middleware.event_register('directoryservices.status', 'Sent on directory service state changes.')
| 9,130 | Python | .py | 201 | 36.706468 | 112 | 0.647648 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |